diff --git a/.gitignore b/.gitignore new file mode 100644 index 000000000..f2b475d4e --- /dev/null +++ b/.gitignore @@ -0,0 +1,24 @@ +# Compiled python modules. +*.pyc + +# Byte-compiled +_pycache__/ +.cache/ + +# Python egg metadata, regenerated from source files by setuptools. +/*.egg-info +.eggs/ + +# PyPI distribution artifacts. +build/ +dist/ + +# Sublime project files +*.sublime-project +*.sublime-workspace + +# Tests +.pytest_cache/ + +# Other +*.DS_Store diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 000000000..0bc70f988 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,33 @@ +sudo: required +language: python +cache: pip +git: + depth: 3 + quiet: true +services: + - docker +python: + - "3.6" +env: + global: + - T2T_PROBLEM=algorithmic_reverse_binary40_test + - T2T_DATA_DIR=/tmp/t2t-data + - T2T_TRAIN_DIR=/tmp/t2t-train + - TF_LATEST="1.15.*" + # This is necessary to have gsutil work with Python 2.7 + - BOTO_CONFIG=/dev/null + matrix: + - TF_VERSION="1.15.*" +install: + - ./oss_scripts/oss_pip_install.sh +script: + - ./oss_scripts/oss_tests.sh + - ./oss_scripts/oss_integration_test.sh + + # Conditional commands should each be in a separate block to get proper + # errors on Travis. + # + # TODO(afrozm): Re-enable if this becomes an issue. + # - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then + # pylint -j 2 tensor2tensor; + # fi diff --git a/AUTHORS b/AUTHORS index 38e5bc724..b4762f933 100644 --- a/AUTHORS +++ b/AUTHORS @@ -5,3 +5,4 @@ # of contributors, see the revision history in source control. Google Inc. +Artit Wangperawong \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index ae319c70a..c66b4029c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,5 +1,15 @@ # How to Contribute +# Issues + +* Please tag your issue with `bug`, `feature request`, or `question` to help us + effectively respond. +* Please include the versions of TensorFlow and Tensor2Tensor you are running + (run `pip list | grep tensor`) +* Please provide the command line you ran as well as the log output. + +# Pull Requests + We'd love to accept your patches and contributions to this project. There are just a few small guidelines you need to follow. diff --git a/ISSUE_TEMPLATE.md b/ISSUE_TEMPLATE.md new file mode 100644 index 000000000..c5ae7c6a6 --- /dev/null +++ b/ISSUE_TEMPLATE.md @@ -0,0 +1,27 @@ +### Description + +... + +### Environment information + +``` +OS: + +$ pip freeze | grep tensor +# your output here + +$ python -V +# your output here +``` + +### For bugs: reproduction and error logs + +``` +# Steps to reproduce: +... +``` + +``` +# Error logs: +... +``` diff --git a/README.md b/README.md index 6d477c900..7a3e115e2 100644 --- a/README.md +++ b/README.md @@ -1,25 +1,251 @@ -# T2T: Tensor2Tensor Transformers +# Tensor2Tensor -[T2T](https://github.com/tensorflow/tensor2tensor) is a modular and extensible -library and binaries for supervised learning with TensorFlow and with a focus on -sequence tasks. Actively used and maintained by researchers and engineers within -Google Brain, T2T strives to maximize idea bandwidth and minimize execution -latency. 
+[![PyPI +version](https://badge.fury.io/py/tensor2tensor.svg)](https://badge.fury.io/py/tensor2tensor) +[![GitHub +Issues](https://img.shields.io/github/issues/tensorflow/tensor2tensor.svg)](https://github.com/tensorflow/tensor2tensor/issues) +[![Contributions +welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg)](CONTRIBUTING.md) +[![Gitter](https://img.shields.io/gitter/room/nwjs/nw.js.svg)](https://gitter.im/tensor2tensor/Lobby) +[![License](https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg)](https://opensource.org/licenses/Apache-2.0) +[![Travis](https://img.shields.io/travis/tensorflow/tensor2tensor.svg)](https://travis-ci.org/tensorflow/tensor2tensor) +[![Run on FH](https://static.floydhub.com/button/button-small.svg)](https://floydhub.com/run) -T2T is particularly well-suited to researchers working on sequence tasks. We're -eager to collaborate with you on extending T2T's powers, so please feel free to -open an issue on GitHub to kick off a discussion and send along pull requests, -See [our contribution doc](CONTRIBUTING.md) for details and our [open -issues](https://github.com/tensorflow/tensor2tensor/issues). +[Tensor2Tensor](https://github.com/tensorflow/tensor2tensor), or +[T2T](https://github.com/tensorflow/tensor2tensor) for short, is a library +of deep learning models and datasets designed to make deep learning more +accessible and [accelerate ML +research](https://research.googleblog.com/2017/06/accelerating-deep-learning-research.html). -## T2T overview + +T2T was developed by researchers and engineers in the +[Google Brain team](https://research.google.com/teams/brain/) and a community +of users. It is now deprecated — we keep it running and welcome +bug-fixes, but encourage users to use the successor library [Trax](https://github.com/google/trax). + +### Quick Start + +[This iPython notebook](https://colab.research.google.com/github/tensorflow/tensor2tensor/blob/master/tensor2tensor/notebooks/hello_t2t.ipynb) +explains T2T and runs in your browser using a free VM from Google, +no installation needed. 
Alternatively, here is a one-command version that +installs T2T, downloads MNIST, trains a model and evaluates it: + +``` +pip install tensor2tensor && t2t-trainer \ + --generate_data \ + --data_dir=~/t2t_data \ + --output_dir=~/t2t_train/mnist \ + --problem=image_mnist \ + --model=shake_shake \ + --hparams_set=shake_shake_quick \ + --train_steps=1000 \ + --eval_steps=100 +``` + +### Contents + +* [Suggested Datasets and Models](#suggested-datasets-and-models) + * [Mathematical Language Understanding](#mathematical-language-understanding) + * [Story, Question and Answer](#story-question-and-answer) + * [Image Classification](#image-classification) + * [Image Generation](#image-generation) + * [Language Modeling](#language-modeling) + * [Sentiment Analysis](#sentiment-analysis) + * [Speech Recognition](#speech-recognition) + * [Summarization](#summarization) + * [Translation](#translation) +* [Basics](#basics) + * [Walkthrough](#walkthrough) + * [Installation](#installation) + * [Features](#features) +* [T2T Overview](#t2t-overview) + * [Datasets](#datasets) + * [Problems and Modalities](#problems-and-modalities) + * [Models](#models) + * [Hyperparameter Sets](#hyperparameter-sets) + * [Trainer](#trainer) +* [Adding your own components](#adding-your-own-components) +* [Adding a dataset](#adding-a-dataset) +* [Papers](#papers) +* [Run on FloydHub](#run-on-floydhub) + +## Suggested Datasets and Models + +Below we list a number of tasks that can be solved with T2T when +you train the appropriate model on the appropriate problem. +We give the problem and model below and we suggest a setting of +hyperparameters that we know works well in our setup. We usually +run either on Cloud TPUs or on 8-GPU machines; you might need +to modify the hyperparameters if you run on a different setup. + +### Mathematical Language Understanding + +For evaluating mathematical expressions at the character level involving addition, subtraction and multiplication of both positive and negative decimal numbers with variable digits assigned to symbolic variables, use + +* the [MLU](https://art.wangperawong.com/mathematical_language_understanding_train.tar.gz) data-set: + `--problem=algorithmic_math_two_variables` + +You can try solving the problem with different transformer models and hyperparameters as described in the [paper](https://arxiv.org/abs/1812.02825): +* Standard transformer: +`--model=transformer` +`--hparams_set=transformer_tiny` +* Universal transformer: +`--model=universal_transformer` +`--hparams_set=universal_transformer_tiny` +* Adaptive universal transformer: +`--model=universal_transformer` +`--hparams_set=adaptive_universal_transformer_tiny` + +### Story, Question and Answer + +For answering questions based on a story, use + +* the [bAbi](https://research.fb.com/downloads/babi/) data-set: + `--problem=babi_qa_concat_task1_1k` + +You can choose the bAbi task from the range [1,20] and the subset from 1k or +10k. 
To combine test data from all tasks into a single test set, use +`--problem=babi_qa_concat_all_tasks_10k` + +### Image Classification + +For image classification, we have a number of standard data-sets: + +* ImageNet (a large data-set): `--problem=image_imagenet`, or one + of the re-scaled versions (`image_imagenet224`, `image_imagenet64`, + `image_imagenet32`) +* CIFAR-10: `--problem=image_cifar10` (or + `--problem=image_cifar10_plain` to turn off data augmentation) +* CIFAR-100: `--problem=image_cifar100` +* MNIST: `--problem=image_mnist` + +For ImageNet, we suggest to use the ResNet or Xception, i.e., +use `--model=resnet --hparams_set=resnet_50` or +`--model=xception --hparams_set=xception_base`. +Resnet should get to above 76% top-1 accuracy on ImageNet. + +For CIFAR and MNIST, we suggest to try the shake-shake model: +`--model=shake_shake --hparams_set=shakeshake_big`. +This setting trained for `--train_steps=700000` should yield +close to 97% accuracy on CIFAR-10. + +### Image Generation + +For (un)conditional image generation, we have a number of standard data-sets: + +* CelebA: `--problem=img2img_celeba` for image-to-image translation, namely, + superresolution from 8x8 to 32x32. +* CelebA-HQ: `--problem=image_celeba256_rev` for a downsampled 256x256. +* CIFAR-10: `--problem=image_cifar10_plain_gen_rev` for class-conditional + 32x32 generation. +* LSUN Bedrooms: `--problem=image_lsun_bedrooms_rev` +* MS-COCO: `--problem=image_text_ms_coco_rev` for text-to-image generation. +* Small ImageNet (a large data-set): `--problem=image_imagenet32_gen_rev` for + 32x32 or `--problem=image_imagenet64_gen_rev` for 64x64. + +We suggest to use the Image Transformer, i.e., `--model=imagetransformer`, or +the Image Transformer Plus, i.e., `--model=imagetransformerpp` that uses +discretized mixture of logistics, or variational auto-encoder, i.e., +`--model=transformer_ae`. +For CIFAR-10, using `--hparams_set=imagetransformer_cifar10_base` or +`--hparams_set=imagetransformer_cifar10_base_dmol` yields 2.90 bits per +dimension. For Imagenet-32, using +`--hparams_set=imagetransformer_imagenet32_base` yields 3.77 bits per dimension. + +### Language Modeling + +For language modeling, we have these data-sets in T2T: + +* PTB (a small data-set): `--problem=languagemodel_ptb10k` for + word-level modeling and `--problem=languagemodel_ptb_characters` + for character-level modeling. +* LM1B (a billion-word corpus): `--problem=languagemodel_lm1b32k` for + subword-level modeling and `--problem=languagemodel_lm1b_characters` + for character-level modeling. + +We suggest to start with `--model=transformer` on this task and use +`--hparams_set=transformer_small` for PTB and +`--hparams_set=transformer_base` for LM1B. + +### Sentiment Analysis + +For the task of recognizing the sentiment of a sentence, use + +* the IMDB data-set: `--problem=sentiment_imdb` + +We suggest to use `--model=transformer_encoder` here and since it is +a small data-set, try `--hparams_set=transformer_tiny` and train for +few steps (e.g., `--train_steps=2000`). + +### Speech Recognition + +For speech-to-text, we have these data-sets in T2T: + +* Librispeech (US English): `--problem=librispeech` for + the whole set and `--problem=librispeech_clean` for a smaller + but nicely filtered part. + +* Mozilla Common Voice (US English): `--problem=common_voice` for the whole set + `--problem=common_voice_clean` for a quality-checked subset. 
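The problem, model, and hparams suggestions in these sections all plug into the same `t2t-trainer` command shown in the Walkthrough below. As a hedged sketch (directory paths are placeholders you would pick yourself), the sentiment setting suggested above could be run end to end like this:

```
t2t-trainer \
  --generate_data \
  --data_dir=~/t2t_data \
  --output_dir=~/t2t_train/sentiment_imdb \
  --problem=sentiment_imdb \
  --model=transformer_encoder \
  --hparams_set=transformer_tiny \
  --train_steps=2000 \
  --eval_steps=100
```

The same pattern applies to the other suggested settings: swap in the `--problem`, `--model`, and `--hparams_set` values listed in the relevant section.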
+ +### Summarization + +For summarizing longer text into shorter one we have these data-sets: + +* CNN/DailyMail articles summarized into a few sentences: + `--problem=summarize_cnn_dailymail32k` + +We suggest to use `--model=transformer` and +`--hparams_set=transformer_prepend` for this task. +This yields good ROUGE scores. + +### Translation + +There are a number of translation data-sets in T2T: + +* English-German: `--problem=translate_ende_wmt32k` +* English-French: `--problem=translate_enfr_wmt32k` +* English-Czech: `--problem=translate_encs_wmt32k` +* English-Chinese: `--problem=translate_enzh_wmt32k` +* English-Vietnamese: `--problem=translate_envi_iwslt32k` +* English-Spanish: `--problem=translate_enes_wmt32k` + +You can get translations in the other direction by appending `_rev` to +the problem name, e.g., for German-English use +`--problem=translate_ende_wmt32k_rev` +(note that you still need to download the original data with t2t-datagen +`--problem=translate_ende_wmt32k`). + +For all translation problems, we suggest to try the Transformer model: +`--model=transformer`. At first it is best to try the base setting, +`--hparams_set=transformer_base`. When trained on 8 GPUs for 300K steps +this should reach a BLEU score of about 28 on the English-German data-set, +which is close to state-of-the art. If training on a single GPU, try the +`--hparams_set=transformer_base_single_gpu` setting. For very good results +or larger data-sets (e.g., for English-French), try the big model +with `--hparams_set=transformer_big`. + +See this [example](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/notebooks/Transformer_translate.ipynb) to know how the translation works. + +## Basics + +### Walkthrough + +Here's a walkthrough training a good English-to-German translation +model using the Transformer model from [*Attention Is All You +Need*](https://arxiv.org/abs/1706.03762) on WMT data. ``` pip install tensor2tensor -PROBLEM=wmt_ende_tokens_32k +# See what problems, models, and hyperparameter sets are available. +# You can easily swap between them (and add new ones). +t2t-trainer --registry_help + +PROBLEM=translate_ende_wmt32k MODEL=transformer -HPARAMS=transformer_base +HPARAMS=transformer_base_single_gpu + DATA_DIR=$HOME/t2t_data TMP_DIR=/tmp/t2t_datagen TRAIN_DIR=$HOME/t2t_train/$PROBLEM/$MODEL-$HPARAMS @@ -32,12 +258,11 @@ t2t-datagen \ --tmp_dir=$TMP_DIR \ --problem=$PROBLEM -mv $TMP_DIR/tokens.vocab.32768 $DATA_DIR - # Train +# * If you run out of memory, add --hparams='batch_size=1024'. t2t-trainer \ --data_dir=$DATA_DIR \ - --problems=$PROBLEM \ + --problem=$PROBLEM \ --model=$MODEL \ --hparams_set=$HPARAMS \ --output_dir=$TRAIN_DIR @@ -47,89 +272,209 @@ t2t-trainer \ DECODE_FILE=$DATA_DIR/decode_this.txt echo "Hello world" >> $DECODE_FILE echo "Goodbye world" >> $DECODE_FILE +echo -e 'Hallo Welt\nAuf Wiedersehen Welt' > ref-translation.de BEAM_SIZE=4 ALPHA=0.6 -t2t-trainer \ +t2t-decoder \ --data_dir=$DATA_DIR \ - --problems=$PROBLEM \ + --problem=$PROBLEM \ --model=$MODEL \ --hparams_set=$HPARAMS \ --output_dir=$TRAIN_DIR \ - --train_steps=0 \ - --eval_steps=0 \ - --beam_size=$BEAM_SIZE \ - --alpha=$ALPHA \ - --decode_from_file=$DECODE_FILE + --decode_hparams="beam_size=$BEAM_SIZE,alpha=$ALPHA" \ + --decode_from_file=$DECODE_FILE \ + --decode_to_file=translation.en + +# See the translations +cat translation.en + +# Evaluate the BLEU score +# Note: Report this BLEU score in papers, not the internal approx_bleu metric. 
+t2t-bleu --translation=translation.en --reference=ref-translation.de +``` + +### Installation + + +``` +# Assumes tensorflow or tensorflow-gpu installed +pip install tensor2tensor + +# Installs with tensorflow-gpu requirement +pip install tensor2tensor[tensorflow_gpu] + +# Installs with tensorflow (cpu) requirement +pip install tensor2tensor[tensorflow] +``` + +Binaries: -cat $DECODE_FILE.$MODEL.$HPARAMS.beam$BEAM_SIZE.alpha$ALPHA.decodes ``` +# Data generator +t2t-datagen -T2T modularizes training into several components, each of which can be seen in -use in the above commands. +# Trainer +t2t-trainer --registry_help +``` + +Library usage: -See the models, problems, and hyperparameter sets that are available: +``` +python -c "from tensor2tensor.models.transformer import Transformer" +``` -`t2t-trainer --registry_help` +### Features -### Datasets +* Many state of the art and baseline models are built-in and new models can be + added easily (open an issue or pull request!). +* Many datasets across modalities - text, audio, image - available for + generation and use, and new ones can be added easily (open an issue or pull + request for public datasets!). +* Models can be used with any dataset and input mode (or even multiple); all + modality-specific processing (e.g. embedding lookups for text tokens) is done + with `bottom` and `top` transformations, which are specified per-feature in the + model. +* Support for multi-GPU machines and synchronous (1 master, many workers) and + asynchronous (independent workers synchronizing through a parameter server) + [distributed training](https://tensorflow.github.io/tensor2tensor/distributed_training.html). +* Easily swap amongst datasets and models by command-line flag with the data + generation script `t2t-datagen` and the training script `t2t-trainer`. +* Train on [Google Cloud ML](https://tensorflow.github.io/tensor2tensor/cloud_mlengine.html) and [Cloud TPUs](https://tensorflow.github.io/tensor2tensor/cloud_tpu.html). -**Datasets** are all standardized on TFRecord files with `tensorflow.Example` -protocol buffers. All datasets are registered and generated with the -[data -generator](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/bin/t2t-datagen) -and many common sequence datasets are already available for generation and use. +## T2T overview -### Problems and Modalities +### Problems -**Problems** define training-time hyperparameters for the dataset and task, -mainly by setting input and output **modalities** (e.g. symbol, image, audio, -label) and vocabularies, if applicable. All problems are defined in -[`problem_hparams.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/problem_hparams.py). -**Modalities**, defined in -[`modality.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/modality.py), -abstract away the input and output data types so that **models** may deal with -modality-independent tensors. +**Problems** consist of features such as inputs and targets, and metadata such +as each feature's modality (e.g. symbol, image, audio) and vocabularies. Problem +features are given by a dataset, which is stored as a `TFRecord` file with +`tensorflow.Example` protocol buffers. All +problems are imported in +[`all_problems.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/all_problems.py) +or are registered with `@registry.register_problem`. 
Run +[`t2t-datagen`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/bin/t2t-datagen) +to see the list of available problems and download them. ### Models -**`T2TModel`s** define the core tensor-to-tensor transformation, independent of -input/output modality or task. Models take dense tensors in and produce dense -tensors that may then be transformed in a final step by a **modality** depending -on the task (e.g. fed through a final linear transform to produce logits for a -softmax over classes). All models are imported in -[`models.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/models/models.py), -inherit from `T2TModel` - defined in -[`t2t_model.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/t2t_model.py) -- and are registered with +**`T2TModel`s** define the core tensor-to-tensor computation. They apply a +default transformation to each input and output so that models may deal with +modality-independent tensors (e.g. embeddings at the input; and a linear +transform at the output to produce logits for a softmax over classes). All +models are imported in the +[`models` subpackage](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/models/__init__.py), +inherit from [`T2TModel`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/t2t_model.py), +and are registered with [`@registry.register_model`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/registry.py). ### Hyperparameter Sets -**Hyperparameter sets** are defined and registered in code with -[`@registry.register_hparams`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/registry.py) -and are encoded in -[`tf.contrib.training.HParams`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/training/python/training/hparam.py) -objects. The `HParams` are available to both the problem specification and the -model. A basic set of hyperparameters are defined in -[`common_hparams.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/models/common_hparams.py) +**Hyperparameter sets** are encoded in +[`HParams`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/hparam.py) +objects, and are registered with +[`@registry.register_hparams`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/registry.py). +Every model and problem has a `HParams`. A basic set of hyperparameters are +defined in +[`common_hparams.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/layers/common_hparams.py) and hyperparameter set functions can compose other hyperparameter set functions. ### Trainer -The **trainer** binary is the main entrypoint for training, evaluation, and +The **trainer** binary is the entrypoint for training, evaluation, and inference. Users can easily switch between problems, models, and hyperparameter -sets by using the `--model`, `--problems`, and `--hparams_set` flags. Specific -hyperparameters can be overriden with the `--hparams` flag. `--schedule` and +sets by using the `--model`, `--problem`, and `--hparams_set` flags. Specific +hyperparameters can be overridden with the `--hparams` flag. `--schedule` and related flags control local and distributed training/evaluation -([distributed training documentation](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/docs/distributed_training.md)). 
+([distributed training documentation](https://github.com/tensorflow/tensor2tensor/tree/master/docs/distributed_training.md)). + +## Adding your own components + +T2T's components are registered using a central registration mechanism that +enables easily adding new ones and easily swapping amongst them by command-line +flag. You can add your own components without editing the T2T codebase by +specifying the `--t2t_usr_dir` flag in `t2t-trainer`. + +You can do so for models, hyperparameter sets, modalities, and problems. Please +do submit a pull request if your component might be useful to others. + +See the [`example_usr_dir`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/test_data/example_usr_dir) +for an example user directory. ## Adding a dataset -See the data generators -[README](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/README.md). +To add a new dataset, subclass +[`Problem`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/problem.py) +and register it with `@registry.register_problem`. See +[`TranslateEndeWmt8k`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/translate_ende.py) +for an example. Also see the [data generators +README](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/README.md). + +## Run on FloydHub + +[![Run on FloydHub](https://static.floydhub.com/button/button.svg)](https://floydhub.com/run) + +Click this button to open a [Workspace](https://blog.floydhub.com/workspaces/) on [FloydHub](https://www.floydhub.com/?utm_medium=readme&utm_source=tensor2tensor&utm_campaign=jul_2018). You can use the workspace to develop and test your code on a fully configured cloud GPU machine. + +Tensor2Tensor comes preinstalled in the environment, you can simply open a [Terminal](https://docs.floydhub.com/guides/workspace/#using-terminal) and run your code. + +```bash +# Test the quick-start on a Workspace's Terminal with this command +t2t-trainer \ + --generate_data \ + --data_dir=./t2t_data \ + --output_dir=./t2t_train/mnist \ + --problem=image_mnist \ + --model=shake_shake \ + --hparams_set=shake_shake_quick \ + --train_steps=1000 \ + --eval_steps=100 +``` + +Note: Ensure compliance with the FloydHub [Terms of Service](https://www.floydhub.com/about/terms). + +## Papers + +When referencing Tensor2Tensor, please cite [this +paper](https://arxiv.org/abs/1803.07416). + +``` +@article{tensor2tensor, + author = {Ashish Vaswani and Samy Bengio and Eugene Brevdo and + Francois Chollet and Aidan N. Gomez and Stephan Gouws and Llion Jones and + \L{}ukasz Kaiser and Nal Kalchbrenner and Niki Parmar and Ryan Sepassi and + Noam Shazeer and Jakob Uszkoreit}, + title = {Tensor2Tensor for Neural Machine Translation}, + journal = {CoRR}, + volume = {abs/1803.07416}, + year = {2018}, + url = {http://arxiv.org/abs/1803.07416}, +} +``` + +Tensor2Tensor was used to develop a number of state-of-the-art models +and deep learning methods. Here we list some papers that were based on T2T +from the start and benefited from its features and architecture in ways +described in the [Google Research Blog post introducing +T2T](https://research.googleblog.com/2017/06/accelerating-deep-learning-research.html). 
---- +* [Attention Is All You Need](https://arxiv.org/abs/1706.03762) +* [Depthwise Separable Convolutions for Neural Machine + Translation](https://arxiv.org/abs/1706.03059) +* [One Model To Learn Them All](https://arxiv.org/abs/1706.05137) +* [Discrete Autoencoders for Sequence Models](https://arxiv.org/abs/1801.09797) +* [Generating Wikipedia by Summarizing Long + Sequences](https://arxiv.org/abs/1801.10198) +* [Image Transformer](https://arxiv.org/abs/1802.05751) +* [Training Tips for the Transformer Model](https://arxiv.org/abs/1804.00247) +* [Self-Attention with Relative Position Representations](https://arxiv.org/abs/1803.02155) +* [Fast Decoding in Sequence Models using Discrete Latent Variables](https://arxiv.org/abs/1803.03382) +* [Adafactor: Adaptive Learning Rates with Sublinear Memory Cost](https://arxiv.org/abs/1804.04235) +* [Universal Transformers](https://arxiv.org/abs/1807.03819) +* [Attending to Mathematical Language with Transformers](https://arxiv.org/abs/1812.02825) +* [The Evolved Transformer](https://arxiv.org/abs/1901.11117) +* [Model-Based Reinforcement Learning for Atari](https://arxiv.org/abs/1903.00374) +* [VideoFlow: A Flow-Based Generative Model for Video](https://arxiv.org/abs/1903.01434) -*Note: This is not an official Google product.* +*NOTE: This is not an official Google product.* diff --git a/docs/cloud_mlengine.md b/docs/cloud_mlengine.md new file mode 100644 index 000000000..83ebe7e57 --- /dev/null +++ b/docs/cloud_mlengine.md @@ -0,0 +1,90 @@ +# Running on Cloud ML Engine + +Google Cloud Platform offers a managed training environment for TensorFlow +models called [Cloud ML Engine](https://cloud.google.com/ml-engine/) and +you can easily launch Tensor2Tensor on it, including for hyperparameter tuning. + +# Launch + +It's the same `t2t-trainer` you know and love with the addition of the +`--cloud_mlengine` flag, which by default will launch on a 1-GPU machine +in the default compute region. See the +[docs for `gcloud compute`](https://cloud.google.com/compute/docs/gcloud-compute/#set_default_zone_and_region_in_your_local_client) +to learn how to set the default compute region. + +``` +# Note that both the data dir and output dir have to be on GCS +DATA_DIR=gs://my-bucket/data +OUTPUT_DIR=gs://my-bucket/train +t2t-trainer \ + --problem=translate_ende_wmt32k \ + --model=transformer \ + --hparams_set=transformer_base \ + --data_dir=$DATA_DIR \ + --output_dir=$OUTPUT_DIR \ + --cloud_mlengine +``` + +By passing `--worker_gpu=4` or `--worker_gpu=8` it will automatically launch on +machines with 4 or 8 GPUs. + +You can additionally pass the `--cloud_mlengine_master_type` to select another +kind of machine (see the [docs for +`masterType`](https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#traininginput) +for options, including +[ML Engine machine +types](https://cloud.google.com/ml-engine/docs/training-overview) +and their +[specs](https://cloud.google.com/compute/docs/machine-types)). +If you provide this flag yourself, make sure you pass the +correct value for `--worker_gpu` (for non-GPU machines, you should pass +`--worker_gpu=0`). + +**Note**: `t2t-trainer` only currently supports launching with single machines, +possibly with multiple GPUs. Multi-machine setups are not yet supported out of +the box with the `--cloud_mlengine` flag, though multi-machine should in +principle work just fine. Contributions/testers welcome. 
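Putting the flags above together, a launch on a single 8-GPU machine might look like the following sketch (the GCS bucket paths are the same placeholders used earlier, and `transformer_big` is just one plausible choice of hyperparameter set):

```
DATA_DIR=gs://my-bucket/data
OUTPUT_DIR=gs://my-bucket/train
t2t-trainer \
  --problem=translate_ende_wmt32k \
  --model=transformer \
  --hparams_set=transformer_big \
  --data_dir=$DATA_DIR \
  --output_dir=$OUTPUT_DIR \
  --cloud_mlengine \
  --worker_gpu=8
```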
+ + +## `--t2t_usr_dir` + +Launching on Cloud ML Engine works with `--t2t_usr_dir` as well as long as the +directory is fully self-contained (i.e. the imports only refer to other modules +in the directory). If there are additional PyPI dependencies that you need, you +can include a `requirements.txt` file in the directory specified by +`t2t_usr_dir`. + +# Hyperparameter Tuning + +Hyperparameter tuning with `t2t-trainer` and Cloud ML Engine is also a breeze +with `--hparams_range` and the `--autotune_*` flags: + +``` +t2t-trainer \ + --problem=translate_ende_wmt32k \ + --model=transformer \ + --hparams_set=transformer_base \ + --data_dir=$DATA_DIR \ + --output_dir=$OUTPUT_DIR \ + --cloud_mlengine \ + --hparams_range=transformer_base_range \ + --autotune_objective='metrics-translate_ende_wmt32k/neg_log_perplexity' \ + --autotune_maximize \ + --autotune_max_trials=100 \ + --autotune_parallel_trials=3 +``` + +The `--hparams_range` specifies the search space and should be registered with +`@register_ranged_hparams`. It defines a `RangedHParams` object that sets +search ranges and scales for various parameters. See `transformer_base_range` +in +[`transformer.py`](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py) +for an example. + +The metric name passed as `--autotune_objective` should be exactly what you'd +see in TensorBoard. To minimize a metric, set `--autotune_maximize=False`. + +You control how many total trials to run with `--autotune_max_trials` and the +number of jobs to launch in parallel with `--autotune_parallel_trials`. + +Happy tuning! diff --git a/docs/cloud_tpu.md b/docs/cloud_tpu.md new file mode 100644 index 000000000..c0625e132 --- /dev/null +++ b/docs/cloud_tpu.md @@ -0,0 +1,50 @@ +# Running on Cloud TPUs + +Tensor2Tensor supports running on Google Cloud Platforms TPUs, chips +specialized for ML training. See the official tutorials for [running the +T2T Transformer for text on Cloud TPUs](https://cloud.google.com/tpu/docs/tutorials/transformer) and +[Transformer for Speech Recognition](https://cloud.google.com/tpu/docs/tutorials/automated-speech-recognition). + +## Other models on TPU + +Many of Tensor2Tensor's models work on TPU. + +You can provision a VM and TPU with `ctpu up`. Use the `t2t-trainer` command +on the VM as usual with the additional flags `--use_tpu` and +`--cloud_tpu_name=$TPU_NAME`. + +Note that because the `TPUEstimator` does not catch the `OutOfRangeError` +during evaluation, you should ensure that `--eval_steps` is small enough to +not exhaust the evaluation data. 
+ +A non-exhaustive list of T2T models that work on TPU: + +* Image generation: `imagetransformer` with `imagetransformer_base_tpu` (or + `imagetransformer_tiny_tpu`) +* Super-resolution: `img2img_transformer` with `img2img_transformer_base_tpu` + (or `img2img_transformer_tiny_tpu`) +* `resnet` with `resnet_50` (or `resnet_18` or `resnet_34`) +* `revnet` with `revnet_104` (or `revnet_38_cifar`) +* `shake_shake` with `shakeshake_tpu` (or `shakeshake_small`) + +## Example invocation + +Use `ctpu up` to bring up the VM and TPU machines; once the machines are ready +it will SSH you into the VM and you can run the following: + +``` +# DATA_DIR and OUT_DIR should be GCS buckets +# TPU_NAME should have been set automatically by the ctpu tool + +t2t-trainer \ + --model=shake_shake \ + --hparams_set=shakeshake_tpu \ + --problem=image_cifar10 \ + --train_steps=180000 \ + --eval_steps=9 \ + --local_eval_frequency=100 \ + --data_dir=$DATA_DIR \ + --output_dir=$OUT_DIR \ + --use_tpu \ + --cloud_tpu_name=$TPU_NAME +``` diff --git a/docs/distributed_training.md b/docs/distributed_training.md new file mode 100644 index 000000000..f59974623 --- /dev/null +++ b/docs/distributed_training.md @@ -0,0 +1,220 @@ +# Distributed Training + +The `t2t-trainer` supports both synchronous and asynchronous distributed +training. + +Note that it is almost always more efficient to train on a single machine with +multiple GPUs/TPUs. Async training is less stable than sync training, and sync +training is much faster on 1 machine than on multiple. For these reasons, we +almost always train on single machines with multiple GPUs/TPUs. + +T2T uses TensorFlow Estimators and so distributed training is configured with +the `TF_CONFIG` environment variable that is read by the +[RunConfig](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/estimator/run_config.py) +along with a set of flags that T2T uses to distribute the computation. + +## Shared output directory + +When using multiple machines, it is necessary that all nodes use the same +`--output_dir`, which means that it should be set to a Google Cloud Storage +bucket (`gs://...`) or a directory on a shared network filesystem. + +## Utility to produce `TF_CONFIG` and flags + +[`t2t-make-tf-configs`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/bin/t2t-make-tf-configs) +generates the `TF_CONFIG` json strings and the necessary command-line flags for +the jobs. + +Given a set of master and parameter server addresses, the script outputs, for +each job, a line with the `TF_CONFIG` environment variable and the command-line +flags necessary for distributed training. For each job, you should invoke the +`t2t-trainer` with the `TF_CONFIG` value and flags that are output. + +## Eval jobs + +Eval jobs should set the following flags and do not need the `TF_CONFIG` +environment variable to be set as the eval jobs run locally and do not +communicate to the other jobs (the eval jobs read the model checkpoints that the +trainer writes out): + +- `--schedule=continuous_eval_on_train_data` or + `--schedule=continuous_eval` (for dev data) +- `--worker_job='/job:localhost'` +- `--output_dir=$TRAIN_DIR` + +**Note that evaluation does not work distributed.** That is, distributed jobs +should always use `--schedule=train`. + +## Examples + +### Sync training across multiple workers + +In this scenario, you wish to do synchronous training across multiple workers. 
+Note that it is easier to simply use 1 worker with multiple GPUs and set +`--worker_gpu=8`, but there may be cases where you may want to have multiple +machines. + +You will need 1 `ip:port` for the master and then 1 `ip:port` for each worker. + +For this example we'll use 2 workers and these addresses: + +``` +# Master +10.0.0.1:5555 + +# Worker 1 +10.0.0.2:5555 + +# Worker 2 +10.0.0.3:5555 +``` + +Next we generate the `TF_CONFIG` and command-line-flags for each job. + +``` +$ t2t-make-tf-configs --masters='10.0.0.1:5555' --ps='10.0.0.2:5555,10.0.0.3:5555' +Assuming SYNC distributed training with a single master and 2 workers +'{"cluster": {"master": ["10.0.0.1:5555"], "ps": ["10.0.0.2:5555", "10.0.0.3:5555"]}, "environment": "cloud", "task": {"index": 0, "type": "master"}}' --master=grpc://10.0.0.1:5555 --ps_replicas=2 --worker_replicas=1 --worker_gpu=0 --worker_id=0 --ps_gpu=1 --sync --schedule=train --worker_job='/job:master' +'{"cluster": {"master": ["10.0.0.1:5555"], "ps": ["10.0.0.2:5555", "10.0.0.3:5555"]}, "environment": "cloud", "task": {"index": 0, "type": "ps"}}' --schedule=run_std_server +'{"cluster": {"master": ["10.0.0.1:5555"], "ps": ["10.0.0.2:5555", "10.0.0.3:5555"]}, "environment": "cloud", "task": {"index": 1, "type": "ps"}}' --schedule=run_std_server +``` + +The output here is 1 line per job. Each line contains the `TF_CONFIG` to set +for that job as well as the command-line flags to set for that job. + +It is a bit confusing that the workers are being passed to the `--ps` flag, but +this is correct. When running in `--sync` mode, the `ps` are actually the +workers. You can see in the next example below that when `--sync=False`, i.e. +async mode, that the `ps` are in fact being used as parameter servers. + +Here's how we would start each job on their respective machines (the +commands below assume that you're ssh'd into that job's machine): + +**Master**: + +``` +$ export TF_CONFIG='{"cluster": {"master": ["10.0.0.1:5555"], "ps": ["10.0.0.2:5555", "10.0.0.3:5555"]}, "environment": "cloud", "task": {"index": 0, "type": "master"}}' +$ t2t-trainer \ + --master=grpc://10.0.0.1:5555 \ + --ps_replicas=2 \ + --worker_replicas=1 \ + --worker_gpu=0 \ + --worker_id=0 \ + --ps_gpu=1 \ + --sync \ + --schedule=train \ + --worker_job='/job:master' \ + --model=transformer \ + --hparams_set=transformer_base \ + --problem=translate_ende_wmt32k +``` + +**Worker 1**: + +``` +$ export TF_CONFIG='{"cluster": {"master": ["10.0.0.1:5555"], "ps": ["10.0.0.2:5555", "10.0.0.3:5555"]}, "environment": "cloud", "task": {"index": 0, "type": "ps"}}' +$ t2t-trainer --schedule=run_std_server +``` + +**Worker 2**: + +``` +$ export TF_CONFIG='{"cluster": {"master": ["10.0.0.1:5555"], "ps": ["10.0.0.2:5555", "10.0.0.3:5555"]}, "environment": "cloud", "task": {"index": 1, "type": "ps"}}' +$ t2t-trainer --schedule=run_std_server +``` + +Note that if you have more than 1 GPU on each worker machine, make sure to +modify the `--ps_gpu` passed to the master. + +### Async training across multiple workers + +In this scenario, you wish to do asynchronous training across multiple workers +with 1+ shared parameter servers. + +Note that async training is usually less stable than sync training and for that +reason we almost always prefer sync training, but there may be cases where you +want to do async distributed training. 
+ +For this example we'll use 2 workers and 2 parameter servers: + +``` +# Worker 1 +10.0.0.1:5555 + +# Worker 2 +10.0.0.2:5555 + +# PS 1 +10.0.0.3:5555 + +# PS 2 +10.0.0.4:5555 +``` + +Next we generate the `TF_CONFIG` and command-line-flags for each job. + +``` +$ t2t-make-tf-configs --masters='10.0.0.1:5555,10.0.0.2:5555' --ps='10.0.0.3:5555,10.0.0.4:5555' +Assuming ASYNC distributed training with 2 workers and 2 parameter servers +'{"task": {"index": 0, "type": "chief"}, "cluster": {"chief": ["10.0.0.1:5555"], "ps": ["10.0.0.3:5555", "10.0.0.4:5555"], "worker": ["10.0.0.2:5555"]}, "environment": "cloud"}' --master=grpc://10.0.0.1:5555 --ps_replicas=2 --worker_replicas=2 --worker_gpu=1 --worker_id=0 --ps_gpu=0 --schedule=train --worker_job='/job:chief' +'{"task": {"index": 0, "type": "worker"}, "cluster": {"chief": ["10.0.0.1:5555"], "ps": ["10.0.0.3:5555", "10.0.0.4:5555"], "worker": ["10.0.0.2:5555"]}, "environment": "cloud"}' --master=grpc://10.0.0.2:5555 --ps_replicas=2 --worker_replicas=2 --worker_gpu=1 --worker_id=1 --ps_gpu=0 --schedule=train --worker_job='/job:worker' +'{"task": {"index": 0, "type": "ps"}, "cluster": {"chief": ["10.0.0.1:5555"], "ps": ["10.0.0.3:5555", "10.0.0.4:5555"], "worker": ["10.0.0.2:5555"]}, "environment": "cloud"}' --schedule=run_std_server +'{"task": {"index": 1, "type": "ps"}, "cluster": {"chief": ["10.0.0.1:5555"], "ps": ["10.0.0.3:5555", "10.0.0.4:5555"], "worker": ["10.0.0.2:5555"]}, "environment": "cloud"}' --schedule=run_std_server +``` + +Here's how we would start each job on their respective machines (the +commands below assume that you're ssh'd into that job's machine): + +**Worker 1**: + +``` +$ export TF_CONFIG='{"task": {"index": 0, "type": "chief"}, "cluster": {"chief": ["10.0.0.1:5555"], "ps": ["10.0.0.3:5555", "10.0.0.4:5555"], "worker": ["10.0.0.2:5555"]}, "environment": "cloud"}' +$ t2t-trainer \ + --master=grpc://10.0.0.1:5555 \ + --ps_replicas=2 \ + --worker_replicas=2 \ + --worker_gpu=1 \ + --worker_id=0 \ + --ps_gpu=0 \ + --schedule=train \ + --worker_job='/job:chief' \ + --model=transformer \ + --hparams_set=transformer_base \ + --problem=translate_ende_wmt32k +``` + +**Worker 2**: + +``` +$ export TF_CONFIG='{"task": {"index": 0, "type": "worker"}, "cluster": {"chief": ["10.0.0.1:5555"], "ps": ["10.0.0.3:5555", "10.0.0.4:5555"], "worker": ["10.0.0.2:5555"]}, "environment": "cloud"}' +$ t2t-trainer \ + --master=grpc://10.0.0.2:5555 \ + --ps_replicas=2 \ + --worker_replicas=2 \ + --worker_gpu=1 \ + --worker_id=1 \ + --ps_gpu=0 \ + --schedule=train \ + --worker_job='/job:worker' \ + --model=transformer \ + --hparams_set=transformer_base \ + --problem=translate_ende_wmt32k +``` + +**PS 1**: + +``` +$ export TF_CONFIG='{"task": {"index": 0, "type": "ps"}, "cluster": {"chief": ["10.0.0.1:5555"], "ps": ["10.0.0.3:5555", "10.0.0.4:5555"], "worker": ["10.0.0.2:5555"]}, "environment": "cloud"}' +$ t2t-trainer --schedule=run_std_server +``` + +**PS 2**: + +``` +$ export TF_CONFIG='{"task": {"index": 1, "type": "ps"}, "cluster": {"chief": ["10.0.0.1:5555"], "ps": ["10.0.0.3:5555", "10.0.0.4:5555"], "worker": ["10.0.0.2:5555"]}, "environment": "cloud"}' +$ t2t-trainer --schedule=run_std_server +``` + +Increase `--worker_gpu` on each of the workers if you have multiple GPUs. If the +parameter servers are also using GPUs, set `--ps_gpu` to the number of GPUs on +the parameter servers. 
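As a sketch of what that last paragraph means in practice, here is the async Worker 1 command from above adjusted for a hypothetical setup with 4 GPUs on each worker machine and 2 GPUs on each parameter server; only `--worker_gpu` and `--ps_gpu` change, and `TF_CONFIG` stays the same:

```
$ t2t-trainer \
  --master=grpc://10.0.0.1:5555 \
  --ps_replicas=2 \
  --worker_replicas=2 \
  --worker_gpu=4 \
  --worker_id=0 \
  --ps_gpu=2 \
  --schedule=train \
  --worker_job='/job:chief' \
  --model=transformer \
  --hparams_set=transformer_base \
  --problem=translate_ende_wmt32k
```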
diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 000000000..26298a9d2 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,127 @@ +# Tensor2Tensor Documentation + +[![PyPI +version](https://badge.fury.io/py/tensor2tensor.svg)](https://badge.fury.io/py/tensor2tensor) +[![GitHub +Issues](https://img.shields.io/github/issues/tensorflow/tensor2tensor.svg)](https://github.com/tensorflow/tensor2tensor/issues) +[![Contributions +welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg)](CONTRIBUTING.md) +[![Gitter](https://img.shields.io/gitter/room/nwjs/nw.js.svg)](https://gitter.im/tensor2tensor/Lobby) +[![License](https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg)](https://opensource.org/licenses/Apache-2.0) + +[Tensor2Tensor](https://github.com/tensorflow/tensor2tensor), or +[T2T](https://github.com/tensorflow/tensor2tensor) for short, is a library +of deep learning models and datasets designed to make deep learning more +accessible and [accelerate ML +research](https://research.googleblog.com/2017/06/accelerating-deep-learning-research.html). + + +## Introduction + +* [Walkthrough](walkthrough.md): Install and run. +* [IPython notebook](https://colab.research.google.com/github/tensorflow/tensor2tensor/blob/master/tensor2tensor/notebooks/hello_t2t.ipynb): Get a hands-on experience. + +## Basics + +* [Overview](overview.md): How all parts of T2T code are connected. +* [New Problem](new_problem.md): Train T2T models on your data. +* [New Model](new_model.md): Create your own T2T model. + +## Training in the cloud + +* [Training on Google Cloud ML](cloud_mlengine.md) +* [Training on Google Cloud TPUs](cloud_tpu.md) +* [Distributed Training](distributed_training.md) + +## Solving your task + +Below we list a number of tasks that can be solved with T2T when +you train the appropriate model on the appropriate problem. +We give the problem and model below and we suggest a setting of +hyperparameters that we know works well in our setup. We usually +run either on Cloud TPUs or on 8-GPU machines; you might need +to modify the hyperparameters if you run on a different setup. + +### Image Classification + +For image classification, we have a number of standard data-sets: +* ImageNet (a large data-set): `--problem=image_imagenet`, or one + of the re-scaled versions (`image_imagenet224`, `image_imagenet64`, + `image_imagenet32`) +* CIFAR-10: `--problem=image_cifar10` (or + `--problem=image_cifar10_plain` to turn off data augmentation) +* CIFAR-100: `--problem=image_cifar100` +* MNIST: `--problem=image_mnist` + +For ImageNet, we suggest to use the ResNet or Xception, i.e., +use `--model=resnet --hparams_set=resnet_50` or +`--model=xception --hparams_set=xception_base`. +Resnet should get to above 76% top-1 accuracy on ImageNet. + +For CIFAR and MNIST, we suggest to try the shake-shake model: +`--model=shake_shake --hparams_set=shakeshake_big`. +This setting trained for `--train_steps=700000` should yield +close to 97% accuracy on CIFAR-10. + +### Language Modeling + +For language modeling, we have these data-sets in T2T: +* PTB (a small data-set): `--problem=languagemodel_ptb10k` for + word-level modeling and `--problem=languagemodel_ptb_characters` + for character-level modeling. +* LM1B (a billion-word corpus): `--problem=languagemodel_lm1b32k` for + subword-level modeling and `--problem=languagemodel_lm1b_characters` + for character-level modeling. 
+ +We suggest to start with `--model=transformer` on this task and use +`--hparams_set=transformer_small` for PTB and +`--hparams_set=transformer_base` for LM1B. + +### Sentiment Analysis + +For the task of recognizing the sentiment of a sentence, use +* the IMDB data-set: `--problem=sentiment_imdb` + +We suggest to use `--model=transformer_encoder` here and since it is +a small data-set, try `--hparams_set=transformer_tiny` and train for +few steps (e.g., `--train_steps=2000`). + +### Speech Recognition + +For speech-to-text, we have these data-sets in T2T: +* Librispeech (English speech to text): `--problem=librispeech` for + the whole set and `--problem=librispeech_clean` for a smaller + but nicely filtered part. + +### Summarization + +For summarizing longer text into shorter one we have these data-sets: +* CNN/DailyMail articles summarized into a few sentences: + `--problem=summarize_cnn_dailymail32k` + +We suggest to use `--model=transformer` and +`--hparams_set=transformer_prepend` for this task. +This yields good ROUGE scores. + +### Translation + +There are a number of translation data-sets in T2T: +* English-German: `--problem=translate_ende_wmt32k` +* English-French: `--problem=translate_enfr_wmt32k` +* English-Czech: `--problem=translate_encs_wmt32k` +* English-Chinese: `--problem=translate_enzh_wmt32k` +* English-Vietnamese: `--problem=translate_envi_iwslt32k` +* English-Spanish: `--problem=translate_enes_wmt32k` + +You can get translations in the other direction by appending `_rev` to +the problem name, e.g., for German-English use +`--problem=translate_ende_wmt32k_rev`. + +For all translation problems, we suggest to try the Transformer model: +`--model=transformer`. At first it is best to try the base setting, +`--hparams_set=transformer_base`. When trained on 8 GPUs for 300K steps +this should reach a BLEU score of about 28 on the English-German data-set, +which is close to state-of-the art. If training on a single GPU, try the +`--hparams_set=transformer_base_single_gpu` setting. For very good results +or larger data-sets (e.g., for English-French), try the big model +with `--hparams_set=transformer_big`. diff --git a/docs/multi_problem.md b/docs/multi_problem.md new file mode 100644 index 000000000..d4e37d09d --- /dev/null +++ b/docs/multi_problem.md @@ -0,0 +1,188 @@ +# Multi-problem training + +Multi-problem training is possible by defining [MultiProblem](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/multi_problem.py) sub-classes that specify a list of [Problem](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/problem.py) objects to include in training. In some cases, multi-problem training can be used to improve performance compared to training on individual problems. + +In the following sections we'll discuss MultiProblem from a usage perspective followed by that of someone wishing to build upon it. + +Please note the [T2T Walkthrough](https://github.com/tensorflow/tensor2tensor/blob/master/docs/walkthrough.md) documentation is a good place to start to understand the variety of component concepts we'll build on here. + +## Usage + +### Problem definition and datagen + +In this discussion we'll consider the following (large) multi-problem that includes ten different sub-problems. These include: + +1. 
A [language modeling](https://en.wikipedia.org/wiki/Language_model) [problem](https://github.com/tensorflow/tensor2tensor/blob/0dff89d64c3406d42717280cb9135a5ce7af793c/tensor2tensor/data_generators/wiki_lm.py#L223) operating on a corpus of German, English, French, and Romanian language wikipedia articles. +2. Multiple compatible pairwise language translation problems (En -> De, En -> Fr, En -> Ro, De -> En, Fr -> En, Ro -> En) +3. A compatible [version](https://github.com/tensorflow/tensor2tensor/blob/ef12bee72270b322165d073c39a650a189de39aa/tensor2tensor/data_generators/cnn_dailymail.py#L267) of the combined CNN/DailyMail news article summarization problem. +4. A compatible [version](https://github.com/tensorflow/tensor2tensor/blob/ef12bee72270b322165d073c39a650a189de39aa/tensor2tensor/data_generators/multinli.py#L155) of the [MultiNLI](https://www.nyu.edu/projects/bowman/multinli/) textual entailment classification problem. +5. A compatible [version](https://github.com/tensorflow/tensor2tensor/blob/1de13dbebccb415d89b0658e18a57e9607bafd32/tensor2tensor/data_generators/squad.py#L126) of the [SQuAD](https://rajpurkar.github.io/SQuAD-explorer/) question/answer problem. + +```python + +@registry.register_problem +class LanguagemodelMultiWikiTranslate(multi_problem.MultiProblem): + """Wiki multi-lingual LM and multiple translations.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelMultiWikiTranslate, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelDeEnFrRoWiki64k()) + self.task_list.append(translate_ende.TranslateEndeWmtMulti64k()) + self.task_list.append(translate_enfr.TranslateEnfrWmtMulti64k()) + self.task_list.append(translate_enro.TranslateEnroWmtMultiTiny64k()) + self.task_list.append(translate_ende.TranslateEndeWmtMulti64k( + was_reversed=True)) + self.task_list.append(translate_enfr.TranslateEnfrWmtMulti64k( + was_reversed=True)) + self.task_list.append(translate_enro.TranslateEnroWmtMultiTiny64k( + was_reversed=True)) + self.task_list.append( + cnn_dailymail.SummarizeCnnDailymailWikiLMMultiVocab64k()) + self.task_list.append(multinli.MultiNLIWikiLMMultiVocab64k()) + self.task_list.append(squad.SquadConcatMulti64k()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + +``` + +The word "compatible" was used a lot above! That's because each of these problems have been modified to use the vocabulary produced by the Wikipedia-based language modeling problem, e.g. the following + +```python +@registry.register_problem +class SummarizeCnnDailymailWikiLMMultiVocab64k(SummarizeCnnDailymail32k): + """Summarize CNN and Daily Mail articles using multi-lingual 64k vocab.""" + + @property + def vocab_filename(self): + return wiki_lm.LanguagemodelDeEnFrRoWiki64k().vocab_filename +``` + +**Important note:** It's easy to miss the key point that, as implemented currently, the first task in the task list must be a language modelling problem and each included task must be modified to use the resulting vocabulary. + +With a properly defined and registered multi-problem we can now run datagen as follows: + +```bash + +t2t-datagen --problem=languagemodel_multi_wiki_translate + +``` + +This will take approximately the following amount of space (and several hours): + +```bash +(t2t) username@instance-2:~$ du -sh /tmp +99G /tmp +(t2t) username@instance-2:~$ du -sh /tmp/t2t_datagen +81G /tmp/t2t_datagen +``` + +### Training + +Next we're ready to try training a model on this MultiProblem. 
Note that by not specifying `--data_dir` above TFExample's were by default generated into /tmp so that's what we'll explicitly provide here. + +```bash + +t2t-trainer --problem=languagemodel_multi_wiki_translate \ + --model=transformer \ + --hparams_set=transformer_tall_pretrain_lm_tpu_adafactor_large \ + --output_dir ~/t2t_train/transformer_multi_2jan19 \ + --data_dir=/tmp \ + --train_steps=1 \ + --eval_steps=1 + +``` + +The `hparams_set` parameter we provided above was [transformer_tall_pretrain_lm_tpu_adafactor_large](https://github.com/tensorflow/tensor2tensor/blob/08e83030acf3ef13d15ad6eaefaa0a67fb20b59d/tensor2tensor/models/transformer.py#L1721), also provided below: + +```python + +@registry.register_hparams +def transformer_tall_pretrain_lm_tpu_adafactor_large(): + """Hparams for transformer on LM pretraining on TPU, large model.""" + hparams = transformer_tall_pretrain_lm_tpu_adafactor() + hparams.hidden_size = 1024 + hparams.num_heads = 16 + hparams.filter_size = 32768 # max fitting in 16G memory is 49152, batch 2 + hparams.batch_size = 4 + hparams.multiproblem_mixing_schedule = "constant" + # Task order: lm/en-de/en-fr/en-ro/de-en/fr-en/ro-en/cnndm/mnli/squad. + hparams.multiproblem_per_task_threshold = "320,80,160,2,80,160,2,20,5,5" + return hparams + +``` + +Here it's worth noting a couple things, one that we have specified a `multi_problem_mixing_schedule` (which is required), consumed by [MultiProblem.mix_data](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/multi_problem.py#L280). When set to "constant" the strategy for sampling examples is not a function of step and is proportional only to the per-task "thresholds" which are by default equal (sample examples from each problem with equal probability). + +But notice we have also specified the (non-required) `multiproblem_per_task_threshold` parameter, also consumed by mix_data, and specifically used by [sample_task](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/multi_problem.py#L340) which defines non-uniform thresholds to inform a weighted random sampling. E.g. for two problems with weights 1 and 9 the first would be sampled 1/10 of the time and the other 9/10. + +### Inference + +You can try translating from English to German using a model previously trained on `LanguagemodelMultiWikiTranslate` (the one shown above) ([gs://tensor2tensor-checkpoints/transformer_multi_2jan19/](https://console.cloud.google.com/storage/browser/tensor2tensor-checkpoints/transformer_multi_2jan19/)). Just copy the checkpoint down to a local directory such as the one given via `--output_dir` below: + +```bash + +t2t-decoder --problem=languagemodel_multi_wiki_translate \ + --model=transformer \ + --hparams_set=transformer_tall_pretrain_lm_tpu_adafactor_large \ + --decode_hparams='batch_size=1,multiproblem_task_id=64510' \ + --hparams="" \ + --output_dir=~/t2t_train/transformer_multi_2jan19 \ + --decode_from_file ~/newstest2014.en \ + --data_dir=~/t2t_train/transformer_multi_2jan19 + +``` + +Here we'll point `--data_dir` to the checkpoint directory which includes the vocab file `vocab.languagemodel_de_en_fr_ro_wiki64k.64000.subwords`; typically data_dir would point to the directory containing your TFRecord example dataset(s). + +The file passed to `--decode_from_file` is simply a file with one sentence to translate on each line (in its original form, not post-vocabulary-encoded). 
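For instance, a minimal one-line input file for the decode command above could be created like this (the sentence matches the sample log output shown further below, and the path is the same placeholder passed to `--decode_from_file`):

```bash
echo "hello world was the news of the day" > ~/newstest2014.en
```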
+ +A key requirement for multi-problem inference is that we specify the ID of the problem for which we want to perform inference. But wait, why is the task ID 64510? We can see from the code for [`MultiProblem.update_task_ids`](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/multi_problem.py#L386) that TID's have a place at the end of the vocabulary. + +```python + +class MultiProblem(problem.Problem): + """MultiProblem base class.""" + + ... + + def update_task_ids(self, encoder_vocab_size): + """Generate task_ids for each problem. + These ids correspond to the index of the task in the task_list. + Args: + encoder_vocab_size: the size of the vocab which is used to compute + the index offset. + """ + for idx, task in enumerate(self.task_list): + task.set_task_id(idx + encoder_vocab_size) + tf.logging.info("Task %d (%s) has id %d." % + (idx, task.name, task.task_id)) + +``` + +We can look up the task_id that is assigned to each task we may want to use for inference by instantiating the MultiProblem subclass and obtaining the value, in this case via the following: + +```python + +task_index = 1 # The second task in the list is En -> De +LanguagemodelMultiWikiTranslate().task_list[task_index].task_id + +``` + +For me running the `t2t-decode` command provided above gave the following output: + +```bash +... + +INFO:tensorflow:Running local_init_op. +INFO:tensorflow:Done running local_init_op. +INFO:tensorflow:Inference results INPUT: hello world was the news of the day +INFO:tensorflow:Inference results OUTPUT: Hallo Welt war die Nachricht des Tages +INFO:tensorflow:Elapsed Time: 37.15079 +INFO:tensorflow:Averaged Single Token Generation Time: 3.3009222 (time 36.3101439 count 11) + +... + +``` diff --git a/docs/new_model.md b/docs/new_model.md new file mode 100644 index 000000000..d0df86f74 --- /dev/null +++ b/docs/new_model.md @@ -0,0 +1,116 @@ +# T2T: Create Your Own Model + +[![PyPI +version](https://badge.fury.io/py/tensor2tensor.svg)](https://badge.fury.io/py/tensor2tensor) +[![GitHub +Issues](https://img.shields.io/github/issues/tensorflow/tensor2tensor.svg)](https://github.com/tensorflow/tensor2tensor/issues) +[![Contributions +welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg)](../CONTRIBUTING.md) +[![Gitter](https://img.shields.io/gitter/room/nwjs/nw.js.svg)](https://gitter.im/tensor2tensor/Lobby) +[![License](https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg)](https://opensource.org/licenses/Apache-2.0) + +Here we show how to create your own model in T2T. + +## The T2TModel class - abstract base class for models + + `T2TModel` has three typical usages: + +1. Estimator: The method `make_estimator_model_fn` builds a `model_fn` for the + tf.Estimator workflow of training, evaluation, and prediction. It performs + the method `call`, which performs the core computation, followed by + `estimator_spec_train`, `estimator_spec_eval`, or `estimator_spec_predict` + depending on the tf.Estimator mode. +2. Layer: The method `call` enables `T2TModel` to be used a callable by itself. 
+ It calls the following methods: + + * `bottom`, which transforms features according to `problem_hparams`' + input and target `Modality`s; + * `body`, which takes features and performs the core model computation to + return output and any auxiliary loss terms; + * `top`, which takes features and the body output, and transforms them + according to `problem_hparams`' input and target `Modality`s to return + the final logits; + * `loss`, which takes the logits, forms any missing training loss, and + sums all loss terms. + +3. Inference: The method `infer` enables `T2TModel` to make sequence + predictions by itself. + +## Creating your own model + +1. Create a class that extends `T2TModel`. This example creates a copy of an + existing basic fully-connected network: + + ```python + from tensor2tensor.utils import t2t_model + + class MyFC(t2t_model.T2TModel): + pass + ``` + +2. Implement the `body` method: + + ```python + class MyFC(t2t_model.T2TModel): + def body(self, features): + hparams = self.hparams + x = features["inputs"] + shape = common_layers.shape_list(x) + x = tf.reshape(x, [-1, shape[1] * shape[2] * shape[3]]) # Flatten input as in T2T they are all 4D vectors + for i in range(hparams.num_hidden_layers): # create layers + x = tf.layers.dense(x, hparams.hidden_size, name="layer_%d" % i) + x = tf.nn.dropout(x, keep_prob=1.0 - hparams.dropout) + x = tf.nn.relu(x) + return tf.expand_dims(tf.expand_dims(x, axis=1), axis=1) # 4D For T2T. + ``` + + Method Signature: + + * Args: + + * features: dict of str to Tensor, where each Tensor has shape + [batch_size, ..., hidden_size]. It typically contains keys `inputs` + and `targets`. + + * Returns one of: + + * output: Tensor of pre-logit activations with shape [batch_size, ..., + hidden_size]. + * losses: Either single loss as a scalar, a list, a Tensor (to be + averaged), or a dictionary of losses. If losses is a dictionary with + the key "training", losses["training"] is considered the final + training loss and output is considered logits; self.top and + self.loss will be skipped. + +3. Register your model: + + ```python + from tensor2tensor.utils import registry + + @registry.register_model + class MyFC(t2t_model.T2TModel): + # ... + ``` + +4. Use it with t2t tools as any other model: + + Have in mind that names are translated from camel case to snake_case `MyFC` + -> `my_fc` and that you need to point t2t to the directory containing your + model with the `--t2t_usr_dir` flag. For example if you want to train a + model on gcloud with 1 GPU worker on the IMDB sentiment task, you can run + your model by executing the following command from your model class + directory. + + ```bash + t2t-trainer \ + --model=my_fc \ + --t2t_usr_dir=. 
+ --cloud_mlengine --worker_gpu=1 \ + --generate_data \ + --data_dir='gs://data' \ + --output_dir='gs://out' \ + --problem=sentiment_imdb \ + --hparams_set=basic_fc_small \ + --train_steps=10000 \ + --eval_steps=10 \ + ``` diff --git a/docs/new_problem.md b/docs/new_problem.md new file mode 100644 index 000000000..13f012b79 --- /dev/null +++ b/docs/new_problem.md @@ -0,0 +1,243 @@ +# T2T: Train on Your Own Data + +[![PyPI +version](https://badge.fury.io/py/tensor2tensor.svg)](https://badge.fury.io/py/tensor2tensor) +[![GitHub +Issues](https://img.shields.io/github/issues/tensorflow/tensor2tensor.svg)](https://github.com/tensorflow/tensor2tensor/issues) +[![Contributions +welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg)](CONTRIBUTING.md) +[![Gitter](https://img.shields.io/gitter/room/nwjs/nw.js.svg)](https://gitter.im/tensor2tensor/Lobby) +[![License](https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg)](https://opensource.org/licenses/Apache-2.0) + +Another good overview of this part together with training is given in +[The Cloud ML Poetry Blog +Post](https://cloud.google.com/blog/big-data/2018/02/cloud-poetry-training-and-hyperparameter-tuning-custom-text-models-on-cloud-ml-engine) + +Let's add a new dataset together and train the +[Transformer](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/models/transformer.py) +model on it. We'll give the model a line of poetry, and it will learn to +generate the next line. + +# Defining the `Problem` + +For each problem we want to tackle we create a new subclass of `Problem` and +register it. Let's call our problem `PoetryLines`. + +Since many text-to-text problems share similar methods, there's already a class +called +[`Text2TextProblem`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/text_problems.py) +that extends the base problem class +[`Problem`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/problem.py) +and makes it easy to add text-to-text problems. + +In that same file, there are other base classes that make it easy to add text +classification tasks (`Text2ClassProblem`) and language modeling tasks +(`Text2SelfProblem`). + +For our problem, let's create the file `poetry_lines.py` and add our new +problem, `PoetryLines`, which extends `Text2TextProblem` and register it so that +it is accessible by command-line flag. + +Here's the Problem in full. We'll go step by step through it. + +```python +import re + +from gutenberg import acquire +from gutenberg import cleanup + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +@registry.register_problem +class PoetryLines(text_problems.Text2TextProblem): + """Predict next line of poetry from the last line. From Gutenberg texts.""" + + @property + def approx_vocab_size(self): + return 2**13 # ~8k + + @property + def is_generate_per_split(self): + # generate_data will shard the data into TRAIN and EVAL for us. 
+ return False + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + # 10% evaluation data + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 9, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + del data_dir + del tmp_dir + del dataset_split + + + books = [ + # bookid, skip N lines + (19221, 223), + (15553, 522), + ] + + for (book_id, toskip) in books: + text = cleanup.strip_headers(acquire.load_etext(book_id)).strip() + lines = text.split("\n")[toskip:] + prev_line = None + ex_count = 0 + for line in lines: + # Any line that is all upper case is a title or author name + if not line or line.upper() == line: + prev_line = None + continue + + line = re.sub("[^a-z]+", " ", line.strip().lower()) + if prev_line and line: + yield { + "inputs": prev_line, + "targets": line, + } + ex_count += 1 + prev_line = line +``` + +## Vocabulary specification + +The text generated is encoded with a vocabulary for training. By default, it is +a `SubwordTextEncoder` that is built with an approximate vocab size specified by +the user. It's fully invertible (no out-of-vocab tokens) with a fixed-size vocab +which makes it ideal for text problems. + +You can also choose to use a character-level encoder or a token encoder where +you provide the vocab file yourself. See `Text2TextProblem.vocab_type`. + +Here we specify that we're going to have a vocabulary with approximately 8,000 +subwords. + +```python + @property + def approx_vocab_size(self): + return 2**13 # ~8k +``` + +## Splitting data between Train and Eval + +By setting `is_generate_per_split=False`, the `generate_samples` method will +only be called once and the data will automatically be split across training and +evaluation data for us. This is useful because for our dataset we don't have +pre-existing "training" and "evaluation" sets. If we did, we'd set +`is_generate_per_split=True` so that `generate_samples` was called once per data +split. + +The `dataset_splits` method determines the fraction that goes to each split. The +training data will be generated into 9 files and the evaluation data into 1. +90% of the data will be for training. 10% of the data will be for evaluation. + +```python + @property + def is_generate_per_split(self): + # generate_data will shard the data into TRAIN and EVAL for us. + return False + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + # 10% evaluation data + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 9, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] +``` + +## Generating samples + +`generate_samples` is the bulk of the code where we actually produce +dictionaries of poetry line pairs ("inputs" and "targets"). + +Some problems might require downloading, which can be done into `tmp_dir`. Some +problems may use their own token vocabulary file, in which case it can be copied +into `data_dir` before yielding samples. + +Here we iterate through the lines of a couple books of poetry and produce pairs +of lines for the model to train against. 
+ +```python + def generate_samples(self, data_dir, tmp_dir, dataset_split): + del data_dir + del tmp_dir + del dataset_split + + books = [ + # bookid, skip N lines + (19221, 223), + (15553, 522), + ] + + for (book_id, toskip) in books: + text = cleanup.strip_headers(acquire.load_etext(book_id)).strip() + lines = text.split("\n")[toskip:] + prev_line = None + ex_count = 0 + for line in lines: + # Any line that is all upper case is a title or author name + if not line or line.upper() == line: + prev_line = None + continue + + line = re.sub("[^a-z]+", " ", line.strip().lower()) + if prev_line and line: + yield { + "inputs": prev_line, + "targets": line, + } + ex_count += 1 + prev_line = line +``` + +That's all for the problem specification! We're ready to generate the data. + +# Run data generation + +You can generate data for your problem with `t2t-datagen` and the +`--t2t_usr_dir` flag, which points to the directory containing an `__init__.py` +file that imports the `poetry_lines` file we just wrote. See setup below. + +```bash +USR_DIR=... +PROBLEM=poetry_lines +DATA_DIR=$HOME/t2t_data +TMP_DIR=/tmp/t2t_datagen +mkdir -p $DATA_DIR $TMP_DIR + +t2t-datagen \ + --t2t_usr_dir=$USR_DIR \ + --data_dir=$DATA_DIR \ + --tmp_dir=$TMP_DIR \ + --problem=$PROBLEM +``` + +`PROBLEM` is the name of the class that was registered with +`@registry.register_problem`, but converted from `CamelCase` to `snake_case`. + +`USR_DIR` is a directory with the `poetry_lines.py` file and an +`__init__.py` file that imports it (`from . import poetry_lines`). + +If you plan to contribute problems to the tensor2tensor repository, you can +clone the repository and install it in developer mode with `pip install -e .`. + +# Train! + +You can train exactly as you do in the [walkthrough](walkthrough.md) with flags +`--problem=poetry_lines` and `--t2t_usr_dir=$USR_DIR`. + +All done. Let us know what amazing poetry your model writes! diff --git a/docs/overview.md b/docs/overview.md new file mode 100644 index 000000000..9ea87bc50 --- /dev/null +++ b/docs/overview.md @@ -0,0 +1,175 @@ +# T2T: Life of an Example + +[![PyPI +version](https://badge.fury.io/py/tensor2tensor.svg)](https://badge.fury.io/py/tensor2tensor) +[![GitHub +Issues](https://img.shields.io/github/issues/tensorflow/tensor2tensor.svg)](https://github.com/tensorflow/tensor2tensor/issues) +[![Contributions +welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg)](CONTRIBUTING.md) +[![Gitter](https://img.shields.io/gitter/room/nwjs/nw.js.svg)](https://gitter.im/tensor2tensor/Lobby) +[![License](https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg)](https://opensource.org/licenses/Apache-2.0) + +This doc explains how a training example flows through T2T, from data generation +to training, evaluation, and decoding. + +Some key files and their functions: + +* [`t2t_trainer.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/bin/t2t_trainer.py) and [`trainer_lib.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/trainer_lib.py): + Main entrypoint for training and evaluation. Constructs and runs all the + main components of the system (the `Problem`, the `HParams`, the + `Estimator`, the `Experiment`, the `input_fn`s and `model_fn`). +* [`common_hparams.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/layers/common_hparams.py): + `basic_params1` serves as the base for all model hyperparameters. 
Registered + model hparams functions always start with this default set of + hyperparameters. +* [`problem.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/problem.py): + Every dataset in T2T subclasses `Problem`. `Problem.input_fn` is the + Estimator input function. +* [`t2t_model.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/t2t_model.py): + Every model in T2T subclasses `T2TModel`. `T2TModel.estimator_model_fn` is + the Estimator model function. + +## Data Generation + +The `t2t-datagen` binary is the entrypoint for data generation. It simply looks +up the `Problem` specified by `--problem` and calls +`Problem.generate_data(data_dir, tmp_dir)`. + +All `Problem`s are expected to generate 2 sharded `TFRecords` files - 1 for +training and 1 for evaluation - with `tensorflow.Example` protocol buffers. The +expected names of the files are given by `Problem.{training, dev}_filepaths`. +Typically, the features in the `Example` will be `"inputs"` and `"targets"`; +however, some tasks have a different on-disk representation that is converted to +`"inputs"` and `"targets"` online in the input pipeline (e.g. image features are +typically stored with features `"image/encoded"` and `"image/format"` and the +decoding happens in the input pipeline). + +For tasks that require a vocabulary, this is also the point at which the +vocabulary is generated and all examples are encoded. + +There are several utility functions in +[`generator_utils`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/generator_utils.py) +that are commonly used by `Problem`s to generate data. Several are highlighted +below: + +* `generate_dataset_and_shuffle`: given 2 generators, 1 for training and 1 for + eval, yielding dictionaries of `>`, will produce sharded and shuffled `TFRecords` files with + `tensorflow.Example` protos. +* `maybe_download`: downloads a file at a URL to the given directory and + filename (see `maybe_download_from_drive` if the URL points to Google + Drive). +* `get_or_generate_vocab_inner`: given a target vocabulary size and a + generator that yields lines or tokens from the dataset, will build a + `SubwordTextEncoder` along with a backing vocabulary file that can be used + to map input strings to lists of ids. + [`SubwordTextEncoder`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/text_encoder.py) + uses word pieces and its encoding is fully invertible. + +## Data Input Pipeline + +Once the data is produced on disk, training, evaluation, and inference (if +decoding from the dataset) consume it by way of the T2T input pipeline, defined +by `Problem.input_fn`. + +The entire input pipeline is implemented with the new `tf.data.Dataset` API. + +The input function has 2 main parts: first, reading and processing individual +examples, which is done is `Problem.dataset`, and second, batching, which is +done in `Problem.input_fn` after the call to `Problem.dataset`. + +`Problem` subclasses may override the entire `input_fn` or portions of it (e.g. +`example_reading_spec` to indicate the names, types, and shapes of features on +disk). Typically they only override portions. + +### Batching + +Problems that have fixed size features (e.g. image problems) can use +`hp.batch_size` to set the batch size. + +Variable length Problems are bucketed by sequence length and then batched out of +those buckets. 
This significantly improves performance over a naive batching +scheme for variable length sequences because each example in a batch must be +padded to match the example with the maximum length in the batch. + +Controlling hparams: + +* `hp.batch_size`: the approximate total number of tokens in + the batch (i.e. long sequences will have smaller actual batch size and short + sequences will have a larger actual batch size in order to generally have an + equal number of tokens in the batch). +* `hp.max_length`: For variable length features, sequences with length longer + than this will be dropped during training (and also during eval if + `hp.eval_drop_long_sequences` is `True`). If not set, the maximum length of + examples is set to `hp.batch_size`. +* `hp.batch_size_multiplier`: multiplier for the maximum length +* `hp.min_length_bucket`: example length for the smallest bucket (i.e. the + smallest bucket will bucket examples up to this length). +* `hp.length_bucket_step`: controls how spaced out the length buckets are. + +## Building the Model + +At this point, the input features typically have `"inputs"` and `"targets"`, +each of which is a batched 4-D Tensor (e.g. of shape `[batch_size, +sequence_length, 1, 1]` for text input or `[batch_size, height, width, 3]` for +image input). + +The Estimator model function is created by `T2TModel.estimator_model_fn`, which +may be overridden in its entirety by subclasses if desired. Typically, +subclasses only override `T2TModel.body`. + +The model function constructs a `T2TModel`, calls it, and then calls +`T2TModel.{estimator_spec_train, estimator_spec_eval, estimator_spec_predict}` +depending on the mode. + +A call of a `T2TModel` internally calls `bottom`, `body`, `top`, and `loss`, all +of which can be overridden by subclasses (typically only `body` is). + +The default implementations of `bottom`, `top`, and `loss` depend on the +`Modality` specified for the input and target features (e.g. +`SymbolModality.bottom` embeds integer tokens and `SymbolModality.loss` is +`softmax_cross_entropy`). + +## `Estimator` and `Experiment` + +The actual training loop and related services (checkpointing, summaries, +continuous evaluation, etc.) are all handled by `Estimator` and `Experiment` +objects. `t2t_trainer.py` is the main entrypoint and uses `trainer_lib.py` +to construct the various components. + +## Decoding + +* [`t2t_decoder.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/bin/t2t-decoder) +* [`decoding.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/decoding.py) + +## System Overview for Train/Eval + +See `t2t_trainer.py` and `trainer_lib.py`. + +* Create HParams +* Create `RunConfig`, including `Parallelism` object (i.e. `data_parallelism`) +* Create `Experiment`, including hooks +* Create `Estimator` + * `T2TModel.estimator_model_fn` + * `model(features)` + * `model.model_fn` + * `model.bottom` + * `model.body` + * `model.top` + * `model.loss` + * [TRAIN] `model.estimator_spec_train` + * `train_op = model.optimize` + * [EVAL] `model.estimator_spec_eval` + * Create metrics +* Create input functions + * `Problem.input_fn` + * `Problem.dataset` + * Batching +* Create hooks +* Run Experiment --schedule (e.g. 
`exp.continuous_train_and_eval()`) + * `estimator.train` + * `train_op = model_fn(input_fn(mode=TRAIN))` + * Run train op + * `estimator.evaluate` + * `metrics = model_fn(input_fn(mode=EVAL))` + * Accumulate metrics diff --git a/docs/tutorials/asr_with_transformer.md b/docs/tutorials/asr_with_transformer.md new file mode 100644 index 000000000..ee8aa0f53 --- /dev/null +++ b/docs/tutorials/asr_with_transformer.md @@ -0,0 +1,4 @@ +# Automated Speech Recognition with the Transformer model + +See the +[official tutorial](https://cloud.google.com/tpu/docs/tutorials/automated-speech-recognition). diff --git a/docs/walkthrough.md b/docs/walkthrough.md new file mode 100644 index 000000000..7a3e115e2 --- /dev/null +++ b/docs/walkthrough.md @@ -0,0 +1,480 @@ +# Tensor2Tensor + +[![PyPI +version](https://badge.fury.io/py/tensor2tensor.svg)](https://badge.fury.io/py/tensor2tensor) +[![GitHub +Issues](https://img.shields.io/github/issues/tensorflow/tensor2tensor.svg)](https://github.com/tensorflow/tensor2tensor/issues) +[![Contributions +welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg)](CONTRIBUTING.md) +[![Gitter](https://img.shields.io/gitter/room/nwjs/nw.js.svg)](https://gitter.im/tensor2tensor/Lobby) +[![License](https://img.shields.io/badge/License-Apache%202.0-brightgreen.svg)](https://opensource.org/licenses/Apache-2.0) +[![Travis](https://img.shields.io/travis/tensorflow/tensor2tensor.svg)](https://travis-ci.org/tensorflow/tensor2tensor) +[![Run on FH](https://static.floydhub.com/button/button-small.svg)](https://floydhub.com/run) + +[Tensor2Tensor](https://github.com/tensorflow/tensor2tensor), or +[T2T](https://github.com/tensorflow/tensor2tensor) for short, is a library +of deep learning models and datasets designed to make deep learning more +accessible and [accelerate ML +research](https://research.googleblog.com/2017/06/accelerating-deep-learning-research.html). + + +T2T was developed by researchers and engineers in the +[Google Brain team](https://research.google.com/teams/brain/) and a community +of users. It is now deprecated — we keep it running and welcome +bug-fixes, but encourage users to use the successor library [Trax](https://github.com/google/trax). + +### Quick Start + +[This iPython notebook](https://colab.research.google.com/github/tensorflow/tensor2tensor/blob/master/tensor2tensor/notebooks/hello_t2t.ipynb) +explains T2T and runs in your browser using a free VM from Google, +no installation needed. 
Alternatively, here is a one-command version that +installs T2T, downloads MNIST, trains a model and evaluates it: + +``` +pip install tensor2tensor && t2t-trainer \ + --generate_data \ + --data_dir=~/t2t_data \ + --output_dir=~/t2t_train/mnist \ + --problem=image_mnist \ + --model=shake_shake \ + --hparams_set=shake_shake_quick \ + --train_steps=1000 \ + --eval_steps=100 +``` + +### Contents + +* [Suggested Datasets and Models](#suggested-datasets-and-models) + * [Mathematical Language Understanding](#mathematical-language-understanding) + * [Story, Question and Answer](#story-question-and-answer) + * [Image Classification](#image-classification) + * [Image Generation](#image-generation) + * [Language Modeling](#language-modeling) + * [Sentiment Analysis](#sentiment-analysis) + * [Speech Recognition](#speech-recognition) + * [Summarization](#summarization) + * [Translation](#translation) +* [Basics](#basics) + * [Walkthrough](#walkthrough) + * [Installation](#installation) + * [Features](#features) +* [T2T Overview](#t2t-overview) + * [Datasets](#datasets) + * [Problems and Modalities](#problems-and-modalities) + * [Models](#models) + * [Hyperparameter Sets](#hyperparameter-sets) + * [Trainer](#trainer) +* [Adding your own components](#adding-your-own-components) +* [Adding a dataset](#adding-a-dataset) +* [Papers](#papers) +* [Run on FloydHub](#run-on-floydhub) + +## Suggested Datasets and Models + +Below we list a number of tasks that can be solved with T2T when +you train the appropriate model on the appropriate problem. +We give the problem and model below and we suggest a setting of +hyperparameters that we know works well in our setup. We usually +run either on Cloud TPUs or on 8-GPU machines; you might need +to modify the hyperparameters if you run on a different setup. + +### Mathematical Language Understanding + +For evaluating mathematical expressions at the character level involving addition, subtraction and multiplication of both positive and negative decimal numbers with variable digits assigned to symbolic variables, use + +* the [MLU](https://art.wangperawong.com/mathematical_language_understanding_train.tar.gz) data-set: + `--problem=algorithmic_math_two_variables` + +You can try solving the problem with different transformer models and hyperparameters as described in the [paper](https://arxiv.org/abs/1812.02825): +* Standard transformer: +`--model=transformer` +`--hparams_set=transformer_tiny` +* Universal transformer: +`--model=universal_transformer` +`--hparams_set=universal_transformer_tiny` +* Adaptive universal transformer: +`--model=universal_transformer` +`--hparams_set=adaptive_universal_transformer_tiny` + +### Story, Question and Answer + +For answering questions based on a story, use + +* the [bAbi](https://research.fb.com/downloads/babi/) data-set: + `--problem=babi_qa_concat_task1_1k` + +You can choose the bAbi task from the range [1,20] and the subset from 1k or +10k. 
To combine test data from all tasks into a single test set, use +`--problem=babi_qa_concat_all_tasks_10k` + +### Image Classification + +For image classification, we have a number of standard data-sets: + +* ImageNet (a large data-set): `--problem=image_imagenet`, or one + of the re-scaled versions (`image_imagenet224`, `image_imagenet64`, + `image_imagenet32`) +* CIFAR-10: `--problem=image_cifar10` (or + `--problem=image_cifar10_plain` to turn off data augmentation) +* CIFAR-100: `--problem=image_cifar100` +* MNIST: `--problem=image_mnist` + +For ImageNet, we suggest to use the ResNet or Xception, i.e., +use `--model=resnet --hparams_set=resnet_50` or +`--model=xception --hparams_set=xception_base`. +Resnet should get to above 76% top-1 accuracy on ImageNet. + +For CIFAR and MNIST, we suggest to try the shake-shake model: +`--model=shake_shake --hparams_set=shakeshake_big`. +This setting trained for `--train_steps=700000` should yield +close to 97% accuracy on CIFAR-10. + +### Image Generation + +For (un)conditional image generation, we have a number of standard data-sets: + +* CelebA: `--problem=img2img_celeba` for image-to-image translation, namely, + superresolution from 8x8 to 32x32. +* CelebA-HQ: `--problem=image_celeba256_rev` for a downsampled 256x256. +* CIFAR-10: `--problem=image_cifar10_plain_gen_rev` for class-conditional + 32x32 generation. +* LSUN Bedrooms: `--problem=image_lsun_bedrooms_rev` +* MS-COCO: `--problem=image_text_ms_coco_rev` for text-to-image generation. +* Small ImageNet (a large data-set): `--problem=image_imagenet32_gen_rev` for + 32x32 or `--problem=image_imagenet64_gen_rev` for 64x64. + +We suggest to use the Image Transformer, i.e., `--model=imagetransformer`, or +the Image Transformer Plus, i.e., `--model=imagetransformerpp` that uses +discretized mixture of logistics, or variational auto-encoder, i.e., +`--model=transformer_ae`. +For CIFAR-10, using `--hparams_set=imagetransformer_cifar10_base` or +`--hparams_set=imagetransformer_cifar10_base_dmol` yields 2.90 bits per +dimension. For Imagenet-32, using +`--hparams_set=imagetransformer_imagenet32_base` yields 3.77 bits per dimension. + +### Language Modeling + +For language modeling, we have these data-sets in T2T: + +* PTB (a small data-set): `--problem=languagemodel_ptb10k` for + word-level modeling and `--problem=languagemodel_ptb_characters` + for character-level modeling. +* LM1B (a billion-word corpus): `--problem=languagemodel_lm1b32k` for + subword-level modeling and `--problem=languagemodel_lm1b_characters` + for character-level modeling. + +We suggest to start with `--model=transformer` on this task and use +`--hparams_set=transformer_small` for PTB and +`--hparams_set=transformer_base` for LM1B. + +### Sentiment Analysis + +For the task of recognizing the sentiment of a sentence, use + +* the IMDB data-set: `--problem=sentiment_imdb` + +We suggest to use `--model=transformer_encoder` here and since it is +a small data-set, try `--hparams_set=transformer_tiny` and train for +few steps (e.g., `--train_steps=2000`). + +### Speech Recognition + +For speech-to-text, we have these data-sets in T2T: + +* Librispeech (US English): `--problem=librispeech` for + the whole set and `--problem=librispeech_clean` for a smaller + but nicely filtered part. + +* Mozilla Common Voice (US English): `--problem=common_voice` for the whole set + `--problem=common_voice_clean` for a quality-checked subset. 
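+
+As with the other tasks, these problem names plug directly into `t2t-datagen` and `t2t-trainer`. Here is a minimal sketch of such a run; note that `transformer_base` is only a placeholder hyperparameter set for illustration (in practice you would pick a speech-tuned set), and that generating the Librispeech data takes a large amount of disk space and time:
+
+```bash
+
+PROBLEM=librispeech_clean
+DATA_DIR=$HOME/t2t_data
+TMP_DIR=/tmp/t2t_datagen
+mkdir -p $DATA_DIR $TMP_DIR
+
+# Download and encode the audio/transcript data (large and slow).
+t2t-datagen --problem=$PROBLEM --data_dir=$DATA_DIR --tmp_dir=$TMP_DIR
+
+# Train; transformer_base is a placeholder hparams set here.
+t2t-trainer \
+  --problem=$PROBLEM \
+  --model=transformer \
+  --hparams_set=transformer_base \
+  --data_dir=$DATA_DIR \
+  --output_dir=$HOME/t2t_train/$PROBLEM
+
+```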
+ +### Summarization + +For summarizing longer text into shorter one we have these data-sets: + +* CNN/DailyMail articles summarized into a few sentences: + `--problem=summarize_cnn_dailymail32k` + +We suggest to use `--model=transformer` and +`--hparams_set=transformer_prepend` for this task. +This yields good ROUGE scores. + +### Translation + +There are a number of translation data-sets in T2T: + +* English-German: `--problem=translate_ende_wmt32k` +* English-French: `--problem=translate_enfr_wmt32k` +* English-Czech: `--problem=translate_encs_wmt32k` +* English-Chinese: `--problem=translate_enzh_wmt32k` +* English-Vietnamese: `--problem=translate_envi_iwslt32k` +* English-Spanish: `--problem=translate_enes_wmt32k` + +You can get translations in the other direction by appending `_rev` to +the problem name, e.g., for German-English use +`--problem=translate_ende_wmt32k_rev` +(note that you still need to download the original data with t2t-datagen +`--problem=translate_ende_wmt32k`). + +For all translation problems, we suggest to try the Transformer model: +`--model=transformer`. At first it is best to try the base setting, +`--hparams_set=transformer_base`. When trained on 8 GPUs for 300K steps +this should reach a BLEU score of about 28 on the English-German data-set, +which is close to state-of-the art. If training on a single GPU, try the +`--hparams_set=transformer_base_single_gpu` setting. For very good results +or larger data-sets (e.g., for English-French), try the big model +with `--hparams_set=transformer_big`. + +See this [example](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/notebooks/Transformer_translate.ipynb) to know how the translation works. + +## Basics + +### Walkthrough + +Here's a walkthrough training a good English-to-German translation +model using the Transformer model from [*Attention Is All You +Need*](https://arxiv.org/abs/1706.03762) on WMT data. + +``` +pip install tensor2tensor + +# See what problems, models, and hyperparameter sets are available. +# You can easily swap between them (and add new ones). +t2t-trainer --registry_help + +PROBLEM=translate_ende_wmt32k +MODEL=transformer +HPARAMS=transformer_base_single_gpu + +DATA_DIR=$HOME/t2t_data +TMP_DIR=/tmp/t2t_datagen +TRAIN_DIR=$HOME/t2t_train/$PROBLEM/$MODEL-$HPARAMS + +mkdir -p $DATA_DIR $TMP_DIR $TRAIN_DIR + +# Generate data +t2t-datagen \ + --data_dir=$DATA_DIR \ + --tmp_dir=$TMP_DIR \ + --problem=$PROBLEM + +# Train +# * If you run out of memory, add --hparams='batch_size=1024'. +t2t-trainer \ + --data_dir=$DATA_DIR \ + --problem=$PROBLEM \ + --model=$MODEL \ + --hparams_set=$HPARAMS \ + --output_dir=$TRAIN_DIR + +# Decode + +DECODE_FILE=$DATA_DIR/decode_this.txt +echo "Hello world" >> $DECODE_FILE +echo "Goodbye world" >> $DECODE_FILE +echo -e 'Hallo Welt\nAuf Wiedersehen Welt' > ref-translation.de + +BEAM_SIZE=4 +ALPHA=0.6 + +t2t-decoder \ + --data_dir=$DATA_DIR \ + --problem=$PROBLEM \ + --model=$MODEL \ + --hparams_set=$HPARAMS \ + --output_dir=$TRAIN_DIR \ + --decode_hparams="beam_size=$BEAM_SIZE,alpha=$ALPHA" \ + --decode_from_file=$DECODE_FILE \ + --decode_to_file=translation.en + +# See the translations +cat translation.en + +# Evaluate the BLEU score +# Note: Report this BLEU score in papers, not the internal approx_bleu metric. 
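+# t2t-bleu expects the translation and reference files to be line-aligned.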
+t2t-bleu --translation=translation.en --reference=ref-translation.de +``` + +### Installation + + +``` +# Assumes tensorflow or tensorflow-gpu installed +pip install tensor2tensor + +# Installs with tensorflow-gpu requirement +pip install tensor2tensor[tensorflow_gpu] + +# Installs with tensorflow (cpu) requirement +pip install tensor2tensor[tensorflow] +``` + +Binaries: + +``` +# Data generator +t2t-datagen + +# Trainer +t2t-trainer --registry_help +``` + +Library usage: + +``` +python -c "from tensor2tensor.models.transformer import Transformer" +``` + +### Features + +* Many state of the art and baseline models are built-in and new models can be + added easily (open an issue or pull request!). +* Many datasets across modalities - text, audio, image - available for + generation and use, and new ones can be added easily (open an issue or pull + request for public datasets!). +* Models can be used with any dataset and input mode (or even multiple); all + modality-specific processing (e.g. embedding lookups for text tokens) is done + with `bottom` and `top` transformations, which are specified per-feature in the + model. +* Support for multi-GPU machines and synchronous (1 master, many workers) and + asynchronous (independent workers synchronizing through a parameter server) + [distributed training](https://tensorflow.github.io/tensor2tensor/distributed_training.html). +* Easily swap amongst datasets and models by command-line flag with the data + generation script `t2t-datagen` and the training script `t2t-trainer`. +* Train on [Google Cloud ML](https://tensorflow.github.io/tensor2tensor/cloud_mlengine.html) and [Cloud TPUs](https://tensorflow.github.io/tensor2tensor/cloud_tpu.html). + +## T2T overview + +### Problems + +**Problems** consist of features such as inputs and targets, and metadata such +as each feature's modality (e.g. symbol, image, audio) and vocabularies. Problem +features are given by a dataset, which is stored as a `TFRecord` file with +`tensorflow.Example` protocol buffers. All +problems are imported in +[`all_problems.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/all_problems.py) +or are registered with `@registry.register_problem`. Run +[`t2t-datagen`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/bin/t2t-datagen) +to see the list of available problems and download them. + +### Models + +**`T2TModel`s** define the core tensor-to-tensor computation. They apply a +default transformation to each input and output so that models may deal with +modality-independent tensors (e.g. embeddings at the input; and a linear +transform at the output to produce logits for a softmax over classes). All +models are imported in the +[`models` subpackage](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/models/__init__.py), +inherit from [`T2TModel`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/t2t_model.py), +and are registered with +[`@registry.register_model`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/registry.py). + +### Hyperparameter Sets + +**Hyperparameter sets** are encoded in +[`HParams`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/hparam.py) +objects, and are registered with +[`@registry.register_hparams`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/utils/registry.py). +Every model and problem has a `HParams`. 
A basic set of hyperparameters are +defined in +[`common_hparams.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/layers/common_hparams.py) +and hyperparameter set functions can compose other hyperparameter set functions. + +### Trainer + +The **trainer** binary is the entrypoint for training, evaluation, and +inference. Users can easily switch between problems, models, and hyperparameter +sets by using the `--model`, `--problem`, and `--hparams_set` flags. Specific +hyperparameters can be overridden with the `--hparams` flag. `--schedule` and +related flags control local and distributed training/evaluation +([distributed training documentation](https://github.com/tensorflow/tensor2tensor/tree/master/docs/distributed_training.md)). + +## Adding your own components + +T2T's components are registered using a central registration mechanism that +enables easily adding new ones and easily swapping amongst them by command-line +flag. You can add your own components without editing the T2T codebase by +specifying the `--t2t_usr_dir` flag in `t2t-trainer`. + +You can do so for models, hyperparameter sets, modalities, and problems. Please +do submit a pull request if your component might be useful to others. + +See the [`example_usr_dir`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/test_data/example_usr_dir) +for an example user directory. + +## Adding a dataset + +To add a new dataset, subclass +[`Problem`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/problem.py) +and register it with `@registry.register_problem`. See +[`TranslateEndeWmt8k`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/translate_ende.py) +for an example. Also see the [data generators +README](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/README.md). + +## Run on FloydHub + +[![Run on FloydHub](https://static.floydhub.com/button/button.svg)](https://floydhub.com/run) + +Click this button to open a [Workspace](https://blog.floydhub.com/workspaces/) on [FloydHub](https://www.floydhub.com/?utm_medium=readme&utm_source=tensor2tensor&utm_campaign=jul_2018). You can use the workspace to develop and test your code on a fully configured cloud GPU machine. + +Tensor2Tensor comes preinstalled in the environment, you can simply open a [Terminal](https://docs.floydhub.com/guides/workspace/#using-terminal) and run your code. + +```bash +# Test the quick-start on a Workspace's Terminal with this command +t2t-trainer \ + --generate_data \ + --data_dir=./t2t_data \ + --output_dir=./t2t_train/mnist \ + --problem=image_mnist \ + --model=shake_shake \ + --hparams_set=shake_shake_quick \ + --train_steps=1000 \ + --eval_steps=100 +``` + +Note: Ensure compliance with the FloydHub [Terms of Service](https://www.floydhub.com/about/terms). + +## Papers + +When referencing Tensor2Tensor, please cite [this +paper](https://arxiv.org/abs/1803.07416). + +``` +@article{tensor2tensor, + author = {Ashish Vaswani and Samy Bengio and Eugene Brevdo and + Francois Chollet and Aidan N. 
Gomez and Stephan Gouws and Llion Jones and + \L{}ukasz Kaiser and Nal Kalchbrenner and Niki Parmar and Ryan Sepassi and + Noam Shazeer and Jakob Uszkoreit}, + title = {Tensor2Tensor for Neural Machine Translation}, + journal = {CoRR}, + volume = {abs/1803.07416}, + year = {2018}, + url = {http://arxiv.org/abs/1803.07416}, +} +``` + +Tensor2Tensor was used to develop a number of state-of-the-art models +and deep learning methods. Here we list some papers that were based on T2T +from the start and benefited from its features and architecture in ways +described in the [Google Research Blog post introducing +T2T](https://research.googleblog.com/2017/06/accelerating-deep-learning-research.html). + +* [Attention Is All You Need](https://arxiv.org/abs/1706.03762) +* [Depthwise Separable Convolutions for Neural Machine + Translation](https://arxiv.org/abs/1706.03059) +* [One Model To Learn Them All](https://arxiv.org/abs/1706.05137) +* [Discrete Autoencoders for Sequence Models](https://arxiv.org/abs/1801.09797) +* [Generating Wikipedia by Summarizing Long + Sequences](https://arxiv.org/abs/1801.10198) +* [Image Transformer](https://arxiv.org/abs/1802.05751) +* [Training Tips for the Transformer Model](https://arxiv.org/abs/1804.00247) +* [Self-Attention with Relative Position Representations](https://arxiv.org/abs/1803.02155) +* [Fast Decoding in Sequence Models using Discrete Latent Variables](https://arxiv.org/abs/1803.03382) +* [Adafactor: Adaptive Learning Rates with Sublinear Memory Cost](https://arxiv.org/abs/1804.04235) +* [Universal Transformers](https://arxiv.org/abs/1807.03819) +* [Attending to Mathematical Language with Transformers](https://arxiv.org/abs/1812.02825) +* [The Evolved Transformer](https://arxiv.org/abs/1901.11117) +* [Model-Based Reinforcement Learning for Atari](https://arxiv.org/abs/1903.00374) +* [VideoFlow: A Flow-Based Generative Model for Video](https://arxiv.org/abs/1903.01434) + +*NOTE: This is not an official Google product.* diff --git a/floyd.yml b/floyd.yml new file mode 100644 index 000000000..2ca96ec31 --- /dev/null +++ b/floyd.yml @@ -0,0 +1,2 @@ +env: tensorflow-1.12 +machine: gpu diff --git a/floyd_requirements.txt b/floyd_requirements.txt new file mode 100644 index 000000000..fe920060a --- /dev/null +++ b/floyd_requirements.txt @@ -0,0 +1 @@ +tensor2tensor diff --git a/oss_scripts/oss_integration_test.sh b/oss_scripts/oss_integration_test.sh new file mode 100755 index 000000000..1700cfae0 --- /dev/null +++ b/oss_scripts/oss_integration_test.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# Note that this test script requires docker to be installed and running. 
+ +set -v # print commands as they're executed +set -e # fail and exit on any command erroring + +: "${TF_VERSION:?}" +: "${TF_LATEST:?}" +: "${T2T_DATA_DIR:?}" +: "${T2T_TRAIN_DIR:?}" +: "${T2T_PROBLEM:?}" + +# Test --t2t_usr_dir +t2t-trainer --registry_help --t2t_usr_dir=./tensor2tensor/test_data/example_usr_dir 2>&1 | grep my_very_own_hparams && echo passed + +# Run data generation, training, and decoding on a dummy problem +t2t-datagen --problem=$T2T_PROBLEM --data_dir=$T2T_DATA_DIR +t2t-trainer --problem=$T2T_PROBLEM --data_dir=$T2T_DATA_DIR --model=transformer --hparams_set=transformer_tiny --train_steps=5 --eval_steps=5 --output_dir=$T2T_TRAIN_DIR +t2t-decoder --problem=$T2T_PROBLEM --data_dir=$T2T_DATA_DIR --model=transformer --hparams_set=transformer_tiny --output_dir=$T2T_TRAIN_DIR --decode_hparams='num_samples=10' + +# Test serving +if [[ "$TF_VERSION" == "$TF_LATEST" ]] +then + # Export for serving + pip install tensorflow_hub + t2t-exporter \ + --problem=$T2T_PROBLEM \ + --data_dir=$T2T_DATA_DIR \ + --model=transformer \ + --hparams_set=transformer_tiny \ + --output_dir=$T2T_TRAIN_DIR + + # Run model server + server_port=8500 + model_name=my_model + docker run -d -p $server_port:$server_port \ + --mount type=bind,source=$T2T_TRAIN_DIR/export,target=/models/$model_name \ + -e MODEL_NAME=$model_name -t tensorflow/serving + sleep 10 + + # Query + pip install tensorflow-serving-api=="$TF_VERSION" + t2t-query-server \ + --server=localhost:$server_port \ + --servable_name=$model_name \ + --problem=$T2T_PROBLEM \ + --data_dir=$T2T_DATA_DIR \ + --inputs_once='1 0 1 0 1 0' +fi diff --git a/oss_scripts/oss_pip_install.sh b/oss_scripts/oss_pip_install.sh new file mode 100755 index 000000000..c86e619ea --- /dev/null +++ b/oss_scripts/oss_pip_install.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +set -v # print commands as they're executed +set -e # fail and exit on any command erroring + +: "${TF_VERSION:?}" + +# Make sure we have the latest pip and setuptools installed. +pip install -q -U pip +pip install -q -U setuptools + +# Make sure we have the latest version of numpy - avoid problems we were +# seeing with Python 3 +pip install -q -U numpy +pip install -q "tensorflow==$TF_VERSION" + +# Just print the version again to make sure. +python -c 'import tensorflow as tf; print(tf.__version__)' + +# First ensure that the base dependencies are sufficient for a full import +pip install -q -e . 
+t2t-trainer --registry_help 2>&1 >/dev/null +t2t-datagen 2>&1 | grep translate_ende 2>&1 >/dev/null && echo passed + +# Then install the test dependencies +pip install -q -e .[tests,allen] +# Make sure to install the atari extras for gym +pip install "gym[atari]" diff --git a/oss_scripts/oss_release.sh b/oss_scripts/oss_release.sh new file mode 100755 index 000000000..ea0e1412c --- /dev/null +++ b/oss_scripts/oss_release.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -v # print commands as they're executed +set -e # fail and exit on any command erroring + +GIT_COMMIT_ID=${1:-""} +[[ -z $GIT_COMMIT_ID ]] && echo "Must provide a commit" && exit 1 + +TMP_DIR=$(mktemp -d) +pushd $TMP_DIR + +echo "Cloning tensor2tensor and checking out commit $GIT_COMMIT_ID" +git clone https://github.com/tensorflow/tensor2tensor.git +cd tensor2tensor +git checkout $GIT_COMMIT_ID + +# Without `python -m` we sometimes get module not callable error: +# https://stackoverflow.com/questions/58451650/pip-no-longer-working-after-update-error-module-object-is-not-callable +python -m pip install wheel twine pyopenssl + +# Build the distribution +echo "Building distribution" +python setup.py sdist +python setup.py bdist_wheel --universal + +# Publish to PyPI +echo "Publishing to PyPI" +twine upload dist/* + +# Cleanup +rm -rf build/ dist/ tensor2tensor.egg-info/ +popd +rm -rf $TMP_DIR diff --git a/oss_scripts/oss_tests.sh b/oss_scripts/oss_tests.sh new file mode 100755 index 000000000..512015ff6 --- /dev/null +++ b/oss_scripts/oss_tests.sh @@ -0,0 +1,189 @@ +#!/bin/bash + +set -v # print commands as they're executed + +# Instead of exiting on any failure with "set -e", we'll call set_status after +# each command and exit $STATUS at the end. +STATUS=0 +function set_status() { + local last_status=$? + if [[ $last_status -ne 0 ]] + then + echo "<<<<<>>>>> Exit code: $last_status" + fi + STATUS=$(($last_status || $STATUS)) +} + +# Check env vars set +echo "${TF_VERSION:?}" && \ +echo "${TF_LATEST:?}" && \ +echo "${TRAVIS_PYTHON_VERSION:?}" +set_status +if [[ $STATUS -ne 0 ]] +then + exit $STATUS +fi + +# Check import +python -c "from tensor2tensor.models import transformer; print(transformer.Transformer.__name__)" +set_status + +# We need to run some tests separately (because they enable eager or due to +# other reasons). We also test the tests in the top-level-directories separately +# to get more readable error messages. 
+ +# Tested separately: +# * registry_test +# * trainer_lib_test +# * visualization_test +# * trainer_model_based_test +# * allen_brain_test +# * models/research + + +# algorithmic_math_test: flaky +# subword_text_encoder_ops_test, pack_sequences_ops_test: interface with C++ ops +pytest --disable-warnings \ + --ignore=tensor2tensor/data_generators/algorithmic_math_test.py \ + --ignore=tensor2tensor/data_generators/allen_brain_test.py \ + --ignore=tensor2tensor/data_generators/ops/pack_sequences_ops_test.py \ + --ignore=tensor2tensor/data_generators/ops/subword_text_encoder_ops_test.py \ + --ignore=tensor2tensor/data_generators/problem_test.py \ + --deselect=tensor2tensor/data_generators/generator_utils_test.py::GeneratorUtilsTest::testDatasetPacking \ + tensor2tensor/data_generators +set_status + + +pytest --disable-warnings \ + --ignore=tensor2tensor/envs/mujoco_problems_test.py \ + --ignore=tensor2tensor/envs/rendered_env_problem_test.py \ + tensor2tensor/envs/ +set_status + + +pytest --disable-warnings \ + --ignore=tensor2tensor/layers/common_attention_test.py \ + --ignore=tensor2tensor/layers/common_layers_test.py \ + --ignore=tensor2tensor/layers/common_video_test.py \ + --ignore=tensor2tensor/layers/discretization_test.py \ + --ignore=tensor2tensor/layers/latent_layers_test.py \ + --ignore=tensor2tensor/layers/modalities_test.py \ + --ignore=tensor2tensor/layers/ngram_test.py \ + tensor2tensor/layers/ +set_status + + +# TODO(davidso): Re-enable EvolvedTransformer when possible. +pytest --disable-warnings \ + --ignore=tensor2tensor/models/evolved_transformer_test.py \ + --ignore=tensor2tensor/models/research \ + --ignore=tensor2tensor/models/video/nfg_conv3d_test.py \ + --ignore=tensor2tensor/models/video/nfg_conv_lstm_test.py \ + --ignore=tensor2tensor/models/video/nfg_conv_test.py \ + --ignore=tensor2tensor/models/video/nfg_uncond_test.py \ + tensor2tensor/models/ +set_status + + +# test_utils.py is not a test, but pytest thinks it is. +pytest --disable-warnings \ + --ignore=tensor2tensor/utils/registry_test.py \ + --ignore=tensor2tensor/utils/t2t_model_test.py \ + --ignore=tensor2tensor/utils/test_utils.py \ + --ignore=tensor2tensor/utils/test_utils_test.py \ + --ignore=tensor2tensor/utils/trainer_lib_test.py \ + tensor2tensor/utils/ +set_status + + +# These tests enable eager, so are tested separately. +pytest --disable-warnings \ + tensor2tensor/data_generators/problem_test.py \ + tensor2tensor/layers/common_attention_test.py \ + tensor2tensor/layers/common_layers_test.py \ + tensor2tensor/layers/common_video_test.py \ + tensor2tensor/layers/discretization_test.py \ + tensor2tensor/layers/latent_layers_test.py \ + tensor2tensor/layers/modalities_test.py \ + tensor2tensor/layers/ngram_test.py \ + tensor2tensor/utils/t2t_model_test.py \ + tensor2tensor/utils/test_utils_test.py \ + --deselect=tensor2tensor/layers/common_layers_test.py::CommonLayersTest::testFactoredTensorImplicitConversion \ + --deselect=tensor2tensor/layers/modalities_test.py::ModalityTest::testSymbolModalityTargetsFactored \ + --deselect=tensor2tensor/layers/common_video_test.py::CommonVideoTest::testGifSummary +set_status + + +pytest --disable-warnings tensor2tensor/utils/registry_test.py +set_status + +pytest --disable-warnings tensor2tensor/utils/trainer_lib_test.py +set_status + +pytest --disable-warnings tensor2tensor/visualization/visualization_test.py +set_status + +pytest --disable-warnings tensor2tensor/data_generators/allen_brain_test.py +set_status + +# All other tests not tested above. 
+ +# trax tests need C++ +# TODO(afrozm): Enable trax tests they currently need GLIBCXX_3.4.21 +# Travis Error: +# ImportError: /usr/lib/x86_64-linux-gnu/libstdc++.so.6: version `GLIBCXX_3.4.21' not found (required by /home/travis/virtualenv/python3.6.3/lib/python3.6/site-packages/jaxlib/_pywrap_xla.so) +pytest --disable-warnings \ + --ignore=tensor2tensor/bin/t2t_trainer_test.py \ + --ignore=tensor2tensor/data_generators \ + --ignore=tensor2tensor/envs \ + --ignore=tensor2tensor/layers \ + --ignore=tensor2tensor/models \ + --ignore=tensor2tensor/rl \ + --ignore=tensor2tensor/trax \ + --ignore=tensor2tensor/utils \ + --ignore=tensor2tensor/visualization \ + --deselect=tensor2tensor/utils/beam_search_test.py::BeamSearchTest::testTPUBeam +set_status + + +# TODO(afrozm): Enable this unconditionally? + +## Test models/research only against tf-nightly +#if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]] +#then +# # Ignores: +# # * Glow requires the CIFAR-10 dataset to be generated +# pytest --disable-warnings tensor2tensor/models/research \ +# --ignore=tensor2tensor/models/research/glow_test.py +# set_status +#fi + +if [[ "$TF_VERSION" == "$TF_LATEST" ]] +then + jupyter nbconvert --ExecutePreprocessor.kernel_name=python3 \ + --ExecutePreprocessor.timeout=600 --to notebook --execute \ + tensor2tensor/notebooks/hello_t2t.ipynb; + set_status + + jupyter nbconvert --ExecutePreprocessor.kernel_name=python3 \ + --ExecutePreprocessor.timeout=600 --to notebook --execute \ + tensor2tensor/notebooks/t2t_problem.ipynb; + set_status + + # TODO(afrozm): Once we drop support for 1.10 we can get rid of this. + pytest --disable-warnings \ + tensor2tensor/utils/beam_search_test.py::BeamSearchTest::testTPUBeam + set_status + + # TODO(afrozm): Enable other tests in the RL directory. + # Can't add disable warning here since it parses flags. + pytest tensor2tensor/rl/trainer_model_based_test.py + set_status + +fi + +# Test --t2t_usr_dir +t2t-trainer --registry_help --t2t_usr_dir=./tensor2tensor/test_data/example_usr_dir 2>&1 | grep my_very_own_hparams && echo passed +set_status + +exit $STATUS diff --git a/pylintrc b/pylintrc new file mode 100644 index 000000000..ab45e0220 --- /dev/null +++ b/pylintrc @@ -0,0 +1,221 @@ + + +[MASTER] + +# Pickle collected data for later comparisons. +persistent=no + +# Set the cache size for astng objects. +cache-size=500 + +# Ignore Py3 files +ignore=get_references_web.py,get_references_web_single_group.py + + +[REPORTS] + +# Set the output format. +# output-format=sorted-text + +# Put messages in a separate file for each module / package specified on the +# command line instead of printing them on stdout. Reports (if any) will be +# written in a file name "pylint_global.[txt|html]". +files-output=no + +# Tells whether to display a full report or only the messages. +reports=no + +# Disable the report(s) with the given id(s). +disable-report=R0001,R0002,R0003,R0004,R0101,R0102,R0201,R0202,R0220,R0401,R0402,R0701,R0801,R0901,R0902,R0903,R0904,R0911,R0912,R0913,R0914,R0915,R0921,R0922,R0923 + +# Error message template (continued on second line) +msg-template={msg_id}:{line:3} {obj}: {msg} [{symbol}] + + +[MESSAGES CONTROL] +# List of checkers and warnings to enable. +enable=indexing-exception,old-raise-syntax + +# List of checkers and warnings to disable. 
+disable=design,similarities,no-self-use,attribute-defined-outside-init,locally-disabled,star-args,pointless-except,bad-option-value,global-statement,fixme,suppressed-message,useless-suppression,locally-enabled,file-ignored,multiple-imports,c-extension-no-member,trailing-newlines,unsubscriptable-object,misplaced-comparison-constant,no-member,abstract-method,no-else-return,missing-docstring,wrong-import-order,protected-access,inconsistent-return-statements,invalid-unary-operand-type,import-error,no-name-in-module,arguments-differ,not-context-manager,unused-argument + +[BASIC] + +# Required attributes for module, separated by a comma +required-attributes= + +# Regular expression which should only match the name +# of functions or classes which do not require a docstring. +no-docstring-rgx=(__.*__|main) + +# Min length in lines of a function that requires a docstring. +docstring-min-length=10 + +# Regular expression which should only match correct module names. The +# leading underscore is sanctioned for private modules by Google's style +# guide. +# +# There are exceptions to the basic rule (_?[a-z][a-z0-9_]*) to cover +# requirements of Python's module system. +module-rgx=^(_?[a-z][a-z0-9_]*)|__init__$ + +# Regular expression which should only match correct module level names +const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$ + +# Regular expression which should only match correct class attribute +class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$ + +# Regular expression which should only match correct class names +class-rgx=^_?[A-Z][a-zA-Z0-9]*$ + +# Regular expression which should only match correct function names. +# 'camel_case' and 'snake_case' group names are used for consistency of naming +# styles across functions and methods. +function-rgx=^(?:(?PsetUp|tearDown|setUpModule|tearDownModule)|(?P_?[A-Z][a-zA-Z0-9]*)|(?P_?[a-z][a-z0-9_]*))$ + + +# Regular expression which should only match correct method names. +# 'camel_case' and 'snake_case' group names are used for consistency of naming +# styles across functions and methods. 'exempt' indicates a name which is +# consistent with all naming styles. +method-rgx=(?x) + ^(?:(?P_[a-z0-9_]+__|runTest|setUp|tearDown|setUpTestCase + |tearDownTestCase|setupSelf|tearDownClass|setUpClass + |(test|assert)_*[A-Z0-9][a-zA-Z0-9_]*|next) + |(?P_{0,2}[A-Z][a-zA-Z0-9_]*) + |(?P_{0,2}[a-z][a-z0-9_]*))$ + + +# Regular expression which should only match correct instance attribute names +attr-rgx=^_{0,2}[a-z][a-z0-9_]*$ + +# Regular expression which should only match correct argument names +argument-rgx=^[a-z][a-z0-9_]*$ + +# Regular expression which should only match correct variable names +variable-rgx=^[a-z][a-z0-9_]*$ + +# Regular expression which should only match correct list comprehension / +# generator expression variable names +inlinevar-rgx=^[a-z][a-z0-9_]*$ + +# Good variable names which should always be accepted, separated by a comma +good-names=main,_ + +# Bad variable names which should always be refused, separated by a comma +bad-names= + +# List of builtins function names that should not be used, separated by a comma +bad-functions=input,apply,reduce + +# List of decorators that define properties, such as abc.abstractproperty. +property-classes=abc.abstractproperty + + +[TYPECHECK] + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). 
+ignore-mixin-members=yes + +# List of decorators that create context managers from functions, such as +# contextlib.contextmanager. +contextmanager-decorators=contextlib.contextmanager,contextlib2.contextmanager + + +[VARIABLES] + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# A regular expression matching names used for dummy variables (i.e. not used). +dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_) + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__,__new__,setUp + +# "class_" is also a valid for the first argument to a class method. +valid-classmethod-first-arg=cls,class_ + + +[EXCEPTIONS] + +overgeneral-exceptions=StandardError,Exception,BaseException + + +[IMPORTS] + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub,TERMIOS,Bastion,rexec,sets + + +[FORMAT] + +# Maximum number of characters on a single line. +max-line-length=80 + +# Regexp for a line that is allowed to be longer than the limit. +# This "ignore" regex is today composed of several independent parts: +# (1) Long import lines +# (2) URLs in comments or pydocs. Detecting URLs by regex is a hard problem and +# no amount of tweaking will make a perfect regex AFAICT. This one is a good +# compromise. +# (3) Constant string literals at the start of files don't need to be broken +# across lines. Allowing long paths and urls to be on a single +# line. Also requires that the string not be a triplequoted string. +ignore-long-lines=(?x) + (^\s*(import|from)\s + |^\s*(\#\ )??$ + |^[a-zA-Z_][a-zA-Z0-9_]*\s*=\s*("[^"]\S+"|'[^']\S+') + ) + +# Maximum number of lines in a module +max-module-lines=99999 + +# String used as indentation unit. We differ from PEP8's normal 4 spaces. +indent-string=' ' + +# Do not warn about multiple statements on a single line for constructs like +# if test: stmt +single-line-if-stmt=y + +# Make sure : in dicts and trailing commas are checked for whitespace. +no-space-check= + + +[LOGGING] + +# Add logging modules. +logging-modules=logging,absl.logging + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes= + + +# Maximum line length for lambdas +short-func-length=1 + +# List of module members that should be marked as deprecated. +# All of the string functions are listed in 4.1.4 Deprecated string functions +# in the Python 2.4 docs. +deprecated-members=string.atof,string.atoi,string.atol,string.capitalize,string.expandtabs,string.find,string.rfind,string.index,string.rindex,string.count,string.lower,string.split,string.rsplit,string.splitfields,string.join,string.joinfields,string.lstrip,string.rstrip,string.strip,string.swapcase,string.translate,string.upper,string.ljust,string.rjust,string.center,string.zfill,string.replace,sys.exitfunc,sys.maxint + + +# List of exceptions that do not need to be mentioned in the Raises section of +# a docstring. +ignore-exceptions=AssertionError,NotImplementedError,StopIteration,TypeError + + +# Number of spaces of indent required when the last token on the preceding line +# is an open (, [, or {. 
+indent-after-paren=4 diff --git a/setup.py b/setup.py index bb77e7be5..236867727 100644 --- a/setup.py +++ b/setup.py @@ -5,20 +5,89 @@ setup( name='tensor2tensor', - version='1.0.2', + version='1.15.7', description='Tensor2Tensor', + long_description=( + 'Tensor2Tensor, or T2T for short, is a library of ' + 'deep learning models and datasets designed to make deep ' + 'learning more accessible and accelerate ML research. ' + 'T2T was developed by researchers and engineers in the Google ' + 'Brain team and a community of users. It is now in maintenance ' + 'mode -- we keep it running and welcome bug-fixes, but encourage ' + 'users to use the successor library Trax.'), author='Google Inc.', author_email='no-reply@google.com', url='http://github.com/tensorflow/tensor2tensor', license='Apache 2.0', packages=find_packages(), - scripts=['tensor2tensor/bin/t2t-trainer', 'tensor2tensor/bin/t2t-datagen'], + package_data={ + 'tensor2tensor.data_generators': ['test_data/*'], + 'tensor2tensor.data_generators.wikisum': ['test_data/*'], + 'tensor2tensor.visualization': [ + 'attention.js', 'TransformerVisualization.ipynb' + ], + }, + scripts=[ + 'tensor2tensor/bin/t2t-trainer', + 'tensor2tensor/bin/t2t-datagen', + 'tensor2tensor/bin/t2t-decoder', + 'tensor2tensor/bin/t2t-make-tf-configs', + 'tensor2tensor/bin/t2t-eval', + 'tensor2tensor/bin/t2t-exporter', + 'tensor2tensor/bin/t2t-query-server', + 'tensor2tensor/bin/t2t-insights-server', + 'tensor2tensor/bin/t2t-avg-all', + 'tensor2tensor/bin/t2t-bleu', + 'tensor2tensor/bin/t2t-translate-all', + ], install_requires=[ + 'absl-py', + 'bz2file', + 'dopamine-rl', + 'flask', + 'future', + 'gevent', + 'gin-config', + 'google-api-python-client', + 'gunicorn', + 'gym', + 'h5py', + 'kfac', + 'mesh-tensorflow', 'numpy', + 'oauth2client', + 'opencv-python', + 'Pillow', + 'pypng', + 'requests', + 'scipy', + 'six>=1.12.0', 'sympy', - 'six', - 'tensorflow-gpu>=1.2.0rc1', + 'tensorflow-addons', + 'tensorflow-datasets', + 'tensorflow-gan', + 'tensorflow-probability==0.7.0', + 'tf_slim', + 'tqdm', ], + extras_require={ + 'tensorflow': ['tensorflow>=1.15.0'], + 'tensorflow-hub': ['tensorflow-hub>=0.1.1'], + 'tests': [ + # Needed to fix a Travis pytest error. + # https://github.com/Julian/jsonschema/issues/449#issuecomment-411406525 + 'attrs>=17.4.0', + 'pytest>=3.8.0', + 'mock', + 'jupyter', + 'matplotlib', + # Need atari extras for Travis tests, but because gym is already in + # install_requires, pip skips the atari extras, so we instead do an + # explicit pip install gym[atari] for the tests. + # 'gym[atari]', + ], + 'allen': ['Pillow==5.1.0', 'pandas==0.23.0'], + }, classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', @@ -26,4 +95,8 @@ 'License :: OSI Approved :: Apache Software License', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], - keywords='tensorflow',) + dependency_links=[ + 'git+https://github.com/tensorflow/cleverhans.git#egg=cleverhans' + ], + keywords='tensorflow machine learning', +) diff --git a/tensor2tensor/__init__.py b/tensor2tensor/__init__.py index 27d533abc..ff174dd63 100644 --- a/tensor2tensor/__init__.py +++ b/tensor2tensor/__init__.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tensor2tensor/bin/__init__.py b/tensor2tensor/bin/__init__.py new file mode 100644 index 000000000..ff174dd63 --- /dev/null +++ b/tensor2tensor/bin/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tensor2tensor/bin/build_vocab.py b/tensor2tensor/bin/build_vocab.py new file mode 100644 index 000000000..be3b59fe7 --- /dev/null +++ b/tensor2tensor/bin/build_vocab.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Build vocab for a subclass of Text2TextProblem. + +build_vocab \ + --problem=program_search_algolisp \ + --data_dir=~/t2t_data \ + --tmp_dir=~/t2t_data/tmp +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from tensor2tensor import problems as problems_lib # pylint: disable=unused-import +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("data_dir", "/tmp/t2t/data_dir", + "Directory to place the generated vocabulary file in.") + +flags.DEFINE_string("tmp_dir", "/tmp/t2t/tmp_dir", + "Temporary storage directory.") + +flags.DEFINE_string("problem", None, + "Problem to generate the vocabulary file for.") + +flags.mark_flag_as_required("problem") + + +def main(_): + problem = registry.problem(FLAGS.problem) + + # We make the assumption that the problem is a subclass of Text2TextProblem. + assert isinstance(problem, text_problems.Text2TextProblem) + + data_dir = os.path.expanduser(FLAGS.data_dir) + tmp_dir = os.path.expanduser(FLAGS.tmp_dir) + + tf.gfile.MakeDirs(data_dir) + tf.gfile.MakeDirs(tmp_dir) + + tf.logging.info("Saving vocabulary to data_dir: %s" % data_dir) + + problem.get_or_create_vocab(data_dir, tmp_dir) + + tf.logging.info("Saved vocabulary file: " + + os.path.join(data_dir, problem.vocab_filename)) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/make_tf_configs.py b/tensor2tensor/bin/make_tf_configs.py index c67bac9b9..77b80f450 100644 --- a/tensor2tensor/bin/make_tf_configs.py +++ b/tensor2tensor/bin/make_tf_configs.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,67 +17,95 @@ Usage: -`make_tf_configs.py --workers="server1:1234" --ps="server3:2134,server4:2334"` +`t2t-make-tf-configs --masters="server1:1234" --ps="server3:2134,server4:2334"` -Outputs 1 line per job to stdout, first the workers, then the parameter servers. +Outputs 1 line per job to stdout, first the masters, then the parameter servers. Each line has the TF_CONFIG, then a tab, then the command line flags for that job. -If there is a single worker, workers will have the `--sync` flag. +If there is a single master, it will have the `--sync` flag. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import json - -# Dependency imports - -import tensorflow as tf +import tensorflow.compat.v1 as tf flags = tf.flags FLAGS = flags.FLAGS -flags.DEFINE_string("workers", "", "Comma-separated list of worker addresses") +flags.DEFINE_string("masters", "", "Comma-separated list of master addresses") flags.DEFINE_string("ps", "", "Comma-separated list of ps addresses") def main(_): - if not (FLAGS.workers and FLAGS.ps): - raise ValueError("Must provide --workers and --ps") + if not (FLAGS.masters and FLAGS.ps): + raise ValueError("Must provide --masters and --ps") - workers = FLAGS.workers.split(",") + masters = FLAGS.masters.split(",") ps = FLAGS.ps.split(",") - cluster = {"ps": ps, "worker": workers} - - for task_type, jobs in [("worker", workers), ("ps", ps)]: - for idx, job in enumerate(jobs): - if task_type == "worker": - cmd_line_flags = " ".join([ - "--master=%s" % job, - "--ps_replicas=%d" % len(ps), - "--worker_replicas=%d" % len(workers), - "--worker_gpu=1", - "--worker_id=%d" % idx, - "--ps_gpu=1", - "--schedule=train", - "--sync" if len(workers) == 1 else "", - ]) + is_sync = len(masters) == 1 + if is_sync: + print("Assuming SYNC distributed training with a single master and %d " + "workers" % len(ps)) + cluster = {"ps": ps, "master": masters} + else: + print("Assuming ASYNC distributed training with %d workers and %d " + "parameter servers" % (len(masters), len(ps))) + cluster = {"ps": ps, "chief": [masters[0]], "worker": masters[1:]} + + # Trainer configs + for idx, addr in enumerate(masters): + cmd_line_flags = [ + "--master=grpc://%s" % addr, + "--ps_replicas=%d" % len(ps), + "--worker_replicas=%d" % len(masters), + "--worker_gpu=%d" % (0 if is_sync else 1), + "--worker_id=%d" % idx, + "--ps_gpu=%d" % (1 if is_sync else 0), + "--sync" if is_sync else "", + "--schedule=train", + ] + if is_sync: + task_type = "master" + cmd_line_flags.append("--worker_job='/job:master'") + else: + if idx == 0: + task_type = "chief" + idx = 0 + cmd_line_flags.append("--worker_job='/job:chief'") else: - cmd_line_flags = " ".join([ - "--schedule=run_std_server", - ]) - - tf_config = json.dumps({ - "cluster": cluster, - "task": { - "type": task_type, - "index": idx - } - }) - print(tf_config + "\t" + cmd_line_flags) + task_type = "worker" + idx -= 1 + cmd_line_flags.append("--worker_job='/job:worker'") + + tf_config = json.dumps({ + "cluster": cluster, + "task": { + "type": task_type, + "index": idx + }, + "environment": "cloud", + }) + cmd_line_flags = " ".join(cmd_line_flags) + print("'%s'\t%s" % (tf_config, cmd_line_flags)) + + # Std server configs + for idx, addr in enumerate(ps): + tf_config = json.dumps({ + "cluster": cluster, + "task": { + "type": "ps", + "index": idx + }, + "environment": "cloud", + 
}) + cmd_line_flags = "--schedule=run_std_server" + print("'%s'\t%s" % (tf_config, cmd_line_flags)) if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) tf.app.run() diff --git a/tensor2tensor/bin/t2t-avg-all b/tensor2tensor/bin/t2t-avg-all new file mode 100755 index 000000000..696a20b5b --- /dev/null +++ b/tensor2tensor/bin/t2t-avg-all @@ -0,0 +1,17 @@ +#!/usr/bin/env python +"""t2t-avg-all.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.bin import t2t_avg_all + +import tensorflow as tf + +def main(argv): + t2t_avg_all.main(argv) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t-bleu b/tensor2tensor/bin/t2t-bleu new file mode 100755 index 000000000..6c51e480b --- /dev/null +++ b/tensor2tensor/bin/t2t-bleu @@ -0,0 +1,18 @@ +#!/usr/bin/env python +"""t2t-bleu.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.bin import t2t_bleu + +import tensorflow as tf + +def main(argv): + t2t_bleu.main(argv) + + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t-datagen b/tensor2tensor/bin/t2t-datagen old mode 100644 new mode 100755 index be613b829..2150027af --- a/tensor2tensor/bin/t2t-datagen +++ b/tensor2tensor/bin/t2t-datagen @@ -1,361 +1,28 @@ #!/usr/bin/env python -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. +"""Data generation for Tensor2Tensor. -"""Produces the training and dev data for --problem into --data_dir. +This script is used to generate data to train your models +for a number problems for which open-source data is available. -generator.py produces sharded and shuffled TFRecord files of tensorflow.Example -protocol buffers for a variety of datasets registered in this file. +For example, to generate data for MNIST run this: -All datasets are registered in _SUPPORTED_PROBLEM_GENERATORS. Each entry maps a -string name (selectable on the command-line with --problem) to a function that -takes 2 arguments - input_directory and mode (one of "train" or "dev") - and -yields for each training example a dictionary mapping string feature names to -lists of {string, int, float}. The generator will be run once for each mode. 
+t2t-datagen \ + --problem=image_mnist \ + --data_dir=~/t2t_data \ + --tmp_dir=~/t2t_data/tmp """ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function -import random -import tempfile +from tensor2tensor.bin import t2t_datagen -# Dependency imports +import tensorflow.compat.v1 as tf -import numpy as np - -from tensor2tensor.data_generators import algorithmic -from tensor2tensor.data_generators import algorithmic_math -from tensor2tensor.data_generators import audio -from tensor2tensor.data_generators import generator_utils -from tensor2tensor.data_generators import image -from tensor2tensor.data_generators import snli -from tensor2tensor.data_generators import wmt -from tensor2tensor.data_generators import wsj_parsing - -import tensorflow as tf - -flags = tf.flags -FLAGS = flags.FLAGS - -flags.DEFINE_string("data_dir", "", "Data directory.") -flags.DEFINE_string("tmp_dir", "/tmp/t2t_datagen", - "Temporary storage directory.") -flags.DEFINE_string("problem", "", - "The name of the problem to generate data for.") -flags.DEFINE_integer("num_shards", 1, "How many shards to use.") -flags.DEFINE_integer("max_cases", 0, - "Maximum number of cases to generate (unbounded if 0).") -flags.DEFINE_integer("random_seed", 429459, "Random seed to use.") - -# Mapping from problems that we can generate data for to their generators. -# pylint: disable=g-long-lambda -_SUPPORTED_PROBLEM_GENERATORS = { - "algorithmic_identity_binary40": ( - lambda: algorithmic.identity_generator(2, 40, 100000), - lambda: algorithmic.identity_generator(2, 400, 10000)), - "algorithmic_identity_decimal40": ( - lambda: algorithmic.identity_generator(10, 40, 100000), - lambda: algorithmic.identity_generator(10, 400, 10000)), - "algorithmic_shift_decimal40": ( - lambda: algorithmic.shift_generator(20, 10, 40, 100000), - lambda: algorithmic.shift_generator(20, 10, 80, 10000)), - "algorithmic_reverse_binary40": ( - lambda: algorithmic.reverse_generator(2, 40, 100000), - lambda: algorithmic.reverse_generator(2, 400, 10000)), - "algorithmic_reverse_decimal40": ( - lambda: algorithmic.reverse_generator(10, 40, 100000), - lambda: algorithmic.reverse_generator(10, 400, 10000)), - "algorithmic_addition_binary40": ( - lambda: algorithmic.addition_generator(2, 40, 100000), - lambda: algorithmic.addition_generator(2, 400, 10000)), - "algorithmic_addition_decimal40": ( - lambda: algorithmic.addition_generator(10, 40, 100000), - lambda: algorithmic.addition_generator(10, 400, 10000)), - "algorithmic_multiplication_binary40": ( - lambda: algorithmic.multiplication_generator(2, 40, 100000), - lambda: algorithmic.multiplication_generator(2, 400, 10000)), - "algorithmic_multiplication_decimal40": ( - lambda: algorithmic.multiplication_generator(10, 40, 100000), - lambda: algorithmic.multiplication_generator(10, 400, 10000)), - "algorithmic_algebra_inverse": ( - lambda: algorithmic_math.algebra_inverse(26, 0, 2, 100000), - lambda: algorithmic_math.algebra_inverse(26, 3, 3, 10000)), - "algorithmic_algebra_simplify": ( - lambda: algorithmic_math.algebra_simplify(8, 0, 2, 100000), - lambda: algorithmic_math.algebra_simplify(8, 3, 3, 10000)), - "algorithmic_calculus_integrate": ( - lambda: algorithmic_math.calculus_integrate(8, 0, 2, 100000), - lambda: algorithmic_math.calculus_integrate(8, 3, 3, 10000)), - "wmt_parsing_characters": ( - lambda: wmt.parsing_character_generator(FLAGS.tmp_dir, True), - lambda: wmt.parsing_character_generator(FLAGS.tmp_dir, False)), - "wmt_parsing_tokens_8k": ( - 
lambda: wmt.parsing_token_generator(FLAGS.tmp_dir, True, 2**13), - lambda: wmt.parsing_token_generator(FLAGS.tmp_dir, False, 2**13)), - "wsj_parsing_tokens_16k": ( - lambda: wsj_parsing.parsing_token_generator(FLAGS.tmp_dir, True, - 2**14, 2**9), - lambda: wsj_parsing.parsing_token_generator(FLAGS.tmp_dir, False, - 2**14, 2**9)), - "wsj_parsing_tokens_32k": ( - lambda: wsj_parsing.parsing_token_generator(FLAGS.tmp_dir, True, - 2**15, 2**9), - lambda: wsj_parsing.parsing_token_generator(FLAGS.tmp_dir, False, - 2**15, 2**9)), - "wmt_enfr_characters": ( - lambda: wmt.enfr_character_generator(FLAGS.tmp_dir, True), - lambda: wmt.enfr_character_generator(FLAGS.tmp_dir, False)), - "wmt_enfr_tokens_8k": ( - lambda: wmt.enfr_wordpiece_token_generator(FLAGS.tmp_dir, True, 2**13), - lambda: wmt.enfr_wordpiece_token_generator(FLAGS.tmp_dir, False, 2**13) - ), - "wmt_enfr_tokens_32k": ( - lambda: wmt.enfr_wordpiece_token_generator(FLAGS.tmp_dir, True, 2**15), - lambda: wmt.enfr_wordpiece_token_generator(FLAGS.tmp_dir, False, 2**15) - ), - "wmt_enfr_tokens_128k": ( - lambda: wmt.enfr_wordpiece_token_generator(FLAGS.tmp_dir, True, 2**17), - lambda: wmt.enfr_wordpiece_token_generator(FLAGS.tmp_dir, False, 2**17) - ), - "wmt_ende_characters": ( - lambda: wmt.ende_character_generator(FLAGS.tmp_dir, True), - lambda: wmt.ende_character_generator(FLAGS.tmp_dir, False)), - "wmt_ende_bpe32k": ( - lambda: wmt.ende_bpe_token_generator(FLAGS.tmp_dir, True), - lambda: wmt.ende_bpe_token_generator(FLAGS.tmp_dir, False)), - "wmt_ende_tokens_8k": ( - lambda: wmt.ende_wordpiece_token_generator(FLAGS.tmp_dir, True, 2**13), - lambda: wmt.ende_wordpiece_token_generator(FLAGS.tmp_dir, False, 2**13) - ), - "wmt_ende_tokens_32k": ( - lambda: wmt.ende_wordpiece_token_generator(FLAGS.tmp_dir, True, 2**15), - lambda: wmt.ende_wordpiece_token_generator(FLAGS.tmp_dir, False, 2**15) - ), - "wmt_ende_tokens_128k": ( - lambda: wmt.ende_wordpiece_token_generator(FLAGS.tmp_dir, True, 2**17), - lambda: wmt.ende_wordpiece_token_generator(FLAGS.tmp_dir, False, 2**17) - ), - "image_mnist_tune": ( - lambda: image.mnist_generator(FLAGS.tmp_dir, True, 55000), - lambda: image.mnist_generator(FLAGS.tmp_dir, True, 5000, 55000)), - "image_mnist_test": ( - lambda: image.mnist_generator(FLAGS.tmp_dir, True, 60000), - lambda: image.mnist_generator(FLAGS.tmp_dir, False, 10000)), - "image_cifar10_tune": ( - lambda: image.cifar10_generator(FLAGS.tmp_dir, True, 48000), - lambda: image.cifar10_generator(FLAGS.tmp_dir, True, 2000, 48000)), - "image_cifar10_test": ( - lambda: image.cifar10_generator(FLAGS.tmp_dir, True, 50000), - lambda: image.cifar10_generator(FLAGS.tmp_dir, False, 10000)), - "image_mscoco_characters_tune": ( - lambda: image.mscoco_generator(FLAGS.tmp_dir, True, 70000), - lambda: image.mscoco_generator(FLAGS.tmp_dir, True, 10000, 70000)), - "image_mscoco_characters_test": ( - lambda: image.mscoco_generator(FLAGS.tmp_dir, True, 80000), - lambda: image.mscoco_generator(FLAGS.tmp_dir, False, 40000)), - "image_mscoco_tokens_8k_tune": ( - lambda: image.mscoco_generator( - FLAGS.tmp_dir, - True, - 70000, - vocab_filename="tokens.vocab.%d" % 2**13, - vocab_size=2**13), - lambda: image.mscoco_generator( - FLAGS.tmp_dir, - True, - 10000, - 70000, - vocab_filename="tokens.vocab.%d" % 2**13, - vocab_size=2**13)), - "image_mscoco_tokens_8k_test": ( - lambda: image.mscoco_generator( - FLAGS.tmp_dir, - True, - 80000, - vocab_filename="tokens.vocab.%d" % 2**13, - vocab_size=2**13), - lambda: image.mscoco_generator( - FLAGS.tmp_dir, - False, - 40000, - 
vocab_filename="tokens.vocab.%d" % 2**13, - vocab_size=2**13)), - "image_mscoco_tokens_32k_tune": ( - lambda: image.mscoco_generator( - FLAGS.tmp_dir, - True, - 70000, - vocab_filename="tokens.vocab.%d" % 2**15, - vocab_size=2**15), - lambda: image.mscoco_generator( - FLAGS.tmp_dir, - True, - 10000, - 70000, - vocab_filename="tokens.vocab.%d" % 2**15, - vocab_size=2**15)), - "image_mscoco_tokens_32k_test": ( - lambda: image.mscoco_generator( - FLAGS.tmp_dir, - True, - 80000, - vocab_filename="tokens.vocab.%d" % 2**15, - vocab_size=2**15), - lambda: image.mscoco_generator( - FLAGS.tmp_dir, - False, - 40000, - vocab_filename="tokens.vocab.%d" % 2**15, - vocab_size=2**15)), - "image_mscoco_tokens_128k_tune": ( - lambda: image.mscoco_generator( - FLAGS.tmp_dir, - True, - 70000, - vocab_filename="tokens.vocab.%d" % 2**17, - vocab_size=2**17), - lambda: image.mscoco_generator( - FLAGS.tmp_dir, - True, - 10000, - 70000, - vocab_filename="tokens.vocab.%d" % 2**17, - vocab_size=2**17)), - "image_mscoco_tokens_128k_test": ( - lambda: image.mscoco_generator( - FLAGS.tmp_dir, - True, - 80000, - vocab_filename="tokens.vocab.%d" % 2**17, - vocab_size=2**17), - lambda: image.mscoco_generator( - FLAGS.tmp_dir, - False, - 40000, - vocab_filename="tokens.vocab.%d" % 2**17, - vocab_size=2**17)), - "snli_32k": ( - lambda: snli.snli_token_generator(FLAGS.tmp_dir, True, 2**15), - lambda: snli.snli_token_generator(FLAGS.tmp_dir, False, 2**15), - ), - "audio_timit_characters_tune": ( - lambda: audio.timit_generator(FLAGS.tmp_dir, True, 1374), - lambda: audio.timit_generator(FLAGS.tmp_dir, True, 344, 1374)), - "audio_timit_characters_test": ( - lambda: audio.timit_generator(FLAGS.tmp_dir, True, 1718), - lambda: audio.timit_generator(FLAGS.tmp_dir, False, 626)), - "audio_timit_tokens_8k_tune": ( - lambda: audio.timit_generator( - FLAGS.tmp_dir, - True, - 1374, - vocab_filename="tokens.vocab.%d" % 2**13, - vocab_size=2**13), - lambda: audio.timit_generator( - FLAGS.tmp_dir, - True, - 344, - 1374, - vocab_filename="tokens.vocab.%d" % 2**13, - vocab_size=2**13)), - "audio_timit_tokens_8k_test": ( - lambda: audio.timit_generator( - FLAGS.tmp_dir, - True, - 1718, - vocab_filename="tokens.vocab.%d" % 2**13, - vocab_size=2**13), - lambda: audio.timit_generator( - FLAGS.tmp_dir, - False, - 626, - vocab_filename="tokens.vocab.%d" % 2**13, - vocab_size=2**13)), - "audio_timit_tokens_32k_tune": ( - lambda: audio.timit_generator( - FLAGS.tmp_dir, - True, - 1374, - vocab_filename="tokens.vocab.%d" % 2**15, - vocab_size=2**15), - lambda: audio.timit_generator( - FLAGS.tmp_dir, - True, - 344, - 1374, - vocab_filename="tokens.vocab.%d" % 2**15, - vocab_size=2**15)), - "audio_timit_tokens_32k_test": ( - lambda: audio.timit_generator( - FLAGS.tmp_dir, - True, - 1718, - vocab_filename="tokens.vocab.%d" % 2**15, - vocab_size=2**15), - lambda: audio.timit_generator( - FLAGS.tmp_dir, - False, - 626, - vocab_filename="tokens.vocab.%d" % 2**15, - vocab_size=2**15)), -} - -# pylint: enable=g-long-lambda - -UNSHUFFLED_SUFFIX = "-unshuffled" - - -def set_random_seed(): - """Set the random seed from flag everywhere.""" - tf.set_random_seed(FLAGS.random_seed) - random.seed(FLAGS.random_seed) - np.random.seed(FLAGS.random_seed) - - -def main(_): - tf.logging.set_verbosity(tf.logging.INFO) - if FLAGS.problem not in _SUPPORTED_PROBLEM_GENERATORS: - problems_str = "\n * ".join(sorted(_SUPPORTED_PROBLEM_GENERATORS)) - error_msg = ("You must specify one of the supported problems to " - "generate data for:\n * " + problems_str + "\n") - raise 
ValueError(error_msg) - - if not FLAGS.data_dir: - FLAGS.data_dir = tempfile.gettempdir() - tf.logging.warning("It is strongly recommended to specify --data_dir. " - "Data will be written to default data_dir=%s.", - FLAGS.data_dir) - - set_random_seed() - - training_gen, dev_gen = _SUPPORTED_PROBLEM_GENERATORS[FLAGS.problem] - - tf.logging.info("Generating training data for %s.", FLAGS.problem) - train_output_files = generator_utils.generate_files( - training_gen(), FLAGS.problem + UNSHUFFLED_SUFFIX + "-train", - FLAGS.data_dir, FLAGS.num_shards, FLAGS.max_cases) - - tf.logging.info("Generating development data for %s.", FLAGS.problem) - dev_output_files = generator_utils.generate_files( - dev_gen(), FLAGS.problem + UNSHUFFLED_SUFFIX + "-dev", FLAGS.data_dir, 1) - - tf.logging.info("Shuffling data...") - for fname in train_output_files + dev_output_files: - records = generator_utils.read_records(fname) - random.shuffle(records) - out_fname = fname.replace(UNSHUFFLED_SUFFIX, "") - generator_utils.write_records(records, out_fname) - tf.gfile.Remove(fname) +def main(argv): + t2t_datagen.main(argv) if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) tf.app.run() diff --git a/tensor2tensor/bin/t2t-decoder b/tensor2tensor/bin/t2t-decoder new file mode 100755 index 000000000..9bcca1e9b --- /dev/null +++ b/tensor2tensor/bin/t2t-decoder @@ -0,0 +1,17 @@ +#!/usr/bin/env python +"""t2t-decoder.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.bin import t2t_decoder + +import tensorflow as tf + +def main(argv): + t2t_decoder.main(argv) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t-eval b/tensor2tensor/bin/t2t-eval new file mode 100755 index 000000000..fc409359f --- /dev/null +++ b/tensor2tensor/bin/t2t-eval @@ -0,0 +1,28 @@ +#!/usr/bin/env python +"""Run t2t-eval from a trained checkpoint. + +This script is used to run evaluation from a trained checkpoint. Example +to run evaluation on the test set when trained checkpoint is in /output_dir. 
+ +t2t-eval \ + --problem=image_mnist \ + --model=imagetransformer \ + --data_dir=~/t2t + --output_dir=/output_dir \ + --eval_use_test_set=True \ +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.bin import t2t_eval + +import tensorflow as tf + +def main(argv): + t2t_eval.main(argv) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t-exporter b/tensor2tensor/bin/t2t-exporter new file mode 100755 index 000000000..3166b0ee3 --- /dev/null +++ b/tensor2tensor/bin/t2t-exporter @@ -0,0 +1,17 @@ +#!/usr/bin/env python +"""t2t-exporter.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.serving import export + +import tensorflow as tf + +def main(argv): + export.main(argv) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t-insights-server b/tensor2tensor/bin/t2t-insights-server new file mode 100755 index 000000000..e757b783c --- /dev/null +++ b/tensor2tensor/bin/t2t-insights-server @@ -0,0 +1,17 @@ +#!/usr/bin/env python +"""t2t-insights-server.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.insights import server + +import tensorflow as tf + +def main(argv): + server.main(argv) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t-make-tf-configs b/tensor2tensor/bin/t2t-make-tf-configs new file mode 100755 index 000000000..7142e9673 --- /dev/null +++ b/tensor2tensor/bin/t2t-make-tf-configs @@ -0,0 +1,17 @@ +#!/usr/bin/env python +"""t2t-make-tf-configs.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.bin import make_tf_configs + +import tensorflow as tf + +def main(argv): + make_tf_configs.main(argv) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t-query-server b/tensor2tensor/bin/t2t-query-server new file mode 100755 index 000000000..a354819db --- /dev/null +++ b/tensor2tensor/bin/t2t-query-server @@ -0,0 +1,17 @@ +#!/usr/bin/env python +"""t2t-query-server.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.serving import query + +import tensorflow as tf + +def main(argv): + query.main(argv) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t-trainer b/tensor2tensor/bin/t2t-trainer old mode 100644 new mode 100755 index 9fe799e1f..850990bb5 --- a/tensor2tensor/bin/t2t-trainer +++ b/tensor2tensor/bin/t2t-trainer @@ -1,55 +1,33 @@ #!/usr/bin/env python -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -r"""Trainer for T2T models. - -This binary perform training, evaluation, and inference using -the Estimator API with tf.learn Experiment objects. - -To train your model, for example: - t2t-trainer \ - --data_dir ~/data \ - --problems=algorithmic_identity_binary40 \ - --model=transformer - --hparams_set=transformer_base +"""Trainer for Tensor2Tensor. + +This script is used to train your models in Tensor2Tensor. + +For example, to train a shake-shake model on MNIST run this: + +t2t-trainer \ + --generate_data \ + --problem=image_mnist \ + --data_dir=~/t2t_data \ + --tmp_dir=~/t2t_data/tmp + --model=shake_shake \ + --hparams_set=shake_shake_quick \ + --output_dir=~/t2t_train/mnist1 \ + --train_steps=1000 \ + --eval_steps=100 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function -# Dependency imports +from tensor2tensor.bin import t2t_trainer -from tensor2tensor.utils import trainer_utils as utils +import tensorflow.compat.v1 as tf -import tensorflow as tf - -FLAGS = tf.flags.FLAGS - - -def main(_): - tf.logging.set_verbosity(tf.logging.INFO) - utils.log_registry() - utils.validate_flags() - utils.run( - data_dir=FLAGS.data_dir, - model=FLAGS.model, - output_dir=FLAGS.output_dir, - train_steps=FLAGS.train_steps, - eval_steps=FLAGS.eval_steps, - schedule=FLAGS.schedule) +def main(argv): + t2t_trainer.main(argv) if __name__ == "__main__": - tf.app.run() + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run(main) diff --git a/tensor2tensor/bin/t2t-translate-all b/tensor2tensor/bin/t2t-translate-all new file mode 100755 index 000000000..9e8ee219b --- /dev/null +++ b/tensor2tensor/bin/t2t-translate-all @@ -0,0 +1,18 @@ +#!/usr/bin/env python +"""t2t-translate-all.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.bin import t2t_translate_all + +import tensorflow as tf + +def main(argv): + t2t_translate_all.main(argv) + + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t_attack.py b/tensor2tensor/bin/t2t_attack.py new file mode 100644 index 000000000..336419cfd --- /dev/null +++ b/tensor2tensor/bin/t2t_attack.py @@ -0,0 +1,281 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Adversarially attack a model. + +This script adversarially attacks a model and evaluates accuracy at various + epsilons. + +Params such as which epsilons to evaluate at and the attack algorithm are + specified by attack_params, see models/resnet.py for examples. + +--ignore_incorrect will only attack those examples that are already correctly + classified by the model. + +--surrogate_attack will attack a model (A) and evaluate adversarial examples for + A on a different model (B). 
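The core of the evaluation is a sweep over attack strengths: for every epsilon in attack_params.attack_epsilons, adversarial examples are generated and accuracy is recorded. A minimal sketch of that loop, with hypothetical `attack_fn` and `accuracy_fn` callables standing in for the real cleverhans attack object and the tf.metrics-based accuracy computation:

```python
# Hedged sketch of the epsilon sweep in main(); attack_fn and accuracy_fn are
# assumed stand-ins, not the actual t2t_attack/cleverhans APIs.
def epsilon_sweep(attack_fn, accuracy_fn, inputs, labels, epsilons):
  epsilon_acc_pairs = []
  for eps in epsilons:
    adv_inputs = attack_fn(inputs, labels, eps)  # adversarial batch at this strength
    epsilon_acc_pairs.append((eps, accuracy_fn(adv_inputs, labels)))
  return epsilon_acc_pairs
```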
+ +Example run: +- train a resnet on cifar10: + bin/t2t_trainer.py --problem=image_cifar10 --hparams_set=resnet_cifar_32 \ + --model=resnet + +- evaluate robustness using the FGSM attack: + bin/t2t_attack.py --attack_params_set=resnet_fgsm --problem=image_cifar10\ + --hparams_set=resnet_cifar_32 --model=resnet +""" + +import os + +from tensor2tensor.bin import t2t_trainer +from tensor2tensor.data_generators import problem as problem_lib # pylint: disable=unused-import +from tensor2tensor.utils import adv_attack_utils +from tensor2tensor.utils import cloud_mlengine +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model +from tensor2tensor.utils import trainer_lib +from tensor2tensor.utils import usr_dir + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +flags = tf.flags +FLAGS = flags.FLAGS + +# See flags.py for additional command-line flags. +flags.DEFINE_string("attack_params_set", None, + "Which attack parameters to use.") +flags.DEFINE_boolean("surrogate_attack", False, + "Perform an attack on a surrogate model.") +flags.DEFINE_string("surrogate_model", None, "Surrogate model to attack.") +flags.DEFINE_string("surrogate_hparams_set", None, + "Surrogate model's hyperparameter set.") +flags.DEFINE_string("surrogate_output_dir", None, + "Directory storing surrogate model's weights.") +flags.DEFINE_boolean( + "ignore_incorrect", False, "Ignore examples that are " + "incorrectly classified to begin with.") + + +def create_attack_params(): + return registry.attack_params(FLAGS.attack_params_set) + + +def create_attack(attack): + return registry.attack(attack) + + +def create_surrogate_hparams(): + return trainer_lib.create_hparams(FLAGS.surrogate_hparams_set, None) + + +def create_surrogate_run_config(hp): + """Create a run config. + + Args: + hp: model hyperparameters + Returns: + a run config + """ + save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency) + save_ckpt_secs = FLAGS.save_checkpoints_secs or None + if save_ckpt_secs: + save_ckpt_steps = None + assert FLAGS.surrogate_output_dir + # the various custom getters we have written do not play well together yet. + # TODO(noam): ask rsepassi for help here. 
+ daisy_chain_variables = ( + hp.daisy_chain_variables and hp.activation_dtype == "float32" and + hp.weight_dtype == "float32") + return trainer_lib.create_run_config( + model_name=FLAGS.model, + model_dir=os.path.expanduser(FLAGS.surrogate_output_dir), + master=FLAGS.master, + iterations_per_loop=FLAGS.iterations_per_loop, + num_shards=FLAGS.tpu_num_shards, + log_device_placement=FLAGS.log_device_placement, + save_checkpoints_steps=save_ckpt_steps, + save_checkpoints_secs=save_ckpt_secs, + keep_checkpoint_max=FLAGS.keep_checkpoint_max, + keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours, + num_gpus=FLAGS.worker_gpu, + gpu_order=FLAGS.gpu_order, + num_async_replicas=FLAGS.worker_replicas, + gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction, + enable_graph_rewriter=FLAGS.enable_graph_rewriter, + use_tpu=FLAGS.use_tpu, + schedule=FLAGS.schedule, + no_data_parallelism=hp.no_data_parallelism, + daisy_chain_variables=daisy_chain_variables, + ps_replicas=FLAGS.ps_replicas, + ps_job=FLAGS.ps_job, + ps_gpu=FLAGS.ps_gpu, + sync=FLAGS.sync, + worker_id=FLAGS.worker_id, + worker_job=FLAGS.worker_job, + random_seed=FLAGS.random_seed, + tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs, + inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads, + log_step_count_steps=FLAGS.log_step_count_steps, + intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads) + + +def prepare_data(problem, hparams, params, config): + """Construct input pipeline.""" + input_fn = problem.make_estimator_input_fn( + tf_estimator.ModeKeys.EVAL, hparams, force_repeat=True) + dataset = input_fn(params, config) + features, _ = dataset.make_one_shot_iterator().get_next() + inputs, labels = features["targets"], features["inputs"] + inputs = tf.to_float(inputs) + input_shape = inputs.shape.as_list() + inputs = tf.reshape(inputs, [hparams.batch_size] + input_shape[1:]) + labels = tf.reshape(labels, [hparams.batch_size]) + return inputs, labels, features + + +def main(argv): + tf.logging.set_verbosity(tf.logging.INFO) + trainer_lib.set_random_seed(FLAGS.random_seed) + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + t2t_trainer.maybe_log_registry_and_exit() + + + if FLAGS.cloud_mlengine: + cloud_mlengine.launch() + return + + if FLAGS.generate_data: + t2t_trainer.generate_data() + + if cloud_mlengine.job_dir(): + FLAGS.output_dir = cloud_mlengine.job_dir() + + if argv: + t2t_trainer.set_hparams_from_args(argv[1:]) + + if FLAGS.surrogate_attack: + tf.logging.warn("Performing surrogate model attack.") + sur_hparams = create_surrogate_hparams() + trainer_lib.add_problem_hparams(sur_hparams, FLAGS.problem) + + hparams = t2t_trainer.create_hparams() + trainer_lib.add_problem_hparams(hparams, FLAGS.problem) + + attack_params = create_attack_params() + attack_params.add_hparam(attack_params.epsilon_name, 0.0) + + if FLAGS.surrogate_attack: + sur_config = create_surrogate_run_config(sur_hparams) + config = t2t_trainer.create_run_config(hparams) + params = { + "batch_size": hparams.batch_size, + "use_tpu": FLAGS.use_tpu, + } + + # add "_rev" as a hack to avoid image standardization + problem = registry.problem(FLAGS.problem + "_rev") + + inputs, labels, features = prepare_data(problem, hparams, params, config) + + sess = tf.Session() + + if FLAGS.surrogate_attack: + sur_model_fn = t2t_model.T2TModel.make_estimator_model_fn( + FLAGS.surrogate_model, sur_hparams, use_tpu=FLAGS.use_tpu) + sur_ch_model = adv_attack_utils.T2TAttackModel( + sur_model_fn, features, params, sur_config, scope="surrogate") + # Dummy call to 
construct graph + sur_ch_model.get_probs(inputs) + + checkpoint_path = os.path.expanduser(FLAGS.surrogate_output_dir) + tf.train.init_from_checkpoint( + tf.train.latest_checkpoint(checkpoint_path), {"/": "surrogate/"}) + sess.run(tf.global_variables_initializer()) + + other_vars = set(tf.global_variables()) + + model_fn = t2t_model.T2TModel.make_estimator_model_fn( + FLAGS.model, hparams) + ch_model = adv_attack_utils.T2TAttackModel(model_fn, features, params, config) + + acc_mask = None + probs = ch_model.get_probs(inputs) + if FLAGS.ignore_incorrect: + preds = tf.argmax(probs, -1, output_type=labels.dtype) + preds = tf.reshape(preds, labels.shape) + acc_mask = tf.to_float(tf.equal(labels, preds)) + one_hot_labels = tf.one_hot(labels, probs.shape[-1]) + + if FLAGS.surrogate_attack: + attack = create_attack(attack_params.attack)(sur_ch_model, sess=sess) + else: + attack = create_attack(attack_params.attack)(ch_model, sess=sess) + + new_vars = set(tf.global_variables()) - other_vars + + # Restore weights + saver = tf.train.Saver(new_vars) + checkpoint_path = os.path.expanduser(FLAGS.output_dir) + saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path)) + + # reuse variables + tf.get_variable_scope().reuse_variables() + + def compute_accuracy(x, l, mask): + """Compute model accuracy.""" + preds = ch_model.get_probs(x) + preds = tf.squeeze(preds) + preds = tf.argmax(preds, -1, output_type=l.dtype) + + _, acc_update_op = tf.metrics.accuracy(l, preds, weights=mask) + + if FLAGS.surrogate_attack: + preds = sur_ch_model.get_probs(x) + preds = tf.squeeze(preds) + preds = tf.argmax(preds, -1, output_type=l.dtype) + acc_update_op = tf.tuple((acc_update_op, + tf.metrics.accuracy(l, preds, weights=mask)[1])) + + sess.run(tf.initialize_local_variables()) + for i in range(FLAGS.eval_steps): + tf.logging.info( + "\tEvaluating batch [%d / %d]" % (i + 1, FLAGS.eval_steps)) + acc = sess.run(acc_update_op) + if FLAGS.surrogate_attack: + tf.logging.info("\tFinal acc: (%.4f, %.4f)" % (acc[0], acc[1])) + else: + tf.logging.info("\tFinal acc: %.4f" % acc) + return acc + + epsilon_acc_pairs = [] + for epsilon in attack_params.attack_epsilons: + tf.logging.info("Attacking @ eps=%.4f" % epsilon) + attack_params.set_hparam(attack_params.epsilon_name, epsilon) + adv_x = attack.generate(inputs, y=one_hot_labels, **attack_params.values()) + acc = compute_accuracy(adv_x, labels, acc_mask) + epsilon_acc_pairs.append((epsilon, acc)) + + for epsilon, acc in epsilon_acc_pairs: + if FLAGS.surrogate_attack: + tf.logging.info( + "Accuracy @ eps=%.4f: (%.4f, %.4f)" % (epsilon, acc[0], acc[1])) + else: + tf.logging.info("Accuracy @ eps=%.4f: %.4f" % (epsilon, acc)) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t_avg_all.py b/tensor2tensor/bin/t2t_avg_all.py new file mode 100644 index 000000000..6589e84c3 --- /dev/null +++ b/tensor2tensor/bin/t2t_avg_all.py @@ -0,0 +1,115 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Script to continuously average last N checkpoints in a given directory.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import deque +import os +import shutil +import numpy as np +import six +from six.moves import zip # pylint: disable=redefined-builtin +from tensor2tensor.utils import bleu_hook +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("model_dir", "", + "Directory to load model checkpoints from.") +flags.DEFINE_string("output_dir", "avg/", + "Directory to output the averaged checkpoints to.") +flags.DEFINE_integer("n", 8, "How many checkpoints should be averaged?") +flags.DEFINE_integer("min_steps", 0, "Ignore checkpoints with less steps.") +flags.DEFINE_integer("wait_minutes", 0, + "Wait upto N minutes for a new checkpoint.") + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + + model_dir = os.path.expanduser(FLAGS.model_dir) + output_dir = os.path.expanduser(FLAGS.output_dir) + out_base_file = os.path.join(output_dir, "model.ckpt") + + # Copy flags.txt with the original time, so t2t-bleu can report correct + # relative time. + tf.gfile.MakeDirs(FLAGS.output_dir) + if (not os.path.exists(os.path.join(output_dir, "flags.txt")) and + os.path.exists(os.path.join(model_dir, "flags.txt"))): + shutil.copy2(os.path.join(model_dir, "flags.txt"), + os.path.join(output_dir, "flags.txt")) + + models_processed = 0 + queue = deque() + for model in bleu_hook.stepfiles_iterator(model_dir, FLAGS.wait_minutes, + FLAGS.min_steps): + if models_processed == 0: + var_list = tf.train.list_variables(model.filename) + avg_values = {} + for (name, shape) in var_list: + if not (name.startswith("global_step") or + name.startswith("train_stats/")): + avg_values[name] = np.zeros(shape) + models_processed += 1 + + tf.logging.info("Loading [%d]: %s" % (models_processed, model.filename)) + reader = tf.train.load_checkpoint(model.filename) + for name in avg_values: + avg_values[name] += reader.get_tensor(name) / FLAGS.n + queue.append(model) + if len(queue) < FLAGS.n: + continue + + out_file = "%s-%d" % (out_base_file, model.steps) + tf_vars = [] + tf.logging.info("Averaging %s" % (out_file)) + for (name, value) in six.iteritems(avg_values): + # TODO(martinpopel): dtype=var_dtypes[name] + tf_vars.append(tf.get_variable(name, shape=value.shape)) + placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars] + assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)] + + global_step = tf.get_variable( + "global_step", + initializer=tf.constant(model.steps, dtype=tf.int64), + trainable=False) + with tf.variable_scope("train_stats"): + tf.get_variable("problem_0_steps", initializer=0, trainable=False) + saver = tf.train.Saver(tf.global_variables()) + + tf.logging.info("Running session for %s" % (out_file)) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + for p, assign_op, (name, value) in zip( + placeholders, assign_ops, six.iteritems(avg_values)): + sess.run(assign_op, {p: value}) + tf.logging.info("Storing to %s" % out_file) + saver.save(sess, out_base_file, global_step=global_step) + os.utime(out_file + ".index", (model.mtime, model.mtime)) + + tf.reset_default_graph() + first_model = queue.popleft() + + reader = tf.train.load_checkpoint(first_model.filename) + for name in avg_values: + avg_values[name] 
-= reader.get_tensor(name) / FLAGS.n + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t_bleu.py b/tensor2tensor/bin/t2t_bleu.py new file mode 100644 index 000000000..40d691ad6 --- /dev/null +++ b/tensor2tensor/bin/t2t_bleu.py @@ -0,0 +1,175 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Evaluate BLEU score for all checkpoints/translations in a given directory. + +This script can be used in two ways. + + +To evaluate one already translated file: + +``` +t2t-bleu --translation=my-wmt13.de --reference=wmt13_deen.de +``` + +To evaluate all translations in a given directory (translated by +`t2t-translate-all`): + +``` +t2t-bleu + --translations_dir=my-translations + --reference=wmt13_deen.de + --event_dir=events +``` + +In addition to the above-mentioned required parameters, +there are optional parameters: + * bleu_variant: cased (case-sensitive), uncased, both (default). + * tag_suffix: Default="", so the tags will be BLEU_cased and BLEU_uncased. + tag_suffix can be used e.g. for different beam sizes if these should be + plotted in different graphs. + * min_steps: Don't evaluate checkpoints with less steps. + Default=-1 means check the `last_evaluated_step.txt` file, which contains + the number of steps of the last successfully evaluated checkpoint. + * report_zero: Store BLEU=0 and guess its time based on the oldest file in the + translations_dir. Default=True. This is useful, so TensorBoard reports + correct relative time for the remaining checkpoints. This flag is set to + False if min_steps is > 0. + * wait_minutes: Wait upto N minutes for a new translated file. Default=0. + This is useful for continuous evaluation of a running training, in which case + this should be equal to save_checkpoints_secs/60 plus time needed for + translation plus some reserve. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import time +from tensor2tensor.utils import bleu_hook +import tensorflow.compat.v1 as tf + + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("source", None, + "Path to the source-language file to be translated") +flags.DEFINE_string("reference", None, "Path to the reference translation file") +flags.DEFINE_string("translation", None, + "Path to the MT system translation file") +flags.DEFINE_string("translations_dir", None, + "Directory with translated files to be evaluated.") +flags.DEFINE_string("event_dir", None, "Where to store the event file.") + +flags.DEFINE_string("bleu_variant", "both", + "Possible values: cased(case-sensitive), uncased, " + "both(default).") +flags.DEFINE_string("tag_suffix", "", + "What to add to BLEU_cased and BLEU_uncased tags.") +flags.DEFINE_integer("min_steps", -1, + "Don't evaluate checkpoints with less steps.") +flags.DEFINE_integer("wait_minutes", 0, + "Wait upto N minutes for a new checkpoint, cf. 
" + "save_checkpoints_secs.") +flags.DEFINE_bool("report_zero", None, + "Store BLEU=0 and guess its time based on the oldest file.") + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + if FLAGS.translation: + if FLAGS.translations_dir: + raise ValueError( + "Cannot specify both --translation and --translations_dir.") + if FLAGS.bleu_variant in ("uncased", "both"): + bleu = 100 * bleu_hook.bleu_wrapper(FLAGS.reference, FLAGS.translation, + case_sensitive=False) + print("BLEU_uncased = %6.2f" % bleu) + if FLAGS.bleu_variant in ("cased", "both"): + bleu = 100 * bleu_hook.bleu_wrapper(FLAGS.reference, FLAGS.translation, + case_sensitive=True) + print("BLEU_cased = %6.2f" % bleu) + return + + if not FLAGS.translations_dir: + raise ValueError( + "Either --translation or --translations_dir must be specified.") + transl_dir = os.path.expanduser(FLAGS.translations_dir) + if not os.path.exists(transl_dir): + exit_time = time.time() + FLAGS.wait_minutes * 60 + tf.logging.info("Translation dir %s does not exist, waiting till %s.", + transl_dir, time.asctime(time.localtime(exit_time))) + while not os.path.exists(transl_dir): + time.sleep(10) + if time.time() > exit_time: + raise ValueError("Translation dir %s does not exist" % transl_dir) + + last_step_file = os.path.join(FLAGS.event_dir, "last_evaluated_step.txt") + if FLAGS.min_steps == -1: + if tf.gfile.Exists(last_step_file): + with open(last_step_file) as ls_file: + FLAGS.min_steps = int(ls_file.read()) + else: + FLAGS.min_steps = 0 + if FLAGS.report_zero is None: + FLAGS.report_zero = FLAGS.min_steps == 0 + + writer = tf.summary.FileWriter(FLAGS.event_dir) + for transl_file in bleu_hook.stepfiles_iterator( + transl_dir, FLAGS.wait_minutes, FLAGS.min_steps, path_suffix=""): + # report_zero handling must be inside the for-loop, + # so we are sure the transl_dir is already created. 
+ if FLAGS.report_zero: + all_files = (os.path.join(transl_dir, f) for f in os.listdir(transl_dir)) + start_time = min( + os.path.getmtime(f) for f in all_files if os.path.isfile(f)) + values = [] + if FLAGS.bleu_variant in ("uncased", "both"): + values.append(tf.Summary.Value( + tag="BLEU_uncased" + FLAGS.tag_suffix, simple_value=0)) + if FLAGS.bleu_variant in ("cased", "both"): + values.append(tf.Summary.Value( + tag="BLEU_cased" + FLAGS.tag_suffix, simple_value=0)) + writer.add_event(tf.summary.Event(summary=tf.Summary(value=values), + wall_time=start_time, step=0)) + FLAGS.report_zero = False + + filename = transl_file.filename + tf.logging.info("Evaluating " + filename) + values = [] + if FLAGS.bleu_variant in ("uncased", "both"): + bleu = 100 * bleu_hook.bleu_wrapper(FLAGS.reference, filename, + case_sensitive=False) + values.append(tf.Summary.Value(tag="BLEU_uncased" + FLAGS.tag_suffix, + simple_value=bleu)) + tf.logging.info("%s: BLEU_uncased = %6.2f" % (filename, bleu)) + if FLAGS.bleu_variant in ("cased", "both"): + bleu = 100 * bleu_hook.bleu_wrapper(FLAGS.reference, filename, + case_sensitive=True) + values.append(tf.Summary.Value(tag="BLEU_cased" + FLAGS.tag_suffix, + simple_value=bleu)) + tf.logging.info("%s: BLEU_cased = %6.2f" % (transl_file.filename, bleu)) + writer.add_event(tf.summary.Event( + summary=tf.Summary(value=values), + wall_time=transl_file.mtime, step=transl_file.steps)) + writer.flush() + with open(last_step_file, "w") as ls_file: + ls_file.write(str(transl_file.steps) + "\n") + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t_datagen.py b/tensor2tensor/bin/t2t_datagen.py new file mode 100644 index 000000000..91c11cd8e --- /dev/null +++ b/tensor2tensor/bin/t2t_datagen.py @@ -0,0 +1,306 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Produces the training and dev data for --problem into --data_dir. + +Produces sharded and shuffled TFRecord files of tensorflow.Example protocol +buffers for a variety of registered datasets. + +All Problems are registered with @registry.register_problem or are in +_SUPPORTED_PROBLEM_GENERATORS in this file. Each entry maps a string name +(selectable on the command-line with --problem) to a function that takes 2 +arguments - input_directory and mode (one of "train" or "dev") - and yields for +each training example a dictionary mapping string feature names to lists of +{string, int, float}. The generator will be run once for each mode. 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import multiprocessing +import os +import random +import tempfile + +import numpy as np + +from tensor2tensor import problems as problems_lib # pylint: disable=unused-import +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.envs import env_problem_utils +from tensor2tensor.utils import registry +from tensor2tensor.utils import usr_dir + +try: + # pylint: disable=g-import-not-at-top + from tensor2tensor.data_generators import algorithmic_math + from tensor2tensor.data_generators import audio + from tensor2tensor.data_generators import snli + from tensor2tensor.data_generators import wsj_parsing + # pylint: enable=g-import-not-at-top +except ImportError: + pass + +# Improrting here to prevent pylint from ungrouped-imports warning. +import tensorflow.compat.v1 as tf # pylint: disable=g-import-not-at-top + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("data_dir", "", "Data directory.") +flags.DEFINE_string("tmp_dir", "/tmp/t2t_datagen", + "Temporary storage directory.") +flags.DEFINE_string("problem", "", + "The name of the problem to generate data for.") +flags.DEFINE_string("exclude_problems", "", + "Comma-separates list of problems to exclude.") +flags.DEFINE_integer( + "num_shards", 0, "How many shards to use. Ignored for " + "registered Problems.") +flags.DEFINE_integer("max_cases", 0, + "Maximum number of cases to generate (unbounded if 0).") +flags.DEFINE_integer( + "env_problem_max_env_steps", 0, + "Maximum number of steps to take for environment-based problems. " + "Actions are chosen randomly") +flags.DEFINE_integer( + "env_problem_batch_size", 0, + "Number of environments to simulate for environment-based problems.") +flags.DEFINE_bool("only_list", False, + "If true, we only list the problems that will be generated.") +flags.DEFINE_integer("random_seed", 429459, "Random seed to use.") +flags.DEFINE_integer("task_id", -1, "For distributed data generation.") +flags.DEFINE_integer("task_id_start", -1, "For distributed data generation.") +flags.DEFINE_integer("task_id_end", -1, "For distributed data generation.") +flags.DEFINE_integer( + "num_concurrent_processes", None, + "Applies only to problems for which multiprocess_generate=True.") +flags.DEFINE_string( + "t2t_usr_dir", "", "Path to a Python module that will be imported. The " + "__init__.py file should include the necessary imports. " + "The imported files should contain registrations, " + "e.g. @registry.register_problem calls, that will then be " + "available to t2t-datagen.") + +# Mapping from problems that we can generate data for to their generators. 
+# pylint: disable=g-long-lambda +_SUPPORTED_PROBLEM_GENERATORS = { + "algorithmic_algebra_inverse": + (lambda: algorithmic_math.algebra_inverse(26, 0, 2, 100000), + lambda: algorithmic_math.algebra_inverse(26, 3, 3, 10000), + lambda: None), # test set + "parsing_english_ptb8k": + (lambda: wsj_parsing.parsing_token_generator( + FLAGS.data_dir, FLAGS.tmp_dir, True, 2**13, 2**9), + lambda: wsj_parsing.parsing_token_generator( + FLAGS.data_dir, FLAGS.tmp_dir, False, 2**13, 2**9), + lambda: None), # test set + "parsing_english_ptb16k": + (lambda: wsj_parsing.parsing_token_generator( + FLAGS.data_dir, FLAGS.tmp_dir, True, 2**14, 2**9), + lambda: wsj_parsing.parsing_token_generator( + FLAGS.data_dir, FLAGS.tmp_dir, False, 2**14, 2**9), + lambda: None), # test set + "inference_snli32k": + (lambda: snli.snli_token_generator(FLAGS.tmp_dir, True, 2**15), + lambda: snli.snli_token_generator(FLAGS.tmp_dir, False, 2**15), + lambda: None), # test set + "audio_timit_characters_test": (lambda: audio.timit_generator( + FLAGS.data_dir, FLAGS.tmp_dir, True, 1718 + ), lambda: audio.timit_generator(FLAGS.data_dir, FLAGS.tmp_dir, False, 626), + lambda: None), # test set + "audio_timit_tokens_8k_test": (lambda: audio.timit_generator( + FLAGS.data_dir, + FLAGS.tmp_dir, + True, + 1718, + vocab_filename="vocab.endefr.%d" % 2**13, + vocab_size=2**13), lambda: audio.timit_generator( + FLAGS.data_dir, + FLAGS.tmp_dir, + False, + 626, + vocab_filename="vocab.endefr.%d" % 2**13, + vocab_size=2**13), lambda: None), # test set + "audio_timit_tokens_32k_test": (lambda: audio.timit_generator( + FLAGS.data_dir, + FLAGS.tmp_dir, + True, + 1718, + vocab_filename="vocab.endefr.%d" % 2**15, + vocab_size=2**15), lambda: audio.timit_generator( + FLAGS.data_dir, + FLAGS.tmp_dir, + False, + 626, + vocab_filename="vocab.endefr.%d" % 2**15, + vocab_size=2**15), lambda: None), # test set +} + +# pylint: enable=g-long-lambda + + +def set_random_seed(): + """Set the random seed from flag everywhere.""" + tf.set_random_seed(FLAGS.random_seed) + random.seed(FLAGS.random_seed) + np.random.seed(FLAGS.random_seed) + + +def main(_): + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + + # Calculate the list of problems to generate. + problems = sorted( + list(_SUPPORTED_PROBLEM_GENERATORS) + registry.list_base_problems() + + registry.list_env_problems()) + for exclude in FLAGS.exclude_problems.split(","): + if exclude: + problems = [p for p in problems if exclude not in p] + if FLAGS.problem and FLAGS.problem[-1] == "*": + problems = [p for p in problems if p.startswith(FLAGS.problem[:-1])] + elif FLAGS.problem and "," in FLAGS.problem: + problems = [p for p in problems if p in FLAGS.problem.split(",")] + elif FLAGS.problem: + problems = [p for p in problems if p == FLAGS.problem] + else: + problems = [] + + # Remove TIMIT if paths are not given. + if getattr(FLAGS, "timit_paths", None): + problems = [p for p in problems if "timit" not in p] + # Remove parsing if paths are not given. 
+ if getattr(FLAGS, "parsing_path", None): + problems = [p for p in problems if "parsing_english_ptb" not in p] + + if not problems: + problems_str = "\n * ".join( + sorted( + list(_SUPPORTED_PROBLEM_GENERATORS) + + registry.list_base_problems() + registry.list_env_problems())) + error_msg = ("You must specify one of the supported problems to " + "generate data for:\n * " + problems_str + "\n") + error_msg += ("TIMIT and parsing need data_sets specified with " + "--timit_paths and --parsing_path.") + raise ValueError(error_msg) + + if not FLAGS.data_dir: + FLAGS.data_dir = tempfile.gettempdir() + tf.logging.warning( + "It is strongly recommended to specify --data_dir. " + "Data will be written to default data_dir=%s.", FLAGS.data_dir) + FLAGS.data_dir = os.path.expanduser(FLAGS.data_dir) + tf.gfile.MakeDirs(FLAGS.data_dir) + + tf.logging.info("Generating problems:\n%s" % + registry.display_list_by_prefix(problems, starting_spaces=4)) + if FLAGS.only_list: + return + for problem in problems: + set_random_seed() + + if problem in _SUPPORTED_PROBLEM_GENERATORS: + generate_data_for_problem(problem) + elif problem in registry.list_base_problems(): + generate_data_for_registered_problem(problem) + elif problem in registry.list_env_problems(): + generate_data_for_env_problem(problem) + else: + tf.logging.error("Problem %s is not a supported problem for datagen.", + problem) + + +def generate_data_for_problem(problem): + """Generate data for a problem in _SUPPORTED_PROBLEM_GENERATORS.""" + training_gen, dev_gen, test_gen = _SUPPORTED_PROBLEM_GENERATORS[problem] + + num_train_shards = FLAGS.num_shards or 10 + tf.logging.info("Generating training data for %s.", problem) + train_output_files = generator_utils.train_data_filenames( + problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir, + num_train_shards) + generator_utils.generate_files(training_gen(), train_output_files, + FLAGS.max_cases) + num_dev_shards = int(num_train_shards * 0.1) + tf.logging.info("Generating development data for %s.", problem) + dev_output_files = generator_utils.dev_data_filenames( + problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir, + num_dev_shards) + generator_utils.generate_files(dev_gen(), dev_output_files) + num_test_shards = int(num_train_shards * 0.1) + test_output_files = [] + test_gen_data = test_gen() + if test_gen_data is not None: + tf.logging.info("Generating test data for %s.", problem) + test_output_files = generator_utils.test_data_filenames( + problem + generator_utils.UNSHUFFLED_SUFFIX, FLAGS.data_dir, + num_test_shards) + generator_utils.generate_files(test_gen_data, test_output_files) + all_output_files = train_output_files + dev_output_files + test_output_files + generator_utils.shuffle_dataset(all_output_files) + + +def generate_data_in_process(arg): + problem_name, data_dir, tmp_dir, task_id = arg + problem = registry.problem(problem_name) + problem.generate_data(data_dir, tmp_dir, task_id) + + +def generate_data_for_env_problem(problem_name): + """Generate data for `EnvProblem`s.""" + assert FLAGS.env_problem_max_env_steps > 0, ("--env_problem_max_env_steps " + "should be greater than zero") + assert FLAGS.env_problem_batch_size > 0, ("--env_problem_batch_size should be" + " greather than zero") + problem = registry.env_problem(problem_name) + task_id = None if FLAGS.task_id < 0 else FLAGS.task_id + data_dir = os.path.expanduser(FLAGS.data_dir) + tmp_dir = os.path.expanduser(FLAGS.tmp_dir) + # TODO(msaffar): Handle large values for env_problem_batch_size where we + # cannot create 
that many environments within the same process. + problem.initialize(batch_size=FLAGS.env_problem_batch_size) + env_problem_utils.play_env_problem_randomly( + problem, num_steps=FLAGS.env_problem_max_env_steps) + problem.generate_data(data_dir=data_dir, tmp_dir=tmp_dir, task_id=task_id) + + +def generate_data_for_registered_problem(problem_name): + """Generate data for a registered problem.""" + tf.logging.info("Generating data for %s.", problem_name) + if FLAGS.num_shards: + raise ValueError("--num_shards should not be set for registered Problem.") + problem = registry.problem(problem_name) + task_id = None if FLAGS.task_id < 0 else FLAGS.task_id + data_dir = os.path.expanduser(FLAGS.data_dir) + tmp_dir = os.path.expanduser(FLAGS.tmp_dir) + if task_id is None and problem.multiprocess_generate: + if FLAGS.task_id_start != -1: + assert FLAGS.task_id_end != -1 + task_id_start = FLAGS.task_id_start + task_id_end = FLAGS.task_id_end + else: + task_id_start = 0 + task_id_end = problem.num_generate_tasks + pool = multiprocessing.Pool(processes=FLAGS.num_concurrent_processes) + problem.prepare_to_generate(data_dir, tmp_dir) + args = [(problem_name, data_dir, tmp_dir, task_id) + for task_id in range(task_id_start, task_id_end)] + pool.map(generate_data_in_process, args) + else: + problem.generate_data(data_dir, tmp_dir, task_id) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t_decoder.py b/tensor2tensor/bin/t2t_decoder.py new file mode 100644 index 000000000..3e9e41389 --- /dev/null +++ b/tensor2tensor/bin/t2t_decoder.py @@ -0,0 +1,215 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Decode from trained T2T models. + +This binary performs inference using the Estimator API. + +Example usage to decode from dataset: + + t2t-decoder \ + --data_dir ~/data \ + --problem=algorithmic_identity_binary40 \ + --model=transformer + --hparams_set=transformer_base + +Set FLAGS.decode_interactive or FLAGS.decode_from_file for alternative decode +sources. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from tensor2tensor.bin import t2t_trainer +from tensor2tensor.data_generators import problem # pylint: disable=unused-import +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.utils import decoding +from tensor2tensor.utils import registry +from tensor2tensor.utils import trainer_lib +from tensor2tensor.utils import usr_dir + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +flags = tf.flags +FLAGS = flags.FLAGS + +# Additional flags in bin/t2t_trainer.py and utils/flags.py +flags.DEFINE_string("checkpoint_path", None, + "Path to the model checkpoint. 
Overrides output_dir.") +flags.DEFINE_bool("keep_timestamp", False, + "Set the mtime of the decoded file to the " + "checkpoint_path+'.index' mtime.") +flags.DEFINE_bool("decode_interactive", False, + "Interactive local inference mode.") +flags.DEFINE_integer("decode_shards", 1, "Number of decoding replicas.") +flags.DEFINE_string("score_file", "", "File to score. Each line in the file " + "must be in the format input \t target.") +flags.DEFINE_bool("decode_in_memory", False, "Decode in memory.") +flags.DEFINE_bool("disable_grappler_optimizations", False, + "Disable Grappler if need be to avoid tensor format errors.") + + +def create_hparams(): + hparams_path = None + if FLAGS.output_dir: + hparams_path = os.path.join(FLAGS.output_dir, "hparams.json") + return trainer_lib.create_hparams( + FLAGS.hparams_set, + FLAGS.hparams, + data_dir=os.path.expanduser(FLAGS.data_dir), + problem_name=FLAGS.problem, + hparams_path=hparams_path) + + +def create_decode_hparams(): + decode_hp = decoding.decode_hparams(FLAGS.decode_hparams) + decode_hp.shards = FLAGS.decode_shards + decode_hp.shard_id = FLAGS.worker_id + decode_in_memory = FLAGS.decode_in_memory or decode_hp.decode_in_memory + decode_hp.decode_in_memory = decode_in_memory + decode_hp.decode_to_file = FLAGS.decode_to_file + decode_hp.decode_reference = FLAGS.decode_reference + return decode_hp + + +def decode(estimator, hparams, decode_hp): + """Decode from estimator. Interactive, from file, or from dataset.""" + if FLAGS.decode_interactive: + if estimator.config.use_tpu: + raise ValueError("TPU can only decode from dataset.") + decoding.decode_interactively(estimator, hparams, decode_hp, + checkpoint_path=FLAGS.checkpoint_path) + elif FLAGS.decode_from_file: + decoding.decode_from_file(estimator, FLAGS.decode_from_file, hparams, + decode_hp, FLAGS.decode_to_file, + checkpoint_path=FLAGS.checkpoint_path) + if FLAGS.checkpoint_path and FLAGS.keep_timestamp: + ckpt_time = os.path.getmtime(FLAGS.checkpoint_path + ".index") + os.utime(FLAGS.decode_to_file, (ckpt_time, ckpt_time)) + else: + decoding.decode_from_dataset( + estimator, + FLAGS.problem, + hparams, + decode_hp, + decode_to_file=FLAGS.decode_to_file, + dataset_split="test" if FLAGS.eval_use_test_set else None, + checkpoint_path=FLAGS.checkpoint_path) + + +def score_file(filename): + """Score each line in a file and return the scores.""" + # Prepare model. + hparams = create_hparams() + encoders = registry.problem(FLAGS.problem).feature_encoders(FLAGS.data_dir) + has_inputs = "inputs" in encoders + + # Prepare features for feeding into the model. + if has_inputs: + inputs_ph = tf.placeholder(dtype=tf.int32) # Just length dimension. + batch_inputs = tf.reshape(inputs_ph, [1, -1, 1, 1]) # Make it 4D. + targets_ph = tf.placeholder(dtype=tf.int32) # Just length dimension. + batch_targets = tf.reshape(targets_ph, [1, -1, 1, 1]) # Make it 4D. + if has_inputs: + features = {"inputs": batch_inputs, "targets": batch_targets} + else: + features = {"targets": batch_targets} + + # Prepare the model and the graph when model runs on features. + model = registry.model(FLAGS.model)(hparams, tf_estimator.ModeKeys.EVAL) + _, losses = model(features) + saver = tf.train.Saver() + + with tf.Session() as sess: + # Load weights from checkpoint. + if FLAGS.checkpoint_path is None: + ckpts = tf.train.get_checkpoint_state(FLAGS.output_dir) + ckpt = ckpts.model_checkpoint_path + else: + ckpt = FLAGS.checkpoint_path + saver.restore(sess, ckpt) + # Run on each line. 
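+    # Each line of --score_file holds either "target" or "input<TAB>target";
+    # lines with more than one tab separator are rejected below.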
+ with tf.gfile.Open(filename) as f: + lines = f.readlines() + results = [] + for line in lines: + tab_split = line.split("\t") + if len(tab_split) > 2: + raise ValueError("Each line must have at most one tab separator.") + if len(tab_split) == 1: + targets = tab_split[0].strip() + else: + targets = tab_split[1].strip() + inputs = tab_split[0].strip() + # Run encoders and append EOS symbol. + targets_numpy = encoders["targets"].encode( + targets) + [text_encoder.EOS_ID] + if has_inputs: + inputs_numpy = encoders["inputs"].encode(inputs) + [text_encoder.EOS_ID] + # Prepare the feed. + if has_inputs: + feed = {inputs_ph: inputs_numpy, targets_ph: targets_numpy} + else: + feed = {targets_ph: targets_numpy} + # Get the score. + np_loss = sess.run(losses["training"], feed) + results.append(np_loss) + return results + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + trainer_lib.set_random_seed(FLAGS.random_seed) + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + + + if FLAGS.score_file: + filename = os.path.expanduser(FLAGS.score_file) + if not tf.gfile.Exists(filename): + raise ValueError("The file to score doesn't exist: %s" % filename) + results = score_file(filename) + if not FLAGS.decode_to_file: + raise ValueError("To score a file, specify --decode_to_file for results.") + write_file = tf.gfile.Open(os.path.expanduser(FLAGS.decode_to_file), "w") + for score in results: + write_file.write("%.6f\n" % score) + write_file.close() + return + + hp = create_hparams() + decode_hp = create_decode_hparams() + run_config = t2t_trainer.create_run_config(hp) + if FLAGS.disable_grappler_optimizations: + run_config.session_config.graph_options.rewrite_options.disable_meta_optimizer = True + + # summary-hook in tf.estimator.EstimatorSpec requires + # hparams.model_dir to be set. + hp.add_hparam("model_dir", run_config.model_dir) + + estimator = trainer_lib.create_estimator( + FLAGS.model, + hp, + run_config, + decode_hparams=decode_hp, + use_tpu=FLAGS.use_tpu) + + decode(estimator, hp, decode_hp) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t_distill.py b/tensor2tensor/bin/t2t_distill.py new file mode 100644 index 000000000..3c86a80e5 --- /dev/null +++ b/tensor2tensor/bin/t2t_distill.py @@ -0,0 +1,189 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Perform distillation for a teacher to student. + +This script is intended to be used with --model=distillation. See the model for +example hyperparameters and usage. + +If only output_dir is specified, then teacher_dir is `output_dir/teacher`, and +the student_dir is `output_dir/student`. Logs are written inside `output_dir`. +If teacher_dir is also specified explicitly, the student_dir is still +`output_dir/student` and the logs are written into `output_dir`. 
If student_dir +is further specified, the logs are written into student_dir unless output_dir is +explicitly specified, which only contains the logs in this case. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from tensor2tensor import models # pylint: disable=unused-import +from tensor2tensor import problems as problems_lib # pylint: disable=unused-import +from tensor2tensor.bin import t2t_trainer +from tensor2tensor.utils import cloud_mlengine +from tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import +from tensor2tensor.utils import trainer_lib +from tensor2tensor.utils import usr_dir + +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_bool( + "skip_teacher_training", False, + "By default, we train teacher model. If set to True, skip the training.") +flags.DEFINE_string( + "teacher_dir", None, + "Directory to teacher network. If not specified, `output_dir/teacher` is " + "used instead.") +flags.DEFINE_string( + "student_dir", None, + "Directory to student network. If not specified, `output_dir/student` is " + "used instead.") + + +def main(argv): + tf.logging.set_verbosity(tf.logging.INFO) + trainer_lib.set_random_seed(FLAGS.random_seed) + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + t2t_trainer.maybe_log_registry_and_exit() + + if FLAGS.cloud_mlengine: + cloud_mlengine.launch() + return + + if FLAGS.generate_data: + t2t_trainer.generate_data() + + if cloud_mlengine.job_dir(): + FLAGS.output_dir = cloud_mlengine.job_dir() + + if argv: + t2t_trainer.set_hparams_from_args(argv[1:]) + + root_output_dir = FLAGS.output_dir + + if FLAGS.teacher_dir: + teacher_dir = FLAGS.teacher_dir + else: + teacher_dir = os.path.join(root_output_dir, "teacher") + + # Train Teacher ============ + if FLAGS.skip_teacher_training: + tf.logging.info("training teacher skipped") + else: + hparams = t2t_trainer.create_hparams() + hparams.distill_phase = "train" + FLAGS.output_dir = teacher_dir + + exp_fn = t2t_trainer.create_experiment_fn() + run_config = t2t_trainer.create_run_config(hparams) + exp = exp_fn(run_config, hparams) + if t2t_trainer.is_chief(): + t2t_trainer.save_metadata(hparams) + t2t_trainer.execute_schedule(exp) + + # ========================== + # Train Student ============ + hparams = t2t_trainer.create_hparams() + hparams.add_hparam("teacher_dir", teacher_dir) + hparams.distill_phase = "distill" + if FLAGS.student_dir: + student_dir = FLAGS.student_dir + else: + student_dir = os.path.join(root_output_dir, "student") + FLAGS.output_dir = student_dir + hparams.add_hparam("student_dir", student_dir) + + exp_fn = t2t_trainer.create_experiment_fn() + run_config = t2t_trainer.create_run_config(hparams) + exp = exp_fn(run_config, hparams) + + if t2t_trainer.is_chief(): + t2t_trainer.save_metadata(hparams) + t2t_trainer.execute_schedule(exp) + # ========================== + + +def create_teacher_experiment(run_config, hparams, argv): + """Creates experiment function.""" + tf.logging.info("training teacher") + tf.logging.set_verbosity(tf.logging.INFO) + trainer_lib.set_random_seed(FLAGS.random_seed) + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + t2t_trainer.maybe_log_registry_and_exit() + + if FLAGS.cloud_mlengine: + return cloud_mlengine.launch() + + if FLAGS.generate_data: + t2t_trainer.generate_data() + + if cloud_mlengine.job_dir(): + FLAGS.output_dir = cloud_mlengine.job_dir() + + if argv: + t2t_trainer.set_hparams_from_args(argv[1:]) + + 
hparams.distill_phase = "train" + exp_fn = t2t_trainer.create_experiment_fn() + exp = exp_fn(run_config, hparams) + return exp + + +def create_student_experiment(run_config, hparams, argv): + """Creates experiment function.""" + tf.logging.info("training student") + tf.logging.set_verbosity(tf.logging.INFO) + trainer_lib.set_random_seed(FLAGS.random_seed) + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + t2t_trainer.maybe_log_registry_and_exit() + + if FLAGS.cloud_mlengine: + return cloud_mlengine.launch() + + if FLAGS.generate_data: + t2t_trainer.generate_data() + + if cloud_mlengine.job_dir(): + FLAGS.output_dir = cloud_mlengine.job_dir() + + if argv: + t2t_trainer.set_hparams_from_args(argv[1:]) + + hparams.add_hparam("teacher_dir", FLAGS.teacher_dir) + hparams.add_hparam("student_dir", FLAGS.student_dir) + hparams.distill_phase = "distill" + exp_fn = t2t_trainer.create_experiment_fn() + exp = exp_fn(run_config, hparams) + return exp + + +def create_experiment_fn(argv, train_teacher): + + def teacher_experiment_fn(run_config, hparams): + return create_teacher_experiment(run_config, hparams, argv) + + def student_experiment_fn(run_config, hparams): + return create_student_experiment(run_config, hparams, argv) + + return teacher_experiment_fn if train_teacher else student_experiment_fn + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t_eval.py b/tensor2tensor/bin/t2t_eval.py new file mode 100644 index 000000000..77ca8d7a0 --- /dev/null +++ b/tensor2tensor/bin/t2t_eval.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Perform evaluation on trained T2T models using the Estimator API.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.bin import t2t_trainer # pylint: disable=unused-import +from tensor2tensor.data_generators import problem # pylint: disable=unused-import +from tensor2tensor.utils import trainer_lib +from tensor2tensor.utils import usr_dir +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +flags = tf.flags +FLAGS = flags.FLAGS + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + trainer_lib.set_random_seed(FLAGS.random_seed) + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + + hparams = trainer_lib.create_hparams( + FLAGS.hparams_set, FLAGS.hparams, data_dir=FLAGS.data_dir, + problem_name=FLAGS.problem) + + # set appropriate dataset-split, if flags.eval_use_test_set. + dataset_split = "test" if FLAGS.eval_use_test_set else None + dataset_kwargs = {"dataset_split": dataset_split} + eval_input_fn = hparams.problem.make_estimator_input_fn( + tf_estimator.ModeKeys.EVAL, hparams, dataset_kwargs=dataset_kwargs) + config = t2t_trainer.create_run_config(hparams) + + # summary-hook in tf.estimator.EstimatorSpec requires + # hparams.model_dir to be set. 
+ hparams.add_hparam("model_dir", config.model_dir) + + estimator = trainer_lib.create_estimator( + FLAGS.model, hparams, config, use_tpu=FLAGS.use_tpu) + ckpt_iter = trainer_lib.next_checkpoint( + hparams.model_dir, FLAGS.eval_timeout_mins) + for ckpt_path in ckpt_iter: + predictions = estimator.evaluate( + eval_input_fn, steps=FLAGS.eval_steps, checkpoint_path=ckpt_path) + tf.logging.info(predictions) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t_prune.py b/tensor2tensor/bin/t2t_prune.py new file mode 100644 index 000000000..e43872d32 --- /dev/null +++ b/tensor2tensor/bin/t2t_prune.py @@ -0,0 +1,119 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Prune T2TModels using some heuristic. + +This supports a very common form of pruning known as magnitude-based pruning. +It ranks individual weights or units according to their magnitudes and zeros +out the smallest k% of weights, effectively removing them from the graph. + +Example run: +- train a resnet on cifar10: + bin/t2t_trainer.py --problem=image_cifar10 --hparams_set=resnet_cifar_32 \ + --model=resnet + +- evaluate different pruning percentages using weight-level pruning: + bin/t2t_prune.py --pruning_params_set=resnet_weight --problem=image_cifar10\ + --hparams_set=resnet_cifar_32 --model=resnet +""" + +import os + +from tensor2tensor.bin import t2t_trainer +from tensor2tensor.data_generators import problem as problem_lib # pylint: disable=unused-import +from tensor2tensor.utils import pruning_utils +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model +from tensor2tensor.utils import trainer_lib +from tensor2tensor.utils import usr_dir + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +flags = tf.flags +FLAGS = flags.FLAGS + +# See flags.py for additional command-line flags. 
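To make the magnitude heuristic described above concrete, here is a small self-contained NumPy sketch of weight-level pruning. It is illustrative only and is not the `pruning_utils` implementation that this binary actually calls:

```
import numpy as np


def prune_by_magnitude(weights, sparsity):
  """Zero out the smallest `sparsity` fraction of weights by absolute value."""
  k = int(sparsity * weights.size)
  if k == 0:
    return weights
  # The k-th smallest absolute value becomes the pruning threshold.
  threshold = np.sort(np.abs(weights), axis=None)[k - 1]
  return weights * (np.abs(weights) > threshold)


w = np.random.randn(4, 4).astype(np.float32)
w_pruned = prune_by_magnitude(w, sparsity=0.5)  # roughly half the entries become 0
```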
+flags.DEFINE_string("pruning_params_set", None, + "Which pruning parameters to use.") + + +def create_pruning_params(): + return registry.pruning_params(FLAGS.pruning_params_set) + + +def create_pruning_strategy(name): + return registry.pruning_strategy(name) + + +def main(argv): + tf.logging.set_verbosity(tf.logging.INFO) + trainer_lib.set_random_seed(FLAGS.random_seed) + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + t2t_trainer.maybe_log_registry_and_exit() + + + if FLAGS.generate_data: + t2t_trainer.generate_data() + + if argv: + t2t_trainer.set_hparams_from_args(argv[1:]) + hparams = t2t_trainer.create_hparams() + trainer_lib.add_problem_hparams(hparams, FLAGS.problem) + pruning_params = create_pruning_params() + pruning_strategy = create_pruning_strategy(pruning_params.strategy) + + config = t2t_trainer.create_run_config(hparams) + params = {"batch_size": hparams.batch_size} + + # add "_rev" as a hack to avoid image standardization + problem = registry.problem(FLAGS.problem) + input_fn = problem.make_estimator_input_fn(tf_estimator.ModeKeys.EVAL, + hparams) + dataset = input_fn(params, config).repeat() + features, labels = dataset.make_one_shot_iterator().get_next() + + sess = tf.Session() + + model_fn = t2t_model.T2TModel.make_estimator_model_fn( + FLAGS.model, hparams, use_tpu=FLAGS.use_tpu) + spec = model_fn( + features, + labels, + tf_estimator.ModeKeys.EVAL, + params=hparams, + config=config) + + # Restore weights + saver = tf.train.Saver() + checkpoint_path = os.path.expanduser(FLAGS.output_dir or + FLAGS.checkpoint_path) + saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path)) + + def eval_model(): + preds = spec.predictions["predictions"] + preds = tf.argmax(preds, -1, output_type=labels.dtype) + _, acc_update_op = tf.metrics.accuracy(labels=labels, predictions=preds) + sess.run(tf.initialize_local_variables()) + for _ in range(FLAGS.eval_steps): + acc = sess.run(acc_update_op) + return acc + + pruning_utils.sparsify(sess, eval_model, pruning_strategy, pruning_params) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t_trainer.py b/tensor2tensor/bin/t2t_trainer.py new file mode 100644 index 000000000..290b45a83 --- /dev/null +++ b/tensor2tensor/bin/t2t_trainer.py @@ -0,0 +1,427 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Train and evaluate.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import os +import sys +from tensor2tensor import models # pylint: disable=unused-import +from tensor2tensor import problems as problems_lib # pylint: disable=unused-import +from tensor2tensor.data_generators import problem # pylint: disable=unused-import + +from tensor2tensor.utils import cloud_mlengine +from tensor2tensor.utils import contrib +from tensor2tensor.utils import decoding +from tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import +from tensor2tensor.utils import hparams_lib +from tensor2tensor.utils import mlperf_log +from tensor2tensor.utils import registry +from tensor2tensor.utils import trainer_lib +from tensor2tensor.utils import usr_dir +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +flags = tf.flags +FLAGS = flags.FLAGS + +# See utils/flags.py for additional command-line flags. +flags.DEFINE_string("t2t_usr_dir", None, + "Path to a Python module that will be imported. The " + "__init__.py file should include the necessary imports. " + "The imported files should contain registrations, " + "e.g. @registry.register_model calls, that will then be " + "available to the t2t-trainer.") +flags.DEFINE_integer("random_seed", None, "Random seed.") +flags.DEFINE_integer("tpu_num_shards", 8, "Number of tpu shards.") +flags.DEFINE_string("tpu_job_name", None, + "TPU job name. TPUEstimator can auto-infer this but if the " + "configuration is esoteric it should be provided here.") +flags.DEFINE_integer("iterations_per_loop", 100, + "Number of iterations in a TPU training loop.") +flags.DEFINE_bool("use_tpu", False, "Whether to use TPU.") +flags.DEFINE_bool("use_tpu_estimator", False, "Whether to use TPUEstimator. " + "This is always enabled when use_tpu is True.") +flags.DEFINE_integer("export_saved_model_api_version", 1, + "ExportSavedModelApiVersion, 1 (V1, default) or 2 (V2). " + "Default V2 uses model_fn_inference_on_tpu for rewrite." + "Flag use_guarantee_const is only enabled in V2.") +flags.DEFINE_bool("use_guarantee_const_getter", False, + "Whether to use GuaranteeConst Ops to mark all weights as " + "constant. It may improve TPU inference performance and " + "reduce HBM arguments usage. Only available when " + "export_saved_model_api_version=2 and use_tpu=True.") +flags.DEFINE_bool("xla_compile", False, + "Whether to use XLA to compile model_fn.") +flags.DEFINE_integer("xla_jit_level", -1, + "GlobalJitLevel to use while compiling the full graph.") +flags.DEFINE_integer("tpu_infeed_sleep_secs", None, + "How long to sleep the infeed thread.") +flags.DEFINE_bool("generate_data", False, "Generate data before training?") +flags.DEFINE_string("tmp_dir", "/tmp/t2t_datagen", + "Temporary storage directory, used if --generate_data.") +flags.DEFINE_bool("profile", False, "Profile performance?") +flags.DEFINE_integer("inter_op_parallelism_threads", 0, + "Number of inter_op_parallelism_threads to use for CPU. " + "See TensorFlow config.proto for details.") +flags.DEFINE_integer("intra_op_parallelism_threads", 0, + "Number of intra_op_parallelism_threads to use for CPU. " + "See TensorFlow config.proto for details.") +# TODO(lukaszkaiser): resolve memory and variable assign issues and set to True. +flags.DEFINE_bool( + "optionally_use_dist_strat", False, + "Whether to use TensorFlow DistributionStrategy instead of explicitly " + "replicating the model. 
DistributionStrategy is used only if the " + "model replication configuration is supported by the DistributionStrategy.") +# To maintain compatibility with some internal libs, we guard against these flag +# definitions possibly erroring. Apologies for the ugliness. +try: + flags.DEFINE_string("master", "", "Address of TensorFlow master.") + flags.DEFINE_string("output_dir", "", "Base output directory for run.") + flags.DEFINE_string("schedule", "continuous_train_and_eval", + "Method of Experiment to run.") + flags.DEFINE_integer("eval_steps", 100, + "Number of steps in evaluation. By default, eval will " + "stop after eval_steps or when it runs through the eval " + "dataset once in full, whichever comes first, so this " + "can be a very large number.") +except: # pylint: disable=bare-except + pass + +flags.DEFINE_string("std_server_protocol", "grpc", + "Protocol for tf.train.Server.") + +# Google Cloud TPUs +flags.DEFINE_string("cloud_tpu_name", "%s-tpu" % os.getenv("USER"), + "Name of Cloud TPU instance to use or create.") + +# Google Cloud ML Engine +flags.DEFINE_bool("cloud_mlengine", False, + "Whether to launch on Cloud ML Engine.") +flags.DEFINE_string("cloud_mlengine_master_type", None, + "Machine type for master on Cloud ML Engine. " + "If provided, overrides default selections based on " + "--worker_gpu. User is responsible for ensuring " + "type is valid and that --worker_gpu matches number of " + "GPUs on machine type. See documentation: " + "https://cloud.google.com/ml-engine/reference/rest/v1/" + "projects.jobs#traininginput") +# Hyperparameter tuning on Cloud ML Engine +# Pass an --hparams_range to enable +flags.DEFINE_string("autotune_objective", None, + "TensorBoard metric name to optimize.") +flags.DEFINE_bool("autotune_maximize", True, + "Whether to maximize (vs. minimize) autotune_objective.") +flags.DEFINE_integer("autotune_max_trials", 10, + "Maximum number of tuning experiments to run.") +flags.DEFINE_integer("autotune_parallel_trials", 1, + "How many trials to run in parallel (will spin up this " + "many jobs.") +# Note than in open-source TensorFlow, the dash gets converted to an underscore, +# so access is FLAGS.job_dir. +flags.DEFINE_string("job-dir", None, + "DO NOT USE. Exists only for Cloud ML Engine to pass in " + "during hyperparameter tuning. Overrides --output_dir.") +flags.DEFINE_integer("log_step_count_steps", 100, + "Number of local steps after which progress is printed " + "out") +flags.DEFINE_bool("gpu_automatic_mixed_precision", False, + "Whether to employ GPU automatic mixed precision training " + "(via graph rewrite and dynamic loss scaling).") + + + +def set_hparams_from_args(args): + """Set hparams overrides from unparsed args list.""" + if not args: + return + + hp_prefix = "--hp_" + tf.logging.info("Found unparsed command-line arguments. Checking if any " + "start with %s and interpreting those as hparams " + "settings.", hp_prefix) + + pairs = [] + i = 0 + while i < len(args): + arg = args[i] + if arg.startswith(hp_prefix): + pairs.append((arg[len(hp_prefix):], args[i+1])) + i += 2 + else: + tf.logging.warn("Found unknown flag: %s", arg) + i += 1 + + as_hparams = ",".join(["%s=%s" % (key, val) for key, val in pairs]) + if FLAGS.hparams: + as_hparams = "," + as_hparams + FLAGS.hparams += as_hparams + + +def create_hparams(): + """Create hparams.""" + if FLAGS.use_tpu and "tpu" not in FLAGS.hparams_set: + tf.logging.warn("Not all hyperparameter sets work on TPU. " + "Prefer hparams_sets with a '_tpu' suffix, " + "e.g. 
transformer_tpu, if available for your model.") + hparams_path = os.path.join(FLAGS.output_dir, "hparams.json") + return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams, + hparams_path=hparams_path) + + +def create_experiment_fn(): + return trainer_lib.create_experiment_fn( + model_name=FLAGS.model, + problem_name=FLAGS.problem, + data_dir=os.path.expanduser(FLAGS.data_dir), + train_steps=FLAGS.train_steps, + eval_steps=FLAGS.eval_steps, + min_eval_frequency=FLAGS.local_eval_frequency, + schedule=FLAGS.schedule, + eval_throttle_seconds=FLAGS.eval_throttle_seconds, + export=FLAGS.export_saved_model, + decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams), + use_tfdbg=FLAGS.tfdbg, + use_dbgprofile=FLAGS.dbgprofile, + eval_early_stopping_steps=FLAGS.eval_early_stopping_steps, + eval_early_stopping_metric=FLAGS.eval_early_stopping_metric, + eval_early_stopping_metric_delta=FLAGS.eval_early_stopping_metric_delta, + eval_early_stopping_metric_minimize=FLAGS + .eval_early_stopping_metric_minimize, + eval_timeout_mins=FLAGS.eval_timeout_mins, + eval_use_test_set=FLAGS.eval_use_test_set, + use_tpu=FLAGS.use_tpu, + use_tpu_estimator=FLAGS.use_tpu_estimator, + use_xla=FLAGS.xla_compile, + export_saved_model_api_version=FLAGS.export_saved_model_api_version, + use_guarantee_const_getter=FLAGS.use_guarantee_const_getter, + warm_start_from=FLAGS.warm_start_from, + decode_from_file=FLAGS.decode_from_file, + decode_to_file=FLAGS.decode_to_file, + decode_reference=FLAGS.decode_reference, + std_server_protocol=FLAGS.std_server_protocol) + + +def create_run_config(hp, output_dir=None): + """Create a run config. + + Args: + hp: model hyperparameters + output_dir: model's output directory, defaults to output_dir flag. + + Returns: + a run config + """ + save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency) + save_ckpt_secs = FLAGS.save_checkpoints_secs or None + if save_ckpt_secs: + save_ckpt_steps = None + assert FLAGS.output_dir + tpu_config_extra_kwargs = {} + if FLAGS.tpu_job_name is not None: + tpu_config_extra_kwargs["tpu_job_name"] = FLAGS.tpu_job_name + + if getattr(hp, "mtf_mode", False): + save_ckpt_steps = None # Disable the default saver + save_ckpt_secs = None # Disable the default saver + tpu_config_extra_kwargs = { + "num_cores_per_replica": + 1, + "per_host_input_for_training": + tf_estimator.tpu.InputPipelineConfig.BROADCAST, + } + + # the various custom getters we have written do not play well together yet. + # TODO(noam): ask rsepassi for help here. 
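+  # Daisy-chaining of variables is only enabled when both activations and
+  # weights are kept in float32.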
+ daisy_chain_variables = ( + hp.daisy_chain_variables and + hp.activation_dtype == "float32" and + hp.weight_dtype == "float32") + return trainer_lib.create_run_config( + model_name=FLAGS.model, + model_dir=output_dir or os.path.expanduser(FLAGS.output_dir), + master=FLAGS.master, + iterations_per_loop=FLAGS.iterations_per_loop, + num_shards=FLAGS.tpu_num_shards, + log_device_placement=FLAGS.log_device_placement, + save_checkpoints_steps=save_ckpt_steps, + save_checkpoints_secs=save_ckpt_secs, + keep_checkpoint_max=FLAGS.keep_checkpoint_max, + keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours, + num_gpus=FLAGS.worker_gpu, + gpu_order=FLAGS.gpu_order, + num_async_replicas=FLAGS.worker_replicas, + gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction, + enable_graph_rewriter=FLAGS.enable_graph_rewriter, + use_tpu=FLAGS.use_tpu, + use_tpu_estimator=FLAGS.use_tpu_estimator, + xla_jit_level=FLAGS.xla_jit_level, + schedule=FLAGS.schedule, + no_data_parallelism=hp.no_data_parallelism, + optionally_use_dist_strat=FLAGS.optionally_use_dist_strat, + daisy_chain_variables=daisy_chain_variables, + ps_replicas=FLAGS.ps_replicas, + ps_job=FLAGS.ps_job, + ps_gpu=FLAGS.ps_gpu, + sync=FLAGS.sync, + worker_id=FLAGS.worker_id, + worker_job=FLAGS.worker_job, + random_seed=FLAGS.random_seed, + tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs, + inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads, + log_step_count_steps=FLAGS.log_step_count_steps, + intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads, + tpu_config_extra_kwargs=tpu_config_extra_kwargs, + cloud_tpu_name=FLAGS.cloud_tpu_name) + + +def generate_data(): + # Generate data if requested. + data_dir = os.path.expanduser(FLAGS.data_dir) + tmp_dir = os.path.expanduser(FLAGS.tmp_dir) + tf.gfile.MakeDirs(data_dir) + tf.gfile.MakeDirs(tmp_dir) + + problem_name = FLAGS.problem + tf.logging.info("Generating data for %s" % problem_name) + registry.problem(problem_name).generate_data(data_dir, tmp_dir) + + +@contextlib.contextmanager +def profile_context(): + if FLAGS.profile: + with contrib.tfprof().ProfileContext( + "t2tprof", trace_steps=range(100), dump_steps=range(100)) as pctx: + opts = tf.profiler.ProfileOptionBuilder.time_and_memory() + pctx.add_auto_profiling("op", opts, range(100)) + yield + else: + yield + + +def maybe_log_registry_and_exit(): + if FLAGS.registry_help: + tf.logging.info(registry.help_string()) + sys.exit(0) + + +def is_chief(): + schedules = ["train", "train_and_evaluate", "continuous_train_and_eval"] + return FLAGS.worker_id == 0 and FLAGS.schedule in schedules + + +def save_metadata(hparams): + """Saves FLAGS and hparams to output_dir.""" + output_dir = os.path.expanduser(FLAGS.output_dir) + if not tf.gfile.Exists(output_dir): + tf.gfile.MakeDirs(output_dir) + + # Save FLAGS in txt file + if hasattr(FLAGS, "flags_into_string"): + flags_str = FLAGS.flags_into_string() + t2t_flags_str = "\n".join([ + "--%s=%s" % (f.name, f.value) + for f in FLAGS.flags_by_module_dict()["tensor2tensor.utils.flags"] + ]) + else: + flags_dict = FLAGS.__dict__["__flags"] + flags_str = "\n".join( + ["--%s=%s" % (name, str(f)) for (name, f) in flags_dict.items()]) + t2t_flags_str = None + + flags_txt = os.path.join(output_dir, "flags.txt") + with tf.gfile.Open(flags_txt, "w") as f: + f.write(flags_str) + + if t2t_flags_str: + t2t_flags_txt = os.path.join(output_dir, "flags_t2t.txt") + with tf.gfile.Open(t2t_flags_txt, "w") as f: + f.write(t2t_flags_str) + + # Save hparams as hparams.json + new_hparams = 
hparams_lib.copy_hparams(hparams) + # Modality class is not JSON serializable so remove. + new_hparams.del_hparam("modality") + + hparams_fname = os.path.join(output_dir, "hparams.json") + with tf.gfile.Open(hparams_fname, "w") as f: + f.write(new_hparams.to_json(indent=0, sort_keys=True)) + + +def execute_schedule(exp): + if not hasattr(exp, FLAGS.schedule): + raise ValueError( + "Experiment has no method %s, from --schedule" % FLAGS.schedule) + with profile_context(): + getattr(exp, FLAGS.schedule)() + + +def run_std_server(): + exp = trainer_lib.T2TExperiment(*([None] * 5)) + exp.run_std_server() + + +def main(argv): + tf.logging.set_verbosity(tf.logging.INFO) + + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + + # If we just have to print the registry, do that and exit early. + maybe_log_registry_and_exit() + + # Create HParams. + if argv: + set_hparams_from_args(argv[1:]) + if FLAGS.schedule != "run_std_server": + hparams = create_hparams() + if FLAGS.gpu_automatic_mixed_precision: + setattr(hparams, "gpu_automatic_mixed_precision", True) + + if FLAGS.schedule == "train" or FLAGS.schedule == "train_eval_and_decode": + mlperf_log.transformer_print(key=mlperf_log.RUN_START, hparams=hparams) + if FLAGS.schedule == "run_std_server": + run_std_server() + mlperf_log.transformer_print( + key=mlperf_log.RUN_SET_RANDOM_SEED, value=FLAGS.random_seed, + hparams=hparams) + trainer_lib.set_random_seed(FLAGS.random_seed) + + if FLAGS.cloud_mlengine: + cloud_mlengine.launch() + return + + if FLAGS.generate_data: + generate_data() + + if cloud_mlengine.job_dir(): + FLAGS.output_dir = cloud_mlengine.job_dir() + + exp_fn = create_experiment_fn() + exp = exp_fn(create_run_config(hparams), hparams) + if is_chief(): + save_metadata(hparams) + execute_schedule(exp) + if FLAGS.schedule != "train": + mlperf_log.transformer_print(key=mlperf_log.RUN_FINAL, + hparams=hparams) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/bin/t2t_trainer_test.py b/tensor2tensor/bin/t2t_trainer_test.py new file mode 100644 index 000000000..48826748d --- /dev/null +++ b/tensor2tensor/bin/t2t_trainer_test.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for t2t_trainer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.bin import t2t_trainer +from tensor2tensor.utils import trainer_lib_test + +import tensorflow.compat.v1 as tf + +FLAGS = tf.flags.FLAGS + + +class TrainerTest(tf.test.TestCase): + + @classmethod + def setUpClass(cls): + trainer_lib_test.TrainerLibTest.setUpClass() + + def testTrain(self): + FLAGS.problem = "tiny_algo" + FLAGS.model = "transformer" + FLAGS.hparams_set = "transformer_tiny" + FLAGS.train_steps = 1 + FLAGS.eval_steps = 1 + FLAGS.output_dir = tf.test.get_temp_dir() + FLAGS.data_dir = tf.test.get_temp_dir() + t2t_trainer.main(None) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/bin/t2t_translate_all.py b/tensor2tensor/bin/t2t_translate_all.py new file mode 100644 index 000000000..c938e6aa5 --- /dev/null +++ b/tensor2tensor/bin/t2t_translate_all.py @@ -0,0 +1,107 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Translate a file with all checkpoints in a given directory. + +t2t-decoder will be executed with these parameters: +--problem +--data_dir +--output_dir with the value of --model_dir +--decode_from_file with the value of --source +--decode_hparams with properly formatted --beam_size and --alpha +--checkpoint_path automatically filled +--decode_to_file automatically filled +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import shutil +from tensor2tensor.utils import bleu_hook + +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +# t2t-translate-all specific options +flags.DEFINE_string("decoder_command", "t2t-decoder {params}", + "Which command to execute instead t2t-decoder. " + "{params} is replaced by the parameters. Useful e.g. 
for " + "qsub wrapper.") +flags.DEFINE_string("model_dir", "", + "Directory to load model checkpoints from.") +flags.DEFINE_string("source", None, + "Path to the source-language file to be translated") +flags.DEFINE_string("translations_dir", "translations", + "Where to store the translated files.") +flags.DEFINE_integer("min_steps", 0, "Ignore checkpoints with less steps.") +flags.DEFINE_integer("wait_minutes", 0, + "Wait upto N minutes for a new checkpoint") + +# options derived from t2t-decoder +flags.DEFINE_integer("beam_size", 4, "Beam-search width.") +flags.DEFINE_float("alpha", 0.6, "Beam-search alpha.") +flags.DEFINE_string("model", "transformer", "see t2t-decoder") +flags.DEFINE_string("t2t_usr_dir", None, "see t2t-decoder") +flags.DEFINE_string("data_dir", None, "see t2t-decoder") +flags.DEFINE_string("problem", None, "see t2t-decoder") +flags.DEFINE_string("hparams_set", "transformer_big_single_gpu", + "see t2t-decoder") + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + # pylint: disable=unused-variable + model_dir = os.path.expanduser(FLAGS.model_dir) + translations_dir = os.path.expanduser(FLAGS.translations_dir) + source = os.path.expanduser(FLAGS.source) + tf.gfile.MakeDirs(translations_dir) + translated_base_file = os.path.join(translations_dir, FLAGS.problem) + + # Copy flags.txt with the original time, so t2t-bleu can report correct + # relative time. + flags_path = os.path.join(translations_dir, FLAGS.problem + "-flags.txt") + if not os.path.exists(flags_path): + shutil.copy2(os.path.join(model_dir, "flags.txt"), flags_path) + + locals_and_flags = {"FLAGS": FLAGS} + for model in bleu_hook.stepfiles_iterator(model_dir, FLAGS.wait_minutes, + FLAGS.min_steps): + tf.logging.info("Translating " + model.filename) + out_file = translated_base_file + "-" + str(model.steps) + locals_and_flags.update(locals()) + if os.path.exists(out_file): + tf.logging.info(out_file + " already exists, so skipping it.") + else: + tf.logging.info("Translating " + out_file) + params = ( + "--t2t_usr_dir={FLAGS.t2t_usr_dir} --output_dir={model_dir} " + "--data_dir={FLAGS.data_dir} --problem={FLAGS.problem} " + "--decode_hparams=beam_size={FLAGS.beam_size},alpha={FLAGS.alpha} " + "--model={FLAGS.model} --hparams_set={FLAGS.hparams_set} " + "--checkpoint_path={model.filename} --decode_from_file={source} " + "--decode_to_file={out_file} --keep_timestamp" + ).format(**locals_and_flags) + command = FLAGS.decoder_command.format(**locals()) + tf.logging.info("Running:\n" + command) + os.system(command) + # pylint: enable=unused-variable + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/data_generators/README.md b/tensor2tensor/data_generators/README.md index f8495c38f..144d9f4b0 100644 --- a/tensor2tensor/data_generators/README.md +++ b/tensor2tensor/data_generators/README.md @@ -1,7 +1,7 @@ -# Data generators for T2T models. +# T2T Problems. -This directory contains data generators for a number of problems. We use a -naming scheme for the problems, they have names of the form +This directory contains `Problem` specifications for a number of problems. We +use a naming scheme for the problems, they have names of the form `[task-family]_[task]_[specifics]`. Data for all currently supported problems can be generated by calling the main generator binary (`t2t-datagen`). For example: @@ -20,53 +20,51 @@ All tasks produce TFRecord files of `tensorflow.Example` protocol buffers. ## Adding a new problem -1. 
Implement and register a Python generator for the dataset -1. Add a problem specification to `problem_hparams.py` specifying input and - output modalities +To add a new problem, subclass +[`Problem`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/problem.py) +and register it with `@registry.register_problem`. See +[`TranslateEndeWmt8k`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/translate_ende.py) +for an example. -To add a new problem, you first need to create python generators for training -and development data for the problem. The python generators should yield -dictionaries with string keys and values being lists of {int, float, str}. -Here is a very simple generator for a data-set where inputs are lists of 1s with -length upto 100 and targets are lists of length 1 with an integer denoting the -length of the input list. +`Problem`s support data generation, training, and decoding. + +Data generation is handled by `Problem.generate_data` which should produce 2 +datasets, training and dev, which should be named according to +`Problem.training_filepaths` and `Problem.dev_filepaths`. +`Problem.generate_data` should also produce any other files that may be required +for training/decoding, e.g. a vocabulary file. + +A particularly easy way to implement `Problem.generate_data` for your dataset is +to create 2 Python generators, one for the training data and another for the +dev data, and pass them to `generator_utils.generate_dataset_and_shuffle`. See +[`TranslateEndeWmt8k.generate_data`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/translate_ende.py) +for an example of usage. + +The generators should yield dictionaries with string keys and values being lists +of {int, float, str}. Here is a very simple generator for a data-set where +inputs are lists of 2s with length up to 100 and targets are lists of length 1 +with an integer denoting the length of the input list. ``` def length_generator(nbr_cases): - for _ in xrange(nbr_cases): + for _ in range(nbr_cases): length = np.random.randint(100) + 1 - yield {"inputs": [1] * length, "targets": [length]} + yield {"inputs": [2] * length, "targets": [length]} ``` -Note that our data reader uses 0 for padding, so it is a good idea to never -generate 0s, except if all your examples have the same size (in which case -they'll never be padded anyway) or if you're doing padding on your own (in which -case please use 0s for padding). When adding the python generator function, -please also add unit tests to check if the code runs. +Note that our data reader uses 0 for padding and other parts of the code assume +end-of-string (EOS) is 1, so it is a good idea to never generate 0s or 1s, +except if all your examples have the same size (in which case they'll never be +padded anyway) or if you're doing padding on your own (in which case please use +0s for padding). When adding the python generator function, please also add unit +tests to check if the code runs. The generator can do arbitrary setup before beginning to yield examples - for example, downloading data, generating vocabulary files, etc. 
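As a rough sketch of how these pieces fit together (the class name and shard counts below are illustrative, and only data generation is shown; see the linked problems for complete definitions), a generator like the one above can be wrapped in a registered `Problem` via `generator_utils.generate_dataset_and_shuffle`:

```
import numpy as np

from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.utils import registry


def length_generator(nbr_cases):
  # Same toy generator as above, repeated so this example is self-contained.
  for _ in range(nbr_cases):
    length = np.random.randint(100) + 1
    yield {"inputs": [2] * length, "targets": [length]}


@registry.register_problem
class ToyLengthProblem(problem.Problem):
  """Hypothetical problem wrapping the toy generator above."""

  def generate_data(self, data_dir, tmp_dir, task_id=-1):
    # Write unshuffled train/dev shards, then shuffle them into final files.
    generator_utils.generate_dataset_and_shuffle(
        length_generator(10000),
        self.training_filepaths(data_dir, 10, shuffled=False),
        length_generator(1000),
        self.dev_filepaths(data_dir, 1, shuffled=False))
```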
Some examples: -* [Algorithmic generators](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/algorithmic.py) +* [Algorithmic problems](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/algorithmic.py) and their [unit tests](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/algorithmic_test.py) -* [WMT generators](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/wmt.py) - and their [unit tests](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/wmt_test.py) - -When your python generator is ready and tested, add it to the -`_SUPPORTED_PROBLEM_GENERATORS` dictionary in the -[data -generator](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/bin/t2t-datagen). -The keys are problem names, and the values are pairs of (training-set-generator -function, dev-set-generator function). For the generator above, one could add -the following lines: - -``` - "algorithmic_length_upto100": - (lambda: algorithmic.length_generator(10000), - lambda: algorithmic.length_generator(1000)), -``` - -Note the lambdas above: we don't want to call the generators too early. - +* [Translation problems (En-De)](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/translate_ende.py) + and their [unit tests](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/translate_test.py) diff --git a/tensor2tensor/data_generators/__init__.py b/tensor2tensor/data_generators/__init__.py index 27d533abc..ff174dd63 100644 --- a/tensor2tensor/data_generators/__init__.py +++ b/tensor2tensor/data_generators/__init__.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensor2tensor/data_generators/algorithmic.py b/tensor2tensor/data_generators/algorithmic.py index 46ebb27a3..42699a42a 100644 --- a/tensor2tensor/data_generators/algorithmic.py +++ b/tensor2tensor/data_generators/algorithmic.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,80 +18,317 @@ from __future__ import division from __future__ import print_function -# Dependency imports - +import os +import shutil import numpy as np +from six.moves import range # pylint: disable=redefined-builtin +from tensor2tensor.data_generators import generator_utils as utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.layers import modalities +from tensor2tensor.utils import metrics +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + + +class AlgorithmicProblem(problem.Problem): + """Base class for algorithmic problems.""" + + @property + def num_symbols(self): + raise NotImplementedError() + + def generator(self, nbr_symbols, max_length, nbr_cases): + """Generates the data.""" + raise NotImplementedError() + + @property + def train_length(self): + return 40 + + @property + def dev_length(self): + return 400 + + @property + def train_size(self): + return 100000 + + @property + def dev_size(self): + return 10000 + + @property + def num_shards(self): + return 10 + + def generate_data(self, data_dir, _, task_id=-1): + + def generator_eos(nbr_symbols, max_length, nbr_cases): + """Shift by NUM_RESERVED_IDS and append EOS token.""" + for case in self.generator(nbr_symbols, max_length, nbr_cases): + new_case = {} + for feature in case: + new_case[feature] = [ + i + text_encoder.NUM_RESERVED_TOKENS for i in case[feature] + ] + [text_encoder.EOS_ID] + yield new_case + + utils.generate_dataset_and_shuffle( + generator_eos(self.num_symbols, self.train_length, self.train_size), + self.training_filepaths(data_dir, self.num_shards, shuffled=True), + generator_eos(self.num_symbols, self.dev_length, self.dev_size), + self.dev_filepaths(data_dir, 1, shuffled=True), + shuffle=False) + + def hparams(self, defaults, unused_model_hparams): + p = defaults + vocab_size = self.num_symbols + text_encoder.NUM_RESERVED_TOKENS + p.modality = {"inputs": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.SYMBOL} + p.vocab_size = {"inputs": vocab_size, + "targets": vocab_size} + p.input_space_id = problem.SpaceID.DIGIT_0 + p.target_space_id = problem.SpaceID.DIGIT_1 + + +@registry.register_problem +class AlgorithmicIdentityBinary40(AlgorithmicProblem): + """Problem spec for algorithmic binary identity task.""" + + @property + def num_symbols(self): + return 2 + + def generator(self, nbr_symbols, max_length, nbr_cases): + """Generator for the identity (copy) task on sequences of symbols. + + The length of the sequence is drawn uniformly at random from [1, max_length] + and then symbols are drawn uniformly at random from [0, nbr_symbols) until + nbr_cases sequences have been produced. + + Args: + nbr_symbols: number of symbols to use in each sequence. + max_length: integer, maximum length of sequences to generate. + nbr_cases: the number of cases to generate. + + Yields: + A dictionary {"inputs": input-list, "targets": target-list} where + input-list and target-list are the same. 
+ """ + for _ in range(nbr_cases): + l = np.random.randint(max_length) + 1 + inputs = [np.random.randint(nbr_symbols) for _ in range(l)] + yield {"inputs": inputs, "targets": inputs} + + +@registry.register_problem +class AlgorithmicIdentityDecimal40(AlgorithmicIdentityBinary40): + """Problem spec for algorithmic decimal identity task.""" + + @property + def num_symbols(self): + return 10 -from six.moves import xrange # pylint: disable=redefined-builtin +@registry.register_problem +class AlgorithmicIdentityVocab95Train20Eval30(AlgorithmicIdentityBinary40): + """Problem spec for algorithmic decimal identity task.""" + + @property + def num_symbols(self): + return 95 + + @property + def train_length(self): + return 20 + + @property + def dev_length(self): + return 30 + + @property + def train_size(self): + return 1000000 + + +@registry.register_problem +class AlgorithmicShiftDecimal40(AlgorithmicProblem): + """Problem spec for algorithmic decimal shift task.""" + + @property + def num_symbols(self): + return 20 + + def generator(self, nbr_symbols, max_length, nbr_cases): + """Generator for the shift task on sequences of symbols. + + The length of the sequence is drawn uniformly at random from [1, max_length] + and then symbols are drawn uniformly at random from [0, nbr_symbols - shift] + until nbr_cases sequences have been produced (output[i] = input[i] + shift). + + Args: + nbr_symbols: number of symbols to use in each sequence (input + output). + max_length: integer, maximum length of sequences to generate. + nbr_cases: the number of cases to generate. -def identity_generator(nbr_symbols, max_length, nbr_cases): - """Generator for the identity (copy) task on sequences of symbols. + Yields: + A dictionary {"inputs": input-list, "targets": target-list} where + target-list[i] = input-list[i] + shift. + """ + shift = 10 + for _ in range(nbr_cases): + l = np.random.randint(max_length) + 1 + inputs = [np.random.randint(nbr_symbols - shift) for _ in range(l)] + yield {"inputs": inputs, "targets": [i + shift for i in inputs]} - The length of the sequence is drawn uniformly at random from [1, max_length] - and then symbols are drawn uniformly at random from [1, nbr_symbols] until - nbr_cases sequences have been produced. + @property + def dev_length(self): + return 80 - Args: - nbr_symbols: number of symbols to use in each sequence. - max_length: integer, maximum length of sequences to generate. - nbr_cases: the number of cases to generate. - Yields: - A dictionary {"inputs": input-list, "targets": target-list} where - input-list and target-list are the same. - """ - for _ in xrange(nbr_cases): - l = np.random.randint(max_length) + 1 - inputs = [np.random.randint(nbr_symbols) + 1 for _ in xrange(l)] - yield {"inputs": inputs, "targets": inputs} +@registry.register_problem +class AlgorithmicReverseBinary40(AlgorithmicProblem): + """Problem spec for algorithmic binary reversing task.""" + + @property + def num_symbols(self): + return 2 + + def generator(self, nbr_symbols, max_length, nbr_cases): + """Generator for the reversing task on sequences of symbols. + + The length of the sequence is drawn uniformly at random from [1, max_length] + and then symbols are drawn uniformly at random from [0, nbr_symbols) until + nbr_cases sequences have been produced. + + Args: + nbr_symbols: number of symbols to use in each sequence. + max_length: integer, maximum length of sequences to generate. + nbr_cases: the number of cases to generate. 
+ Yields: + A dictionary {"inputs": input-list, "targets": target-list} where + target-list is input-list reversed. + """ + for _ in range(nbr_cases): + l = np.random.randint(max_length) + 1 + inputs = [np.random.randint(nbr_symbols) for _ in range(l)] + yield {"inputs": inputs, "targets": list(reversed(inputs))} -def shift_generator(nbr_symbols, shift, max_length, nbr_cases): - """Generator for the shift task on sequences of symbols. - The length of the sequence is drawn uniformly at random from [1, max_length] - and then symbols are drawn uniformly at random from [1, nbr_symbols - shift] - until nbr_cases sequences have been produced (output[i] = input[i] + shift). +@registry.register_problem +class AlgorithmicReverseDecimal40(AlgorithmicReverseBinary40): + """Problem spec for algorithmic decimal reversing task.""" + + @property + def num_symbols(self): + return 10 + + +def zipf_distribution(nbr_symbols, alpha): + """Helper function: Create a Zipf distribution. Args: - nbr_symbols: number of symbols to use in each sequence (input + output). - shift: by how much to shift the input. - max_length: integer, maximum length of sequences to generate. - nbr_cases: the number of cases to generate. + nbr_symbols: number of symbols to use in the distribution. + alpha: float, Zipf's Law Distribution parameter. Default = 1.5. + Usually for modelling natural text distribution is in + the range [1.1-1.6]. + + Returns: + distr_map: list of float, Zipf's distribution over nbr_symbols. - Yields: - A dictionary {"inputs": input-list, "targets": target-list} where - target-list[i] = input-list[i] + shift. """ - for _ in xrange(nbr_cases): - l = np.random.randint(max_length) + 1 - inputs = [np.random.randint(nbr_symbols - shift) + 1 for _ in xrange(l)] - yield {"inputs": inputs, "targets": [i + shift for i in inputs]} + tmp = np.power(np.arange(1, nbr_symbols + 1), -alpha) + zeta = np.r_[0.0, np.cumsum(tmp)] + return [x / zeta[-1] for x in zeta] + +def zipf_random_sample(distr_map, sample_len): + """Helper function: Generate a random Zipf sample of given length. -def reverse_generator(nbr_symbols, max_length, nbr_cases): - """Generator for the reversing task on sequences of symbols. + Args: + distr_map: list of float, Zipf's distribution over nbr_symbols. + sample_len: integer, length of sequence to generate. + + Returns: + sample: list of integer, Zipf's random sample over nbr_symbols. - The length of the sequence is drawn uniformly at random from [1, max_length] - and then symbols are drawn uniformly at random from [1, nbr_symbols] until + """ + u = np.random.random(sample_len) + # Random produces values in range [0.0,1.0); even if it is almost + # improbable(but possible) that it can generate a clear 0.000..0. + return list(np.searchsorted(distr_map, u)) + + +def reverse_generator_nlplike(nbr_symbols, + max_length, + nbr_cases, + scale_std_dev=100, + alpha=1.5): + """Generator for the reversing nlp-like task on sequences of symbols. + + The length of the sequence is drawn from a Gaussian(Normal) distribution + at random from [1, max_length] and with std deviation of 1%, + then symbols are drawn from Zipf's law at random from [0, nbr_symbols) until nbr_cases sequences have been produced. Args: - nbr_symbols: number of symbols to use in each sequence. + nbr_symbols: integer, number of symbols. max_length: integer, maximum length of sequences to generate. nbr_cases: the number of cases to generate. 
+ scale_std_dev: float, Normal distribution's standard deviation scale factor + used to draw the length of sequence. Default = 1% of the max_length. + alpha: float, Zipf's Law Distribution parameter. Default = 1.5. + Usually for modelling natural text distribution is in + the range [1.1-1.6]. Yields: A dictionary {"inputs": input-list, "targets": target-list} where target-list is input-list reversed. """ - for _ in xrange(nbr_cases): - l = np.random.randint(max_length) + 1 - inputs = [np.random.randint(nbr_symbols) + 1 for _ in xrange(l)] + std_dev = max_length / scale_std_dev + distr_map = zipf_distribution(nbr_symbols, alpha) + for _ in range(nbr_cases): + l = int(abs(np.random.normal(loc=max_length / 2, scale=std_dev)) + 1) + inputs = zipf_random_sample(distr_map, l) yield {"inputs": inputs, "targets": list(reversed(inputs))} +@registry.register_problem +class AlgorithmicReverseNlplike8k(AlgorithmicProblem): + """Problem spec for algorithmic nlp-like reversing task.""" + + @property + def num_symbols(self): + return 8000 + + def generator(self, nbr_symbols, max_length, nbr_cases): + return reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases, 10, + 1.300) + + @property + def train_length(self): + return 70 + + @property + def dev_length(self): + return 70 + + +@registry.register_problem +class AlgorithmicReverseNlplike32k(AlgorithmicReverseNlplike8k): + """Problem spec for algorithmic nlp-like reversing task, 32k vocab.""" + + @property + def num_symbols(self): + return 32000 + + def generator(self, nbr_symbols, max_length, nbr_cases): + return reverse_generator_nlplike(nbr_symbols, max_length, nbr_cases, 10, + 1.050) + + def lower_endian_to_number(l, base): """Helper function: convert a list of digits in the given base to a number.""" return sum([d * (base**i) for i, d in enumerate(l)]) @@ -107,72 +345,222 @@ def random_number_lower_endian(length, base): """Helper function: generate a random number as a lower-endian digits list.""" if length == 1: # Last digit can be 0 only if length is 1. return [np.random.randint(base)] - prefix = [np.random.randint(base) for _ in xrange(length - 1)] + prefix = [np.random.randint(base) for _ in range(length - 1)] return prefix + [np.random.randint(base - 1) + 1] # Last digit is not 0. -def addition_generator(base, max_length, nbr_cases): - """Generator for the addition task. - - The length of each number is drawn uniformly at random from [1, max_length/2] - and then digits are drawn uniformly at random. The numbers are added and - separated by [base+1] in the input. Stops at nbr_cases. - - Args: - base: in which base are the numbers. - max_length: integer, maximum length of sequences to generate. - nbr_cases: the number of cases to generate. +@registry.register_problem +class AlgorithmicAdditionBinary40(AlgorithmicProblem): + """Problem spec for algorithmic binary addition task.""" + + @property + def num_symbols(self): + return 2 + + def generator(self, base, max_length, nbr_cases): # pylint: disable=arguments-differ + """Generator for the addition task. + + The length of each number is drawn uniformly at random in [1, max_length/2] + and then digits are drawn uniformly at random. The numbers are added and + separated by [base] in the input. Stops at nbr_cases. + + Args: + base: in which base are the numbers. + max_length: integer, maximum length of sequences to generate. + nbr_cases: the number of cases to generate. 
+ + Yields: + A dictionary {"inputs": input-list, "targets": target-list} where + input-list are the 2 numbers and target-list is the result of adding them. + + Raises: + ValueError: if max_length is lower than 3. + """ + if max_length < 3: + raise ValueError("Maximum length must be at least 3.") + for _ in range(nbr_cases): + l1 = np.random.randint(max_length // 2) + 1 + l2 = np.random.randint(max_length - l1 - 1) + 1 + n1 = random_number_lower_endian(l1, base) + n2 = random_number_lower_endian(l2, base) + result = lower_endian_to_number(n1, base) + lower_endian_to_number( + n2, base) + inputs = n1 + [base] + n2 + targets = number_to_lower_endian(result, base) + yield {"inputs": inputs, "targets": targets} + + +@registry.register_problem +class AlgorithmicAdditionDecimal40(AlgorithmicAdditionBinary40): + """Problem spec for algorithmic decimal addition task.""" + + @property + def num_symbols(self): + return 10 + + +@registry.register_problem +class AlgorithmicMultiplicationBinary40(AlgorithmicProblem): + """Problem spec for algorithmic binary multiplication task.""" + + @property + def num_symbols(self): + return 2 + + def generator(self, base, max_length, nbr_cases): # pylint: disable=arguments-differ + """Generator for the multiplication task. + + The length of each number is drawn uniformly at random in [1, max_length/2] + and then digits are drawn uniformly at random. The numbers are multiplied + and separated by [base] in the input. Stops at nbr_cases. + + Args: + base: in which base are the numbers. + max_length: integer, maximum length of sequences to generate. + nbr_cases: the number of cases to generate. + + Yields: + A dictionary {"inputs": input-list, "targets": target-list} where + input-list are the 2 numbers and target-list is the result of multiplying + them. + + Raises: + ValueError: if max_length is lower than 3. 
+ """ + if max_length < 3: + raise ValueError("Maximum length must be at least 3.") + for _ in range(nbr_cases): + l1 = np.random.randint(max_length // 2) + 1 + l2 = np.random.randint(max_length - l1 - 1) + 1 + n1 = random_number_lower_endian(l1, base) + n2 = random_number_lower_endian(l2, base) + result = lower_endian_to_number(n1, base) * lower_endian_to_number( + n2, base) + inputs = n1 + [base] + n2 + targets = number_to_lower_endian(result, base) + yield {"inputs": inputs, "targets": targets} + + +@registry.register_problem +class AlgorithmicMultiplicationDecimal40(AlgorithmicMultiplicationBinary40): + """Problem spec for algorithmic decimal multiplication task.""" + + @property + def num_symbols(self): + return 10 + + +@registry.register_problem +class AlgorithmicReverseBinary40Test(AlgorithmicReverseBinary40): + """Test Problem with tiny dataset.""" + + @property + def train_length(self): + return 10 + + @property + def dev_length(self): + return 10 + + @property + def train_size(self): + return 1000 + + @property + def dev_size(self): + return 100 + + @property + def num_shards(self): + return 1 + + +@registry.register_problem +class AlgorithmicSortProblem(AlgorithmicProblem): + """Problem spec for sorting numbers.""" + + @property + def num_symbols(self): + return max(self.train_length, self.dev_length) + + @property + def train_length(self): + return 10 + + @property + def dev_length(self): + return self.train_length * 2 + + @property + def unique(self): + """Unique numbers wo/ replacement or w/ replacement in sorting task.""" + return False + + def generator(self, nbr_symbols, max_length, nbr_cases): + """Generating for sorting task on sequence of symbols. + + The length of the sequence is drawn uniformly at random from [1, max_length] + and then symbols are drawn (uniquely w/ or w/o replacement) uniformly at + random from [0, nbr_symbols) until nbr_cases sequences have been produced. + + Args: + nbr_symbols: number of symbols to use in each sequence. + max_length: integer, maximum length of sequences to generate. + nbr_cases: the number of cases to generate. + + Yields: + A dictionary {"inputs": input-list, "targets": target-list} where + target-list is input-list sorted. + """ + for _ in range(nbr_cases): + # Sample the sequence length. + length = np.random.randint(max_length) + 1 + + if self.unique: + # Sample our inputs w/o replacement. + inputs = np.arange(nbr_symbols) + np.random.shuffle(inputs) + + # Truncate to the desired length. + inputs = inputs[:length] + inputs = list(inputs) + else: + inputs = list(np.random.randint(nbr_symbols, size=length)) + + # Targets are simply the sorted inputs. + targets = list(sorted(inputs)) + + yield {"inputs": inputs, "targets": targets} + + def eval_metrics(self): + defaults = super(AlgorithmicSortProblem, self).eval_metrics() + return defaults + [metrics.Metrics.EDIT_DISTANCE] - Yields: - A dictionary {"inputs": input-list, "targets": target-list} where - input-list are the 2 numbers and target-list is the result of adding them. - Raises: - ValueError: if max_length is lower than 3. - """ - if max_length < 3: - raise ValueError("Maximum length must be at least 3.") - for _ in xrange(nbr_cases): - l1 = np.random.randint(max_length // 2) + 1 - l2 = np.random.randint(max_length - l1 - 1) + 1 - n1 = random_number_lower_endian(l1, base) - n2 = random_number_lower_endian(l2, base) - result = lower_endian_to_number(n1, base) + lower_endian_to_number(n2, base) - # We shift digits by 1 on input and output to leave 0 for padding. 
- inputs = [i + 1 for i in n1] + [base + 1] + [i + 1 for i in n2] - targets = [i + 1 for i in number_to_lower_endian(result, base)] - yield {"inputs": inputs, "targets": targets} - - -def multiplication_generator(base, max_length, nbr_cases): - """Generator for the multiplication task. - - The length of each number is drawn uniformly at random from [1, max_length/2] - and then digits are drawn uniformly at random. The numbers are multiplied - and separated by [base+1] in the input. Stops at nbr_cases. +@registry.register_problem +class TinyAlgo(AlgorithmicIdentityBinary40): + """A small algorthmic problem for testing.""" - Args: - base: in which base are the numbers. - max_length: integer, maximum length of sequences to generate. - nbr_cases: the number of cases to generate. + def generate_data(self, data_dir, tmp_dir, task_id=-1): + """Ganerate data for this problem.""" - Yields: - A dictionary {"inputs": input-list, "targets": target-list} where - input-list are the 2 numbers and target-list is the result of multiplying - them. + del tmp_dir, task_id + identity_problem = AlgorithmicIdentityBinary40() + utils.generate_files( + identity_problem.generator(self.num_symbols, 40, 100000), + self.training_filepaths(data_dir, 1, shuffled=True), 100) + utils.generate_files( + identity_problem.generator(self.num_symbols, 400, 10000), + self.dev_filepaths(data_dir, 1, shuffled=True), 100) + + @classmethod + def setup_for_test(cls): + """Setup directories and files required to run the problem.""" + + tmp_dir = tf.test.get_temp_dir() + shutil.rmtree(tmp_dir) + os.mkdir(tmp_dir) + cls.data_dir = tmp_dir - Raises: - ValueError: if max_length is lower than 3. - """ - if max_length < 3: - raise ValueError("Maximum length must be at least 3.") - for _ in xrange(nbr_cases): - l1 = np.random.randint(max_length // 2) + 1 - l2 = np.random.randint(max_length - l1 - 1) + 1 - n1 = random_number_lower_endian(l1, base) - n2 = random_number_lower_endian(l2, base) - result = lower_endian_to_number(n1, base) * lower_endian_to_number(n2, base) - # We shift digits by 1 on input and output to leave 0 for padding. - inputs = [i + 1 for i in n1] + [base + 1] + [i + 1 for i in n2] - targets = [i + 1 for i in number_to_lower_endian(result, base)] - yield {"inputs": inputs, "targets": targets} + # Generate a small test dataset + cls().generate_data(TinyAlgo.data_dir, None) diff --git a/tensor2tensor/data_generators/algorithmic_math.py b/tensor2tensor/data_generators/algorithmic_math.py index 932c080e1..85b77e31c 100644 --- a/tensor2tensor/data_generators/algorithmic_math.py +++ b/tensor2tensor/data_generators/algorithmic_math.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +15,6 @@ """Algorithmic data generators for symbolic math tasks. -See go/symbolic-math-dataset """ from __future__ import absolute_import @@ -23,11 +23,8 @@ from collections import namedtuple import random - -# Dependency imports - import six -from six.moves import xrange # pylint: disable=redefined-builtin +from six.moves import range # pylint: disable=redefined-builtin import sympy @@ -161,8 +158,6 @@ def random_expr(depth, vlist, ops): def algebra_inverse_solve(left, right, var, solve_ops): """Solves for the value of the given var in an expression. - See go/symbolic-math-dataset. 
- Args: left: The root of the ExprNode tree on the left side of the equals sign. right: The root of the ExprNode tree on the right side of the equals sign. @@ -180,7 +175,7 @@ def algebra_inverse_solve(left, right, var, solve_ops): right- Expression on the right side of the op. to_tree- The tree on the other side of the equal sign. The canceled out expression will be moved here. - new_from_tree- The resuling from_tree after the algebraic + new_from_tree- The resulting from_tree after the algebraic manipulation. new_to_tree- The resulting to_tree after the algebraic manipulation. @@ -245,8 +240,6 @@ def generate_algebra_inverse_sample(vlist, ops, solve_ops, min_depth, Given an input equation and variable, produce the expression equal to the variable. - See go/symbolic-math-dataset. - Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. @@ -277,8 +270,7 @@ def generate_algebra_inverse_sample(vlist, ops, solve_ops, min_depth, left_str = str(left) right_str = str(right) target = str(algebra_inverse_solve(left, right, var, solve_ops)) - sample = var + ":" + left_str + "=" + right_str - + sample = "%s:%s=%s" % (var, left_str, right_str) return sample, target @@ -287,8 +279,6 @@ def generate_algebra_simplify_sample(vlist, ops, min_depth, max_depth): Given an input expression, produce the simplified expression. - See go/symbolic-math-dataset. - Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. @@ -315,8 +305,6 @@ def generate_calculus_integrate_sample(vlist, ops, min_depth, max_depth, Given an input expression, produce the indefinite integral. - See go/symbolic-math-dataset. - Args: vlist: Variable list. List of chars that can be used in the expression. ops: List of ExprOp instances. The allowed operators for the expression. @@ -348,15 +336,15 @@ def generate_calculus_integrate_sample(vlist, ops, min_depth, max_depth, # AlgebraConfig holds objects required to generate the algebra inverse -# dataset. See go/symbolic-math-dataset. +# dataset. # vlist: Variable list. A list of chars. # dlist: Numberical digit list. A list of chars. # flist: List of special function names. A list of chars. # functions: Dict of special function names. Maps human readable string names to # single char names used in flist. # ops: Dict mapping op symbols (chars) to ExprOp instances. -# solve_ops: Encodes rules for how to algebraicly cancel out each operation. See -# doc-string for `algebra_inverse_solve`. +# solve_ops: Encodes rules for how to algebraically cancel out each operation. +# See doc-string for `algebra_inverse_solve`. # int_encoder: Function that maps a string to a list of tokens. Use this to # encode an expression to feed into a model. # int_decoder: Function that maps a list of tokens to a string. Use this to @@ -370,14 +358,12 @@ def generate_calculus_integrate_sample(vlist, ops, min_depth, max_depth, def math_dataset_init(alphabet_size=26, digits=None, functions=None): """Initializes required objects to generate symbolic math datasets. - See go/symbolic-math-dataset. - Produces token set, ExprOp instances, solve_op dictionary, encoders, and decoders needed to generate the algebra inverse dataset. Args: alphabet_size: How many possible variables there are. Max 52. 
- digits: How many numerical digits to encode as tokens, "0" throuh + digits: How many numerical digits to encode as tokens, "0" through str(digits-1), or None to encode no digits. functions: Defines special functions. A dict mapping human readable string names, like "log", "exp", "sin", "cos", etc., to single chars. Each @@ -409,8 +395,8 @@ def math_dataset_init(alphabet_size=26, digits=None, functions=None): "/r": lambda l, r, to: (r, ExprNode(l, to, ops["/"])), } alphabet = ( - [six.int2byte(ord("a") + c) - for c in range(26)] + [six.int2byte(ord("A") + c) for c in range(26)]) + [six.int2byte(ord("a") + c).decode("utf-8") for c in range(26)] + + [six.int2byte(ord("A") + c).decode("utf-8") for c in range(26)]) if alphabet_size > 52: raise ValueError( "alphabet_size cannot be greater than 52. Got %s." % alphabet_size) @@ -421,7 +407,7 @@ def math_dataset_init(alphabet_size=26, digits=None, functions=None): raise ValueError("digits cannot must be between 1 and 10. Got %s." % digits) vlist = alphabet[:alphabet_size] if digits is not None: - dlist = [str(d) for d in xrange(digits)] + dlist = [str(d) for d in range(digits)] else: dlist = [] if functions is None: @@ -481,7 +467,7 @@ def algebra_inverse(alphabet_size=26, min_depth=0, max_depth=2, "Got max_depth=%s, min_depth=%s" % (max_depth, min_depth)) alg_cfg = math_dataset_init(alphabet_size) - for _ in xrange(nbr_cases): + for _ in range(nbr_cases): sample, target = generate_algebra_inverse_sample( alg_cfg.vlist, list(alg_cfg.ops.values()), alg_cfg.solve_ops, min_depth, max_depth) @@ -522,7 +508,7 @@ def algebra_simplify(alphabet_size=26, "Got max_depth=%s, min_depth=%s" % (max_depth, min_depth)) alg_cfg = math_dataset_init(alphabet_size, digits=5) - for _ in xrange(nbr_cases): + for _ in range(nbr_cases): sample, target = generate_algebra_simplify_sample( alg_cfg.vlist, list(alg_cfg.ops.values()), min_depth, max_depth) yield { @@ -570,11 +556,18 @@ def calculus_integrate(alphabet_size=26, functions = {"log": "L"} alg_cfg = math_dataset_init(alphabet_size, digits=5, functions=functions) - for _ in xrange(nbr_cases): - sample, target = generate_calculus_integrate_sample( - alg_cfg.vlist, - list(alg_cfg.ops.values()), min_depth, max_depth, alg_cfg.functions) - yield { - "inputs": alg_cfg.int_encoder(sample), - "targets": alg_cfg.int_encoder(target) - } + nbr_case = 0 + while nbr_case < nbr_cases: + try: + sample, target = generate_calculus_integrate_sample( + alg_cfg.vlist, + list(alg_cfg.ops.values()), min_depth, max_depth, alg_cfg.functions) + yield { + "inputs": alg_cfg.int_encoder(sample), + "targets": alg_cfg.int_encoder(target) + } + except: # pylint:disable=bare-except + continue + if nbr_case % 10000 == 0: + print(" calculus_integrate: generating case %d." % nbr_case) + nbr_case += 1 diff --git a/tensor2tensor/data_generators/algorithmic_math_deepmind.py b/tensor2tensor/data_generators/algorithmic_math_deepmind.py new file mode 100644 index 000000000..9069e6e00 --- /dev/null +++ b/tensor2tensor/data_generators/algorithmic_math_deepmind.py @@ -0,0 +1,104 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Data generators for the DeepMind Mathematics Dataset. + +See https://github.com/deepmind/mathematics_dataset for the original repository. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tarfile + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +_URL = "https://storage.cloud.google.com/mathematics-dataset/mathematics_dataset-v1.0.tar.gz" + + +@registry.register_problem +class AlgorithmicMathDeepmindAll(text_problems.Text2TextProblem): + """DeepMind Mathematics Problem, v1.0, all data.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 128, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def is_generate_per_split(self): + return True + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Downloads and extracts the dataset and generates examples. + + Args: + data_dir: The base directory where data and vocab files are stored. + tmp_dir: temp directory to download and extract the dataset. + dataset_split: split of the data-set. + + Yields: + The data examples. + """ + # Create directories if needed. + if not tf.gfile.Exists(tmp_dir): + tf.gfile.MakeDirs(tmp_dir) + if not tf.gfile.Exists(data_dir): + tf.gfile.MakeDirs(data_dir) + + # Download and extract the data. + filename = os.path.basename(_URL) + path = generator_utils.maybe_download(tmp_dir, filename, _URL) + tarfile.open(path, "r:gz").extractall(tmp_dir) + + # Create the list of directories with data files. + train_dirs = ["v1.0/train-easy", "v1.0/train-medium", "v1.0/train-hard"] + eval_dirs = ["v1.0/interpolate", "v1.0/extrapolate"] + dirs = eval_dirs + if dataset_split == problem.DatasetSplit.TRAIN: + dirs = train_dirs + dirs = [os.path.join(tmp_dir, d) for d in dirs] + + # Iterate over directories and files generating examples. + for d in dirs: + files = tf.gfile.Glob(d + "/*.txt") + for fname in files: + # In each text file, the first line is the input, the next the answer, + # and so on until the end of the file. + cur_input = None + with tf.gfile.Open(fname, "rb") as f: + for line in f: + if cur_input is None: + cur_input = line.strip() + else: + yield {"inputs": cur_input, "targets": line.strip()} + cur_input = None diff --git a/tensor2tensor/data_generators/algorithmic_math_test.py b/tensor2tensor/data_generators/algorithmic_math_test.py index 6c4b63054..e1583f3f4 100644 --- a/tensor2tensor/data_generators/algorithmic_math_test.py +++ b/tensor2tensor/data_generators/algorithmic_math_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -13,18 +14,16 @@ # limitations under the License. """Tests for tensor2tensor.data_generators.algorithmic_math.""" +# TODO(rsepassi): This test is flaky. Disable, remove, or update. from __future__ import absolute_import from __future__ import division from __future__ import print_function - -# Dependency imports - import six import sympy from tensor2tensor.data_generators import algorithmic_math -import tensorflow as tf +import tensorflow.compat.v1 as tf class AlgorithmicMathTest(tf.test.TestCase): diff --git a/tensor2tensor/data_generators/algorithmic_math_two_variables.py b/tensor2tensor/data_generators/algorithmic_math_two_variables.py new file mode 100644 index 000000000..594ef06dc --- /dev/null +++ b/tensor2tensor/data_generators/algorithmic_math_two_variables.py @@ -0,0 +1,133 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Data generators for the Mathematical Language Understanding dataset. + +The training and test data were generated by assigning symbolic variables +either positive or negative decimal integers and then describing the algebraic +operation to perform. We restrict our variable assignments to the range +x,y->[-1000,1000) and the operations to the set {+,-,*}. To ensure that the +model embraces symbolic variables, the order in which x and y appears in the +expression is randomly chosen. For instance, an input string contrasting from +the example shown above might be y=129,x=531,x-y. Each input string is +accompanied by its target string, which is the evaluation of the mathematical +expression. For this study, all targets considered are decimal integers +represented at the character level. About 12 million unique samples were thus +generated and randomly split into training and test sets at an approximate +ratio of 9:1, respectively. + +Example lines from training file: +y=691,x=-999,y*x:-690309 +y=210,x=-995,y+x:-785 +x=-995,y=210,x*x:990025 + +For more information check the following paper: +Artit Wangperawong. Attending to Mathematical Language with Transformers, +arXiv:1812.02825 (https://arxiv.org/abs/1812.02825). +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tarfile +import requests + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +_URL = ("https://art.wangperawong.com/mathematical_language_understanding" + "_train.tar.gz") + + +def _download_mlu_data(tmp_dir, data_dir): + """Downloads and extracts the dataset. + + Args: + tmp_dir: temp directory to download and extract the dataset + data_dir: The base directory where data and vocab files are stored. + + Returns: + tmp_dir: temp directory containing the raw data. 
+ """ + if not tf.gfile.Exists(data_dir): + tf.gfile.MakeDirs(data_dir) + + filename = os.path.basename(_URL) + file_path = os.path.join(tmp_dir, filename) + headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) " + "AppleWebKit/537.36 (KHTML, like Gecko) " + "Chrome/63.0.3239.132 Safari/537.36"} + resp = requests.get(_URL, headers=headers) + with open(file_path, "wb") as f: + f.write(resp.content) + + with tarfile.open(file_path, "r:gz") as tar: + tar.extractall(tmp_dir) + + return tmp_dir + + +@registry.register_problem +class AlgorithmicMathTwoVariables(text_problems.Text2TextProblem): + """Mathematical language understanding, see arxiv.org/abs/1812.02825.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def is_generate_per_split(self): + return False + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Downloads and extracts the dataset and generates examples. + + Args: + data_dir: The base directory where data and vocab files are stored. + tmp_dir: temp directory to download and extract the dataset. + dataset_split: split of the data-set. + + Yields: + The data examples. + """ + if not tf.gfile.Exists(tmp_dir): + tf.gfile.MakeDirs(tmp_dir) + + if not tf.gfile.Exists(data_dir): + tf.gfile.MakeDirs(data_dir) + + # Download and extract. + download_path = _download_mlu_data(tmp_dir, data_dir) + filepath = os.path.join(download_path, "symbolic_math_train.txt") + with open(filepath, "r") as fp: + for l in fp: + prob, ans = l.strip().split(":") + yield {"inputs": prob, "targets": ans} diff --git a/tensor2tensor/data_generators/algorithmic_test.py b/tensor2tensor/data_generators/algorithmic_test.py index 7bc2fb5bb..28bc21923 100644 --- a/tensor2tensor/data_generators/algorithmic_test.py +++ b/tensor2tensor/data_generators/algorithmic_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,26 +18,42 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function - -# Dependency imports +from six.moves import range # pylint: disable=redefined-builtin from tensor2tensor.data_generators import algorithmic -import tensorflow as tf +import tensorflow.compat.v1 as tf class AlgorithmicTest(tf.test.TestCase): def testIdentityGenerator(self): + identity_problem = algorithmic.AlgorithmicIdentityBinary40() counter = 0 - for d in algorithmic.identity_generator(3, 8, 10): + for d in identity_problem.generator(3, 8, 10): counter += 1 self.assertEqual(d["inputs"], d["targets"]) self.assertEqual(counter, 10) def testReverseGenerator(self): + reversing_problem = algorithmic.AlgorithmicReverseBinary40() counter = 0 - for d in algorithmic.reverse_generator(3, 8, 10): + for d in reversing_problem.generator(3, 8, 10): + counter += 1 + self.assertEqual(list(reversed(d["inputs"])), d["targets"]) + self.assertEqual(counter, 10) + + def testZipfDistribution(self): + # Following Zipf's Law with alpha equals 1: the first in rank is two times + # more probable/frequent that the second in rank, three times more prob/freq + # that the third in rank and so on. 
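+ # Note: zipf_distribution returns a cumulative distribution (d[0] == 0.0), so
+ # the difference between consecutive entries is the probability of a single
+ # symbol, and rank * probability should be roughly constant when alpha ~= 1.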
+ d = algorithmic.zipf_distribution(10, 1.0001) + for i in range(len(d[1:])-1): + self.assertEqual("%.4f" % (abs(d[i+1]-d[i+2])*(i+2)), "%.4f" % d[1]) + + def testReverseGeneratorNlpLike(self): + counter = 0 + for d in algorithmic.reverse_generator_nlplike(3, 8, 10): counter += 1 self.assertEqual(list(reversed(d["inputs"])), d["targets"]) self.assertEqual(counter, 10) @@ -60,23 +77,33 @@ def testNumberToLowerEndian(self): self.assertEqual(algorithmic.number_to_lower_endian(2137, 10), [7, 3, 1, 2]) def testAdditionGenerator(self): + addition_problem = algorithmic.AlgorithmicAdditionBinary40() counter = 0 - for d in algorithmic.addition_generator(4, 8, 10): + for d in addition_problem.generator(4, 8, 10): counter += 1 - self.assertEqual(d["inputs"].count(5), 1) - self.assertEqual(d["inputs"].count(0), 0) + self.assertEqual(d["inputs"].count(4), 1) + self.assertEqual(d["inputs"].count(5), 0) + self.assertEqual(d["targets"].count(4), 0) self.assertEqual(d["targets"].count(5), 0) - self.assertEqual(d["targets"].count(0), 0) self.assertEqual(counter, 10) def testMultiplicationGenerator(self): + multiplication_problem = algorithmic.AlgorithmicMultiplicationBinary40() counter = 0 - for d in algorithmic.multiplication_generator(4, 8, 10): + for d in multiplication_problem.generator(4, 8, 10): counter += 1 - self.assertEqual(d["inputs"].count(5), 1) - self.assertEqual(d["inputs"].count(0), 0) + self.assertEqual(d["inputs"].count(4), 1) + self.assertEqual(d["inputs"].count(5), 0) + self.assertEqual(d["targets"].count(4), 0) self.assertEqual(d["targets"].count(5), 0) - self.assertEqual(d["targets"].count(0), 0) + self.assertEqual(counter, 10) + + def testSortGenerator(self): + sort_problem = algorithmic.AlgorithmicSortProblem() + counter = 0 + for d in sort_problem.generator(10, 10, 10): + counter += 1 + self.assertEqual(list(sorted(d["inputs"])), d["targets"]) self.assertEqual(counter, 10) diff --git a/tensor2tensor/data_generators/all_problems.py b/tensor2tensor/data_generators/all_problems.py new file mode 100644 index 000000000..253a8e331 --- /dev/null +++ b/tensor2tensor/data_generators/all_problems.py @@ -0,0 +1,147 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Imports for problem modules.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import importlib +import six +from six.moves import range # pylint: disable=redefined-builtin + +MODULES = [ + "tensor2tensor.data_generators.algorithmic", + "tensor2tensor.data_generators.algorithmic_math", + "tensor2tensor.data_generators.algorithmic_math_deepmind", + "tensor2tensor.data_generators.algorithmic_math_two_variables", + "tensor2tensor.data_generators.allen_brain", + "tensor2tensor.data_generators.audio", + "tensor2tensor.data_generators.babi_qa", + "tensor2tensor.data_generators.bair_robot_pushing", + "tensor2tensor.data_generators.celeba", + "tensor2tensor.data_generators.celebahq", + "tensor2tensor.data_generators.cifar", + "tensor2tensor.data_generators.cipher", + "tensor2tensor.data_generators.cnn_dailymail", + "tensor2tensor.data_generators.cola", + "tensor2tensor.data_generators.common_voice", + "tensor2tensor.data_generators.desc2code", + "tensor2tensor.data_generators.dialog_cornell", + "tensor2tensor.data_generators.dialog_dailydialog", + "tensor2tensor.data_generators.dialog_opensubtitles", + "tensor2tensor.data_generators.dialog_personachat", + "tensor2tensor.data_generators.enwik8", + "tensor2tensor.data_generators.fsns", + "tensor2tensor.data_generators.function_docstring", + "tensor2tensor.data_generators.gene_expression", + "tensor2tensor.data_generators.google_robot_pushing", + "tensor2tensor.data_generators.gym_env", + "tensor2tensor.data_generators.ice_parsing", + "tensor2tensor.data_generators.imagenet", + "tensor2tensor.data_generators.image_lsun", + "tensor2tensor.data_generators.imdb", + "tensor2tensor.data_generators.lambada", + "tensor2tensor.data_generators.librispeech", + "tensor2tensor.data_generators.lm1b", + "tensor2tensor.data_generators.lm1b_imdb", + "tensor2tensor.data_generators.lm1b_mnli", + "tensor2tensor.data_generators.mnist", + "tensor2tensor.data_generators.moving_mnist", + "tensor2tensor.data_generators.mrpc", + "tensor2tensor.data_generators.mscoco", + "tensor2tensor.data_generators.multinli", + "tensor2tensor.data_generators.paraphrase_ms_coco", + "tensor2tensor.data_generators.program_search", + "tensor2tensor.data_generators.ocr", + "tensor2tensor.data_generators.pointer_generator_word", + "tensor2tensor.data_generators.problem_hparams", + "tensor2tensor.data_generators.ptb", + "tensor2tensor.data_generators.qnli", + "tensor2tensor.data_generators.quora_qpairs", + "tensor2tensor.data_generators.rte", + "tensor2tensor.data_generators.scitail", + "tensor2tensor.data_generators.seq2edits", + "tensor2tensor.data_generators.snli", + "tensor2tensor.data_generators.stanford_nli", + "tensor2tensor.data_generators.style_transfer", + "tensor2tensor.data_generators.squad", + "tensor2tensor.data_generators.sst_binary", + "tensor2tensor.data_generators.subject_verb_agreement", + "tensor2tensor.data_generators.timeseries", + "tensor2tensor.data_generators.transduction_problems", + "tensor2tensor.data_generators.translate_encs_cubbitt", + "tensor2tensor.data_generators.translate_encs", + "tensor2tensor.data_generators.translate_ende", + "tensor2tensor.data_generators.translate_enes", + "tensor2tensor.data_generators.translate_enet", + "tensor2tensor.data_generators.translate_enfr", + "tensor2tensor.data_generators.translate_enid", + "tensor2tensor.data_generators.translate_enmk", + "tensor2tensor.data_generators.translate_envi", + "tensor2tensor.data_generators.translate_enzh", + 
"tensor2tensor.data_generators.video_generated", + "tensor2tensor.data_generators.vqa", + "tensor2tensor.data_generators.wiki", + "tensor2tensor.data_generators.wiki_lm", + "tensor2tensor.data_generators.wiki_revision", + "tensor2tensor.data_generators.wiki_multi_problems", + "tensor2tensor.data_generators.wikisum.wikisum", + "tensor2tensor.data_generators.wikitext103", + "tensor2tensor.data_generators.wsj_parsing", + "tensor2tensor.data_generators.wnli", + "tensor2tensor.data_generators.yelp_polarity", + "tensor2tensor.data_generators.yelp_full", + "tensor2tensor.envs.mujoco_problems", + "tensor2tensor.envs.tic_tac_toe_env_problem", +] +ALL_MODULES = list(MODULES) + + + +def _is_import_err_msg(err_str, module): + parts = module.split(".") + suffixes = [".".join(parts[i:]) for i in range(len(parts))] + prefixes = [".".join(parts[:i]) for i in range(len(parts))] + return err_str in (["No module named %s" % suffix for suffix in suffixes] + + ["No module named '%s'" % suffix for suffix in suffixes] + + ["No module named %s" % prefix for prefix in prefixes] + + ["No module named '%s'" % prefix for prefix in prefixes]) + + +def _handle_errors(errors): + """Log out and possibly reraise errors during import.""" + if not errors: + return + log_all = True # pylint: disable=unused-variable + err_msg = "T2T: skipped importing {num_missing} data_generators modules." + print(err_msg.format(num_missing=len(errors))) + for module, err in errors: + err_str = str(err) + if log_all: + print("Did not import module: %s; Cause: %s" % (module, err_str)) + if not _is_import_err_msg(err_str, module): + print("From module %s" % module) + raise err + + +def import_modules(modules): + errors = [] + for module in modules: + try: + importlib.import_module(module) + except ImportError as error: + errors.append((module, error)) + _handle_errors(errors) diff --git a/tensor2tensor/data_generators/allen_brain.py b/tensor2tensor/data_generators/allen_brain.py new file mode 100644 index 000000000..cc05b4599 --- /dev/null +++ b/tensor2tensor/data_generators/allen_brain.py @@ -0,0 +1,445 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Problem definitions for Allen Brain Atlas problems. + +Notes: + + * TODO(cwbeitel): Want to be able to increase up-sampling ratio and/or + in-paint fraction over the course of training. This could be done by + defining a range of problems or perhaps more aptly with an hparam + that is dialed up depending on training performance. 
+ +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from io import BytesIO +import math +import os + +import numpy as np +import requests + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.layers import modalities +from tensor2tensor.utils import contrib +from tensor2tensor.utils import metrics +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +_BASE_EXAMPLE_IMAGE_SIZE = 64 + + +# A 100 image random subset of non-failed acquisitions of Mouse imaging +# products from Allen Brain Institute (api.brain-map.org) dataset. The +# full set (or a desired subset) of image IDs can be obtained following +# the steps described here: http://help.brain-map.org/display/api, +# e.g. https://gist.github.com/cwbeitel/5dffe90eb561637e35cdf6aa4ee3e704 +_IMAGE_IDS = [ + "74887117", "71894997", "69443979", "79853548", "101371232", "77857182", + "70446772", "68994990", "69141561", "70942310", "70942316", "68298378", + "69690156", "74364867", "77874134", "75925043", "73854431", "69206601", + "71771457", "101311379", "74777533", "70960269", "71604493", "102216720", + "74776437", "75488723", "79815814", "77857132", "77857138", "74952778", + "69068486", "648167", "75703410", "74486118", "77857098", "637407", + "67849516", "69785503", "71547630", "69068504", "69184074", "74853078", + "74890694", "74890698", "75488687", "71138602", "71652378", "68079764", + "70619061", "68280153", "73527042", "69764608", "68399025", "244297", + "69902658", "68234159", "71495521", "74488395", "73923026", "68280155", + "75488747", "69589140", "71342189", "75119214", "79455452", "71774294", + "74364957", "68031779", "71389422", "67937572", "69912671", "73854471", + "75008183", "101371376", "75703290", "69533924", "79853544", "77343882", + "74887133", "332587", "69758622", "69618413", "77929999", "244293", + "334792", "75825136", "75008103", "70196678", "71883965", "74486130", + "74693566", "76107119", "76043858", "70252433", "68928364", "74806345", + "67848661", "75900326", "71773690", "75008171"] + + +def PIL_Image(): # pylint: disable=invalid-name + from PIL import Image # pylint: disable=g-import-not-at-top + return Image + + +def _get_case_file_paths(tmp_dir, case, training_fraction=0.95): + """Obtain a list of image paths corresponding to training or eval case. + + Args: + tmp_dir: str, the root path to which raw images were written, at the + top level having meta/ and raw/ subdirs. + case: bool, whether obtaining file paths for training (true) or eval + (false). + training_fraction: float, the fraction of the sub-image path list to + consider as the basis for training examples. + + Returns: + list: A list of file paths. + + Raises: + ValueError: if images not found in tmp_dir, or if training_fraction would + leave no examples for eval. 
+ """ + + paths = tf.gfile.Glob("%s/*.jpg" % tmp_dir) + + if not paths: + raise ValueError("Search of tmp_dir (%s) " % tmp_dir, + "for subimage paths yielded an empty list, ", + "can't proceed with returning training/eval split.") + + split_index = int(math.floor(len(paths)*training_fraction)) + + if split_index >= len(paths): + raise ValueError("For a path list of size %s " + "and a training_fraction of %s " + "the resulting split_index of the paths list, " + "%s, would leave no elements for the eval " + "condition." % (len(paths), + training_fraction, + split_index)) + + if case: + return paths[:split_index] + else: + return paths[split_index:] + + +def maybe_download_image_dataset(image_ids, target_dir): + """Download a set of images from api.brain-map.org to `target_dir`. + + Args: + image_ids: list, a list of image ids. + target_dir: str, a directory to which to download the images. + """ + + tf.gfile.MakeDirs(target_dir) + + num_images = len(image_ids) + + for i, image_id in enumerate(image_ids): + + destination = os.path.join(target_dir, "%s.jpg" % i) + tmp_destination = "%s.temp" % destination + + source_url = ("http://api.brain-map.org/api/v2/" + "section_image_download/%s" % image_id) + + if tf.gfile.Exists(destination): + tf.logging.info("Image with ID already present, " + "skipping download (%s of %s)." % ( + i+1, num_images + )) + continue + + tf.logging.info("Downloading image with id %s (%s of %s)" % ( + image_id, i+1, num_images + )) + + response = requests.get(source_url, stream=True) + + response.raise_for_status() + + with tf.gfile.Open(tmp_destination, "w") as f: + for block in response.iter_content(1024): + f.write(block) + + tf.gfile.Rename(tmp_destination, destination) + + +def random_square_mask(shape, fraction): + """Create a numpy array with specified shape and masked fraction. + + Args: + shape: tuple, shape of the mask to create. + fraction: float, fraction of the mask area to populate with `mask_scalar`. + + Returns: + numpy.array: A numpy array storing the mask. + """ + + mask = np.ones(shape) + + patch_area = shape[0]*shape[1]*fraction + patch_dim = int(math.floor(math.sqrt(patch_area))) + if patch_area == 0 or patch_dim == 0: + return mask + + x = np.random.randint(shape[0] - patch_dim) + y = np.random.randint(shape[1] - patch_dim) + + mask[x:(x + patch_dim), y:(y + patch_dim), :] = 0 + + return mask + + +def _generator(tmp_dir, training, size=_BASE_EXAMPLE_IMAGE_SIZE, + training_fraction=0.95): + """Base problem example generator for Allen Brain Atlas problems. + + Args: + + tmp_dir: str, a directory where raw example input data has been stored. + training: bool, whether the mode of operation is training (or, + alternatively, evaluation), determining whether examples in tmp_dir + prefixed with train or dev will be used. + size: int, the image size to add to the example annotation. + training_fraction: float, the fraction of the sub-image path list to + consider as the basis for training examples. + + Yields: + A dictionary representing the images with the following fields: + * image/encoded: The string encoding the image as JPEG. + * image/format: The string "jpeg" indicating the image format. + * image/height: The integer indicating the image height. + * image/width: The integer indicating the image height. 
+ + """ + + maybe_download_image_dataset(_IMAGE_IDS, tmp_dir) + + image_files = _get_case_file_paths(tmp_dir=tmp_dir, + case=training, + training_fraction=training_fraction) + + image_obj = PIL_Image() + + tf.logging.info("Loaded case file paths (n=%s)" % len(image_files)) + height = size + width = size + + for input_path in image_files: + + img = image_obj.open(input_path) + img = np.float32(img) + shape = np.shape(img) + + for h_index in range(0, int(math.floor(shape[0]/size))): + + h_offset = h_index * size + h_end = h_offset + size - 1 + + for v_index in range(0, int(math.floor(shape[1]/size))): + + v_offset = v_index * size + v_end = v_offset + size - 1 + + # Extract a sub-image tile. + subimage = np.uint8(img[h_offset:h_end, v_offset:v_end]) # pylint: disable=invalid-sequence-index + + # Filter images that are likely background (not tissue). + if np.amax(subimage) < 230: + continue + + subimage = image_obj.fromarray(subimage) + buff = BytesIO() + subimage.save(buff, format="JPEG") + subimage_encoded = buff.getvalue() + + yield { + "image/encoded": [subimage_encoded], + "image/format": ["jpeg"], + "image/height": [height], + "image/width": [width] + } + + +@registry.register_problem +class Img2imgAllenBrain(problem.Problem): + """Allen Brain Atlas histology dataset. + + See also: http://help.brain-map.org/ + + Notes: + + * 64px to 64px identity mapping problem, no in-painting. + + """ + + @property + def train_shards(self): + return 100 + + @property + def dev_shards(self): + return 10 + + @property + def training_fraction(self): + return 0.95 + + @property + def num_channels(self): + """Number of color channels.""" + return 3 + + @property + def input_dim(self): + """The x and y dimension of the input image.""" + # By default, there is no input image, only a target. + return 64 + + @property + def output_dim(self): + """The x and y dimension of the target image.""" + return 64 + + @property + def inpaint_fraction(self): + """The fraction of the input image to be in-painted.""" + # By default, no in-painting is performed. + return None + + def preprocess_example(self, example, mode, hparams): + + # Crop to target shape instead of down-sampling target, leaving target + # of maximum available resolution. + target_shape = (self.output_dim, self.output_dim, self.num_channels) + example["targets"] = tf.random_crop(example["targets"], target_shape) + + example["inputs"] = image_utils.resize_by_area(example["targets"], + self.input_dim) + + if self.inpaint_fraction is not None and self.inpaint_fraction > 0: + + mask = random_square_mask((self.input_dim, + self.input_dim, + self.num_channels), + self.inpaint_fraction) + + example["inputs"] = tf.multiply( + tf.convert_to_tensor(mask, dtype=tf.int64), + example["inputs"]) + + if self.input_dim is None: + raise ValueError("Cannot train in-painting for examples with " + "only targets (i.e. 
input_dim is None, " + "implying there are only targets to be " + "generated).") + + return example + + def feature_encoders(self, data_dir): + del data_dir + return { + "inputs": text_encoder.ImageEncoder(channels=self.num_channels), + "targets": text_encoder.ImageEncoder(channels=self.num_channels) + } + + def example_reading_spec(self): + data_fields = { + "image/encoded": tf.FixedLenFeature((), tf.string), + "image/format": tf.FixedLenFeature((), tf.string), + } + + data_items_to_decoders = { + "targets": + contrib.slim().tfexample_decoder.Image( + image_key="image/encoded", + format_key="image/format", + channels=self.num_channels), + } + + return data_fields, data_items_to_decoders + + def eval_metrics(self): + eval_metrics = [ + metrics.Metrics.ACC, + metrics.Metrics.ACC_PER_SEQ, + metrics.Metrics.NEG_LOG_PERPLEXITY + ] + return eval_metrics + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + generator_utils.generate_dataset_and_shuffle( + self.generator(tmp_dir, True), + self.training_filepaths(data_dir, self.train_shards, shuffled=True), + self.generator(tmp_dir, False), + self.dev_filepaths(data_dir, self.dev_shards, shuffled=True)) + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.IDENTITY, + "targets": modalities.ModalityType.IDENTITY} + p.vocab_size = {"inputs": 256, + "targets": 256} + p.batch_size_multiplier = 256 + p.input_space_id = problem.SpaceID.IMAGE + p.target_space_id = problem.SpaceID.IMAGE + + def generator(self, tmp_dir, is_training): + if is_training: + return _generator(tmp_dir, True, size=_BASE_EXAMPLE_IMAGE_SIZE, + training_fraction=self.training_fraction) + else: + return _generator(tmp_dir, False, size=_BASE_EXAMPLE_IMAGE_SIZE, + training_fraction=self.training_fraction) + + +@registry.register_problem +class Img2imgAllenBrainDim48to64(Img2imgAllenBrain): + """48px to 64px resolution up-sampling problem.""" + + def dataset_filename(self): + return "img2img_allen_brain" # Reuse base problem data + + @property + def input_dim(self): + return 48 + + @property + def output_dim(self): + return 64 + + +@registry.register_problem +class Img2imgAllenBrainDim8to32(Img2imgAllenBrain): + """8px to 32px resolution up-sampling problem.""" + + def dataset_filename(self): + return "img2img_allen_brain" # Reuse base problem data + + @property + def input_dim(self): + return 8 + + @property + def output_dim(self): + return 32 + + +@registry.register_problem +class Img2imgAllenBrainDim16to16Paint1(Img2imgAllenBrain): + """In-painting problem (1%) with no resolution upsampling.""" + + def dataset_filename(self): + return "img2img_allen_brain" # Reuse base problem data + + @property + def input_dim(self): + return 16 + + @property + def output_dim(self): + return 16 + + @property + def inpaint_fraction(self): + return 0.01 diff --git a/tensor2tensor/data_generators/allen_brain_test.py b/tensor2tensor/data_generators/allen_brain_test.py new file mode 100644 index 000000000..90f87741b --- /dev/null +++ b/tensor2tensor/data_generators/allen_brain_test.py @@ -0,0 +1,291 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests of the Allen Brain Atlas problems.""" + +import os +import shutil +import tempfile + +import numpy as np + +from tensor2tensor.data_generators import allen_brain +from tensor2tensor.models import image_transformer_2d +from tensor2tensor.utils import contrib + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +tfe = contrib.eager() +tfe.enable_eager_execution() +Modes = tf_estimator.ModeKeys # pylint: disable=invalid-name + + +def mock_raw_image(x_dim=1024, y_dim=1024, num_channels=3, + output_path=None, write_image=True): + """Generate random `x_dim` by `y_dim`, optionally to `output_path`. + + Args: + x_dim: int, the x dimension of generated raw image. + y_dim: int, the x dimension of generated raw image. + num_channels: int, number of channels in image. + output_path: str, path to which to write image. + write_image: bool, whether to write the image to output_path. + + Returns: + numpy.array: The random `x_dim` by `y_dim` image (i.e. array). + """ + + rand_shape = (x_dim, y_dim, num_channels) + + if num_channels != 3: + raise NotImplementedError("mock_raw_image for channels != 3 not yet " + "implemented.") + + img = np.random.random(rand_shape) + img = np.uint8(img*255) + + if write_image: + image_obj = allen_brain.PIL_Image() + pil_img = image_obj.fromarray(img, mode="RGB") + with tf.gfile.Open(output_path, "w") as f: + pil_img.save(f, "jpeg") + + return img + + +def mock_raw_data(tmp_dir, raw_dim=1024, num_channels=3, num_images=1): + """Mock a raw data download directory with meta and raw subdirs. + + Notes: + + * This utility is shared by tests in both allen_brain_utils and + allen_brain so kept here instead of in one of *_test. + + Args: + tmp_dir: str, temporary dir in which to mock data. + raw_dim: int, the x and y dimension of generated raw imgs. + num_channels: int, number of channels in image. + num_images: int, number of images to mock. 
+ """ + + tf.gfile.MakeDirs(tmp_dir) + + for image_id in range(num_images): + + raw_image_path = os.path.join(tmp_dir, "%s.jpg" % image_id) + + mock_raw_image(x_dim=raw_dim, y_dim=raw_dim, + num_channels=num_channels, + output_path=raw_image_path) + + +class TemporaryDirectory(object): + """For py2 support of `with tempfile.TemporaryDirectory() as name:`""" + + def __enter__(self): + self.name = tempfile.mkdtemp() + return self.name + + def __exit__(self, exc_type, exc_value, traceback): + shutil.rmtree(self.name) + + +class TestAllenBrain(tf.test.TestCase): + """Tests that are common to all Allen Brain Atlas problems.""" + + def setUp(self): + + self.all_problems = [ + allen_brain.Img2imgAllenBrainDim16to16Paint1 + ] + + def test_generator_produces_examples(self): + """Basic test that the generator produces examples with expected keys.""" + + for is_training in [True, False]: + with TemporaryDirectory() as tmp_dir: + mock_raw_data(tmp_dir, raw_dim=256, num_images=100) + for example in allen_brain._generator(tmp_dir, is_training): + for key in ["image/encoded", "image/format", + "image/height", "image/width"]: + self.assertTrue(key in example.keys()) + + def test_generate_data_produces_examples_of_correct_shape(self): + """Test examples have correct input and output shapes. + + Notes: + + * Loops over all AllenBrainImage2image* problems. + + """ + + with TemporaryDirectory() as tmp_dir: + mock_raw_data(tmp_dir, raw_dim=256, num_images=100) + with TemporaryDirectory() as data_dir: + for problem_obj in self.all_problems: + problem_object = problem_obj() + + problem_object.generate_data(data_dir, tmp_dir) + + for mode in [Modes.TRAIN, Modes.EVAL]: + + dataset = problem_object.dataset(mode, data_dir) + example = tfe.Iterator(dataset).next() + + num_channels = problem_object.num_channels + + # Check that the input tensor has the right shape + input_dim = problem_object.input_dim + self.assertEqual(example["inputs"].numpy().shape, + (input_dim, input_dim, num_channels)) + + # Check that the targets tensor has the right shape + output_dim = problem_object.output_dim + self.assertEqual(example["targets"].numpy().shape, + (output_dim, output_dim, num_channels)) + + def test_transformer2d_single_step_e2e(self): + """Minimal end-to-end test of training and eval on allen_brain_image2image. + + Notes: + + * Runs problem generate_data + + * Runs a single step of training + + * Runs model in eval mode to obtain a prediction and confirms the + resulting shape. + + * TODO: Running this in predict mode crashes in my environment. + Separately have seen predict mode not produce the right shape + output tensors, as if .infer is still a wip. 
+ + """ + + problem_object = allen_brain.Img2imgAllenBrainDim8to32() + + with TemporaryDirectory() as tmp_dir: + + mock_raw_data(tmp_dir, raw_dim=256, num_images=100) + + with TemporaryDirectory() as data_dir: + + problem_object.generate_data(data_dir, tmp_dir) + + input_xy_dim = problem_object.input_dim + target_xy_dim = problem_object.output_dim + num_channels = problem_object.num_channels + + hparams = image_transformer_2d.img2img_transformer2d_tiny() + hparams.data_dir = data_dir + + p_hparams = problem_object.get_hparams(hparams) + + model = image_transformer_2d.Img2imgTransformer( + hparams, tf_estimator.ModeKeys.TRAIN, p_hparams + ) + + @tfe.implicit_value_and_gradients + def loss_fn(features): + _, losses = model(features) + return losses["training"] + + batch_size = 1 + train_dataset = problem_object.dataset(Modes.TRAIN, data_dir) + train_dataset = train_dataset.repeat(None).batch(batch_size) + + optimizer = tf.train.AdamOptimizer() + + example = tfe.Iterator(train_dataset).next() + example["targets"] = tf.reshape(example["targets"], + [batch_size, + target_xy_dim, + target_xy_dim, + num_channels]) + _, gv = loss_fn(example) + optimizer.apply_gradients(gv) + + model.set_mode(Modes.EVAL) + dataset = problem_object.dataset(Modes.EVAL, data_dir) + + example = tfe.Iterator(dataset).next() + example["inputs"] = tf.reshape(example["inputs"], + [1, + input_xy_dim, + input_xy_dim, + num_channels]) + example["targets"] = tf.reshape(example["targets"], + [1, + target_xy_dim, + target_xy_dim, + num_channels]) + + predictions, _ = model(example) + + self.assertEqual(predictions.numpy().shape, + (1, + target_xy_dim, + target_xy_dim, + num_channels, + 256)) + + +class TestImageMock(tf.test.TestCase): + """Tests of image mocking utility.""" + + def test_image_mock_produces_expected_shape(self): + """Test that the image mocking utility produces expected shape output.""" + + with TemporaryDirectory() as tmp_dir: + + cases = [ + { + "x_dim": 8, + "y_dim": 8, + "num_channels": 3, + "output_path": "/foo", + "write_image": True + } + ] + + for cid, case in enumerate(cases): + output_path = os.path.join(tmp_dir, "dummy%s.jpg" % cid) + img = mock_raw_image(x_dim=case["x_dim"], + y_dim=case["y_dim"], + num_channels=case["num_channels"], + output_path=output_path, + write_image=case["write_image"]) + + self.assertEqual(img.shape, (case["x_dim"], case["y_dim"], + case["num_channels"])) + if case["write_image"]: + self.assertTrue(tf.gfile.Exists(output_path)) + + +class TestMockRawData(tf.test.TestCase): + """Tests of raw data mocking utility.""" + + def test_runs(self): + """Test that data mocking utility runs for cases expected to succeed.""" + + with TemporaryDirectory() as tmp_dir: + + mock_raw_data(tmp_dir, raw_dim=256, num_channels=3, num_images=40) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/audio.py b/tensor2tensor/data_generators/audio.py index 12e0c7b43..0543a902c 100644 --- a/tensor2tensor/data_generators/audio.py +++ b/tensor2tensor/data_generators/audio.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
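For orientation, here is a minimal sketch of how the Allen Brain problems registered earlier in this change might be exercised outside the test harness. The problem name below follows the registry's camel-case-to-snake-case convention, and the directory paths are illustrative assumptions rather than part of this change; `generate_data` will download the raw section images from api.brain-map.org on first run.

    from tensor2tensor.utils import registry

    # Look up one of the problems registered above by its snake_case name.
    problem = registry.problem("img2img_allen_brain_dim8to32")

    # Illustrative paths: writes TF-Example shards to the data dir, reading
    # (and, if necessary, downloading) raw section images into the tmp dir.
    problem.generate_data("/tmp/t2t-data", "/tmp/t2t-tmp")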
@@ -18,17 +19,12 @@ from __future__ import print_function import os -from subprocess import call +import subprocess import tarfile import wave +from absl import flags +import tensorflow.compat.v1 as tf -# Dependency imports - -from tensor2tensor.data_generators import generator_utils - -import tensorflow as tf - -flags = tf.flags FLAGS = flags.FLAGS flags.DEFINE_string("timit_paths", "", @@ -62,7 +58,7 @@ def _collect_data(directory, input_ext, target_ext): # if the datafile was "/path/to/datafile.wav" then the key would be # "/path/to/datafile" # value: a pair of strings (input_filepath, target_filepath) - data_files = dict() + data_files = {} for root, _, filenames in os.walk(directory): input_files = [filename for filename in filenames if input_ext in filename] for input_filename in input_files: @@ -80,7 +76,7 @@ def _get_audio_data(filepath): # Construct a true .wav file. out_filepath = filepath.strip(".WAV") + ".wav" # Assumes sox is installed on system. Sox converts from NIST SPHERE to WAV. - call(["sox", filepath, out_filepath]) + subprocess.call(["sox", filepath, out_filepath]) wav_file = wave.open(open(out_filepath)) frame_count = wav_file.getnframes() byte_array = wav_file.readframes(frame_count) @@ -97,7 +93,8 @@ def _get_text_data(filepath): return " ".join(words) -def timit_generator(tmp_dir, +def timit_generator(data_dir, + tmp_dir, training, how_many, start_from=0, @@ -107,6 +104,7 @@ def timit_generator(tmp_dir, """Data generator for TIMIT transcription problem. Args: + data_dir: path to the data directory. tmp_dir: path to temporary storage directory. training: a Boolean; if true, we use the train set, otherwise the test set. how_many: how many inputs and labels to generate. @@ -125,16 +123,22 @@ def timit_generator(tmp_dir, * audio/sample_width: an integer * targets: an integer sequence representing the encoded sentence """ + del data_dir eos_list = [1] if eos_list is None else eos_list if vocab_filename is not None: - vocab_symbolizer = generator_utils.get_or_generate_vocab( - tmp_dir, vocab_filename, vocab_size) + # TODO(lukaszkaiser): Correct this call to generate a vocabulary. No data + # sources are being passed. + # vocab_symbolizer = generator_utils.get_or_generate_vocab( + # data_dir, tmp_dir, vocab_filename, vocab_size) + del vocab_size + vocab_symbolizer = None + assert False _get_timit(tmp_dir) datasets = (_TIMIT_TRAIN_DATASETS if training else _TIMIT_TEST_DATASETS) i = 0 - for data_dir, (audio_ext, transcription_ext) in datasets: - data_dir = os.path.join(tmp_dir, data_dir) - data_files = _collect_data(data_dir, audio_ext, transcription_ext) + for timit_data_dir, (audio_ext, transcription_ext) in datasets: + timit_data_dir = os.path.join(tmp_dir, timit_data_dir) + data_files = _collect_data(timit_data_dir, audio_ext, transcription_ext) data_pairs = data_files.values() for input_file, target_file in sorted(data_pairs)[start_from:]: if i == how_many: diff --git a/tensor2tensor/data_generators/audio_encoder.py b/tensor2tensor/data_generators/audio_encoder.py new file mode 100644 index 000000000..1d8de1a05 --- /dev/null +++ b/tensor2tensor/data_generators/audio_encoder.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Encoder for audio data.""" + +import os +from subprocess import call +import tempfile +import numpy as np +from scipy.io import wavfile + + +class AudioEncoder(object): + """Encoder class for saving and loading waveforms.""" + + def __init__(self, num_reserved_ids=0, sample_rate=16000): + assert num_reserved_ids == 0 + self._sample_rate = sample_rate + + @property + def num_reserved_ids(self): + return 0 + + def encode(self, s): + """Transform a string with a filename into a list of float32. + + Args: + s: path to the file with a waveform. + + Returns: + samples: list of int16s + """ + def convert_to_wav(in_path, out_path, extra_args=None): + if not os.path.exists(out_path): + # TODO(dliebling) On Linux, check if libsox-fmt-mp3 is installed. + args = ["sox", "--rate", "16k", "--bits", "16", "--channel", "1"] + if extra_args: + args += extra_args + call(args + [in_path, out_path]) + + # Make sure that the data is a single channel, 16bit, 16kHz wave. + # TODO(chorowski): the directory may not be writable, this should fallback + # to a temp path, and provide instructions for installing sox. + if s.endswith(".mp3"): + out_filepath = s[:-4] + ".wav" + convert_to_wav(s, out_filepath, ["--guard"]) + s = out_filepath + elif not s.endswith(".wav"): + out_filepath = s + ".wav" + convert_to_wav(s, out_filepath) + s = out_filepath + rate, data = wavfile.read(s) + assert rate == self._sample_rate + assert len(data.shape) == 1 + if data.dtype not in [np.float32, np.float64]: + data = data.astype(np.float32) / np.iinfo(data.dtype).max + return data.tolist() + + def decode(self, ids): + """Transform a sequence of float32 into a waveform. + + Args: + ids: list of integers to be converted. + + Returns: + Path to the temporary file where the waveform was saved. + + Raises: + ValueError: if the ids are not of the appropriate size. + """ + _, tmp_file_path = tempfile.mkstemp() + wavfile.write(tmp_file_path, self._sample_rate, np.asarray(ids)) + return tmp_file_path + + def decode_list(self, ids): + """Transform a sequence of int ids into a wavform file. + + Args: + ids: list of integers to be converted. + + Returns: + Singleton list: path to the temporary file where the wavfile was saved. + """ + return [self.decode(ids)] + + @property + def vocab_size(self): + return 256 diff --git a/tensor2tensor/data_generators/audio_test.py b/tensor2tensor/data_generators/audio_test.py index f1830043f..adf86fbfb 100644 --- a/tensor2tensor/data_generators/audio_test.py +++ b/tensor2tensor/data_generators/audio_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
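As a quick illustration of the `AudioEncoder` added above, the sketch below round-trips a waveform through `encode`/`decode`. It assumes an existing 16 kHz mono `.wav` file at an illustrative path (non-wav inputs would additionally require `sox` to be installed).

    from tensor2tensor.data_generators import audio_encoder

    encoder = audio_encoder.AudioEncoder(sample_rate=16000)

    # encode() reads the file and returns the samples as a list of floats
    # (integer PCM data is normalized to roughly [-1, 1]).
    samples = encoder.encode("/tmp/example.wav")  # illustrative path

    # decode() writes the samples to a temporary .wav file and returns its path.
    tmp_wav_path = encoder.decode(samples)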
@@ -20,12 +21,9 @@ import io import os - -# Dependency imports - from tensor2tensor.data_generators import audio -import tensorflow as tf +import tensorflow.compat.v1 as tf class AudioTest(tf.test.TestCase): diff --git a/tensor2tensor/data_generators/babi_qa.py b/tensor2tensor/data_generators/babi_qa.py new file mode 100644 index 000000000..db5c8ef1f --- /dev/null +++ b/tensor2tensor/data_generators/babi_qa.py @@ -0,0 +1,542 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Data generators for bAbi question answering dataset. + + +The dataset consists of 20 tasks for testing text understanding and reasoning +in the bAbI project (https://research.fb.com/downloads/babi/). The aim is that +each task tests a unique aspect of text and reasoning, and hence test different +capabilities of learning models. For more information check the following paper: +Jason Weston, Antoine Bordes, Sumit Chopra and Tomas Mikolov. Towards AI +Complete Question Answering: A Set of Prerequisite Toy Tasks, arXiv:1502.05698. +Available at: http://arxiv.org/abs/1502.05698 + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import os +import shutil +import tarfile +import requests +import six + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import tokenizer +from tensor2tensor.layers import modalities +from tensor2tensor.utils import metrics +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +_DIR_NAME = "tasks_1-20_v1-2" +_TAR = _DIR_NAME + ".tar.gz" +_URL = "http://www.thespermwhale.com/jaseweston/babi/" + _TAR + +_TASKS = { + "qa0": "qa0_all-tasks", + "qa1": "qa1_single-supporting-fact", + "qa2": "qa2_two-supporting-facts", + "qa3": "qa3_three-supporting-facts", + "qa4": "qa4_two-arg-relations", + "qa5": "qa5_three-arg-relations", + "qa6": "qa6_yes-no-questions", + "qa7": "qa7_counting", + "qa8": "qa8_lists-sets", + "qa9": "qa9_simple-negation", + "qa10": "qa10_indefinite-knowledge", + "qa11": "qa11_basic-coreference", + "qa12": "qa12_conjunction", + "qa13": "qa13_compound-coreference", + "qa14": "qa14_time-reasoning", + "qa15": "qa15_basic-deduction", + "qa16": "qa16_basic-induction", + "qa17": "qa17_positional-reasoning", + "qa18": "qa18_size-reasoning", + "qa19": "qa19_path-finding", + "qa20": "qa20_agents-motivations" +} + +# A list of problem names that are registered by this module. This will get +# populated at module load time in the code at the bottom of this file. +REGISTERED_PROBLEMS = [] + + +def _normalize_string(raw_str): + """Normalizes the string using tokenizer.encode. 
+ + Args: + raw_str: the input string + + Returns: + A string which is ready to be tokenized using split() + """ + return " ".join( + token.strip() + for token in tokenizer.encode(text_encoder.native_to_unicode(raw_str))) + + +def _prepare_babi_data(tmp_dir, data_dir): + """Downloads and extracts the dataset. + + Args: + tmp_dir: temp directory to download and extract the dataset + data_dir: The base directory where data and vocab files are stored. + + Returns: + tmp_dir: temp directory containing the raw data. + """ + if not tf.gfile.Exists(data_dir): + tf.gfile.MakeDirs(data_dir) + + file_path = os.path.join(tmp_dir, _TAR) + headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_1) " + "AppleWebKit/537.36 (KHTML, like Gecko) " + "Chrome/63.0.3239.132 Safari/537.36"} + resp = requests.get(_URL, headers=headers) + with open(file_path, "wb") as f: + f.write(resp.content) + + tar = tarfile.open(file_path) + tar.extractall(tmp_dir) + tar.close() + + return tmp_dir + + +def _build_vocab(generator, vocab_dir, vocab_name): + """Build a vocabulary from examples. + + Args: + generator: text generator for creating vocab. + vocab_dir: directory where to save the vocabulary. + vocab_name: vocab file name. + + Returns: + text encoder. + """ + vocab_path = os.path.join(vocab_dir, vocab_name) + if not tf.gfile.Exists(vocab_path): + data = [] + for line in generator: + data.extend(line.split()) + counter = collections.Counter(data) + count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) + words, _ = list(zip(*count_pairs)) + encoder = text_encoder.TokenTextEncoder(None, vocab_list=words) + encoder.store_to_file(vocab_path) + else: + encoder = text_encoder.TokenTextEncoder(vocab_path) + return encoder + + +def _babi_parser(tmp_dir, + babi_task_id, + subset, + dataset_split, + joint_training=True): + """Parsing the bAbi dataset (train and test). + + Args: + tmp_dir: temp directory to download and extract the dataset + babi_task_id: babi task id + subset: babi subset + dataset_split: dataset split (train or eval) + joint_training: if training the model on all tasks. + + Returns: + babi_instances: set of training examples, each a dict containing a story, + a question and an answer. + babi_lines: all the texts in the data separated based on their + appearance in the stories, questions, or answers. + """ + + def _data_file(mode, task_id): + """Generates the path to the data file for the given mode(train/test). + + Args: + mode: either train or test for bAbi dataset + task_id: babi task id + + Returns: + data file path + """ + file_name = (_TASKS[task_id] + "_{}.txt") + return os.path.join(_DIR_NAME, subset, file_name.format(mode)) + + def _all_task_raw_data_generator(tmp_dir, data_file, dataset_split): + """Prepares raw data for all tasks to gether.. 
+ + Args: + tmp_dir: temp directory + data_file: data file + dataset_split: dataset split + """ + + tf.logging.info("Preparing dataset of all task together") + globe_name = ("*_{}.txt") + mode_name = "test" + if dataset_split == problem.DatasetSplit.TRAIN: + mode_name = "train" + files_name = os.path.join( + tmp_dir, _DIR_NAME, subset, + globe_name.format(mode_name)) + with tf.gfile.GFile(data_file, "wb") as outfile: + for filename in tf.gfile.Glob(files_name): + if filename == data_file: + # don"t want to copy the output into the output + continue + with tf.gfile.GFile(filename, "rb") as readfile: + shutil.copyfileobj(readfile, outfile) + + def _parse_answer(answer): + if (joint_training or babi_task_id in ["qa8", "qa19", "qa0" + ]): # "lists-sets" or "path finding" + return "".join([d for d in answer.split(",")]) # as a single token! + else: + return answer + + if dataset_split == problem.DatasetSplit.TRAIN: + babi_train_task_id = "qa0" if joint_training else babi_task_id + data_file = os.path.join(tmp_dir, _data_file("train", babi_train_task_id)) + else: + data_file = os.path.join(tmp_dir, _data_file("test", babi_task_id)) + + if ((babi_task_id == "qa0" or joint_training) and + not tf.gfile.Exists(os.path.join(tmp_dir, data_file))): + _all_task_raw_data_generator(tmp_dir, data_file, dataset_split) + + tf.logging.info("Parsing %s into training/testing instances...", data_file) + + babi_instances = [] + with tf.gfile.GFile(data_file, mode="r") as f: + story = [] + for line in f: + line_num, line = line.strip().split(" ", 1) + if int(line_num) == 1: + story = [] + if "\t" in line: + question, answer, _ = line.split("\t") + question = _normalize_string(question) + substories = [s for s in story if s] + answer = _parse_answer(answer) + instance = { + FeatureNames.STORY: substories, + FeatureNames.QUESTION: question, + FeatureNames.ANSWER: answer + } + babi_instances.append(instance) + + story.append("") + else: + story.append(_normalize_string(line)) + + return babi_instances + + +class FeatureNames(object): + """Feature names, i.e keys for storing babi_qa data in TFExamples.""" + STORY = "story" + QUESTION = "question" + ANSWER = "answer" + + @classmethod + def features(cls): + for attr, value in cls.__dict__.items(): + if not attr.startswith("__") and not callable(getattr(cls, attr)): + yield value + + +class BabiQa(text_problems.QuestionAndContext2TextProblem): + """Base class for bAbi question answering problems.""" + + def __init__(self, *args, **kwargs): + + super(BabiQa, self).__init__(*args, **kwargs) + assert not self._was_reversed, "This problem is not reversible!" + assert not self._was_copy, "This problem is not copyable!" + + @property + def babi_subset(self): + """The subset of dataset. + + This should be one of the following: + {"en", "en-10k", "shuffled", "shuffled-10k"} + """ + raise NotImplementedError + + @property + def babi_task_id(self): + """The id of the babi task. + + This should be one of the following: + {"qa0", "qa1", "qa1",..."q20"}, where qa0 means all tasks together. 
+ """ + raise NotImplementedError + + def dataset_filename(self): + return "babi_qa_" + self.babi_subset + "_" + _TASKS[self.babi_task_id] + + @property + def vocab_file(self): + return self.babi_subset + "_" + _TASKS[self.babi_task_id] + ".vocab" + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 1, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def is_generate_per_split(self): + return True + + @property + def joint_training(self): + # training on data from all tasks. + return True + + @property + def vocab_type(self): + return text_problems.VocabType.TOKEN + + def get_labels_encoder(self, data_dir): + """Builds encoder for the given class labels. + + Args: + data_dir: data directory + + Returns: + An encoder for class labels. + """ + label_filepath = os.path.join(data_dir, self.vocab_filename) + return text_encoder.TokenTextEncoder(label_filepath) + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + + tmp_dir = _prepare_babi_data(tmp_dir, data_dir) + _build_vocab( + self.generate_text_for_vocab(data_dir, tmp_dir), data_dir, + self.vocab_filename) + examples = _babi_parser(tmp_dir, self.babi_task_id, self.babi_subset, + dataset_split, self.joint_training) + + def _generate_samples(): + """sample generator. + + Yields: + A dict. + + """ + for example in examples: + context = " ".join(example[FeatureNames.STORY]) + yield { + "context": " ".join(context.split()), + "inputs": " ".join(example[FeatureNames.QUESTION].split()), + "targets": example[FeatureNames.ANSWER] + } + + return _generate_samples() + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + """A generator that generates samples that are encoded. + + Args: + data_dir: data directory + tmp_dir: temp directory + dataset_split: dataset split + + Yields: + A dict. + + """ + generator = self.generate_samples(data_dir, tmp_dir, dataset_split) + encoder = self.get_or_create_vocab(data_dir, tmp_dir) + label_encoder = self.get_labels_encoder(data_dir) + for sample in generator: + inputs = encoder.encode(sample["inputs"]) + inputs.append(text_encoder.EOS_ID) + context = encoder.encode(sample["context"]) + context.append(text_encoder.EOS_ID) + targets = label_encoder.encode(sample["targets"]) + sample["targets"] = targets + yield {"inputs": inputs, "context": context, "targets": targets} + + def feature_encoders(self, data_dir): + """Return a dict for encoding and decoding inference input/output. + + Args: + data_dir: data directory + + Returns: + A dict of . + + """ + encoders = (super(BabiQa, self).feature_encoders(data_dir)) + label_encoder = self.get_labels_encoder(data_dir) + encoders["targets"] = label_encoder # bAbi as a classification task + return encoders + + def generate_text_for_vocab(self, data_dir, tmp_dir): + # NOTE: for babi, we create the vocab from both train and test data. + for dataset_split in [ + problem.DatasetSplit.TRAIN, problem.DatasetSplit.EVAL + ]: + + for example in _babi_parser(tmp_dir, self.babi_task_id, self.babi_subset, + dataset_split, self.joint_training): + + context = " ".join(example[FeatureNames.STORY]) + yield " ".join(context.split()) + yield " ".join(example[FeatureNames.QUESTION].split()) + yield example[FeatureNames.ANSWER] + + def hparams(self, defaults, unused_model_hparams): + """Returns problem_hparams. 
+ + Args: + defaults: default hyperparameters + unused_model_hparams: model hyperparameters + + """ + (super(BabiQa, self).hparams(defaults, unused_model_hparams)) + p = defaults + num_classes = self._encoders["targets"].vocab_size + p.modality = {"targets": modalities.ModalityType.CLASS_LABEL} + p.vocab_size = {"targets": num_classes} + + def example_reading_spec(self): + data_fields, data_items_to_decoders = ( + super(BabiQa, self).example_reading_spec()) + data_fields["targets"] = tf.FixedLenFeature([1], tf.int64) + return (data_fields, data_items_to_decoders) + + def eval_metrics(self): + """Specify the set of evaluation metrics for this problem. + + Returns: + List of evaluation metrics of interest. + """ + return [metrics.Metrics.ACC] + + +class BabiQaConcat(BabiQa): + """Babi with question and story concatenated together as inputs.""" + + def preprocess_example(self, example, unused_mode, unused_model_hparams): + sep = tf.convert_to_tensor([self.QUESTION_SEPARATOR_ID], + dtype=example["inputs"].dtype) + example["inputs"] = tf.concat([example["inputs"], sep, example["context"]], + 0) + return example + + def hparams(self, defaults, unused_model_hparams): + super(BabiQaConcat, self).hparams(defaults, unused_model_hparams) + p = defaults + + if "context" in p.modality: + del p.modality["context"] + + if "context" in p.vocab_size: + del p.vocab_size["context"] + + +def _problems_to_register(): + """Problems for which we want to create datasets. + + To avoid a long file with class definition boilerplate for each problem, we + are dynamically creating and registering problems. The set of problems to + register is defined by this function. See below for the code that creates the + classes and registers the problems. + + Returns: + A dictionary mapping problem name to babi_task_id. + """ + all_problems = {} + + # First define some problems using only concrete characters (i.e., no meta + # characters). + problems_on_different_tasks = { + "AllTasks": "qa0", + "Task1": "qa1", + "Task2": "qa2", + "Task3": "qa3", + "Task4": "qa4", + "Task5": "qa5", + "Task6": "qa6", + "Task7": "qa7", + "Task8": "qa8", + "Task9": "qa9", + "Task10": "qa10", + "Task11": "qa11", + "Task12": "qa12", + "Task13": "qa13", + "Task14": "qa14", + "Task15": "qa15", + "Task16": "qa16", + "Task17": "qa17", + "Task18": "qa18", + "Task19": "qa19", + "Task20": "qa20", + } + all_problems.update(problems_on_different_tasks) + + return all_problems + + +def _register_babi_problems(): + """It dynamically instantiates a class for each babi subsets-tasks. + + @registry.register_problem + class BabiQaConcatAllTasks_10k(EditSequenceRegexProblem): + @property + def babi_task_id(self): + return "qa0" + @property + def babi_subset(self): + return "en-10k" + + It does not put the classes into the global namespace, so to access the class + we rely on the registry or this module"s REGISTERED_PROBLEMS list. + It will be available as + + registry.problem("babi_qa_concat_all_tasks_10k") + + i.e., change camel case to snake case. Numbers are considered lower case + characters for these purposes. 
+ """ + for (subset, subset_suffix) in [("en", "_1k"), ("en-10k", "_10k")]: + for problem_name, babi_task_id in six.iteritems(_problems_to_register()): + problem_class = type("BabiQaConcat" + problem_name + subset_suffix, + (BabiQaConcat,), { + "babi_task_id": babi_task_id, + "babi_subset": subset + }) + registry.register_problem(problem_class) + REGISTERED_PROBLEMS.append(problem_class.name) + + +_register_babi_problems() diff --git a/tensor2tensor/data_generators/bair_robot_pushing.py b/tensor2tensor/data_generators/bair_robot_pushing.py new file mode 100644 index 000000000..9ceb834cf --- /dev/null +++ b/tensor2tensor/data_generators/bair_robot_pushing.py @@ -0,0 +1,196 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Berkeley (BAIR) robot pushing dataset. + +Self-Supervised Visual Planning with Temporal Skip Connections +Frederik Ebert, Chelsea Finn, Alex X. Lee, and Sergey Levine. +https://arxiv.org/abs/1710.05268 + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tarfile +import numpy as np + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import video_utils +from tensor2tensor.layers import modalities +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +DATA_URL = ( + "http://rail.eecs.berkeley.edu/datasets/bair_robot_pushing_dataset_v0.tar") + + +# Lazy load PIL.Image +def PIL_Image(): # pylint: disable=invalid-name + from PIL import Image # pylint: disable=g-import-not-at-top + return Image + + +@registry.register_problem +class VideoBairRobotPushing(video_utils.VideoProblem): + """Berkeley (BAIR) robot pushing dataset.""" + + @property + def num_channels(self): + return 3 + + @property + def frame_height(self): + return 64 + + @property + def frame_width(self): + return 64 + + @property + def is_generate_per_split(self): + return True + + # num_train_files * num_videos * num_frames + @property + def total_number_of_frames(self): + return 167 * 256 * 30 + + def max_frames_per_video(self, hparams): + return 30 + + @property + def random_skip(self): + return False + + @property + def only_keep_videos_from_0th_frame(self): + return True + + @property + def use_not_breaking_batching(self): + return True + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + return [ + {"split": problem.DatasetSplit.TRAIN, "shards": 10}, + {"split": problem.DatasetSplit.EVAL, "shards": 1}, + {"split": problem.DatasetSplit.TEST, "shards": 1}] + + @property + def extra_reading_spec(self): + """Additional data fields to store on disk and their decoders.""" + data_fields = { + "frame_number": tf.FixedLenFeature([1], tf.int64), + } + decoders = { + "frame_number": + 
contrib.slim().tfexample_decoder.Tensor(tensor_key="frame_number"), + } + return data_fields, decoders + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.VIDEO, + "targets": modalities.ModalityType.VIDEO} + p.vocab_size = {"inputs": 256, + "targets": 256} + + def parse_frames(self, filenames): + image_key = "{}/image_aux1/encoded" + action_key = "{}/action" + state_key = "{}/endeffector_pos" + + for f in filenames: + print("Parsing ", f) + for serialized_example in tf.python_io.tf_record_iterator(f): + x = tf.train.Example() + x.ParseFromString(serialized_example) + # there are 4 features per frame + # main image, aux image, actions and states + nf = len(x.features.feature.keys()) // 4 + + for i in range(nf): + image_name = image_key.format(i) + action_name = action_key.format(i) + state_name = state_key.format(i) + + byte_str = x.features.feature[image_name].bytes_list.value[0] + img = PIL_Image().frombytes( + "RGB", (self.frame_width, self.frame_height), byte_str) + arr = np.array(img.getdata()) + frame = arr.reshape( + self.frame_width, self.frame_height, self.num_channels) + + state = x.features.feature[state_name].float_list.value + action = x.features.feature[action_name].float_list.value + + yield i, frame, state, action + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + path = generator_utils.maybe_download( + tmp_dir, os.path.basename(DATA_URL), DATA_URL) + + tar = tarfile.open(path) + tar.extractall(tmp_dir) + tar.close() + + if dataset_split == problem.DatasetSplit.TEST: + base_dir = os.path.join(tmp_dir, "softmotion30_44k/test/*") + filenames = tf.gfile.Glob(base_dir) + else: + base_dir = os.path.join(tmp_dir, "softmotion30_44k/train/*") + filenames = tf.gfile.Glob(base_dir) + + # the test-set contains just 256 videos so this should be sufficient. + if dataset_split == problem.DatasetSplit.TRAIN: + filenames = filenames[:-2] + else: + filenames = filenames[-2:] + + for frame_number, frame, state, action in self.parse_frames(filenames): + yield { + "frame_number": [frame_number], + "frame": frame, + "state": state, + "action": action, + } + + +@registry.register_problem +class VideoBairRobotPushingWithActions(VideoBairRobotPushing): + """Berkeley (BAIR) robot pushing dataset with actions.""" + + @property + def extra_reading_spec(self): + """Additional data fields to store on disk and their decoders.""" + data_fields = { + "frame_number": tf.FixedLenFeature([1], tf.int64), + "action": tf.FixedLenFeature([4], tf.float32), + } + decoders = { + "frame_number": + contrib.slim().tfexample_decoder.Tensor(tensor_key="frame_number"), + "action": + contrib.slim().tfexample_decoder.Tensor(tensor_key="action"), + } + return data_fields, decoders diff --git a/tensor2tensor/data_generators/celeba.py b/tensor2tensor/data_generators/celeba.py new file mode 100644 index 000000000..8fe0547a6 --- /dev/null +++ b/tensor2tensor/data_generators/celeba.py @@ -0,0 +1,276 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""CelebA.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.layers import modalities +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_problem +class ImageCeleba(image_utils.ImageProblem): + """CelebA dataset, aligned and cropped images.""" + IMG_DATA = ("img_align_celeba.zip", + "https://drive.google.com/uc?export=download&" + "id=0B7EVK8r0v71pZjFTYXZWM3FlRnM") + LANDMARKS_DATA = ("celeba_landmarks_align", + "https://drive.google.com/uc?export=download&" + "id=0B7EVK8r0v71pd0FJY3Blby1HUTQ") + ATTR_DATA = ("celeba_attr", "https://drive.google.com/uc?export=download&" + "id=0B7EVK8r0v71pblRyaVFSWGxPY0U") + + LANDMARK_HEADINGS = ("lefteye_x lefteye_y righteye_x righteye_y " + "nose_x nose_y leftmouth_x leftmouth_y rightmouth_x " + "rightmouth_y").split() + ATTR_HEADINGS = ( + "5_o_Clock_Shadow Arched_Eyebrows Attractive Bags_Under_Eyes Bald Bangs " + "Big_Lips Big_Nose Black_Hair Blond_Hair Blurry Brown_Hair " + "Bushy_Eyebrows Chubby Double_Chin Eyeglasses Goatee Gray_Hair " + "Heavy_Makeup High_Cheekbones Male Mouth_Slightly_Open Mustache " + "Narrow_Eyes No_Beard Oval_Face Pale_Skin Pointy_Nose Receding_Hairline " + "Rosy_Cheeks Sideburns Smiling Straight_Hair Wavy_Hair Wearing_Earrings " + "Wearing_Hat Wearing_Lipstick Wearing_Necklace Wearing_Necktie Young" + ).split() + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.IDENTITY, + "targets": modalities.ModalityType.IDENTITY} + p.vocab_size = {"inputs": 256, + "targets": 256} + p.batch_size_multiplier = 256 + p.input_space_id = 1 + p.target_space_id = 1 + + def generator(self, tmp_dir, how_many, start_from=0): + """Image generator for CELEBA dataset. + + Args: + tmp_dir: path to temporary storage directory. + how_many: how many images and labels to generate. + start_from: from which image to start. 
+ + Yields: + A dictionary representing the images with the following fields: + * image/encoded: the string encoding the image as JPEG, + * image/format: the string "jpeg" representing image format, + """ + out_paths = [] + for fname, url in [self.IMG_DATA, self.LANDMARKS_DATA, self.ATTR_DATA]: + path = generator_utils.maybe_download_from_drive(tmp_dir, fname, url) + out_paths.append(path) + + img_path, landmarks_path, attr_path = out_paths # pylint: disable=unbalanced-tuple-unpacking + unzipped_folder = img_path[:-4] + if not tf.gfile.Exists(unzipped_folder): + zipfile.ZipFile(img_path, "r").extractall(tmp_dir) + + with tf.gfile.Open(landmarks_path) as f: + landmarks_raw = f.read() + + with tf.gfile.Open(attr_path) as f: + attr_raw = f.read() + + def process_landmarks(raw_data): + landmarks = {} + lines = raw_data.split("\n") + headings = lines[1].strip().split() + for line in lines[2:-1]: + values = line.strip().split() + img_name = values[0] + landmark_values = [int(v) for v in values[1:]] + landmarks[img_name] = landmark_values + return landmarks, headings + + def process_attrs(raw_data): + attrs = {} + lines = raw_data.split("\n") + headings = lines[1].strip().split() + for line in lines[2:-1]: + values = line.strip().split() + img_name = values[0] + attr_values = [int(v) for v in values[1:]] + attrs[img_name] = attr_values + return attrs, headings + + img_landmarks, _ = process_landmarks(landmarks_raw) + img_attrs, _ = process_attrs(attr_raw) + + image_files = list(sorted(tf.gfile.Glob(unzipped_folder + "/*.jpg"))) + for filename in image_files[start_from:start_from + how_many]: + img_name = os.path.basename(filename) + landmarks = img_landmarks[img_name] + attrs = img_attrs[img_name] + + with tf.gfile.Open(filename, "rb") as f: + encoded_image_data = f.read() + yield { + "image/encoded": [encoded_image_data], + "image/format": ["jpeg"], + "attributes": attrs, + "landmarks": landmarks, + } + + @property + def train_shards(self): + return 100 + + @property + def dev_shards(self): + return 10 + + @property + def test_shards(self): + return 10 + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + train_gen = self.generator(tmp_dir, 162770) + train_paths = self.training_filepaths( + data_dir, self.train_shards, shuffled=False) + generator_utils.generate_files(train_gen, train_paths) + + dev_gen = self.generator(tmp_dir, 19867, 162770) + dev_paths = self.dev_filepaths(data_dir, self.dev_shards, shuffled=False) + generator_utils.generate_files(dev_gen, dev_paths) + + test_gen = self.generator(tmp_dir, 19962, 162770+19867) + test_paths = self.test_filepaths(data_dir, self.test_shards, shuffled=False) + generator_utils.generate_files(test_gen, test_paths) + + generator_utils.shuffle_dataset(train_paths + dev_paths + test_paths) + + +@registry.register_problem +class ImageCelebaMultiResolution(ImageCeleba): + """CelebA at multiple resolutions. + + The resolutions are specified as a hyperparameter during preprocessing. + """ + + def dataset_filename(self): + return "image_celeba" + + def preprocess_example(self, example, mode, hparams): + image = example["inputs"] + # Get resize method. Include a default if not specified, or if it's not in + # TensorFlow's collection of pre-implemented resize methods. + resize_method = getattr(hparams, "resize_method", "BICUBIC") + resize_method = getattr(tf.image.ResizeMethod, resize_method, resize_method) + + # Remove boundaries in CelebA images. Remove 40 pixels each side + # vertically and 20 pixels each side horizontally. 
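+    # The aligned CelebA images are 218x178, so this crop keeps the central
+    # 138x138 region (218 - 2*40 rows, 178 - 2*20 columns).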
+ image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40) + + highest_res = hparams.resolutions[-1] + if resize_method == "DILATED": + # Resize image so that dilated subsampling is properly divisible. + scaled_image = image_utils.resize_by_area(image, highest_res) + scaled_images = image_utils.make_multiscale_dilated( + scaled_image, hparams.resolutions, num_channels=self.num_channels) + else: + scaled_images = image_utils.make_multiscale( + image, hparams.resolutions, + resize_method=resize_method, num_channels=self.num_channels) + + # Pack tuple of scaled images into one tensor. We do this by enforcing the + # columns to match for every resolution. + example["inputs"] = image + example["targets"] = tf.concat([ + tf.reshape(scaled_image, + [res**2 // highest_res, highest_res, self.num_channels]) + for scaled_image, res in zip(scaled_images, hparams.resolutions)], + axis=0) + return example + + +@registry.register_problem +class Img2imgCeleba(ImageCeleba): + """8px to 32px problem.""" + + def dataset_filename(self): + return "image_celeba" + + def preprocess_example(self, example, unused_mode, unused_hparams): + image = example["inputs"] + # Remove boundaries in CelebA images. Remove 40 pixels each side + # vertically and 20 pixels each side horizontally. + image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40) + image_8 = image_utils.resize_by_area(image, 8) + image_32 = image_utils.resize_by_area(image, 32) + + example["inputs"] = image_8 + example["targets"] = image_32 + return example + + +@registry.register_problem +class Img2imgCeleba64(Img2imgCeleba): + """8px to 64px problem.""" + + def preprocess_example(self, example, unused_mode, unused_hparams): + image = example["inputs"] + # Remove boundaries in CelebA images. Remove 40 pixels each side + # vertically and 20 pixels each side horizontally. + image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40) + image_8 = image_utils.resize_by_area(image, 8) + image_64 = image_utils.resize_by_area(image, 64) + + example["inputs"] = image_8 + example["targets"] = image_64 + return example + + +@registry.register_problem +class ImageCeleba32(Img2imgCeleba): + """CelebA resized to spatial dims [32, 32].""" + + def preprocess_example(self, example, unused_mode, unused_hparams): + image = example["inputs"] + # Remove boundaries in CelebA images. Remove 40 pixels each side + # vertically and 20 pixels each side horizontally. + image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40) + image = image_utils.resize_by_area(image, 32) + + example["inputs"] = image + example["targets"] = image + return example + + +@registry.register_problem +class ImageCeleba64(Img2imgCeleba): + """CelebA resized to spatial dims [64, 64].""" + + def preprocess_example(self, example, unused_mode, unused_hparams): + image = example["inputs"] + # Remove boundaries in CelebA images. Remove 40 pixels each side + # vertically and 20 pixels each side horizontally. + image = tf.image.crop_to_bounding_box(image, 40, 20, 218 - 80, 178 - 40) + image = image_utils.resize_by_area(image, 64) + + example["inputs"] = image + example["targets"] = image + return example + + diff --git a/tensor2tensor/data_generators/celeba_test.py b/tensor2tensor/data_generators/celeba_test.py new file mode 100644 index 000000000..f9c5a62b7 --- /dev/null +++ b/tensor2tensor/data_generators/celeba_test.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for CelebA.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +from tensor2tensor.data_generators import celeba +from tensor2tensor.utils import hparam + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class CelebaTest(parameterized.TestCase, tf.test.TestCase): + + @parameterized.named_parameters( + ("Default", None), + ("Area", "AREA"), + ("Dilated", "DILATED")) + def testCelebaMultiResolutionPreprocessExample(self, resize_method): + example = {"inputs": tf.random_uniform([218, 178, 3], minval=-1.)} + mode = tf_estimator.ModeKeys.TRAIN + hparams = hparam.HParams(resolutions=[8, 16, 32]) + if resize_method is not None: + hparams.resize_method = resize_method + + problem = celeba.ImageCelebaMultiResolution() + preprocessed_example = problem.preprocess_example(example, mode, hparams) + self.assertLen(preprocessed_example, 2) + self.assertEqual(preprocessed_example["inputs"].shape, (138, 138, 3)) + self.assertEqual(preprocessed_example["targets"].shape, (42, 32, 3)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/celebahq.py b/tensor2tensor/data_generators/celebahq.py new file mode 100644 index 000000000..9383f0da0 --- /dev/null +++ b/tensor2tensor/data_generators/celebahq.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""CelebA-HQ.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from tensor2tensor.data_generators import image_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.layers import modalities +from tensor2tensor.utils import metrics +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +@registry.register_problem +class ImageCelebahq128(image_utils.ImageProblem): + """CelebA-HQ dataset, downsampled as 128x128.""" + + def dataset_filename(self): + return "image_celebahq-128" + + def example_reading_spec(self): + data_fields = { + "image/encoded": tf.FixedLenFeature((), tf.string), + "image/format": tf.FixedLenFeature((), tf.string, default_value="png"), + } + _, data_items_to_decoders = super( + ImageCelebahq128, self).example_reading_spec() + return data_fields, data_items_to_decoders + + def filepattern(self, data_dir, mode, shard=None): + """Get filepattern for data files for mode. + + Args: + data_dir: str, data directory. + mode: DatasetSplit + shard: int, if provided, will only read data from the specified shard. + + Returns: + filepattern str + """ + path = os.path.join(data_dir, self.dataset_filename()) + if shard is not None: + shard_str = "%05d" % shard + elif mode == problem.DatasetSplit.TRAIN: + # Use the first 90 shards. + shard_str = "000[0-8]" + else: + assert mode in [problem.DatasetSplit.EVAL, + tf_estimator.ModeKeys.PREDICT, + problem.DatasetSplit.TEST] + # Use the last 10 shards. + shard_str = "0009" + + return "%s-%s*" % (path, shard_str) + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + raise NotImplementedError("Data preprocessing for CelebA-HQ is not " + "currently available. Please follow the steps " + "in https://github.com/tkarras/progressive_growin" + "g_of_gans.") + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.batch_size_multiplier = 1 + p.modality = {"inputs": modalities.ModalityType.IDENTITY} + p.vocab_size = {"inputs": 256} + p.input_space_id = 1 + + def preprocess_example(self, example, mode, hparams): + del mode, hparams # unused + example["inputs"].set_shape((128, 128, 3)) + return example + + +@registry.register_problem +class ImageCelebahq128Dmol(ImageCelebahq128): + """CelebA-HQ dataset with discretized mixture of logistics for evaluation.""" + + def eval_metrics(self): + return [ + metrics.Metrics.DMOL_PERPLEXITY + ] + + +@registry.register_problem +class ImageCelebahq256(ImageCelebahq128): + """CelebA-HQ dataset, downsampled as 256x256.""" + + def dataset_filename(self): + return "image_celebahq-256" + + def preprocess_example(self, example, mode, hparams): + del mode, hparams # unused + example["inputs"].set_shape((256, 256, 3)) + return example + + +@registry.register_problem +class ImageCelebahq256Dmol(ImageCelebahq256): + """CelebA-HQ dataset with discretized mixture of logistics for evaluation.""" + + def eval_metrics(self): + return [ + metrics.Metrics.DMOL_PERPLEXITY + ] diff --git a/tensor2tensor/data_generators/cifar.py b/tensor2tensor/data_generators/cifar.py new file mode 100644 index 000000000..a5dfc01af --- /dev/null +++ b/tensor2tensor/data_generators/cifar.py @@ -0,0 +1,588 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""CIFAR.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tarfile +import numpy as np +import six + +from six.moves import cPickle + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.data_generators import mnist +from tensor2tensor.data_generators import problem +from tensor2tensor.layers import modalities +from tensor2tensor.utils import metrics +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +# URLs and filenames for CIFAR data. +_CIFAR10_URL = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz" +_CIFAR10_PREFIX = "cifar-10-batches-py/" +_CIFAR10_TRAIN_FILES = [ + "data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4", + "data_batch_5" +] +_CIFAR10_TEST_FILES = ["test_batch"] +_CIFAR10_IMAGE_SIZE = _CIFAR100_IMAGE_SIZE = 32 + +_CIFAR100_URL = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz" +_CIFAR100_PREFIX = "cifar-100-python/" +_CIFAR100_TRAIN_FILES = ["train"] +_CIFAR100_TEST_FILES = ["test"] + + +def _get_cifar(directory, url): + """Download and extract CIFAR to directory unless it is there.""" + filename = os.path.basename(url) + path = generator_utils.maybe_download(directory, filename, url) + tarfile.open(path, "r:gz").extractall(directory) + + +def cifar_generator(cifar_version, tmp_dir, training, how_many, start_from=0): + """Image generator for CIFAR-10 and 100. + + Args: + cifar_version: string; one of "cifar10" or "cifar100" + tmp_dir: path to temporary storage directory. + training: a Boolean; if true, we use the train set, otherwise the test set. + how_many: how many images and labels to generate. + start_from: from which image to start. + + Returns: + An instance of image_generator that produces CIFAR-10 images and labels. 
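+
+  Example (illustrative; downloads CIFAR-10 into tmp_dir if not present):
+    cifar_generator("cifar10", "/tmp/t2t-tmp", training=True, how_many=100)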
+ """ + if cifar_version == "cifar10": + url = _CIFAR10_URL + train_files = _CIFAR10_TRAIN_FILES + test_files = _CIFAR10_TEST_FILES + prefix = _CIFAR10_PREFIX + image_size = _CIFAR10_IMAGE_SIZE + label_key = "labels" + elif cifar_version == "cifar100" or cifar_version == "cifar20": + url = _CIFAR100_URL + train_files = _CIFAR100_TRAIN_FILES + test_files = _CIFAR100_TEST_FILES + prefix = _CIFAR100_PREFIX + image_size = _CIFAR100_IMAGE_SIZE + if cifar_version == "cifar100": + label_key = "fine_labels" + else: + label_key = "coarse_labels" + + _get_cifar(tmp_dir, url) + data_files = train_files if training else test_files + all_images, all_labels = [], [] + for filename in data_files: + path = os.path.join(tmp_dir, prefix, filename) + with tf.gfile.Open(path, "rb") as f: + if six.PY2: + data = cPickle.load(f) + else: + data = cPickle.load(f, encoding="latin1") + images = data["data"] + num_images = images.shape[0] + images = images.reshape((num_images, 3, image_size, image_size)) + all_images.extend([ + np.squeeze(images[j]).transpose((1, 2, 0)) for j in range(num_images) + ]) + labels = data[label_key] + all_labels.extend([labels[j] for j in range(num_images)]) + return image_utils.image_generator( + all_images[start_from:start_from + how_many], + all_labels[start_from:start_from + how_many]) + + +@registry.register_problem +class ImageCifar10Tune(mnist.ImageMnistTune): + """Cifar-10 Tune.""" + + @property + def num_channels(self): + return 3 + + @property + def class_labels(self): + return [ + "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", + "ship", "truck" + ] + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image.set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3]) + if mode == tf_estimator.ModeKeys.TRAIN: + image = image_utils.cifar_image_augmentation(image) + if not self._was_reversed: + image = tf.image.per_image_standardization(image) + example["inputs"] = image + return example + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return cifar_generator("cifar10", tmp_dir, True, 48000) + else: + return cifar_generator("cifar10", tmp_dir, True, 2000, 48000) + + +@registry.register_problem +class ImageCifar10(ImageCifar10Tune): + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return cifar_generator("cifar10", tmp_dir, True, 50000) + else: + return cifar_generator("cifar10", tmp_dir, False, 10000) + + +@registry.register_problem +class ImageCifar10Plain(ImageCifar10): + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image.set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3]) + if not self._was_reversed: + image = tf.image.per_image_standardization(image) + example["inputs"] = image + return example + + +@registry.register_problem +class ImageCifar10PlainGen(ImageCifar10Plain): + """CIFAR-10 32x32 for image generation without standardization preprep.""" + + def dataset_filename(self): + return "image_cifar10_plain" # Reuse CIFAR-10 plain data. 
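As an aside on `cifar_generator` above: each unpickled CIFAR batch stores images as flat channel-first rows, which the generator reshapes to `(N, 3, 32, 32)` and transposes to height-width-channel order before handing them to `image_utils.image_generator`. A standalone sketch of that conversion on dummy data (editor's addition, not part of this change):

```python
# Editor's sketch: the NCHW -> HWC conversion performed inside cifar_generator,
# shown on dummy data instead of the real pickled CIFAR batches.
import numpy as np

num_images, size = 2, 32
batch = np.zeros((num_images, 3 * size * size), dtype=np.uint8)   # raw flat rows
images = batch.reshape((num_images, 3, size, size))               # channel-first
hwc = [np.squeeze(images[j]).transpose((1, 2, 0)) for j in range(num_images)]
assert hwc[0].shape == (size, size, 3)                            # per-image HWC
```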
+
+  def preprocess_example(self, example, mode, unused_hparams):
+    example["inputs"].set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3])
+    example["inputs"] = tf.to_int64(example["inputs"])
+    return example
+
+
+@registry.register_problem
+class ImageCifar10PlainGenFlat(ImageCifar10PlainGen):
+  """CIFAR-10 for image generation as a flat array of 32*32*3=3072 elements."""
+
+  def preprocess_example(self, example, mode, unused_hparams):
+    example["inputs"].set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3])
+    example["inputs"] = tf.to_int64(example["inputs"])
+    example["inputs"] = tf.reshape(example["inputs"], (-1,))
+
+    del example["targets"]  # Ensure unconditional generation
+
+    return example
+
+  def hparams(self, defaults, model_hparams):
+    super(ImageCifar10PlainGenFlat, self).hparams(defaults, model_hparams)
+    # Switch to symbol modality
+    p = defaults
+    p.modality["inputs"] = modalities.ModalityType.SYMBOL_WEIGHTS_ALL
+    p.input_space_id = problem.SpaceID.GENERIC
+
+
+@registry.register_problem
+class ImageCifar10PlainRandomShift(ImageCifar10Plain):
+  """CIFAR-10 32x32 for image generation with random shift data-augmentation."""
+
+  def dataset_filename(self):
+    return "image_cifar10_plain"  # Reuse CIFAR-10 plain data.
+
+  def preprocess_example(self, example, mode, unused_hparams):
+    example["inputs"].set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3])
+    example["inputs"] = tf.to_int64(example["inputs"])
+    if mode == tf_estimator.ModeKeys.TRAIN:
+      example["inputs"] = image_utils.random_shift(
+          example["inputs"], wsr=0.1, hsr=0.1)
+    return example
+
+
+@registry.register_problem
+class ImageCifar10PlainGenDmol(ImageCifar10PlainGen):
+  """Discretized mixture of logistics problem."""
+
+  def dataset_filename(self):
+    return "image_cifar10_plain"  # Reuse CIFAR-10 plain data.
+
+  def eval_metrics(self):
+    return [
+        metrics.Metrics.DMOL_PERPLEXITY
+    ]
+
+
+@registry.register_problem
+class ImageCifar10Plain8(ImageCifar10):
+  """CIFAR-10 rescaled to 8x8 for output: Conditional image generation."""
+
+  def dataset_filename(self):
+    return "image_cifar10_plain"  # Reuse CIFAR-10 plain data.
+
+  def preprocess_example(self, example, mode, unused_hparams):
+    image = example["inputs"]
+    image = image_utils.resize_by_area(image, 8)
+    if not self._was_reversed:
+      image = tf.image.per_image_standardization(image)
+    example["inputs"] = image
+    return example
+
+
+@registry.register_problem
+class Img2imgCifar10(ImageCifar10):
+  """CIFAR-10 rescaled to 8x8 for input and 32x32 for output."""
+
+  def dataset_filename(self):
+    return "image_cifar10_plain"  # Reuse CIFAR-10 plain data.
+
+  def preprocess_example(self, example, unused_mode, unused_hparams):
+    inputs = example["inputs"]
+    # For Img2Img resize input and output images as desired.
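+    # Editor's note (not part of the original change): with the 32x32x3
+    # CIFAR-10 inputs this yields an 8x8x3 conditioning image, while the
+    # 32x32x3 original becomes the generation target below.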
+ example["inputs"] = image_utils.resize_by_area(inputs, 8) + example["targets"] = image_utils.resize_by_area(inputs, 32) + return example + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.IDENTITY, + "targets": modalities.ModalityType.IDENTITY} + p.vocab_size = {"inputs": 256, + "targets": 256} + p.batch_size_multiplier = 256 + p.input_space_id = 1 + p.target_space_id = 1 + + +@registry.register_problem +class ImageCifar100Tune(mnist.ImageMnistTune): + """Cifar-100 Tune.""" + + @property + def num_classes(self): + return 100 + + @property + def num_channels(self): + return 3 + + @property + def class_labels(self): + return [ + "beaver", + "dolphin", + "otter", + "seal", + "whale", + "aquarium fish", + "flatfish", + "ray", + "shark", + "trout", + "orchids", + "poppies", + "roses", + "sunflowers", + "tulips", + "bottles", + "bowls", + "cans", + "cups", + "plates", + "apples", + "mushrooms", + "oranges", + "pears", + "sweet peppers", + "clock", + "computer keyboard", + "lamp", + "telephone", + "television", + "bed", + "chair", + "couch", + "table", + "wardrobe", + "bee", + "beetle", + "butterfly", + "caterpillar", + "cockroach", + "bear", + "leopard", + "lion", + "tiger", + "wolf", + "bridge", + "castle", + "house", + "road", + "skyscraper", + "cloud", + "forest", + "mountain", + "plain", + "sea", + "camel", + "cattle", + "chimpanzee", + "elephant", + "kangaroo", + "fox", + "porcupine", + "possum", + "raccoon", + "skunk", + "crab", + "lobster", + "snail", + "spider", + "worm", + "baby", + "boy", + "girl", + "man", + "woman", + "crocodile", + "dinosaur", + "lizard", + "snake", + "turtle", + "hamster", + "mouse", + "rabbit", + "shrew", + "squirrel", + "maple", + "oak", + "palm", + "pine", + "willow", + "bicycle", + "bus", + "motorcycle", + "pickup truck", + "train", + "lawn-mower", + "rocket", + "streetcar", + "tank", + "tractor", + ] + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image.set_shape([_CIFAR100_IMAGE_SIZE, _CIFAR100_IMAGE_SIZE, 3]) + if mode == tf_estimator.ModeKeys.TRAIN: + image = image_utils.cifar_image_augmentation(image) + if not self._was_reversed: + image = tf.image.per_image_standardization(image) + example["inputs"] = image + return example + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return cifar_generator("cifar100", tmp_dir, True, 48000) + else: + return cifar_generator("cifar100", tmp_dir, True, 2000, 48000) + + +@registry.register_problem +class ImageCifar100(ImageCifar100Tune): + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return cifar_generator("cifar100", tmp_dir, True, 50000) + else: + return cifar_generator("cifar100", tmp_dir, False, 10000) + + +@registry.register_problem +class ImageCifar100Plain(ImageCifar100): + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image.set_shape([_CIFAR100_IMAGE_SIZE, _CIFAR100_IMAGE_SIZE, 3]) + if not self._was_reversed: + image = tf.image.per_image_standardization(image) + example["inputs"] = image + return example + + +@registry.register_problem +class ImageCifar100PlainGen(ImageCifar100Plain): + """CIFAR-100 32x32 for image generation without standardization preprep.""" + + def dataset_filename(self): + return "image_cifar100_plain" # Reuse CIFAR-100 plain data. 
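Each class marked with `@registry.register_problem` in these files becomes addressable by a snake_case name derived from the class name. A minimal lookup sketch (editor's addition; it assumes tensor2tensor is installed and that the default name derivation applies, e.g. `Img2imgCifar10` -> `img2img_cifar10`):

```python
# Editor's sketch: resolving a registered problem by its derived name.
# The problem name below is an assumption based on the registry's default
# CamelCase -> snake_case conversion.
from tensor2tensor import problems  # importing this registers the bundled problems

prob = problems.problem("img2img_cifar10")
print(type(prob).__name__)  # Expected: Img2imgCifar10
# The same string is what t2t-datagen / t2t-trainer take via --problem=...
```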
+ + def preprocess_example(self, example, mode, unused_hparams): + example["inputs"].set_shape([_CIFAR100_IMAGE_SIZE, _CIFAR100_IMAGE_SIZE, 3]) + example["inputs"] = tf.to_int64(example["inputs"]) + return example + + +@registry.register_problem +class ImageCifar100Plain8(ImageCifar100): + """CIFAR-100 rescaled to 8x8 for output: Conditional image generation.""" + + def dataset_filename(self): + return "image_cifar100_plain" # Reuse CIFAR-100 plain data. + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image = image_utils.resize_by_area(image, 8) + if not self._was_reversed: + image = tf.image.per_image_standardization(image) + example["inputs"] = image + return example + + +@registry.register_problem +class Img2imgCifar100(ImageCifar100): + """CIFAR-100 rescaled to 8x8 for input and 32x32 for output.""" + + def dataset_filename(self): + return "image_cifar100_plain" # Reuse CIFAR-100 plain data. + + def preprocess_example(self, example, unused_mode, unused_hparams): + inputs = example["inputs"] + # For Img2Img resize input and output images as desired. + example["inputs"] = image_utils.resize_by_area(inputs, 8) + example["targets"] = image_utils.resize_by_area(inputs, 32) + return example + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.IDENTITY, + "targets": modalities.ModalityType.IDENTITY} + p.vocab_size = {"inputs": 256, + "targets": 256} + p.batch_size_multiplier = 256 + p.max_expected_batch_size_per_shard = 4 + p.input_space_id = 1 + p.target_space_id = 1 + + +@registry.register_problem +class ImageCifar20Tune(mnist.ImageMnistTune): + """Cifar-20 Tune.""" + + @property + def num_classes(self): + return 20 + + @property + def num_channels(self): + return 3 + + @property + def class_labels(self): + return [ + "aquatic mammals", + "fish", + "flowers", + "food containers", + "fruit and vegetables", + "household electrical devices", + "household furniture", + "insects", + "large carnivores", + "large man-made outdoor things", + "large natural outdoor scenes", + "large omnivores and herbivores", + "medium-sized mammals", + "non-insect invertebrates", + "people", + "reptiles", + "small mammals", + "trees", + "vehicles 1", + "vehicles 2", + ] + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image.set_shape([_CIFAR100_IMAGE_SIZE, _CIFAR100_IMAGE_SIZE, 3]) + if mode == tf_estimator.ModeKeys.TRAIN: + image = image_utils.cifar_image_augmentation(image) + if not self._was_reversed: + image = tf.image.per_image_standardization(image) + example["inputs"] = image + return example + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return cifar_generator("cifar20", tmp_dir, True, 48000) + else: + return cifar_generator("cifar20", tmp_dir, True, 2000, 48000) + + +@registry.register_problem +class ImageCifar20(ImageCifar20Tune): + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return cifar_generator("cifar20", tmp_dir, True, 50000) + else: + return cifar_generator("cifar20", tmp_dir, False, 10000) + + +@registry.register_problem +class ImageCifar20Plain(ImageCifar20): + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image.set_shape([_CIFAR100_IMAGE_SIZE, _CIFAR100_IMAGE_SIZE, 3]) + if not self._was_reversed: + image = tf.image.per_image_standardization(image) + example["inputs"] = image + return example + + +@registry.register_problem +class 
ImageCifar20PlainGen(ImageCifar20Plain): + """CIFAR-20 32x32 for image generation without standardization preprep.""" + + def dataset_filename(self): + return "image_cifar20_plain" # Reuse CIFAR-20 plain data. + + def preprocess_example(self, example, mode, unused_hparams): + example["inputs"].set_shape([_CIFAR100_IMAGE_SIZE, _CIFAR100_IMAGE_SIZE, 3]) + example["inputs"] = tf.to_int64(example["inputs"]) + return example + + +@registry.register_problem +class ImageCifar20Plain8(ImageCifar20): + """CIFAR-20 rescaled to 8x8 for output: Conditional image generation.""" + + def dataset_filename(self): + return "image_cifar20_plain" # Reuse CIFAR-20 plain data. + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image = image_utils.resize_by_area(image, 8) + if not self._was_reversed: + image = tf.image.per_image_standardization(image) + example["inputs"] = image + return example diff --git a/tensor2tensor/data_generators/cipher.py b/tensor2tensor/data_generators/cipher.py new file mode 100644 index 000000000..a6b55a2c9 --- /dev/null +++ b/tensor2tensor/data_generators/cipher.py @@ -0,0 +1,228 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Cipher data generators.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import deque +import numpy as np + +from tensor2tensor.data_generators import algorithmic +from tensor2tensor.utils import registry + + +@registry.register_problem +class AlgorithmicCipherShift5(algorithmic.AlgorithmicProblem): + """Shift cipher.""" + + @property + def num_symbols(self): + return 5 + + @property + def distribution(self): + return [0.4, 0.3, 0.2, 0.08, 0.02] + + @property + def shift(self): + return 1 + + def generator(self, nbr_symbols, max_length, nbr_cases): + plain_vocab = range(nbr_symbols) + indices = generate_plaintext_random( + plain_vocab, self.distribution, nbr_cases, max_length) + codes = encipher_shift(indices, plain_vocab, self.shift) + for plain, code in zip(indices, codes): + yield {"inputs": plain, "targets": code} + + @property + def train_length(self): + return 100 + + @property + def dev_length(self): + return self.train_length + + +@registry.register_problem +class AlgorithmicCipherVigenere5(algorithmic.AlgorithmicProblem): + """Vinegre cipher.""" + + @property + def num_symbols(self): + return 5 + + @property + def distribution(self): + return [0.4, 0.3, 0.2, 0.08, 0.02] + + @property + def key(self): + return [1, 3] + + def generator(self, nbr_symbols, max_length, nbr_cases): + plain_vocab = range(nbr_symbols) + indices = generate_plaintext_random(plain_vocab, self.distribution, + nbr_cases, max_length) + codes = encipher_vigenere(indices, plain_vocab, self.key) + for plain, code in zip(indices, codes): + yield {"inputs": plain, "targets": code} + + @property + def train_length(self): + return 200 + + @property + def dev_length(self): + return 
self.train_length + + +@registry.register_problem +class AlgorithmicCipherShift200(AlgorithmicCipherShift5): + """Shift cipher.""" + + @property + def num_symbols(self): + return 200 + + @property + def distribution(self): + vals = range(self.num_symbols) + val_sum = sum(vals) + return [v / val_sum for v in vals] + + +@registry.register_problem +class AlgorithmicCipherVigenere200(AlgorithmicCipherVigenere5): + """Vinegre cipher.""" + + @property + def num_symbols(self): + return 200 + + @property + def distribution(self): + vals = range(self.num_symbols) + val_sum = sum(vals) + return [v / val_sum for v in vals] + + @property + def key(self): + return [1, 3] + + +class ShiftEncryptionLayer(object): + """A single layer for shift.""" + + def __init__(self, vocab, shift): + """Initialize shift layer. + + Args: + vocab: (list of String) the vocabulary + shift: (Integer) the amount of shift apply to the alphabet. + Positive number implies shift to the right, negative number + implies shift to the left. + """ + self.shift = shift + alphabet = vocab + shifted_alphabet = deque(alphabet) + shifted_alphabet.rotate(shift) + self.encrypt = dict(zip(alphabet, list(shifted_alphabet))) + self.decrypt = dict(zip(list(shifted_alphabet), alphabet)) + + def encrypt_character(self, character): + return self.encrypt[character] + + def decrypt_character(self, character): + return self.decrypt[character] + + +def generate_plaintext_random(plain_vocab, distribution, train_samples, + length): + """Generates samples of text from the provided vocabulary. + + Args: + plain_vocab: vocabulary. + distribution: distribution. + train_samples: samples for training. + length: length. + + Returns: + train_indices (np.array of Integers): random integers for training. + shape = [num_samples, length] + test_indices (np.array of Integers): random integers for testing. + shape = [num_samples, length] + plain_vocab (list of Integers): unique vocabularies. + """ + if distribution is not None: + assert len(distribution) == len(plain_vocab) + + train_indices = np.random.choice( + range(len(plain_vocab)), (train_samples, length), p=distribution) + + return train_indices + + +def encipher_shift(plaintext, plain_vocab, shift): + """Encrypt plain text with a single shift layer. + + Args: + plaintext (list of list of Strings): a list of plain text to encrypt. + plain_vocab (list of Integer): unique vocabularies being used. + shift (Integer): number of shift, shift to the right if shift is positive. + Returns: + ciphertext (list of Strings): encrypted plain text. + """ + ciphertext = [] + cipher = ShiftEncryptionLayer(plain_vocab, shift) + + for _, sentence in enumerate(plaintext): + cipher_sentence = [] + for _, character in enumerate(sentence): + encrypted_char = cipher.encrypt_character(character) + cipher_sentence.append(encrypted_char) + ciphertext.append(cipher_sentence) + + return ciphertext + + +def encipher_vigenere(plaintext, plain_vocab, key): + """Encrypt plain text with given key. + + Args: + plaintext (list of list of Strings): a list of plain text to encrypt. + plain_vocab (list of Integer): unique vocabularies being used. + key (list of Integer): key to encrypt cipher using Vigenere table. + + Returns: + ciphertext (list of Strings): encrypted plain text. 
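+
+  Example (editor's illustration, not in the original change): with
+  plain_vocab=range(5) and key=[1, 3], the plaintext [0, 1, 2] is enciphered
+  to [4, 3, 1], because position j is passed through the shift layer selected
+  by key[j % len(key)].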
+ """ + ciphertext = [] + # generate Vigenere table + layers = [ + ShiftEncryptionLayer(plain_vocab, i) for i in range(len(plain_vocab)) + ] + + for i, sentence in enumerate(plaintext): + cipher_sentence = [] + for j, character in enumerate(sentence): + key_idx = key[j % len(key)] + encrypted_char = layers[key_idx].encrypt_character(character) + cipher_sentence.append(encrypted_char) + ciphertext.append(cipher_sentence) + + return ciphertext diff --git a/tensor2tensor/data_generators/cleaner_en_xx.py b/tensor2tensor/data_generators/cleaner_en_xx.py new file mode 100644 index 000000000..2d95b6045 --- /dev/null +++ b/tensor2tensor/data_generators/cleaner_en_xx.py @@ -0,0 +1,176 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# encoding=UTF-8 +"""An unsophisticated data cleaner for en-.. sentence translation pairs. + +This pattern-based English-... cleaner aims fairly aggressively for clean +sentence-like pairs. It discards pairs if the English member has signs of +non-sentence noise or origin, e.g., lacks expected punctuation or has suspicious +character sequences. It also simplistically detects and corrects some missing +sentence breaks. It makes minimal assumptions about the other language, mainly +that its sentences can end in one of '.!?' and that its sentences can start +with an ASCII capital letter. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + + +import itertools +import re + +from tensor2tensor.data_generators import text_encoder + +import tensorflow.compat.v1 as tf + + +_RE_GOOD_S_START = re.compile(r'^["“”]?[A-Z]') +_RE_GOOD_S_END = re.compile(r'\w[.?!]["”]?$', re.UNICODE) + +_RE_LABEL_COLON = re.compile(r'^\w+\.?( \w+)?: ', re.UNICODE) +_RE_DIGIT_SPACE_DIGIT = re.compile(r'\d +\d', re.UNICODE) +_RE_ALL_CAP_WORDS = re.compile(r'^[A-Z]\S*(\s+[A-Z]\S+)+\s*$') + +_RE_DQ_ONE = re.compile(r'^[^"“”]*["“”][^"“”]*$') +_RE_DQ_INITIAL = re.compile(r'^["“”]([^"“”]+)$') +_RE_DQ_FINAL = re.compile(r'^[^"“”]+["“”]$') +_RE_DQ_LINE = re.compile(r'^["“”].*["“”]$') + +_RE_DQ_MANY = re.compile(r'(["“”].*){3,}') +_RE_SQ_MANY = re.compile(r'''(['‘’][^st].*){3,}''') +_RE_CHARS_QQ = re.compile(r'''["“”'‘’]\s*["“”'‘’]''') +_RE_SPACE_PUNCT_SPACE = re.compile(r'''\s["“”'‘’,:;]\s''') + +_RE_COPYRIGHT = re.compile(r'©|^Copyright|^\(C\)') +_RE_UNMATCHED_PAREN_LEFT = re.compile(r'[(][^)]*$') +_RE_UNMATCHED_PAREN_RIGHT = re.compile(r'^[^(]*[)]') +_RE_TAGLINE_CITY = re.compile(r'^[A-Z]{2,}(\s+[A-Z]+)*\s+-') +_RE_CHARS_UPPER_UNDERSCORE = re.compile(r'^[A-Z]+[a-z]*_') + + +def paracrawl_v3_pairs(paracrawl_file): + """Generates raw (English, other) pairs from a ParaCrawl V3.0 data file. + + Args: + paracrawl_file: A ParaCrawl V3.0 en-.. data file. + Yields: + Pairs of (sentence_en, sentence_xx), as Unicode strings. + Raises: + StopIteration: If the file ends while this method is in the middle of + creating a translation pair. 
+  """
+  raw_sentences = _raw_sentences(paracrawl_file)
+  for s_en in raw_sentences:
+    try:
+      s_xx = next(raw_sentences)
+      if s_en and s_xx:  # Prevent empty string examples.
+        yield s_en, s_xx
+    except StopIteration:
+      tf.logging.error(
+          'Unmatched final sentence while reading in sentence pairs: [%s]',
+          s_en)
+
+
+def _raw_sentences(paracrawl_file):
+  """Generates Unicode strings, one for each <seg> in a ParaCrawl data file.
+
+  Also decodes some of the most common HTML entities found in ParaCrawl data.
+
+  Args:
+    paracrawl_file: A ParaCrawl V3.0 en-.. data file.
+  Yields:
+    One Unicode string for each <seg> element in the ParaCrawl data file.
+  """
+  for line_utf8 in paracrawl_file:
+    line_uni = line_utf8.decode('UTF-8')
+    text_match = re.match(r' +<seg>(.*)</seg>$', line_uni)
+    if text_match:
+      txt = text_match.group(1)
+      txt = re.sub(r'&amp;', r'&', txt)
+      txt = re.sub(r'& ?amp;', r'&', txt)
+      txt = re.sub(r'& ?apos;', r"'", txt)
+      txt = re.sub(r'& ?quot;', r'"', txt)
+      txt = re.sub(r'& ?lt;', r'<', txt)
+      txt = re.sub(r'& ?gt;', r'>', txt)
+      yield txt
+
+
+def clean_en_xx_pairs(en_xx_pairs):
+  """Generates a cleaned-up stream of (English, other) translation pairs.
+
+  Cleaning includes both filtering and simplistic sentence splitting, with
+  minimal assumptions on the non-English pair member: (1) All filtering is
+  done based on the English member of the pair, and (2) sentence splitting
+  assumes only that sentences can end with one of '.!?' and begin with an
+  ASCII uppercase letter. Input pairs that would get split into different
+  numbers of sentences (e.g., three English sentences vs. two German ones) are
+  discarded.
+
+  Args:
+    en_xx_pairs: A stream (iterable) of Unicode string pairs. Each item in the
+      stream should be a (sentence_en, sentence_xx) pair.
+  Yields:
+    Cleaned-up (sentence_en, sentence_xx) pairs.
+ """ + for s1, s2 in en_xx_pairs: + if _regex_filter(s1): + continue + s1_list, s2_list = _split_sentences(s1, s2) + if len(s1_list) != len(s2_list): + continue # discard this pair + elif len(s1_list) == 1: + yield s1, s2 + else: + for s1_subsentence, s2_subsentence in itertools.izip(s1_list, s2_list): + if _regex_filter(s1_subsentence): + continue + yield s1_subsentence, s2_subsentence + + +def _regex_filter(sentence): + return (not _is_match(sentence, _RE_GOOD_S_START) + or not _is_match(sentence, _RE_GOOD_S_END) + or _is_match(sentence, _RE_LABEL_COLON) + or _is_match(sentence, _RE_DIGIT_SPACE_DIGIT) + or _is_match(sentence, _RE_DQ_ONE) + or _is_match(sentence, _RE_DQ_INITIAL) + or _is_match(sentence, _RE_DQ_FINAL) + or _is_match(sentence, _RE_DQ_LINE) + or _is_match(sentence, _RE_DQ_MANY) + or _is_match(sentence, _RE_SQ_MANY) + or _is_match(sentence, _RE_CHARS_QQ) + or _is_match(sentence, _RE_SPACE_PUNCT_SPACE) + or _is_match(sentence, _RE_COPYRIGHT) + or _is_match(sentence, _RE_UNMATCHED_PAREN_LEFT) + or _is_match(sentence, _RE_UNMATCHED_PAREN_RIGHT) + or _is_match(sentence, _RE_TAGLINE_CITY) + or _is_match(sentence, _RE_CHARS_UPPER_UNDERSCORE)) + + +def _is_match(sentence, regex): + return regex.search(sentence) + + +def _split_sentences(s1, s2): + s1 = text_encoder.native_to_unicode(s1) + s2 = text_encoder.native_to_unicode(s2) + s1 = re.sub(r'(\w[A-Z]|[0-9a-z])([.!?]) ([A-Z])', r'\1\2__|__\3', s1) + s2 = re.sub(r'([^0-9][.!?]) ([A-Z])', r'\1__|__\2', s2) + s1_subsentences = s1.split('__|__') + s2_subsentences = s2.split('__|__') + return s1_subsentences, s2_subsentences diff --git a/tensor2tensor/data_generators/cnn_dailymail.py b/tensor2tensor/data_generators/cnn_dailymail.py new file mode 100644 index 000000000..e87d17d14 --- /dev/null +++ b/tensor2tensor/data_generators/cnn_dailymail.py @@ -0,0 +1,380 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for the CNN and Daily Mail datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import hashlib +import io +import os +import random +import tarfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import wiki_lm +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +# Links to data from http://cs.nyu.edu/~kcho/DMQA/ +_CNN_STORIES_DRIVE_URL = ("https://drive.google.com/uc?" + "export=download&id=0BwmD_VLjROrfTHk4NFg2SndKcjQ") + +_DAILYMAIL_STORIES_DRIVE_URL = ("https://drive.google.com/uc?export=download&id" + "=0BwmD_VLjROrfM1BxdkxVaTY2bWs") + +# Note: using See et al. 
(2017) as reference for data generation +# For more info, use the links below + +# Train/Dev/Test Splits for summarization data +_TRAIN_URLS = ("https://raw.githubusercontent.com/abisee/cnn-dailymail/" + "master/url_lists/all_train.txt") +_DEV_URLS = ("https://raw.githubusercontent.com/abisee/cnn-dailymail/" + "master/url_lists/all_val.txt") +_TEST_URLS = ("https://raw.githubusercontent.com/abisee/cnn-dailymail/" + "master/url_lists/all_test.txt") + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + +# Techniques for data prep from See et al. (2017) +dm_single_close_quote = u"\u2019" # unicode +dm_double_close_quote = u"\u201d" +# Acceptable ways to end a sentence. +END_TOKENS = [ + u".", u"!", u"?", u"...", u"'", u"`", u"\"", dm_single_close_quote, + dm_double_close_quote, u")" +] + + +def _maybe_download_corpora(tmp_dir, dataset_split): + """Download corpora if necessary and unzip them. + + Args: + tmp_dir: directory containing dataset. + dataset_split: whether we're in train/dev/test mode. + + Returns: + List of all files generated and path to file containing + train/dev/test split info. + """ + cnn_filename = "cnn_stories.tgz" + cnn_finalpath = os.path.join(tmp_dir, "cnn/stories/") + dailymail_filename = "dailymail_stories.tgz" + dailymail_finalpath = os.path.join(tmp_dir, "dailymail/stories/") + if not tf.gfile.Exists(cnn_finalpath): + cnn_file = generator_utils.maybe_download_from_drive( + tmp_dir, cnn_filename, _CNN_STORIES_DRIVE_URL) + with tarfile.open(cnn_file, "r:gz") as cnn_tar: + cnn_tar.extractall(tmp_dir) + if not tf.gfile.Exists(dailymail_finalpath): + dailymail_file = generator_utils.maybe_download_from_drive( + tmp_dir, dailymail_filename, _DAILYMAIL_STORIES_DRIVE_URL) + with tarfile.open(dailymail_file, "r:gz") as dailymail_tar: + dailymail_tar.extractall(tmp_dir) + + cnn_files = tf.gfile.Glob(cnn_finalpath + "*") + dailymail_files = tf.gfile.Glob(dailymail_finalpath + "*") + all_files = cnn_files + dailymail_files + + if dataset_split == problem.DatasetSplit.TRAIN: + urls_path = generator_utils.maybe_download(tmp_dir, "all_train.txt", + _TRAIN_URLS) + elif dataset_split == problem.DatasetSplit.EVAL: + urls_path = generator_utils.maybe_download(tmp_dir, "all_val.txt", + _DEV_URLS) + else: + urls_path = generator_utils.maybe_download(tmp_dir, "all_test.txt", + _TEST_URLS) + + return all_files, urls_path + + +def example_splits(url_file, all_files): + """Generate splits of the data.""" + + def generate_hash(inp): + """Generate a sha1 hash to match the raw url to the filename extracted.""" + h = hashlib.sha1() + h.update(inp) + return h.hexdigest() + + all_files_map = {f.split("/")[-1]: f for f in all_files} + + urls = [line.strip().encode("utf-8") for line in tf.gfile.Open(url_file)] + + filelist = [] + for url in urls: + url_hash = generate_hash(url) + filename = url_hash + ".story" + if filename not in all_files_map: + tf.logging.info("Missing file: %s" % url) + continue + filelist.append(all_files_map[filename]) + + tf.logging.info("Found %d examples" % len(filelist)) + + return filelist + + +def example_generator(all_files, urls_path, sum_token): + """Generate examples.""" + + def fix_run_on_sents(line): + if u"@highlight" in line: + return line + if not line: + return line + if line[-1] in END_TOKENS: + return line + return line + u"." 
+
+  filelist = example_splits(urls_path, all_files)
+  story_summary_split_token = u" <summary> " if sum_token else " "
+
+  for story_file in filelist:
+    story = []
+    summary = []
+    reading_highlights = False
+    for line in tf.gfile.Open(story_file, "rb"):
+      line = text_encoder.to_unicode_utf8(line.strip())
+      line = fix_run_on_sents(line)
+      if not line:
+        continue
+      elif line.startswith(u"@highlight"):
+        if not story:
+          break  # No article text.
+        reading_highlights = True
+      elif reading_highlights:
+        summary.append(line)
+      else:
+        story.append(line)
+
+    if (not story) or not summary:
+      continue
+
+    yield " ".join(story) + story_summary_split_token + " ".join(summary)
+
+
+def _story_summary_split(story):
+  split_str = u" <summary> "
+  split_str_len = len(split_str)
+  split_pos = story.find(split_str)
+  return story[:split_pos], story[split_pos + split_str_len:]  # story, summary
+
+
+def write_raw_text_to_files(all_files, urls_path, dataset_split, tmp_dir):
+  """Write text to files."""
+
+  def write_to_file(all_files, urls_path, tmp_dir, filename):
+    """Write text to files."""
+    with io.open(
+        os.path.join(tmp_dir, filename + ".source"), "w",
+        encoding="utf-8") as fstory:
+      with io.open(
+          os.path.join(tmp_dir, filename + ".target"), "w",
+          encoding="utf-8") as fsummary:
+        for example in example_generator(all_files, urls_path, sum_token=True):
+          story, summary = _story_summary_split(example)
+          fstory.write(story + "\n")
+          fsummary.write(summary + "\n")
+
+  if dataset_split == problem.DatasetSplit.TRAIN:
+    filename = "cnndm.train"
+  elif dataset_split == problem.DatasetSplit.EVAL:
+    filename = "cnndm.dev"
+  else:
+    filename = "cnndm.test"
+
+  tf.logging.info("Writing %s" % filename)
+  write_to_file(all_files, urls_path, tmp_dir, filename)
+
+
+@registry.register_problem
+class SummarizeCnnDailymail32k(text_problems.Text2TextProblem):
+  """Summarize CNN and Daily Mail articles to their summary highlights."""
+
+  def generate_text_for_vocab(self, data_dir, tmp_dir):
+    del data_dir
+    all_files, urls_path = _maybe_download_corpora(tmp_dir,
+                                                   problem.DatasetSplit.TRAIN)
+    return example_generator(all_files, urls_path, sum_token=False)
+
+  @property
+  def dataset_splits(self):
+    """Splits of data to produce and number of output shards for each."""
+    return [{
+        "split": problem.DatasetSplit.TRAIN,
+        "shards": 100,
+    }, {
+        "split": problem.DatasetSplit.EVAL,
+        "shards": 10,
+    }, {
+        "split": problem.DatasetSplit.TEST,
+        "shards": 10,
+    }]
+
+  def is_generate_per_split(self):
+    return True
+
+  def generate_samples(self, data_dir, tmp_dir, dataset_split):
+    del data_dir
+    all_files, urls_path = _maybe_download_corpora(tmp_dir, dataset_split)
+    write_raw_text_to_files(all_files, urls_path, dataset_split, tmp_dir)
+    for example in example_generator(all_files, urls_path, sum_token=True):
+      story, summary = _story_summary_split(example)
+      yield {"inputs": story, "targets": summary}
+
+
+@registry.register_problem
+class SummarizeCnnDailymailWikiLMSharedVocab(SummarizeCnnDailymail32k):
+  """Summarize CNN and Daily Mail articles using the Wiki 32k vocab."""
+
+  @property
+  def use_vocab_from_other_problem(self):
+    return wiki_lm.LanguagemodelEnWiki32k()
+
+
+@registry.register_problem
+class SummarizeCnnDailymailWikiLMSharedVocab64k(SummarizeCnnDailymail32k):
+  """Summarize CNN and Daily Mail articles using the Wiki 64k vocab."""
+
+  @property
+  def use_vocab_from_other_problem(self):
+    return wiki_lm.LanguagemodelEnWiki64k()
+
+
+@registry.register_problem
+class
SummarizeCnnDailymailWikiLMMultiVocab64k(SummarizeCnnDailymail32k): + """Summarize CNN and Daily Mail articles using multi-lingual 64k vocab.""" + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelDeEnFrRoWiki64k() + + +@registry.register_problem +class SummarizeCnnDailymailMulti64kPacked1k(SummarizeCnnDailymail32k): + """Summarize CNN and Daily Mail articles using multi-lingual 64k vocab.""" + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelDeEnFrRoWiki64k() + + @property + def packed_length(self): + return 1024 + + @property + def num_training_examples(self): + return 252600 + + @property + def inputs_prefix(self): + return "CNN Daily Mail article to summary " + + @property + def targets_prefix(self): + return "CNN Daily Mail summary to article " + + +@registry.register_problem +class SummarizeFracCnnDailymailWikiLMSharedVocab64k(SummarizeCnnDailymail32k): + """Summarize a fraction of CNN/DM articles using the Wiki 64k vocab.""" + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelEnWiki64k() + + def fraction_of_data(self): + return 1. + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + del data_dir + all_data = [] + all_files, urls_path = _maybe_download_corpora(tmp_dir, dataset_split) + write_raw_text_to_files(all_files, urls_path, dataset_split, tmp_dir) + for example in example_generator(all_files, urls_path, sum_token=True): + story, summary = _story_summary_split(example) + all_data.append((story, summary)) + + if dataset_split == problem.DatasetSplit.TRAIN: + random.shuffle(all_data) + fractional_len = int(self.fraction_of_data() * len(all_data)) + all_data = all_data[:fractional_len] + + for story, summary in all_data: + yield {"inputs": story, "targets": summary} + + +@registry.register_problem +class SummarizeFrac0p1CnnDailymailWikiLMSharedVocab64k( + SummarizeFracCnnDailymailWikiLMSharedVocab64k): + + def fraction_of_data(self): + return 0.001 + + +@registry.register_problem +class SummarizeFrac1CnnDailymailWikiLMSharedVocab64k( + SummarizeFracCnnDailymailWikiLMSharedVocab64k): + + def fraction_of_data(self): + return 0.01 + + +@registry.register_problem +class SummarizeFrac2CnnDailymailWikiLMSharedVocab64k( + SummarizeFracCnnDailymailWikiLMSharedVocab64k): + + def fraction_of_data(self): + return 0.02 + + +@registry.register_problem +class SummarizeFrac5CnnDailymailWikiLMSharedVocab64k( + SummarizeFracCnnDailymailWikiLMSharedVocab64k): + + def fraction_of_data(self): + return 0.05 + + +@registry.register_problem +class SummarizeFrac10CnnDailymailWikiLMSharedVocab64k( + SummarizeFracCnnDailymailWikiLMSharedVocab64k): + + def fraction_of_data(self): + return 0.1 + + +@registry.register_problem +class SummarizeFrac20CnnDailymailWikiLMSharedVocab64k( + SummarizeFracCnnDailymailWikiLMSharedVocab64k): + + def fraction_of_data(self): + return 0.2 + + +@registry.register_problem +class SummarizeFrac50CnnDailymailWikiLMSharedVocab64k( + SummarizeFracCnnDailymailWikiLMSharedVocab64k): + + def fraction_of_data(self): + return 0.5 diff --git a/tensor2tensor/data_generators/cola.py b/tensor2tensor/data_generators/cola.py new file mode 100644 index 000000000..366e7db8d --- /dev/null +++ b/tensor2tensor/data_generators/cola.py @@ -0,0 +1,113 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for the Corpus of Liguistic Acceptability.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + +EOS = text_encoder.EOS + + +@registry.register_problem +class Cola(text_problems.Text2ClassProblem): + """Corpus of Linguistic Acceptability classification problems.""" + + # Link to data from GLUE: https://gluebenchmark.com/tasks + _COLA_URL = ("https://firebasestorage.googleapis.com/v0/b/" + "mtl-sentence-representations.appspot.com/o/" + "data%2FCoLA.zip?alt=media&token=46d5e637-3411-" + "4188-bc44-5809b5bfb5f4") + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2**13 # 8k vocab suffices for this small dataset. + + @property + def num_classes(self): + return 2 + + def class_labels(self, data_dir): + del data_dir + # Note this binary classification is different from usual MNLI. + return ["unacceptable", "acceptable"] + + def _maybe_download_corpora(self, tmp_dir): + cola_filename = "CoLA.zip" + cola_finalpath = os.path.join(tmp_dir, "CoLA") + if not tf.gfile.Exists(cola_finalpath): + zip_filepath = generator_utils.maybe_download( + tmp_dir, cola_filename, self._COLA_URL) + zip_ref = zipfile.ZipFile(zip_filepath, "r") + zip_ref.extractall(tmp_dir) + zip_ref.close() + + return cola_finalpath + + def example_generator(self, filename): + for line in tf.gfile.Open(filename, "rb"): + line = text_encoder.to_unicode_utf8(line.strip()) + _, label, _, sent = line.split("\t") + yield { + "inputs": sent, + "label": int(label) + } + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + cola_dir = self._maybe_download_corpora(tmp_dir) + if dataset_split == problem.DatasetSplit.TRAIN: + filesplit = "train.tsv" + else: + filesplit = "dev.tsv" + + filename = os.path.join(cola_dir, filesplit) + for example in self.example_generator(filename): + yield example + + +@registry.register_problem +class ColaCharacters(Cola): + """Corpus of Linguistic Acceptability problems, character level""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.COLA diff --git a/tensor2tensor/data_generators/common_voice.py b/tensor2tensor/data_generators/common_voice.py new file mode 100644 index 000000000..ef3808f65 --- /dev/null +++ b/tensor2tensor/data_generators/common_voice.py @@ -0,0 +1,251 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Mozilla Common Voice dataset. + +Note: Generating the full set of examples can take upwards of 5 hours. +As the Common Voice data are distributed in MP3 format, experimenters will need +to have both SoX (http://sox.sourceforge.net) and on Linux, the libsox-fmt-mp3 +package installed. The original samples will be downsampled by the encoder. +""" + +import csv +import os +import tarfile +from tensorflow.compat.v1 import estimator as tf_estimator +import tqdm # pylint: disable=g-bad-import-order +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import speech_recognition +from tensor2tensor.utils import registry + + +_COMMONVOICE_URL = "https://common-voice-data-download.s3.amazonaws.com/cv_corpus_v1.tar.gz" # pylint: disable=line-too-long + +_COMMONVOICE_TRAIN_DATASETS = ["cv-valid-train", "cv-other-train"] +_COMMONVOICE_DEV_DATASETS = ["cv-valid-dev", "cv-other-dev"] +_COMMONVOICE_TEST_DATASETS = ["cv-valid-test", "cv-other-test"] + + +def _collect_data(directory): + """Traverses directory collecting input and target files. + + Args: + directory: base path to extracted audio and transcripts. + Returns: + list of (media_base, media_filepath, label) tuples + """ + # Returns: + data_files = [] + transcripts = [ + filename for filename in os.listdir(directory) + if filename.endswith(".csv") + ] + for transcript in transcripts: + transcript_path = os.path.join(directory, transcript) + with open(transcript_path, "r") as transcript_file: + transcript_reader = csv.reader(transcript_file) + # skip header + _ = next(transcript_reader) + for transcript_line in transcript_reader: + media_name, label = transcript_line[0:2] + filename = os.path.join(directory, media_name) + data_files.append((media_name, filename, label)) + return data_files + + +def _file_exists(path, filename): + """Checks if the filename exists under the path.""" + return os.path.isfile(os.path.join(path, filename)) + + +def _is_relative(path, filename): + """Checks if the filename is relative, not absolute.""" + return os.path.abspath(os.path.join(path, filename)).startswith(path) + + +@registry.register_problem() +class CommonVoice(speech_recognition.SpeechRecognitionProblem): + """Problem spec for Commonvoice using clean and noisy data.""" + + # Select only the clean data + TRAIN_DATASETS = _COMMONVOICE_TRAIN_DATASETS[:1] + DEV_DATASETS = _COMMONVOICE_DEV_DATASETS[:1] + TEST_DATASETS = _COMMONVOICE_TEST_DATASETS[:1] + + @property + def num_shards(self): + return 100 + + @property + def use_subword_tokenizer(self): + return False + + @property + def num_dev_shards(self): + return 1 + + @property + def num_test_shards(self): + return 1 + + @property + def use_train_shards_for_dev(self): + """If true, we only generate training data and hold out shards for dev.""" + return False + + def generator(self, + data_dir, + tmp_dir, + datasets, + eos_list=None, + start_from=0, + how_many=0): 
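+    # Editor's note (not part of the original change): this generator downloads
+    # the Common Voice tarball if needed, extracts only the members that are
+    # missing and have safe relative paths, encodes each clip with the
+    # problem's audio encoder, and yields one dict per utterance with
+    # "waveforms", "targets" (the encoded transcript) and bookkeeping fields.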
+ del eos_list + i = 0 + + filename = os.path.basename(_COMMONVOICE_URL) + compressed_file = generator_utils.maybe_download(tmp_dir, filename, + _COMMONVOICE_URL) + + read_type = "r:gz" if filename.endswith(".tgz") else "r" + with tarfile.open(compressed_file, read_type) as corpus_tar: + # Create a subset of files that don't already exist. + # tarfile.extractall errors when encountering an existing file + # and tarfile.extract is extremely slow. For security, check that all + # paths are relative. + members = [ + f for f in corpus_tar if _is_relative(tmp_dir, f.name) and + not _file_exists(tmp_dir, f.name) + ] + corpus_tar.extractall(tmp_dir, members=members) + + raw_data_dir = os.path.join(tmp_dir, "cv_corpus_v1") + data_tuples = _collect_data(raw_data_dir) + encoders = self.feature_encoders(data_dir) + audio_encoder = encoders["waveforms"] + text_encoder = encoders["targets"] + for dataset in datasets: + data_tuples = (tup for tup in data_tuples if tup[0].startswith(dataset)) + for utt_id, media_file, text_data in tqdm.tqdm( + sorted(data_tuples)[start_from:]): + if how_many > 0 and i == how_many: + return + i += 1 + wav_data = audio_encoder.encode(media_file) + yield { + "waveforms": wav_data, + "waveform_lens": [len(wav_data)], + "targets": text_encoder.encode(text_data), + "raw_transcript": [text_data], + "utt_id": [utt_id], + "spk_id": ["unknown"], + } + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + train_paths = self.training_filepaths( + data_dir, self.num_shards, shuffled=False) + dev_paths = self.dev_filepaths( + data_dir, self.num_dev_shards, shuffled=False) + test_paths = self.test_filepaths( + data_dir, self.num_test_shards, shuffled=True) + + generator_utils.generate_files( + self.generator(data_dir, tmp_dir, self.TEST_DATASETS), test_paths) + + if self.use_train_shards_for_dev: + all_paths = train_paths + dev_paths + generator_utils.generate_files( + self.generator(data_dir, tmp_dir, self.TRAIN_DATASETS), all_paths) + generator_utils.shuffle_dataset(all_paths) + else: + generator_utils.generate_dataset_and_shuffle( + self.generator(data_dir, tmp_dir, self.TRAIN_DATASETS), train_paths, + self.generator(data_dir, tmp_dir, self.DEV_DATASETS), dev_paths) + + +@registry.register_problem() +class CommonVoiceTrainFullTestClean(CommonVoice): + """Problem to train on full set, but evaluate on clean data only.""" + + def training_filepaths(self, data_dir, num_shards, shuffled): + return CommonVoice.training_filepaths(self, data_dir, num_shards, shuffled) + + def dev_filepaths(self, data_dir, num_shards, shuffled): + return CommonVoiceClean.dev_filepaths(self, data_dir, num_shards, shuffled) + + def test_filepaths(self, data_dir, num_shards, shuffled): + return CommonVoiceClean.test_filepaths(self, data_dir, num_shards, shuffled) + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + raise Exception("Generate Commonvoice and Commonvoice_clean data.") + + def filepattern(self, data_dir, mode, shard=None): + """Get filepattern for data files for mode. + + Matches mode to a suffix. + * DatasetSplit.TRAIN: train + * DatasetSplit.EVAL: dev + * DatasetSplit.TEST: test + * tf.estimator.ModeKeys.PREDICT: dev + + Args: + data_dir: str, data directory. + mode: DatasetSplit + shard: int, if provided, will only read data from the specified shard. 
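+
+    For example (editor's illustration): TRAIN resolves to
+    "<data_dir>/common_voice-train*", while EVAL and PREDICT resolve to
+    "<data_dir>/common_voice_clean-dev*".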
+ + Returns: + filepattern str + """ + shard_str = "-%05d" % shard if shard is not None else "" + if mode == problem.DatasetSplit.TRAIN: + path = os.path.join(data_dir, "common_voice") + suffix = "train" + elif mode in [problem.DatasetSplit.EVAL, tf_estimator.ModeKeys.PREDICT]: + path = os.path.join(data_dir, "common_voice_clean") + suffix = "dev" + else: + assert mode == problem.DatasetSplit.TEST + path = os.path.join(data_dir, "common_voice_clean") + suffix = "test" + + return "%s-%s%s*" % (path, suffix, shard_str) + + +@registry.register_problem() +class CommonVoiceClean(CommonVoice): + """Problem spec for Common Voice using clean train and clean eval data.""" + + # Select only the "clean" data (crowdsourced quality control). + TRAIN_DATASETS = _COMMONVOICE_TRAIN_DATASETS[:1] + DEV_DATASETS = _COMMONVOICE_DEV_DATASETS[:1] + TEST_DATASETS = _COMMONVOICE_TEST_DATASETS[:1] + + +@registry.register_problem() +class CommonVoiceNoisy(CommonVoice): + """Problem spec for Common Voice using noisy train and noisy eval data.""" + + # Select only the "other" data. + TRAIN_DATASETS = _COMMONVOICE_TRAIN_DATASETS[1:] + DEV_DATASETS = _COMMONVOICE_DEV_DATASETS[1:] + TEST_DATASETS = _COMMONVOICE_TEST_DATASETS[1:] + + +def set_common_voice_length_hparams(hparams): + hparams.max_length = 1650 * 80 + hparams.max_input_seq_length = 1650 + hparams.max_target_seq_length = 350 + return hparams diff --git a/tensor2tensor/data_generators/common_voice_test.py b/tensor2tensor/data_generators/common_voice_test.py new file mode 100644 index 000000000..3798b4240 --- /dev/null +++ b/tensor2tensor/data_generators/common_voice_test.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.data_generators.common_voice.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from tensor2tensor.data_generators import common_voice + +import tensorflow.compat.v1 as tf + +pkg_dir, _ = os.path.split(__file__) +_TESTDATA = os.path.join(pkg_dir, "test_data") + + +class CommonVoiceTest(tf.test.TestCase): + + def testCollectData(self): + output = common_voice._collect_data(_TESTDATA) + self.assertEqual(1, len(output)) + + # NOTE: No header. + self.assertTrue("my_media" == output[0][0]) + self.assertTrue("my_label" == output[0][2]) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/concatenate_examples.py b/tensor2tensor/data_generators/concatenate_examples.py deleted file mode 100644 index b346b6c08..000000000 --- a/tensor2tensor/data_generators/concatenate_examples.py +++ /dev/null @@ -1,180 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -r"""Convert seq-seq examples to "concatenated" examples. - -The concatenated example has no "inputs". -Instead the source is at the beginning of the target. - -We can now use a simple language model. - -Example: -seq-seq mode: -{ - "inputs": subtokenizer.encode("I love you.") + [1] - "targets": subtokenizer.encode("Je t'aime.") + [1] -} --> -concatenated mode: -{ - "inputs": [0] - "targets": (subtokenizer.encode("source English I love you.") + [1] - + subtokenizer.encode("target French Je t'aime.") + [1]) -} - -We add a dummy feature "inputs"=[0] for compatability with seq-to-seq models. - -If FLAGS.combine_to_length is nonzero, then we combine multiple examples into -examples of a constant length, possibly with some padding at the end. - -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import random - -# Dependency imports - -from tensor2tensor.data_generators import generator_utils -from tensor2tensor.data_generators import text_encoder -import tensorflow as tf - -tf.app.flags.DEFINE_string("vocab_file", "", - "SubwordTextEncoder vocabulary file") - -tf.app.flags.DEFINE_boolean( - "random_reverse", False, - "If true, write half of the example with source/target reversed") - -tf.app.flags.DEFINE_boolean( - "count_everything", False, - "If true, assign positive weights to designators, source and target. " - "If false, assign positive weights only to target.") - -tf.app.flags.DEFINE_string("source_domain_string", "English", "") -tf.app.flags.DEFINE_string("target_domain_string", "French", "") - -tf.app.flags.DEFINE_integer( - "combine_to_length", 0, - "If positive, concatenate examples to form examples with target length " - " equal to this value. Targets are padded with subtoken id=0.") - -tf.app.flags.DEFINE_string("in_file", "", "input filename") - -tf.app.flags.DEFINE_string( - "out_prefix", "/usr/local/google/tmp/concat", - "The output filename is equal to out_prefix plus " - "the last 15 characters of in_file. (e.g. 
-00001-of-00100)") - -FLAGS = tf.app.flags.FLAGS - - -def _make_example(ids, weights, raw_num_bytes): - if FLAGS.combine_to_length > 0: - ids += [0] * (FLAGS.combine_to_length - len(ids)) - return generator_utils.to_example({ - "targets": ids, - "target_weights": weights, - "inputs": [0], - "raw_num_bytes": [raw_num_bytes] - }).SerializeToString() - - -def main(_): - """Convert a file to examples.""" - subtokenizer = text_encoder.SubwordTextEncoder(FLAGS.vocab_file) - total_bytes = 0 - total_subtokens = 0 - total_examples = 0 - dropped_examples = 0 - - combined_subtokens = [] - combined_num_bytes = 0 - combined_weights = [] - - source_specifier = subtokenizer.encode("source " + FLAGS.source_domain_string) - target_specifier = subtokenizer.encode("target " + FLAGS.target_domain_string) - if FLAGS.random_reverse: - r_source_specifier = subtokenizer.encode("source " + - FLAGS.target_domain_string) - r_target_specifier = subtokenizer.encode("target " + - FLAGS.source_domain_string) - - reader = tf.python_io.tf_record_iterator(FLAGS.in_file) - - out_file = FLAGS.out_prefix + FLAGS.in_file[-15:] - writer = tf.python_io.TFRecordWriter(out_file) - - for record in reader: - total_examples += 1 - if total_examples % 1000 == 0: - tf.logging.info("total_examples: %d", total_examples) - x = tf.train.Example() - x.ParseFromString(record) - inputs = [i for i in x.features.feature["inputs"].int64_list.value] - targets = [i for i in x.features.feature["targets"].int64_list.value] - should_reverse = FLAGS.random_reverse and random.random() < 0.5 - source_bytes = len(subtokenizer.decode(inputs[:-1])) + 1 - target_bytes = len(subtokenizer.decode(targets[:-1])) + 1 - if not should_reverse: - subtokens = source_specifier + inputs + target_specifier + targets - weights = ([0.0] * - (len(source_specifier) + len(inputs) + len(target_specifier)) + - [1.0] * len(targets)) - num_bytes = target_bytes - else: - subtokens = r_source_specifier + targets + r_target_specifier + inputs - weights = ( - [0.0] * - (len(r_source_specifier) + len(targets) + len(r_target_specifier)) + - [1.0] * len(inputs)) - num_bytes = source_bytes - if FLAGS.count_everything: - weights = [1.0] * len(subtokens) - num_bytes = source_bytes + target_bytes - total_bytes += num_bytes - total_subtokens += sum(weights) - if FLAGS.combine_to_length: - if combined_subtokens and (len(combined_subtokens) + len(subtokens) > - FLAGS.combine_to_length): - writer.write( - _make_example(combined_subtokens, combined_weights, - combined_num_bytes)) - combined_subtokens = [] - combined_weights = [] - combined_num_bytes = 0 - if len(subtokens) <= FLAGS.combine_to_length: - combined_subtokens.extend(subtokens) - combined_weights.extend(weights) - combined_num_bytes += num_bytes - else: - dropped_examples += 1 - else: - writer.write(_make_example(subtokens, weights, num_bytes)) - if combined_subtokens: - writer.write( - _make_example(combined_subtokens, combined_weights, combined_num_bytes)) - writer.close() - - tf.logging.info("total bytes: %d", total_bytes) - tf.logging.info("total subtokens: %d", total_subtokens) - tf.logging.info("bytes per subtoken: %f", total_bytes / total_subtokens) - tf.logging.info("total documents: %d", total_examples) - tf.logging.info("dropped documents: %d", dropped_examples) - - -if __name__ == "__main__": - tf.app.run() diff --git a/tensor2tensor/data_generators/conll_ner.py b/tensor2tensor/data_generators/conll_ner.py new file mode 100644 index 000000000..52a054223 --- /dev/null +++ b/tensor2tensor/data_generators/conll_ner.py @@ 
-0,0 +1,92 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for CoNLL dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + + +@registry.register_problem +class Conll2002Ner(text_problems.Text2textTmpdir): + """Base class for CoNLL2002 problems.""" + + def source_data_files(self, dataset_split): + """Files to be passed to generate_samples.""" + raise NotImplementedError() + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + del data_dir + + url = "https://raw.githubusercontent.com/nltk/nltk_data/gh-pages/packages/corpora/conll2002.zip" # pylint: disable=line-too-long + compressed_filename = os.path.basename(url) + compressed_filepath = os.path.join(tmp_dir, compressed_filename) + generator_utils.maybe_download(tmp_dir, compressed_filename, url) + + compressed_dir = compressed_filepath.strip(".zip") + + filenames = self.source_data_files(dataset_split) + for filename in filenames: + filepath = os.path.join(compressed_dir, filename) + if not tf.gfile.Exists(filepath): + with zipfile.ZipFile(compressed_filepath, "r") as corpus_zip: + corpus_zip.extractall(tmp_dir) + with tf.gfile.GFile(filepath, mode="r") as cur_file: + words, tags = [], [] + for line in cur_file: + line_split = line.strip().split() + if not line_split: + yield { + "inputs": str.join(" ", words), + "targets": str.join(" ", tags) + } + words, tags = [], [] + continue + words.append(line_split[0]) + tags.append(line_split[2]) + if words: + yield {"inputs": str.join(" ", words), "targets": str.join(" ", tags)} + + +@registry.register_problem +class Conll2002EsNer(Conll2002Ner): + """Problem spec for CoNLL2002 Spanish named entity task.""" + TRAIN_FILES = ["esp.train"] + EVAL_FILES = ["esp.testa", "esp.testb"] + + def source_data_files(self, dataset_split): + is_training = dataset_split == problem.DatasetSplit.TRAIN + return self.TRAIN_FILES if is_training else self.EVAL_FILES + + +@registry.register_problem +class Conll2002NlNer(Conll2002Ner): + """Problem spec for CoNLL2002 Dutch named entity task.""" + TRAIN_FILES = ["ned.train"] + EVAL_FILES = ["ned.testa", "ned.testb"] + + def source_data_files(self, dataset_split): + is_training = dataset_split == problem.DatasetSplit.TRAIN + return self.TRAIN_FILES if is_training else self.EVAL_FILES diff --git a/tensor2tensor/data_generators/desc2code.py b/tensor2tensor/data_generators/desc2code.py new file mode 100644 index 000000000..3e0de47b9 --- /dev/null +++ b/tensor2tensor/data_generators/desc2code.py @@ -0,0 +1,308 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
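The parsing loop in `Conll2002Ner.generate_samples` above turns the column-formatted CoNLL files into text pairs: blank lines delimit sentences, column 0 holds the token and column 2 the NER tag. A rough standalone sketch of that loop (slightly simplified so empty sentences are skipped, and fed with made-up lines rather than the real corpus):

```python
def conll_to_samples(lines):
    """Yield {"inputs", "targets"} dicts, one per blank-line-delimited sentence."""
    words, tags = [], []
    for line in lines:
        cols = line.strip().split()
        if not cols:                       # blank line ends the current sentence
            if words:
                yield {"inputs": " ".join(words), "targets": " ".join(tags)}
            words, tags = [], []
            continue
        words.append(cols[0])              # token
        tags.append(cols[2])               # NER tag (third column)
    if words:                              # flush a trailing sentence
        yield {"inputs": " ".join(words), "targets": " ".join(tags)}


toy = ["Melbourne NP B-LOC", "( Fpa O", "Australia NP B-LOC", ") Fpt O", ""]
print(list(conll_to_samples(toy)))
# [{'inputs': 'Melbourne ( Australia )', 'targets': 'B-LOC O B-LOC O'}]
```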
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for the Description2Code OpenAI data-set.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import os +import random +import re +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + +_DATASET_URL = "https://drive.google.com/uc?export=download&id=0Bz3fihKG133ceWNFQTQ5S0xhZUk" +_DATASET_FILENAME = "description2code_current.zip" +_DATASET_PB_PATH = "description2code_current/" + +_DESC_DIR_NAME = "description" + +_VOCAB_EN_FILENAME = "vocab.endefr" + +_RE_CPP_INLINE_COMMENT = re.compile("//.*?\n") # Compiled once + + +# Constant defined for a language problem +CodingPbConstants = collections.namedtuple("CodingPbConstants", [ + "code_dir_name", + "vocab_filename", + "filter_patterns", + "target_space", +]) + +PB_PY = CodingPbConstants( + code_dir_name="solutions_python", + vocab_filename="vocab.py", + filter_patterns=["#include", "# include", "import java."], + target_space=problem.SpaceID.PY_TOK, +) +PB_CPP = CodingPbConstants( + code_dir_name="solutions_c++", + vocab_filename="vocab.cpp", + filter_patterns=["import java."], + target_space=problem.SpaceID.CPP_TOK, +) + +# Struct containing a coding problem (contains the paths to the descriptions +# and code files) +CodingPbInfo = collections.namedtuple("CodingPbInfo", "desc_file, code_files") + + +class Desc2CodeProblem(text_problems.Text2TextProblem): + """Base class for Description2Code problems.""" + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def input_vocab_size(self): + return 2**15 # 32k + + @property + def target_vocab_size(self): + return 2**12 # 4k + + @property + def vocab_input_filename(self): + return "{}.{}".format(_VOCAB_EN_FILENAME, self.input_vocab_size) + + @property + def vocab_target_filename(self): + return "{}.{}".format( + self.pb_constants.vocab_filename, self.target_vocab_size) + + def preprocess_target(self, target): + """Apply some preprocessing to the target. + + For instance, remove space/tabs. 
+ + Args: + target (str): code source content + + Returns: + the pre-processed string content + """ + return target + + def feature_encoders(self, data_dir): + source_vocab_filename = os.path.join(data_dir, self.vocab_input_filename) + target_vocab_filename = os.path.join(data_dir, self.vocab_target_filename) + source_token = text_encoder.SubwordTextEncoder(source_vocab_filename) + target_token = text_encoder.SubwordTextEncoder(target_vocab_filename) + return { + "inputs": source_token, + "targets": target_token, + } + + def is_generate_per_split(self): + return True + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + # Called twice: for train and test + + # Get the list of the training samples (coding challenge samples) + samples = list(generator_samples(tmp_dir, self.pb_constants)) + + # Split between train and dev + # Shuffle to get problems from diverse sources (CodeChef and CodeForces) and + # difficulties in each set. + # Need to sort the samples first before shuffling (as walk() isn't + # deterministic) + samples.sort(key=lambda x: x.desc_file) # in-place + rng = random.Random(7531) # Local fixed seed + rng.shuffle(samples) # in-place + + # Train: 5019/5228 problems + # Dev: 209/5228 problems + len_samples = len(samples) + split = len_samples // 25 + samples = samples[split:] if train else samples[:split] + tf.logging.info("Number of samples for {}: {}/{}".format( + "train" if train else "dev", + len(samples), + len_samples + )) + + def generator_samples_content(get_source, get_target): + """Generate samples.""" + source, target = None, None + # Iterate over the coding samples + for sample in samples: + if get_source: + with tf.gfile.GFile(sample.desc_file, mode="r") as source_file: + source = source_file.read() + + if get_target: + # Each challenge can have multiple implementations (or none) + for code_file in sample.code_files: + with tf.gfile.GFile(code_file, mode="r") as target_file: + target = target_file.read() + target = self.preprocess_target(target) + yield source, target + elif sample.code_files: # Only take the source if a target exists + yield source, target + + def generator_target(): + for _, target in generator_samples_content(False, True): + yield target.strip() + + # Generate vocab for both source and target + + # TODO(lukaszkaiser): Fix vocab generation call. No sources given. 
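The train/dev split in `generate_encoded_samples` above is deterministic: samples are sorted first (because `tf.gfile.Walk` order is not stable), shuffled with a fixed seed, and the first 1/25 of the list is held out as dev. A minimal sketch of just that split, using placeholder sample names:

```python
import random


def split_samples(samples, train):
    """Deterministic ~96/4 train/dev split, mirroring Desc2CodeProblem."""
    samples = sorted(samples)        # stable starting order before shuffling
    rng = random.Random(7531)        # fixed local seed, as in the code above
    rng.shuffle(samples)
    dev_size = len(samples) // 25    # 1/25 of the problems go to dev
    return samples[dev_size:] if train else samples[:dev_size]


names = ["problem_%04d" % i for i in range(5228)]
train, dev = split_samples(names, True), split_samples(names, False)
print(len(train), len(dev), set(train) & set(dev))   # 5019 209 set()
```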
+ assert not self.vocab_input_filename + source_vocab = None + # source_vocab = generator_utils.get_or_generate_vocab( + # data_dir, tmp_dir, self.vocab_input_filename, self.input_vocab_size) + + target_vocab = generator_utils.get_or_generate_vocab_inner( + data_dir=data_dir, + vocab_filename=self.vocab_target_filename, + vocab_size=self.target_vocab_size, + generator=generator_target(),) + + # Yield the training and testing samples + eos_list = [EOS] + for source, target in generator_samples_content(True, True): + source_ints = source_vocab.encode(source.strip()) + eos_list + target_ints = target_vocab.encode(target.strip()) + eos_list + yield { + "inputs": source_ints, + "targets": target_ints, + } + + +@registry.register_problem +class ProgrammingDesc2codePy(Desc2CodeProblem): + """Description2Code for python problem.""" + + @property + def pb_constants(self): + return PB_PY + + def preprocess_target(self, target): + """Simple tab to space replacement.""" + return target.replace("\t", " ") + + +@registry.register_problem +class ProgrammingDesc2codeCpp(Desc2CodeProblem): + """Description2Code for C++ problem.""" + + @property + def pb_constants(self): + return PB_CPP + + def preprocess_target(self, target): + """Pre-process Cpp files.""" + target = re.sub(_RE_CPP_INLINE_COMMENT, " ", target) # Remove comments + # The regex rule is quite simple, So will fail if a // is inside a string, + # and don't remove /* */ comments + target = " ".join(target.split()) # Normalize all spaces + return target + + +# Utils functions + + +def generator_samples(tmp_dir, pb_cst): + """Generator for the dataset samples. + + If not present, download and extract the dataset. + + Args: + tmp_dir: path to the directory where to download the dataset. + pb_cst: CodingPbConstants object defining paths + + Yields: + A CodingPbInfo object containing the next challenge informations. + """ + # Step1: Download dataset (eventually) + data_zip_path = generator_utils.maybe_download_from_drive( + directory=tmp_dir, + filename=_DATASET_FILENAME, + url=_DATASET_URL, + ) + tf.logging.info("Data downloaded in: {}".format(data_zip_path)) + + # Step2: Extract dataset + # We could deduce _DATASET_PB_PATH from the zip file (instead of + # hardcoded path) + data_rootdir = os.path.join(tmp_dir, _DATASET_PB_PATH) + if not tf.gfile.Exists(data_rootdir): + with zipfile.ZipFile(data_zip_path, "r") as corpus_zip: + corpus_zip.extractall(tmp_dir) + # We could remove the extracted __MACOSX folder + tf.logging.info("Data extracted in: {}".format(tmp_dir)) + else: + tf.logging.info("Data already extracted in: {}".format(tmp_dir)) + + # Step3: Extract the problems list on the extracted folder + def contains_samples(subdir, dirs, files): # pylint: disable=unused-argument + """Check that the folder contains a problem.""" + return ( + _DESC_DIR_NAME in dirs and + pb_cst.code_dir_name in dirs + ) + + def next_sample(subdir, dirs, files): # pylint: disable=unused-argument + """Return the filenames of the problem.""" + # More could be extracted (like the expected inputs/outputs + # pairs, the problem difficulty, the names of the algorithmic techniques + # needed) + desc_file = os.path.join(subdir, _DESC_DIR_NAME, "description.txt") + code_files = [] + # As the dataset is noisy, the program deduce the language from the file + # content. + code_pattern = os.path.join(subdir, pb_cst.code_dir_name, "*.txt") + for f in tf.gfile.Glob(code_pattern): + with tf.gfile.GFile(f, mode="r") as target_file: + # Hack to filter C++/Java files. 
In theory some python comments could + # make the file be considered as C++ but in practice the chance of + # getting a false negative is low. + content = target_file.read() + if not any(p in content for p in pb_cst.filter_patterns): + code_files.append(f) + return CodingPbInfo( + desc_file=desc_file, + code_files=code_files + ) + + # The dataset contains problem from two different sources (CodeChef + # and CodeForces). Due to the limited number of samples, all problems from + # both sources are merged + for w in tf.gfile.Walk(data_rootdir): + if contains_samples(*w): + yield next_sample(*w) diff --git a/tensor2tensor/data_generators/desc2code_test.py b/tensor2tensor/data_generators/desc2code_test.py new file mode 100644 index 000000000..5c2b0635c --- /dev/null +++ b/tensor2tensor/data_generators/desc2code_test.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for desc2code.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators import desc2code + +import tensorflow.compat.v1 as tf + +CODE_CPP_IN = """ + #include + +void main() { // This comment will be removed + // This too. + // + /* Not this one */ +\t +\t + int a \t\n = 3;// +// +} + +""" + +CODE_CPP_OUT = ("#include void main() { /* Not this one */ int a = " + "3; }") + + +class Desc2codeTest(tf.test.TestCase): + + def testCppPreprocess(self): + """Check that the file correctly preprocess the code source.""" + cpp_pb = desc2code.ProgrammingDesc2codeCpp() + + self.assertEqual( # Add space beween two lines + cpp_pb.preprocess_target("firstline//comm1\nsecondline//comm2\n"), + "firstline secondline") + # Checking for boths comments and spaces + self.assertEqual(cpp_pb.preprocess_target(CODE_CPP_IN), CODE_CPP_OUT) + self.assertEqual( + cpp_pb.preprocess_target(" not removed //abcd "), + "not removed //abcd") + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/dialog_abstract.py b/tensor2tensor/data_generators/dialog_abstract.py new file mode 100644 index 000000000..266a900ed --- /dev/null +++ b/tensor2tensor/data_generators/dialog_abstract.py @@ -0,0 +1,408 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
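The C++ preprocessing exercised by the test above is intentionally crude: a non-greedy regex removes `//` comments up to the end of the line, every remaining whitespace run collapses to a single space, and `/* */` comments are left untouched (as the code comments note, a `//` inside a string literal would also be stripped). A self-contained sketch of the same two steps:

```python
import re

# Same pattern as _RE_CPP_INLINE_COMMENT in desc2code.py.
_INLINE_COMMENT = re.compile("//.*?\n")


def preprocess_cpp(target):
    target = _INLINE_COMMENT.sub(" ", target)   # drop // comments up to newline
    return " ".join(target.split())             # normalize all whitespace


snippet = "int a = 1; // set a\nint b = 2;\t/* kept */\nreturn a + b;\n"
print(preprocess_cpp(snippet))
# int a = 1; int b = 2; /* kept */ return a + b;
```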
+ +"""Abstract class for dialog problems.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import re +import tarfile +import zipfile + +import requests +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators.text_problems import VocabType +from tensor2tensor.layers import modalities +from tensor2tensor.utils import metrics +import tensorflow.compat.v1 as tf + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + + +# An abstract base class for word based chatbot problems. +class DialogAbstract(text_problems.Text2TextProblem): + """Abstract class for dialog problems.""" + + @property + def vocab_type(self): + return text_problems.VocabType.TOKEN + + @property + def is_generate_per_split(self): + return True + + @property + def vocab_file(self): + return self.vocab_filename + + @property + def vocab_filename(self): + return 'vocab.chatbot.' + str(self.targeted_vocab_size) + + @property + def oov_token(self): + return '' + + @property + def use_subword_tokenizer(self): + return False + + @property + def input_space_id(self): + return problem.SpaceID.EN_TOK + + @property + def target_space_id(self): + return problem.SpaceID.EN_TOK + + @property + def targeted_vocab_size(self): + return 2**14 + + @property + def targeted_dataset_size(self): + # Number of utterance pairs in the full dataset. + # If it's 0, then the full size of the dataset is used. + return 0 + + @property + def dataset_split(self): + return {'train': 80, 'val': 10, 'test': 10} + + @property + def dataset_splits(self): + return [{ + 'split': problem.DatasetSplit.TRAIN, + 'shards': 1, + }, { + 'split': problem.DatasetSplit.EVAL, + 'shards': 1, + }, { + 'split': problem.DatasetSplit.TEST, + 'shards': 1, + }] + + @property + def data_dir(self): + return '' + + @property + def raw_data_dir(self): + return '' + + @property + def raw_data(self): + return '' + + @property + def zipped_data(self): + return '' + + @property + def url(http://github.com/self): + return '' + + @data_dir.setter + def data_dir(self, value): + self._data_dir = value + + @raw_data_dir.setter + def raw_data_dir(self, value): + self._raw_data_dir = value + + @raw_data.setter + def raw_data(self, value): + self._raw_data = value + + @zipped_data.setter + def zipped_data(self, value): + self._zipped_data = value + + @url.setter + def url(http://github.com/self,%20value): + self._url = value + + # Main function where the preprocessing of the data starts. + def preprocess_data(self, train_mode): + return NotImplementedError + + # This should also be overriden if the data_pipeline_status is used. + def create_data(self, train_mode): + pass + + def data_pipeline_status(self, train_mode): + """Check at which part of the pipeline are we at. + + This function first checks recursively at which point in the + data processing point are we (what files can be found on the disk), + and then proceeds from there. + + Args: + train_mode: string, whether we are in train, dev or test mode + """ + + # Build the source and target paths. + sourcepath = os.path.join(self._data_dir, train_mode + 'Source.txt') + targetpath = os.path.join(self._data_dir, train_mode + 'Target.txt') + + # If raw data dir doesn't exist, create it. 
+ if not os.path.exists(self._raw_data_dir): + os.makedirs(self._raw_data_dir) + + # Check whether sourcePath.txt exists. + if (os.path.isfile(sourcepath) and os.path.isfile(targetpath) and + os.path.isfile(os.path.join(self._data_dir, self.vocab_file))): + print('problem_log: Source, target and vocab files exist in ' + + self._data_dir + ', proceeding with data generation. ' + + 'If you want to rebuild these files, delete them first.') + return + + # Check whether the raw data is extracted to the raw_data_dir folder. + elif os.path.exists(self._raw_data): + print('problem_log: No source, target or vocab files found in ' + + self._data_dir + '.') + print('problem_log: Extracted raw data is in ' + self._raw_data_dir + + '. Proceeding with creating source, target and vocab files.') + self.create_data(train_mode) + + # Check whether the data is downloaded in the raw_data_dir_folder. + elif os.path.exists(self._zipped_data): + print('problem_log: No source, target or vocab files found in ' + + self._data_dir + '.') + print('problem_log: No extracted raw data found in ' + + self._raw_data_dir + '.') + print('problem_log: Unextracted raw data is in ' + self._raw_data_dir + + '. Extracting and creating source, target and vocab files.') + self.extract_data(train_mode) + + else: + print('problem_log: No source, target or vocab files found in ' + + self._data_dir + '.') + print('problem_log: No raw data found in ' + self._raw_data_dir + + '. Proceeding with downloading the data, extracting it, ' + + 'and creating source, target and vocab files.') + self.download_data(train_mode) + + def download_data(self, train_mode): + """Download data from official sources. + + Args: + train_mode: string, whether we are in train, dev or test mode + """ + + # Open the url and download the data with progress bars. + data_stream = requests.get(self._url, stream=True) + with open(self._zipped_data, 'wb') as f: + for chunk in data_stream.iter_content(1024): + if chunk: + f.write(chunk) + f.flush() + + # Next step is extracting the data. + print('problem_log: Extracting data to ' + self._zipped_data + '.') + self.extract_data(train_mode) + + def extract_data(self, train_mode): + """Extract data and go to the next step. + + Args: + train_mode: string, whether we are in train, dev or test mode + """ + + if self._zipped_data[-2:] == 'gz': + zip_file = tarfile.open(self._zipped_data, 'r:gz') + elif self._zipped_data[-3:] == 'zip': + zip_file = zipfile.ZipFile(self._zipped_data, 'r') + else: + print('problem_log: ' + self._zipped_data + + ' is not a .zip or .gz file, so I can\'t extract it.') + + zip_file.extractall(self._raw_data_dir) + zip_file.close() + + # Next step is creating the source, target and vocab files. + print('problem_log: Creating ' + + train_mode + ' files in ' + self._data_dir) + self.create_data(train_mode) + + # hparams for the problem. 
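`data_pipeline_status` above resumes the data pipeline at the latest stage whose artifacts already exist on disk: generated source/target/vocab files, extracted raw data, a downloaded archive, or nothing at all. A condensed sketch of that decision (the paths are placeholders, not real dataset locations):

```python
import os


def pipeline_stage(data_dir, raw_data, zipped_data, vocab_file, mode):
    """Return which pipeline step still needs to run, mirroring DialogAbstract."""
    source = os.path.join(data_dir, mode + "Source.txt")
    target = os.path.join(data_dir, mode + "Target.txt")
    vocab = os.path.join(data_dir, vocab_file)
    if all(os.path.isfile(p) for p in (source, target, vocab)):
        return "done"       # source, target and vocab files already generated
    if os.path.exists(raw_data):
        return "create"     # raw data extracted -> build source/target/vocab
    if os.path.exists(zipped_data):
        return "extract"    # archive downloaded -> unpack it first
    return "download"       # nothing on disk -> start with the download


print(pipeline_stage("/tmp/t2t_data", "/tmp/raw_data/corpus",
                     "/tmp/raw_data/corpus.zip", "vocab.chatbot.16384", "train"))
```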
+ def hparams(self, defaults, unused_model_hparams): + p = defaults + p.stop_at_eos = int(True) + + p.modality = {'targets': modalities.ModalityType.SYMBOL} + if self.has_inputs: + p.modality['inputs'] = modalities.ModalityType.SYMBOL + p.vocab_size = {'inputs': self._encoders['inputs'].vocab_size} + p.vocab_size['targets'] = self._encoders['inputs'].vocab_size + + if self.vocab_type == VocabType.CHARACTER: + p.loss_multiplier = 2.0 + + if self.packed_length: + if self.has_inputs: + p.modality['inputs_segmentation'] = modalities.ModalityType.IDENTITY + p.modality['inputs_position'] = modalities.ModalityType.IDENTITY + p.vocab_size['inputs_segmentation'] = None + p.vocab_size['inputs_position'] = None + p.modality['targets_segmentation'] = modalities.ModalityType.IDENTITY + p.modality['targets_position'] = modalities.ModalityType.IDENTITY + p.vocab_size['targets_segmentation'] = None + p.vocab_size['targets_position'] = None + + # What evaluation metrics to use with this problem. + def eval_metrics(self): + return [metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5, + metrics.Metrics.ACC_PER_SEQ, + metrics.Metrics.NEG_LOG_PERPLEXITY, + metrics.Metrics.APPROX_BLEU] + + # Override this, to start with preprocessing. + def generate_data(self, data_dir, tmp_dir, task_id=-1): + self.data_dir = data_dir + # Determine whether we are in training or validation mode. + self.mode = {problem.DatasetSplit.TRAIN: 'train', + problem.DatasetSplit.EVAL: 'dev', + problem.DatasetSplit.TEST: 'test'} + filepath_fns = {problem.DatasetSplit.TRAIN: self.training_filepaths, + problem.DatasetSplit.EVAL: self.dev_filepaths, + problem.DatasetSplit.TEST: self.test_filepaths} + + split_paths = [(split['split'], filepath_fns[split['split']]( + data_dir, split['shards'], shuffled=self.already_shuffled)) + for split in self.dataset_splits] + all_paths = [] + for _, paths in split_paths: + all_paths.extend(paths) + + if self.is_generate_per_split: + for split, paths in split_paths: + # Create the source and target txt files from the raw data. + self.preprocess_data(self.mode[split]) + generator_utils.generate_files( + self.generate_encoded_samples(data_dir, tmp_dir, split), paths) + else: + self.preprocess_data(self.mode[problem.DatasetSplit.TRAIN]) + generator_utils.generate_files( + self.generate_encoded_samples( + data_dir, tmp_dir, problem.DatasetSplit.TRAIN), all_paths) + + generator_utils.shuffle_dataset(all_paths, extra_fn=self._pack_fn()) + + def generate_samples(self, data_dir, tmp_dir, data_split): + """This function generates train and validation pairs in t2t-datagen style. + + The function assumes that if you have data at one level of the pipeline, + you don't want to re-generate it, so for example if the 4 txt files exist, + the function continues by generating the t2t-datagen format files. + So if you want to re-download or re-generate data, + you have to delete it first from the appropriate directories. + + Args: + data_dir: string, Directory where the data will be generated. The raw + data has to be downloaded one directory level higher. + tmp_dir: string, temp directory. + data_split: string, which data split to generate samples for + + Yields: + dict + """ + + self.data_dir = data_dir + print('problem_log: ' + + self.mode[data_split] + ' data generation activated.') + + s_path = os.path.join(data_dir, self.mode[data_split] + 'Source.txt') + t_path = os.path.join(data_dir, self.mode[data_split] + 'Target.txt') + + # Open the files and yield source-target lines. 
+ with tf.gfile.GFile(s_path, mode='r') as source_file: + with tf.gfile.GFile(t_path, mode='r') as target_file: + source, target = source_file.readline(), target_file.readline() + while source and target: + yield {'inputs': source.strip(), 'targets': target.strip()} + source, target = source_file.readline(), target_file.readline() + + def save_vocab(self, vocab): + """Save the vocabulary to a file. + + Args: + vocab: dict + """ + voc_file = open(os.path.join(self._data_dir, self.vocab_file), 'w') + + # Put the reserved tokens in. + voc_file.write('\n') + voc_file.write('\n') + for word, _ in vocab.most_common(self.targeted_vocab_size - 3): + voc_file.write(word + '\n') + voc_file.write('') + + voc_file.close() + + # Open the 6 files to write the processed data into. + def open_6_files(self): + trainsource = open(os.path.join(self._data_dir, 'trainSource.txt'), 'w') + traintarget = open(os.path.join(self._data_dir, 'trainTarget.txt'), 'w') + devsource = open(os.path.join(self._data_dir, 'devSource.txt'), 'w') + devtarget = open(os.path.join(self._data_dir, 'devTarget.txt'), 'w') + testsource = open(os.path.join(self._data_dir, 'testSource.txt'), 'w') + testtarget = open(os.path.join(self._data_dir, 'testTarget.txt'), 'w') + + return trainsource, traintarget, devsource, \ + devtarget, testsource, testtarget + + # Close the 6 files to write the processed data into. + def close_n_files(self, files): + for f in files: + f.close() + + def clean_line(self, line): + """Clean a line with some regex rules. + + Args: + line: string, line to be processed and returned + + Returns: + string + """ + + # 2 functions for more complex replacing. + def replace(matchobj): + return re.sub("'", " '", str(matchobj.group(0))) + + def replace_null(matchobj): + return re.sub("'", '', str(matchobj.group(0))) + + # Keep some special tokens. + line = re.sub("[^a-z .?!'0-9]", '', line) + line = re.sub('[.]', ' . ', line) + line = re.sub('[?]', ' ? ', line) + line = re.sub('[!]', ' ! ', line) + + # Take care of apostrophes. + line = re.sub("[ ]'[ ]", ' ', line) + line = re.sub(" '[a-z]", replace_null, line) + line = re.sub("n't", " n't", line) + line = re.sub("[^ n]'[^ t]", replace, line) + + return line diff --git a/tensor2tensor/data_generators/dialog_cornell.py b/tensor2tensor/data_generators/dialog_cornell.py new file mode 100644 index 000000000..5b60afd1f --- /dev/null +++ b/tensor2tensor/data_generators/dialog_cornell.py @@ -0,0 +1,178 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Cornell Movie Dialog Dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import os +import re + +from tensor2tensor.data_generators import dialog_abstract +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.utils import registry + + +# End-of-sentence marker. 
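`clean_line` above applies a fixed sequence of regex rules: restrict the character set, pad sentence punctuation with spaces, drop stray apostrophes, and split negations and clitics into separate tokens. A standalone copy of that pipeline on one made-up, already-lowercased utterance (the dialog generators lowercase before calling it):

```python
import re


def clean_line(line):
    """Regex cleanup, copied from DialogAbstract.clean_line."""
    def replace(matchobj):
        return re.sub("'", " '", str(matchobj.group(0)))

    def replace_null(matchobj):
        return re.sub("'", "", str(matchobj.group(0)))

    line = re.sub("[^a-z .?!'0-9]", "", line)    # keep a small character set
    line = re.sub("[.]", " . ", line)            # pad punctuation with spaces
    line = re.sub("[?]", " ? ", line)
    line = re.sub("[!]", " ! ", line)
    line = re.sub("[ ]'[ ]", " ", line)          # drop lone apostrophes
    line = re.sub(" '[a-z]", replace_null, line)
    line = re.sub("n't", " n't", line)           # don't -> do n't
    line = re.sub("[^ n]'[^ t]", replace, line)  # i'm -> i 'm
    return line


print(clean_line("i'm sure you don't mind, right?"))
# i 'm sure you do n't mind right ?
```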
+EOS = text_encoder.EOS_ID + + +@registry.register_problem +class DialogCornell32k(dialog_abstract.DialogAbstract): + """Implements the chatbot problem with Cornell Movie Dialog dataset. + + https://www.cs.cornell.edu/~cristian/Cornell_Movie-Dialogs_Corpus.html + """ + + @property + def targeted_vocab_size(self): + return 2**15 + + def preprocess_data(self, train_mode): + """Main function where the preprocessing of the data starts. + + Args: + train_mode: string, whether we are in train, dev or test mode + """ + + # Set the raw data directory and data. + self.raw_data_dir = os.path.join('/'.join(self._data_dir.split('/')[:-1]), + 'raw_data') + self.raw_data = os.path.join(self._raw_data_dir, + 'cornell movie-dialogs corpus') + self.zipped_data = os.path.join(self._raw_data_dir, + 'cornell_movie_dialogs_corpus.zip') + + # Create the download url. + self.url = ('http://www.cs.cornell.edu/~cristian/data/' + + 'cornell_movie_dialogs_corpus.zip') + + # Check at which part of the pipeline are we at. + self.data_pipeline_status(train_mode) + + def create_data(self, train_mode): + """Create the source, target and vocab files. + + Args: + train_mode: string, whether we are in train, dev or test mode + """ + + # Open the 6 files. + trainsource, traintarget, devsource, devtarget, testsource, testtarget = \ + self.open_6_files() + + # Open the raw data. + movie_lines = open( + os.path.join(self._raw_data, 'movie_lines.txt'), errors='ignore') + dialog_list = self.extract_dialog_ids() + + vocabulary = collections.Counter() + line_dict = {} + number_of_lines = 0 + # Iterate through file. + for line in movie_lines: + if number_of_lines % 10000 == 0: + print('problem_log: Parsed ' + str(number_of_lines) + ' lines.') + + line = line.split(' +++$+++ ') + dialog_id = line[0] + line = line[4].lower() + + # Do some cleaning. + line = self.clean_line(line) + line_dict[dialog_id] = line + + number_of_lines += 1 + # Check if we reached the desired dataset size. + if (self.targeted_dataset_size != 0 and + self.targeted_dataset_size < number_of_lines): + break + + counter = 0 + dataset_split_counter = 0 + # Save the actual dialogs. + for dialog in dialog_list: + if counter % 10000 == 0: + print('problem_log: Saved ' + + str(counter) + '/' + str(len(dialog_list)) + ' dialogs.') + + dataset_split_counter += 1 + i = 0 + # Save one utterance. + for utterance in dialog: + if (utterance != dialog[-1] and + dialog[i + 1] != 'L211194' and + dialog[i + 1] != 'L1045'): + source_line = line_dict[utterance] + '\n' + target_line = line_dict[dialog[i + 1]] + '\n' + + # Save to the files according to dataset split. + if dataset_split_counter <= self.dataset_split['train']: + # Build vocabulary. + words = source_line.split() + for word in words: + vocabulary[word] = vocabulary.get(word, 0) + 1 + + trainsource.write(source_line) + traintarget.write(target_line) + + elif dataset_split_counter <= (self.dataset_split['train'] + + self.dataset_split['val']): + devsource.write(source_line) + devtarget.write(target_line) + else: + testsource.write(source_line) + testtarget.write(target_line) + i += 1 + + # Reset the split counter if we reached 100%. + if dataset_split_counter == 100: + dataset_split_counter = 0 + counter += 1 + + # Close the files. + self.close_n_files([trainsource, + traintarget, + devsource, + devtarget, + testsource, + testtarget]) + movie_lines.close() + + # Save the vocabulary. + self.save_vocab(vocabulary) + + # Extract the dialog ids from the dialog file. 
+ def extract_dialog_ids(self): + dialogs = open(os.path.join(self._raw_data, 'movie_conversations.txt'), + errors='ignore') + + dialog_list = [] + # Each line contains a dialog. + for line in dialogs: + line = line.split(' +++$+++ ') + line = line[3].split(',') + + i = 0 + for item in line: + line[i] = re.sub('[^A-Z0-9]', '', item) + i += 1 + dialog_list.append(line) + + dialogs.close() + return dialog_list diff --git a/tensor2tensor/data_generators/dialog_dailydialog.py b/tensor2tensor/data_generators/dialog_dailydialog.py new file mode 100644 index 000000000..6f885ea94 --- /dev/null +++ b/tensor2tensor/data_generators/dialog_dailydialog.py @@ -0,0 +1,144 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""DailyDialog dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import os + +from tensor2tensor.data_generators import dialog_abstract +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.utils import registry + + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + + +@registry.register_problem +class DialogDailydialog16k(dialog_abstract.DialogAbstract): + """A class implementing a simple chatbot problem for the DailyDialog dataset. + + https://arxiv.org/abs/1710.03957 + This version doesn't use any auxiliary information. + """ + + def preprocess_data(self, train_mode): + """Main function where the preprocessing of the data starts. + + Args: + train_mode: string, whether we are in train, dev or test mode + """ + + # Set the raw data directory and data. + self.raw_data_dir = os.path.join('/'.join(self._data_dir.split('/')[:-1]), + 'raw_data') + self.raw_data = os.path.join(self._raw_data_dir, 'ijcnlp_dailydialog') + self.zipped_data = os.path.join(self._raw_data_dir, + 'ijcnlp_dailydialog.zip') + + # Create the download url. + self.url = 'http://yanran.li/files/ijcnlp_dailydialog.zip' + + # Check at which part of the pipeline are we at. + self.data_pipeline_status(train_mode) + + def create_data(self, train_mode): + """Create the source, target and vocab files. + + Args: + train_mode: string, whether we are in train, dev or test mode + """ + + # Open the 6 files. + trainsource, traintarget, devsource, devtarget, testsource, testtarget = \ + self.open_6_files() + + # Open the raw data. + dialogs = open( + os.path.join(self._raw_data, 'dialogues_text.txt'), errors='ignore') + + vocabulary = collections.Counter() + number_of_dialogs = 0 + line_counter = 0 + dataset_split_counter = 0 + # Iterate through the file. + for dialog in dialogs: + dataset_split_counter += 1 + if number_of_dialogs % 1000 == 0: + print('problem_log: Parsed ' + str(number_of_dialogs) + ' dialogs.') + + # Utterances are separated by the __eou__ token. + utterances = dialog.split('__eou__')[:-1] + + # Check which file we should write to. 
+ if dataset_split_counter <= self.dataset_split['train']: + source_file = trainsource + target_file = traintarget + elif dataset_split_counter <= (self.dataset_split['train'] + + self.dataset_split['val']): + source_file = devsource + target_file = devtarget + else: + source_file = testsource + target_file = testtarget + + # Clean the utterances. + i = 0 + for utterance in utterances: + line_counter += 1 + utterance = self.clean_line(utterance.lower()) + i += 1 + + # Build vocabulary. + if dataset_split_counter <= self.dataset_split['train']: + words = utterance.split() + for word in words: + if word in vocabulary: + vocabulary[word] += 1 + else: + vocabulary[word] = 1 + + # Write to files. + if i != len(utterances): + source_file.write(utterance + '\n') + if i != 1: + target_file.write(utterance + '\n') + + number_of_dialogs += 1 + # Reset the split counter if we reached 100%. + if dataset_split_counter == 100: + dataset_split_counter = 0 + + # Check if we reached the desired dataset size. + if (self.targeted_dataset_size != 0 and + self.targeted_dataset_size < line_counter): + break + + # Close the files. + self.close_n_files([trainsource, + traintarget, + devsource, + devtarget, + testsource, + testtarget]) + dialogs.close() + + # Save the vocabulary. + self.save_vocab(vocabulary) diff --git a/tensor2tensor/data_generators/dialog_opensubtitles.py b/tensor2tensor/data_generators/dialog_opensubtitles.py new file mode 100644 index 000000000..1584fd984 --- /dev/null +++ b/tensor2tensor/data_generators/dialog_opensubtitles.py @@ -0,0 +1,258 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""OpenSubtitles dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import os +import re +import zipfile + +from tensor2tensor.data_generators import dialog_abstract +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.utils import registry + + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + + +@registry.register_problem +class DialogOpensubtitles64k2009(dialog_abstract.DialogAbstract): + """A class implementing the chatbot problem for the OpenSubtitles dataset. + + http://opus.nlpl.eu/OpenSubtitles-v2018.php + """ + + @property + def targeted_vocab_size(self): + return 2**16 + + @property + def dataset_version(self): + # Year of the opensubtitles dataset creation. + return 2009 + + def extract_data(self, train_mode): + """Extract data and go to the next step. + + Args: + train_mode: string, whether we are in train, dev or test mode + """ + + if self._zipped_data[-3:] == 'zip' or self._zipped_data[-2:] == 'gz': + zip_file = zipfile.ZipFile(self._zipped_data, 'r') + else: + print('problem_log: ' + self._zipped_data + + ' is not a .zip or .gz file, so I can\'t extract it.') + + zip_file.extractall(self._raw_data_dir) + zip_file.close() + + # Next step is creating the source, target and vocab files. 
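Both dialog generators route whole dialogs to train/dev/test with a counter that runs from 1 to 100 and then resets, so the `{'train': 80, 'val': 10, 'test': 10}` split is applied per block of 100 dialogs. A small sketch of that routing:

```python
DATASET_SPLIT = {"train": 80, "val": 10, "test": 10}


def route(counter):
    """Map the 1..100 counter to a split name, as in the generators above."""
    if counter <= DATASET_SPLIT["train"]:
        return "train"
    if counter <= DATASET_SPLIT["train"] + DATASET_SPLIT["val"]:
        return "dev"
    return "test"


counts = {"train": 0, "dev": 0, "test": 0}
counter = 0
for _ in range(1000):          # pretend we iterate over 1000 dialogs
    counter += 1
    counts[route(counter)] += 1
    if counter == 100:         # reset after every block of 100 dialogs
        counter = 0
print(counts)                  # {'train': 800, 'dev': 100, 'test': 100}
```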
+ print('problem_log: Creating ' + + train_mode + ' files in ' + self._data_dir) + self.create_data(train_mode) + + def preprocess_data(self, train_mode): + """Main function where the preprocessing of the data starts. + + Args: + train_mode: string, whether we are in train, dev or test mode + """ + + year = '' if self.dataset_version == 2009 else str(self.dataset_version) + # Set the raw data directory and data. + self.raw_data_dir = os.path.join('/'.join(self._data_dir.split('/')[:-1]), + 'raw_data_' + str(self.dataset_version)) + self.raw_data = os.path.join(self._raw_data_dir, 'OpenSubtitles' + year) + self.zipped_data = os.path.join(self._raw_data_dir, 'en.tar.gz') + + # Create the download url. + self.url = ('http://opus.nlpl.eu/download.php?f=OpenSubtitles' + + str(year) + '/en.tar.gz') + + # Check at which part of the pipeline are we at. + self.data_pipeline_status(train_mode) + + def create_data(self, train_mode): + """Create the source, target and vocab files. + + Args: + train_mode: string, whether we are in train, dev or test mode + """ + + # open the 6 files + trainsource, traintarget, devsource, devtarget, testsource, testtarget = \ + self.open_6_files() + + conv_id = 0 + number_of_lines = 0 + dataset_split_counter = 0 + vocabulary = collections.Counter() + # Dind all the files. + for root, _, files in os.walk(self._raw_data_dir): + for f in files: + if conv_id % 100 == 0: + print('problem_log: Parsed ' + str(conv_id) + ' files.') + + source_lines = '' + target_lines = '' + conv_id += 1 + dataset_split_counter += 1 + + # Open one .xml file and parse it. + with open(os.path.join(root, f), 'r', errors='ignore') as txt_file: + words = '' + line_id = 1 + + # Parse one line. + for line in txt_file: + line = str(line) + + # Check if it's a new sentence. + if line.find('= 0: + line = line[index:] + word = line[line.find('>') + 1:line.find(' 0: + pad = [self.PAD] * (self._chunk_size - extra) + bases.extend(pad) + assert (len(bases) % self._chunk_size) == 0 + num_chunks = len(bases) // self._chunk_size + ids = [] + for chunk_idx in range(num_chunks): + start_idx = chunk_idx * self._chunk_size + end_idx = start_idx + self._chunk_size + chunk = tuple(bases[start_idx:end_idx]) + if chunk not in self._tokens_to_ids: + raise ValueError("Unrecognized token %s" % chunk) + ids.append(self._tokens_to_ids[chunk]) + return ids + + def decode(self, ids, strip_extraneous=False): + bases = [] + for idx in ids: + if idx >= self._num_reserved_ids: + chunk = self._ids_to_tokens[idx] + if self.PAD in chunk: + chunk = chunk[:chunk.index(self.PAD)] + else: + if strip_extraneous: + continue + chunk = [text_encoder.RESERVED_TOKENS[idx]] + bases.extend(chunk) + return "".join(bases) + + +class DelimitedDNAEncoder(DNAEncoder): + """DNAEncoder for delimiter separated subsequences. + + Uses ',' as default delimiter. 
+ """ + + def __init__(self, delimiter=",", **kwargs): + self._delimiter = delimiter + self._delimiter_key = tuple(self._delimiter) + super(DelimitedDNAEncoder, self).__init__(**kwargs) + + @property + def delimiter(self): + return self._delimiter + + def _tokens(self): + return super(DelimitedDNAEncoder, self)._tokens() + [self._delimiter_key] + + def encode(self, s): + delimited_string = s + ids = [] + for part in delimited_string.split(self.delimiter): + ids.extend(super(DelimitedDNAEncoder, self).encode(part)) + ids.append(self._tokens_to_ids[self._delimiter_key]) + return ids[:-1] diff --git a/tensor2tensor/data_generators/dna_encoder_test.py b/tensor2tensor/data_generators/dna_encoder_test.py new file mode 100644 index 000000000..46f9b54aa --- /dev/null +++ b/tensor2tensor/data_generators/dna_encoder_test.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.data_generators.dna_encoder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.data_generators import dna_encoder +import tensorflow.compat.v1 as tf + + +class DnaEncoderTest(tf.test.TestCase): + + def test_encode_decode(self): + original = 'TTCGCGGNNNAACCCAACGCCATCTATGTANNTTGAGTTGTTGAGTTAAA' + + # Encoding should be reversible for any reasonable chunk size. + for chunk_size in [1, 2, 4, 6, 8]: + encoder = dna_encoder.DNAEncoder(chunk_size=chunk_size) + encoded = encoder.encode(original) + decoded = encoder.decode(encoded) + self.assertEqual(original, decoded) + + def test_delimited_dna_encoder(self): + original = 'TTCGCGGNNN,AACCCAACGC,CATCTATGTA,NNTTGAGTTG,TTGAGTTAAA' + + # Encoding should be reversible for any reasonable chunk size. + for chunk_size in [1, 2, 4, 6, 8]: + encoder = dna_encoder.DelimitedDNAEncoder(chunk_size=chunk_size) + encoded = encoder.encode(original) + decoded = encoder.decode(encoded) + self.assertEqual(original, decoded) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/data_generators/enwik8.py b/tensor2tensor/data_generators/enwik8.py new file mode 100644 index 000000000..58e262b07 --- /dev/null +++ b/tensor2tensor/data_generators/enwik8.py @@ -0,0 +1,203 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
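As the test above exercises, both encoders round-trip sequences of any length: `encode` pads the last chunk internally and `decode` strips the padding again, and the delimited variant keeps the delimiter as an extra token. A short usage sketch, assuming the library (and TensorFlow) is installed; the sequences and chunk size are arbitrary:

```python
from tensor2tensor.data_generators import dna_encoder

enc = dna_encoder.DNAEncoder(chunk_size=4)
ids = enc.encode("ACTGAC")     # 6 bases -> 2 chunks, the second padded internally
print(ids)                     # two ids, one per chunk
print(enc.decode(ids))         # ACTGAC -- padding is stripped on decode

denc = dna_encoder.DelimitedDNAEncoder(chunk_size=4)
print(denc.decode(denc.encode("ACTG,ACGT")))   # ACTG,ACGT
```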
+ +"""Data generators for enwik8 data-set.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +def _maybe_download_corpus(tmp_dir): + """Download and unpack the corpus. + + Args: + tmp_dir: directory containing dataset. + + Returns: + path to entire corpus as a text file. + """ + corpus_url = "http://mattmahoney.net/dc/enwik8.zip" + corpus_filename = os.path.basename(corpus_url) + compressed_filepath = generator_utils.maybe_download( + tmp_dir, corpus_filename, corpus_url) + + zip_ref = zipfile.ZipFile(compressed_filepath, "r") + zip_ref.extractall(tmp_dir) + zip_ref.close() + + return os.path.join(tmp_dir, "enwik8") + + +@registry.register_problem +class Enwik8L65k(text_problems.Text2SelfProblem): + """Enwiki8, with examples up to 65,536 characters long.""" + + READ_MODE = "r" + DUPE_FACTOR = 4 + + @property + def is_generate_per_split(self): + return True + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.EN_CHR + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 16, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }, { + "split": problem.DatasetSplit.TEST, + "shards": 1, + }] + + def max_length(self, model_hparams): + return self.sequence_length + + @property + def sequence_length(self): + """Length of each example (number of characters).""" + return 65536 + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + filepath = _maybe_download_corpus(tmp_dir) + with tf.io.gfile.GFile(filepath, mode=self.READ_MODE) as f: + data = f.read() + + tf.logging.info("Length of enwik8 = %d", len(data)) + + num_test_chars = 5000000 + + if dataset_split == problem.DatasetSplit.TRAIN: + part = data[: -2 * num_test_chars] + elif dataset_split == problem.DatasetSplit.EVAL: + part = data[-2 * num_test_chars: -num_test_chars] + elif dataset_split == problem.DatasetSplit.TEST: + part = data[-num_test_chars:] + else: + raise ValueError("Undefined dataset_split") + + tf.logging.info("Length of split '%s' = %d", dataset_split, len(part)) + + # TODO(kitaev): Better handling of evaluation data, to ensure that there is + # always context available. + if dataset_split == problem.DatasetSplit.TRAIN: + offset = self.sequence_length // self.DUPE_FACTOR + for start in range(0, len(part), offset): + yield {"targets": part[start:start+self.sequence_length]} + else: + for start in range(0, len(part), self.sequence_length): + yield {"targets": part[start:start+self.sequence_length]} + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + generator = self.generate_samples(data_dir, tmp_dir, dataset_split) + vocab = self.get_or_create_vocab(data_dir, tmp_dir) + for sample in generator: + sample["targets"] = vocab.encode(sample["targets"]) + yield sample + + +@registry.register_problem +class Enwik8L2k(Enwik8L65k): + """Enwiki8, with examples up to 2048 characters long. + + Reads the input byte-wise and chunks it into fragments of maximum + length of 2048. 
Does not shift byte indices (we do not assume cls or + pad are used), unlike the base class! + """ + + READ_MODE = "rb" + + @property + def sequence_length(self): + """Length of each example (number of characters).""" + return 2048 + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + return self.generate_samples(data_dir, tmp_dir, dataset_split) + + +@registry.register_problem +class Enwik8L32k(Enwik8L2k): + + @property + def sequence_length(self): + """Length of each example (in tokens).""" + return 32768 + + +@registry.register_problem +class Enwik8L16k(Enwik8L2k): + + @property + def sequence_length(self): + """Length of each example (in tokens).""" + return 16384 + + +@registry.register_problem +class Enwik8L8k(Enwik8L2k): + + @property + def sequence_length(self): + """Length of each example (in tokens).""" + return 8192 + + +@registry.register_problem +class Enwik8L4k(Enwik8L2k): + + @property + def sequence_length(self): + """Length of each example (in tokens).""" + return 4096 + + +@registry.register_problem +class Enwik8L1k(Enwik8L2k): + + @property + def sequence_length(self): + """Length of each example (in tokens).""" + return 1024 + + +@registry.register_problem +class Enwik8L512(Enwik8L2k): + + @property + def sequence_length(self): + """Length of each example (in tokens).""" + return 512 diff --git a/tensor2tensor/data_generators/fsns.py b/tensor2tensor/data_generators/fsns.py new file mode 100644 index 000000000..ac012e8ed --- /dev/null +++ b/tensor2tensor/data_generators/fsns.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""FSNS.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.layers import modalities +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_problem +class ImageFSNS(image_utils.ImageProblem): + """Problem spec for French Street Name recognition.""" + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + list_url = ("https://raw.githubusercontent.com/tensorflow/models/master/" + "street/python/fsns_urls.txt") + fsns_urls = generator_utils.maybe_download(tmp_dir, "fsns_urls.txt", + list_url) + fsns_files = [ + f.strip() for f in open(fsns_urls, "r") if f.startswith("http://") + ] + for url in fsns_files: + if "/train/train" in url: + generator_utils.maybe_download( + data_dir, "image_fsns-train" + url[-len("-00100-of-00512"):], url) + elif "/validation/validation" in url: + generator_utils.maybe_download( + data_dir, "image_fsns-dev" + url[-len("-00100-of-00512"):], url) + elif "charset" in url: + generator_utils.maybe_download(data_dir, "charset_size134.txt", url) + + def feature_encoders(self, data_dir): + # This vocab file must be present within the data directory. + vocab_filename = os.path.join(data_dir, "charset_size134.txt") + return { + "inputs": text_encoder.ImageEncoder(), + "targets": text_encoder.SubwordTextEncoder(vocab_filename) + } + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.IMAGE, + "targets": modalities.ModalityType.SYMBOL} + p.vocab_size = {"inputs": 256, + "targets": self._encoders["targets"].vocab_size} + p.batch_size_multiplier = 256 + p.input_space_id = problem.SpaceID.IMAGE + p.target_space_id = problem.SpaceID.EN_TOK + + def example_reading_spec(self): + label_key = "image/unpadded_label" + data_fields, data_items_to_decoders = ( + super(ImageFSNS, self).example_reading_spec()) + data_fields[label_key] = tf.VarLenFeature(tf.int64) + data_items_to_decoders["targets"] = contrib.slim().tfexample_decoder.Tensor( + label_key) + return data_fields, data_items_to_decoders diff --git a/tensor2tensor/data_generators/function_docstring.py b/tensor2tensor/data_generators/function_docstring.py new file mode 100644 index 000000000..93a5e830d --- /dev/null +++ b/tensor2tensor/data_generators/function_docstring.py @@ -0,0 +1,110 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Github function/text similatrity problems.""" +import csv +from six import StringIO +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import metrics +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +@registry.register_problem +class GithubFunctionDocstring(text_problems.Text2TextProblem): + """Function and Docstring similarity Problem. + + This problem contains the data consisting of function + and docstring pairs as CSV files. The files are structured + such that they contain two columns without headers containing + the docstring tokens and function tokens. The delimiter is + ",". + """ + + NUM_SHARDS = 100 + + @property + def base_url(http://github.com/self): + return "gs://kubeflow-examples/t2t-code-search/raw_data" + + @property + def pair_files_list(self): + files = [] + for i in range(self.NUM_SHARDS): + files.append([ + "{}/func-doc-pairs-{:05}-of-{:05}.csv".format(self.base_url, i, + self.NUM_SHARDS), + ("func-doc-pairs-{:05}-of-{:05}.csv".format(i, self.NUM_SHARDS),) + ]) + return files + + @property + def is_generate_per_split(self): + return False + + @property + def approx_vocab_size(self): + return 2**13 + + @property + def max_samples_for_vocab(self): + # FIXME(sanyamkapoor): This exists to handle memory explosion. + return int(3.5e5) + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """A generator to return data samples.Returns the data generator to return. + + + Args: + data_dir: A string representing the data directory. + tmp_dir: A string representing the temporary directory and is + used to download files if not already available. + dataset_split: Train, Test or Eval. + + Yields: + Each element yielded is of a Python dict of the form + {"inputs": "STRING", "targets": "STRING"} + """ + + # TODO(sanyamkapoor): Manually separate train/eval data set. + csv_file_names = self.pair_files_list + csv_files = [ + generator_utils.maybe_download(tmp_dir, file_list[0], uri) + for uri, file_list in csv_file_names + ] + + for pairs_file in csv_files: + tf.logging.debug("Reading {}".format(pairs_file)) + with open(pairs_file, "r") as csv_file: + for line in csv_file: + reader = csv.reader(StringIO(line)) + for docstring_tokens, function_tokens in reader: + yield { + "inputs": docstring_tokens, + "targets": function_tokens, + "embed_code": [0], + } + + def preprocess_example(self, example, mode, unused_hparams): + if mode != tf_estimator.ModeKeys.TRAIN: + example["embed_code"] = [0] + return example + + def eval_metrics(self): + return [ + metrics.Metrics.ACC + ] diff --git a/tensor2tensor/data_generators/gene_expression.py b/tensor2tensor/data_generators/gene_expression.py new file mode 100644 index 000000000..0244edda8 --- /dev/null +++ b/tensor2tensor/data_generators/gene_expression.py @@ -0,0 +1,295 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
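`generate_samples` above parses each downloaded shard line by line; every line is a two-column CSV record holding the docstring tokens and the function tokens. A sketch of that per-line parsing on a single made-up record:

```python
import csv
from io import StringIO

# One hypothetical line from a func-doc-pairs-*.csv shard.
line = '"add two numbers","def add ( a , b ) : return a + b"\n'

for docstring_tokens, function_tokens in csv.reader(StringIO(line)):
    sample = {"inputs": docstring_tokens,
              "targets": function_tokens,
              "embed_code": [0]}
    print(sample)
```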
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Gene expression problems. + +Inputs are bases ACTG (with indices assigned in that order). + +Requires the h5py library. + +File format expected: + * h5 file + * h5 datasets should include {train, valid, test}_{in, na, out}, which will + map to inputs, targets mask, and targets for the train, dev, and test + datasets. + * Each record in *_in is a bool 2-D numpy array with one-hot encoded base + pairs with shape [num_input_timesteps, 4]. The base order is ACTG. + * Each record in *_na is a bool 1-D numpy array with shape + [num_output_timesteps]. + * Each record in *_out is a float 2-D numpy array with shape + [num_output_timesteps, num_predictions]. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import multiprocessing as mp +import os +import h5py +import numpy as np + +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.data_generators import dna_encoder +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.layers import modalities +from tensor2tensor.utils import metrics +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +MAX_CONCURRENT_PROCESSES = 10 + + +class GeneExpressionProblem(problem.Problem): + """Base Problem for gene expression datasets.""" + + @property + def download_url(http://github.com/self): + raise NotImplementedError() + + @property + def h5_file(self): + raise NotImplementedError() + + @property + def num_output_predictions(self): + """Number of float predictions per timestep.""" + return 10 + + @property + def chunk_size(self): + return 4 + + def feature_encoders(self, data_dir): + del data_dir + return { + "inputs": dna_encoder.DNAEncoder(chunk_size=self.chunk_size), + # TODO(rsepassi): RealEncoder? 
+ "targets": text_encoder.TextEncoder() + } + + @property + def num_shards(self): + return 100 + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + try: + # Download source data if download_url specified + h5_filepath = generator_utils.maybe_download(tmp_dir, self.h5_file, + self.download_url) + except NotImplementedError: + # Otherwise, look for it locally + h5_filepath = os.path.join(tmp_dir, self.h5_file) + + with h5py.File(h5_filepath, "r") as h5_file: + num_train_examples = h5_file["train_in"].len() + num_dev_examples = h5_file["valid_in"].len() + num_test_examples = h5_file["test_in"].len() + + # Collect all_filepaths to later shuffle + all_filepaths = [] + # Collect created shard processes to start and join + processes = [] + + datasets = [(self.training_filepaths, self.num_shards, "train", + num_train_examples), (self.dev_filepaths, 10, "valid", + num_dev_examples), + (self.test_filepaths, 10, "test", num_test_examples)] + for fname_fn, nshards, key_prefix, num_examples in datasets: + outfiles = fname_fn(data_dir, nshards, shuffled=False) + all_filepaths.extend(outfiles) + for start_idx, end_idx, outfile in generate_shard_args( + outfiles, num_examples): + p = mp.Process( + target=generate_dataset, + args=(h5_filepath, key_prefix, [outfile], self.chunk_size, + start_idx, end_idx)) + processes.append(p) + + # 1 per training shard + 10 for dev + 10 for test + assert len(processes) == self.num_shards + 20 + + # Start and wait for processes in batches + num_batches = int( + math.ceil(float(len(processes)) / MAX_CONCURRENT_PROCESSES)) + for i in range(num_batches): + start = i * MAX_CONCURRENT_PROCESSES + end = start + MAX_CONCURRENT_PROCESSES + current = processes[start:end] + for p in current: + p.start() + for p in current: + p.join() + + # Shuffle + generator_utils.shuffle_dataset(all_filepaths) + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.REAL_LOG_POISSON_LOSS} + p.vocab_size = {"inputs": self._encoders["inputs"].vocab_size, + "targets": self.num_output_predictions} + p.input_space_id = problem.SpaceID.DNA + p.target_space_id = problem.SpaceID.REAL + + def example_reading_spec(self): + data_fields = { + "inputs": tf.VarLenFeature(tf.int64), + "targets": tf.VarLenFeature(tf.float32), + } + data_items_to_decoders = None + return (data_fields, data_items_to_decoders) + + def preprocess_example(self, example, mode, unused_hparams): + del mode + + # Reshape targets to contain num_output_predictions per output timestep + example["targets"] = tf.reshape(example["targets"], + [-1, 1, self.num_output_predictions]) + # Slice off EOS - not needed, and messes up the GeneExpressionConv model + # which expects the input length to be a multiple of the target length. 
+ example["inputs"] = example["inputs"][:-1] + + return example + + def eval_metrics(self): + return [metrics.Metrics.LOG_POISSON, metrics.Metrics.R2] + + +@registry.register_problem +class GenomicsExpressionCage10(GeneExpressionProblem): + + @property + def download_url(http://github.com/self): + return "https://storage.googleapis.com/262k_binned/cage10_l262k_w128.h5" + + @property + def h5_file(self): + return "cage10.h5" + + +@registry.register_problem +class GenomicsExpressionGm12878(GeneExpressionProblem): + + @property + def download_url(http://github.com/self): + return "https://storage.googleapis.com/262k_binned/gm12878_l262k_w128.h5" + + @property + def h5_file(self): + return "gm12878.h5" + + +@registry.register_problem +class GenomicsExpressionL262k(GeneExpressionProblem): + + @property + def h5_file(self): + return "l262k_w128.h5" + + +def generate_shard_args(outfiles, num_examples): + """Generate start and end indices per outfile.""" + num_shards = len(outfiles) + num_examples_per_shard = num_examples // num_shards + start_idxs = [i * num_examples_per_shard for i in range(num_shards)] + end_idxs = list(start_idxs) + end_idxs.pop(0) + end_idxs.append(num_examples) + return zip(start_idxs, end_idxs, outfiles) + + +def generate_dataset(h5_filepath, + key_prefix, + out_filepaths, + chunk_size=1, + start_idx=None, + end_idx=None): + print("PID: %d, Key: %s, (Start, End): (%s, %s)" % (os.getpid(), key_prefix, + start_idx, end_idx)) + generator_utils.generate_files( + dataset_generator(h5_filepath, key_prefix, chunk_size, start_idx, + end_idx), out_filepaths) + + +def dataset_generator(filepath, + dataset, + chunk_size=1, + start_idx=None, + end_idx=None): + """Generate example dicts.""" + encoder = dna_encoder.DNAEncoder(chunk_size=chunk_size) + with h5py.File(filepath, "r") as h5_file: + # Get input keys from h5_file + src_keys = [s % dataset for s in ["%s_in", "%s_na", "%s_out"]] + src_values = [h5_file[k] for k in src_keys] + inp_data, mask_data, out_data = src_values + assert len(set([v.len() for v in src_values])) == 1 + + if start_idx is None: + start_idx = 0 + if end_idx is None: + end_idx = inp_data.len() + + for i in range(start_idx, end_idx): + if i % 100 == 0: + print("Generating example %d for %s" % (i, dataset)) + inputs, mask, outputs = inp_data[i], mask_data[i], out_data[i] + ex_dict = to_example_dict(encoder, inputs, mask, outputs) + # Original data has one output for every 128 input bases. Ensure that the + # ratio has been maintained given the chunk size and removing EOS. + assert (len(ex_dict["inputs"]) - 1) == (( + 128 // chunk_size) * ex_dict["targets_shape"][0]) + yield ex_dict + + +def to_example_dict(encoder, inputs, mask, outputs): + """Convert single h5 record to an example dict.""" + # Inputs + bases = [] + input_ids = [] + last_idx = -1 + for row in np.argwhere(inputs): + idx, base_id = row + idx, base_id = int(idx), int(base_id) + assert idx > last_idx # if not, means 2 True values in 1 row + # Some rows are all False. Those rows are mapped to UNK_ID. + while idx != last_idx + 1: + bases.append(encoder.UNK) + last_idx += 1 + bases.append(encoder.BASES[base_id]) + last_idx = idx + assert len(inputs) == len(bases) + + input_ids = encoder.encode(bases) + input_ids.append(text_encoder.EOS_ID) + + # Targets: mask and output + targets_mask = [float(v) for v in mask] + # The output is (n, m); store targets_shape so that it can be reshaped + # properly on the other end. 
+ targets = [float(v) for v in outputs.flatten()] + targets_shape = [int(dim) for dim in outputs.shape] + assert mask.shape[0] == outputs.shape[0] + + example_keys = ["inputs", "targets_mask", "targets", "targets_shape"] + ex_dict = dict( + zip(example_keys, [input_ids, targets_mask, targets, targets_shape])) + return ex_dict diff --git a/tensor2tensor/data_generators/gene_expression_test.py b/tensor2tensor/data_generators/gene_expression_test.py new file mode 100644 index 000000000..c23c869c0 --- /dev/null +++ b/tensor2tensor/data_generators/gene_expression_test.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for Genetics problems.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np + +from tensor2tensor.data_generators import dna_encoder +from tensor2tensor.data_generators import gene_expression + +import tensorflow.compat.v1 as tf + + +class GeneticsTest(tf.test.TestCase): + + def _one_hot_bases(self, bases): + ref = ["A", "C", "T", "G"] + one_hots = [] + for base in bases: + one_hot = [False] * 4 + if base in ref: + one_hot[ref.index(base)] = True + one_hots.append(one_hot) + return np.array(one_hots) + + def testRecordToExample(self): + encoder = dna_encoder.DNAEncoder(chunk_size=2) + raw_inputs = ["A", "C", "G", "N", "C", "T"] + + # Put in numpy arrays in the same format as in the h5 file + inputs = self._one_hot_bases(raw_inputs) + mask = np.array([True, False, True]) + outputs = np.array([[1.0, 2.0, 3.0], [5.0, 1.0, 0.2], [5.1, 2.3, 2.3]]) + # Convert to example dict + ex_dict = gene_expression.to_example_dict(encoder, inputs, mask, outputs) + + self.assertEqual(len(raw_inputs) // 2 + 1, len(ex_dict["inputs"])) + self.assertAllEqual(encoder.encode(raw_inputs) + [1], ex_dict["inputs"]) + self.assertAllEqual([1.0, 0.0, 1.0], ex_dict["targets_mask"]) + self.assertAllEqual([1.0, 2.0, 3.0, 5.0, 1.0, 0.2, 5.1, 2.3, 2.3], + ex_dict["targets"]) + self.assertAllEqual([3, 3], ex_dict["targets_shape"]) + + def testGenerateShardArgs(self): + num_examples = 37 + num_shards = 4 + outfiles = [str(i) for i in range(num_shards)] + shard_args = gene_expression.generate_shard_args(outfiles, num_examples) + + starts, ends, fnames = zip(*shard_args) + self.assertAllEqual([0, 9, 18, 27], starts) + self.assertAllEqual([9, 18, 27, 37], ends) + self.assertAllEqual(fnames, outfiles) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/generator_utils.py b/tensor2tensor/data_generators/generator_utils.py index 487546e16..0a56c7ae0 100644 --- a/tensor2tensor/data_generators/generator_utils.py +++ b/tensor2tensor/data_generators/generator_utils.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
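The hand-off between these two modules is worth spelling out: `gene_expression.to_example_dict` (above) turns one h5 record into a flat feature dict, and the `generator_utils` helpers revised below serialize such dicts into `tf.train.Example` protos and write them to sharded TFRecord files. The following is a rough, illustrative sketch of that path rather than code from this changeset; it assumes an installed tensor2tensor, and the toy arrays, the "my_problem" name, and the "/tmp/t2t-data" directory are placeholders.

```python
import numpy as np
import tensorflow.compat.v1 as tf

from tensor2tensor.data_generators import dna_encoder
from tensor2tensor.data_generators import gene_expression
from tensor2tensor.data_generators import generator_utils

encoder = dna_encoder.DNAEncoder(chunk_size=2)

# One toy record in the h5 layout described above: one-hot bases in ACTG
# order, a per-output-timestep mask, and a float matrix of shape
# [num_output_timesteps, num_predictions].
inputs = np.array([[1, 0, 0, 0],   # A
                   [0, 1, 0, 0],   # C
                   [0, 0, 1, 0],   # T
                   [0, 0, 0, 1]],  # G
                  dtype=bool)
mask = np.array([True, True])
outputs = np.array([[1.0, 2.0], [3.0, 4.0]])

ex_dict = gene_expression.to_example_dict(encoder, inputs, mask, outputs)

# generate_files() calls to_example() internally; it is shown here only to
# make the conversion to a tf.train.Example proto visible.
example = generator_utils.to_example(ex_dict)
print(sorted(example.features.feature.keys()))

# Write a single unshuffled training shard.
tf.gfile.MakeDirs("/tmp/t2t-data")
out_files = generator_utils.train_data_filenames("my_problem", "/tmp/t2t-data", 1)
generator_utils.generate_files([ex_dict], out_files)
```

In the full pipeline, `GeneExpressionProblem.generate_data` runs the same conversion per shard in worker processes and then calls `shuffle_dataset` on the resulting files.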
@@ -18,21 +19,29 @@ from __future__ import division from __future__ import print_function +import functools import gzip -import io +import math +import multiprocessing import os +import random +import stat import tarfile -import urllib - -# Dependency imports - +import tempfile +import numpy as np +import requests import six -from six.moves import xrange # pylint: disable=redefined-builtin +from six.moves import range # pylint: disable=redefined-builtin +# Imports urllib on Python2, urllib.request on Python3 +import six.moves.urllib_request as urllib -from tensor2tensor.data_generators.text_encoder import SubwordTextEncoder -from tensor2tensor.data_generators.tokenizer import Tokenizer +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.utils import mlperf_log -import tensorflow as tf +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +UNSHUFFLED_SUFFIX = "-unshuffled" def to_example(dictionary): @@ -40,16 +49,25 @@ def to_example(dictionary): features = {} for (k, v) in six.iteritems(dictionary): if not v: - raise ValueError("Empty generated field: %s", str((k, v))) - if isinstance(v[0], six.integer_types): + raise ValueError("Empty generated field: %s" % str((k, v))) + # Subtly in PY2 vs PY3, map is not scriptable in py3. As a result, + # map objects will fail with TypeError, unless converted to a list. + if six.PY3 and isinstance(v, map): + v = list(v) + if (isinstance(v[0], six.integer_types) or + np.issubdtype(type(v[0]), np.integer)): features[k] = tf.train.Feature(int64_list=tf.train.Int64List(value=v)) elif isinstance(v[0], float): features[k] = tf.train.Feature(float_list=tf.train.FloatList(value=v)) elif isinstance(v[0], six.string_types): + if not six.PY2: # Convert in python 3. 
+ v = [bytes(x, "utf-8") for x in v] + features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v)) + elif isinstance(v[0], bytes): features[k] = tf.train.Feature(bytes_list=tf.train.BytesList(value=v)) else: - raise ValueError("Value is neither an int nor a float; v: %s type: %s" % - (str(v[0]), str(type(v[0])))) + raise ValueError("Value for %s is not a recognized type; v: %s type: %s" % + (k, str(v[0]), str(type(v[0])))) return tf.train.Example(features=tf.train.Features(feature=features)) @@ -61,7 +79,7 @@ def generate_files_distributed(generator, task_id=0): """generate_files but with a single writer writing to shard task_id.""" assert task_id < num_shards - output_filename = "%s-%.5d-of-%.5d" % (output_name, task_id, num_shards) + output_filename = sharded_name(output_name, task_id, num_shards) output_file = os.path.join(output_dir, output_filename) tf.logging.info("Writing to file %s", output_file) writer = tf.python_io.TFRecordWriter(output_file) @@ -73,18 +91,57 @@ def generate_files_distributed(generator, counter += 1 if max_cases and counter > max_cases: break - sequence_example = to_example(case) - writer.write(sequence_example.SerializeToString()) + example = to_example(case) + writer.write(example.SerializeToString()) writer.close() return output_file -def generate_files(generator, - output_name, - output_dir, - num_shards=1, - max_cases=None): +def _data_filenames(output_name, output_dir, num_shards): + return [ + os.path.join(output_dir, fname) + for fname in shard_filepath(output_name, num_shards) + ] + + +def train_data_filenames(problem, output_dir, num_shards): + return _data_filenames(problem + "-train", output_dir, num_shards) + + +def dev_data_filenames(problem, output_dir, num_shards): + return _data_filenames(problem + "-dev", output_dir, num_shards) + + +def test_data_filenames(problem, output_dir, num_shards): + return _data_filenames(problem + "-test", output_dir, num_shards) + + +def combined_data_filenames(problem, output_dir, num_training_shards): + return (train_data_filenames(problem, output_dir, num_training_shards) + + dev_data_filenames(problem, output_dir, 1) + test_data_filenames( + problem, output_dir, 1)) + + +def sharded_name(base_name, shard, total_shards): + return "%s-%.5d-of-%.5d" % (base_name, shard, total_shards) + + +def shard_filepath(fname, num_shards): + return [ + sharded_name(fname, shard, num_shards) for shard in range(num_shards) + ] + + +def outputs_exist(filenames): + for out_fname in filenames: + out_fname = out_fname.replace(UNSHUFFLED_SUFFIX, "") + if tf.gfile.Exists(out_fname): + return out_fname + + +def generate_files(generator, output_filenames, + max_cases=None, cycle_every_n=1): """Generate cases from a generator and save as TFRecord files. Generated cases are transformed to tf.Example protos and saved as TFRecords @@ -92,42 +149,116 @@ def generate_files(generator, Args: generator: a generator yielding (string -> int/float/str list) dictionaries. - output_name: the file name prefix under which output will be saved. - output_dir: directory to save the output to. - num_shards: how many shards to use (defaults to 1). + output_filenames: List of output file paths. max_cases: maximum number of cases to get from the generator; if None (default), we use the generator until StopIteration is raised. - - Returns: - List of output file paths. + cycle_every_n: how many cases from the generator to take before + switching to the next shard; by default set to 1, switch every case. 
""" - writers = [] - output_files = [] - for shard in xrange(num_shards): - output_filename = "%s-%.5d-of-%.5d" % (output_name, shard, num_shards) - output_file = os.path.join(output_dir, output_filename) - output_files.append(output_file) - writers.append(tf.python_io.TFRecordWriter(output_file)) + if outputs_exist(output_filenames): + tf.logging.info("Skipping generator because outputs files exists at {}" + .format(output_filenames)) + return + tmp_filenames = [fname + ".incomplete" for fname in output_filenames] + num_shards = len(output_filenames) + # Check if is training or eval, ref: train_data_filenames(). + if num_shards > 0: + if "-train" in output_filenames[0]: + tag = "train" + elif "-dev" in output_filenames[0]: + tag = "eval" + else: + tag = "other" + writers = [tf.python_io.TFRecordWriter(fname) for fname in tmp_filenames] counter, shard = 0, 0 for case in generator: + if case is None: + continue if counter % 100000 == 0: - tf.logging.info("Generating case %d for %s." % (counter, output_name)) + tf.logging.info("Generating case %d." % counter) counter += 1 if max_cases and counter > max_cases: break - sequence_example = to_example(case) - writers[shard].write(sequence_example.SerializeToString()) - shard = (shard + 1) % num_shards + example = to_example(case) + writers[shard].write(example.SerializeToString()) + if counter % cycle_every_n == 0: + shard = (shard + 1) % num_shards for writer in writers: writer.close() - return output_files + for tmp_name, final_name in zip(tmp_filenames, output_filenames): + tf.gfile.Rename(tmp_name, final_name) + + if num_shards > 0: + if tag == "train": + mlperf_log.transformer_print( + key=mlperf_log.PREPROC_NUM_TRAIN_EXAMPLES, value=counter) + elif tag == "eval": + mlperf_log.transformer_print( + key=mlperf_log.PREPROC_NUM_EVAL_EXAMPLES, value=counter) + + tf.logging.info("Generated %s Examples", counter) + + +def download_report_hook(count, block_size, total_size): + """Report hook for download progress. + + Args: + count: current block number + block_size: block size + total_size: total size + """ + percent = int(count * block_size * 100 / total_size) + print("\r%d%%" % percent + " completed", end="\r") + +def maybe_download(directory, filename, uri): + """Download filename from uri unless it's already in directory. -def maybe_download(directory, filename, url): - """Download filename from url unless it's already in directory. + Copies a remote file to local if that local file does not already exist. If + the local file pre-exists this function call, it does not check that the local + file is a copy of the remote. + + Remote filenames can be filepaths, any URI readable by tensorflow.gfile, or a + URL. + + Args: + directory: path to the directory that will be used. + filename: name of the file to download to (do nothing if it already exists). + uri: URI to copy (or download) from. + + Returns: + The path to the downloaded file. 
+ """ + tf.gfile.MakeDirs(directory) + filepath = os.path.join(directory, filename) + if tf.gfile.Exists(filepath): + tf.logging.info("Not downloading, file already found: %s" % filepath) + return filepath + + tf.logging.info("Downloading %s to %s" % (uri, filepath)) + try: + tf.gfile.Copy(uri, filepath) + except tf.errors.UnimplementedError: + if uri.startswith("http"): + inprogress_filepath = filepath + ".incomplete" + inprogress_filepath, _ = urllib.urlretrieve( + uri, inprogress_filepath, reporthook=download_report_hook) + # Print newline to clear the carriage return from the download progress + print() + tf.gfile.Rename(inprogress_filepath, filepath) + else: + raise ValueError("Unrecognized URI: " + filepath) + statinfo = os.stat(filepath) + tf.logging.info("Successfully downloaded %s, %s bytes." % + (filename, statinfo.st_size)) + return filepath + + +def maybe_download_from_drive(directory, filename, url): + """Download filename from Google drive unless it's already in directory. Args: directory: path to the directory that will be used. @@ -139,16 +270,40 @@ def maybe_download(directory, filename, url): """ if not tf.gfile.Exists(directory): tf.logging.info("Creating directory %s" % directory) - os.mkdir(directory) + tf.gfile.MakeDirs(directory) filepath = os.path.join(directory, filename) - if not tf.gfile.Exists(filepath): - tf.logging.info("Downloading %s to %s" % (url, filepath)) - filepath, _ = urllib.urlretrieve(url, filepath) - statinfo = os.stat(filepath) - tf.logging.info("Succesfully downloaded %s, %s bytes." % (filename, - statinfo.st_size)) - else: + confirm_token = None + if tf.gfile.Exists(filepath): tf.logging.info("Not downloading, file already found: %s" % filepath) + return filepath + + # Since the file is big, drive will scan it for virus and take it to a + # warning page. We find the confirm token on this page and append it to the + # URL to start the download process. + confirm_token = None + session = requests.Session() + response = session.get(url, stream=True) + for k, v in response.cookies.items(): + if k.startswith("download_warning"): + confirm_token = v + + if confirm_token: + url = url + "&confirm=" + confirm_token + tf.logging.info("Downloading %s to %s" % (url, filepath)) + + response = session.get(url, stream=True) + # Now begin the download. + chunk_size = 16 * 1024 + with open(filepath, "wb") as f: + for chunk in response.iter_content(chunk_size): + if chunk: + f.write(chunk) + + # Print newline to clear the carriage return from the download progress + print() + statinfo = os.stat(filepath) + tf.logging.info("Successfully downloaded %s, %s bytes." % (filename, + statinfo.st_size)) return filepath @@ -159,90 +314,164 @@ def gunzip_file(gz_path, new_path): gz_path: path to the zipped file. new_path: path to where the file will be unzipped. """ + if tf.gfile.Exists(new_path): + tf.logging.info("File %s already exists, skipping unpacking" % new_path) + return tf.logging.info("Unpacking %s to %s" % (gz_path, new_path)) + # We may be unpacking into a newly created directory, add write mode. 
+ mode = stat.S_IRWXU or stat.S_IXGRP or stat.S_IRGRP or stat.S_IROTH + os.chmod(os.path.dirname(new_path), mode) with gzip.open(gz_path, "rb") as gz_file: - with io.open(new_path, "wb") as new_file: + with tf.gfile.GFile(new_path, mode="wb") as new_file: for line in gz_file: new_file.write(line) -# TODO(aidangomez): en-fr tasks are significantly over-represented below -_DATA_FILE_URLS = [ - # German-English - [ - "http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz", # pylint: disable=line-too-long - [ - "training-parallel-nc-v11/news-commentary-v11.de-en.en", - "training-parallel-nc-v11/news-commentary-v11.de-en.de" - ] - ], - # German-English & French-English - [ - "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz", [ - "commoncrawl.de-en.en", "commoncrawl.de-en.de", - "commoncrawl.fr-en.en", "commoncrawl.fr-en.fr" - ] - ], - [ - "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz", [ - "training/europarl-v7.de-en.en", "training/europarl-v7.de-en.de", - "training/europarl-v7.fr-en.en", "training/europarl-v7.fr-en.fr" - ] - ], - # French-English - [ - "http://www.statmt.org/wmt10/training-giga-fren.tar", - ["giga-fren.release2.fixed.en.gz", "giga-fren.release2.fixed.fr.gz"] - ], - [ - "http://www.statmt.org/wmt13/training-parallel-un.tgz", - ["un/undoc.2000.fr-en.en", "un/undoc.2000.fr-en.fr"] - ], -] - - -def get_or_generate_vocab(tmp_dir, vocab_filename, vocab_size): - """Generate a vocabulary from the datasets listed in _DATA_FILE_URLS.""" - vocab_filepath = os.path.join(tmp_dir, vocab_filename) - if os.path.exists(vocab_filepath): - vocab = SubwordTextEncoder(vocab_filepath) - return vocab - - tokenizer = Tokenizer() - for source in _DATA_FILE_URLS: +def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, + generator, max_subtoken_length=None, + reserved_tokens=None): + """Inner implementation for vocab generators. + + Args: + data_dir: The base directory where data and vocab files are stored. If None, + then do not save the vocab even if it doesn't exist. + vocab_filename: relative filename where vocab file is stored + vocab_size: target size of the vocabulary constructed by SubwordTextEncoder + generator: a generator that produces tokens from the vocabulary + max_subtoken_length: an optional integer. Set this to a finite value to + avoid quadratic costs during vocab building. + reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS` + should be a prefix of `reserved_tokens`. If `None`, defaults to + `RESERVED_TOKENS`. + + Returns: + A SubwordTextEncoder vocabulary object. 
+ """ + if data_dir and vocab_filename: + vocab_filepath = os.path.join(data_dir, vocab_filename) + if tf.gfile.Exists(vocab_filepath): + tf.logging.info("Found vocab file: %s", vocab_filepath) + return text_encoder.SubwordTextEncoder(vocab_filepath) + else: + vocab_filepath = None + + tf.logging.info("Generating vocab file: %s", vocab_filepath) + vocab = text_encoder.SubwordTextEncoder.build_from_generator( + generator, vocab_size, max_subtoken_length=max_subtoken_length, + reserved_tokens=reserved_tokens) + + if vocab_filepath: + tf.gfile.MakeDirs(data_dir) + vocab.store_to_file(vocab_filepath) + + return vocab + + +def get_or_generate_vocab(data_dir, tmp_dir, vocab_filename, vocab_size, + sources, file_byte_budget=1e6, + max_subtoken_length=None): + """Generate a vocabulary from the datasets in sources.""" + + vocab_generator = generate_lines_for_vocab(tmp_dir, sources, file_byte_budget) + return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, + vocab_generator, max_subtoken_length) + + +def generate_lines_for_vocab(tmp_dir, sources, file_byte_budget=1e6): + """Generate lines for vocabulary generation.""" + tf.logging.info("Generating vocab from: %s", str(sources)) + for source in sources: url = source[0] filename = os.path.basename(url) - read_type = "r:gz" if "tgz" in filename else "r" - compressed_file = maybe_download(tmp_dir, filename, url) - with tarfile.open(compressed_file, read_type) as corpus_tar: - corpus_tar.extractall(tmp_dir) - for lang_file in source[1]: tf.logging.info("Reading file: %s" % lang_file) filepath = os.path.join(tmp_dir, lang_file) + # Extract from tar if needed. + if not tf.gfile.Exists(filepath): + read_type = "r:gz" if filename.endswith("tgz") else "r" + with tarfile.open(compressed_file, read_type) as corpus_tar: + corpus_tar.extractall(tmp_dir) + # For some datasets a second extraction is necessary. - if ".gz" in lang_file: - tf.logging.info("Unpacking subdirectory %s" % filepath) + if lang_file.endswith(".gz"): new_filepath = os.path.join(tmp_dir, lang_file[:-3]) - gunzip_file(filepath, new_filepath) + if tf.gfile.Exists(new_filepath): + tf.logging.info( + "Subdirectory %s already exists, skipping unpacking" % filepath) + else: + tf.logging.info("Unpacking subdirectory %s" % filepath) + gunzip_file(filepath, new_filepath) filepath = new_filepath - # Use Tokenizer to count the word occurrences. with tf.gfile.GFile(filepath, mode="r") as source_file: - file_byte_budget = 3.5e5 if "en" in filepath else 7e5 + file_byte_budget_ = file_byte_budget + counter = 0 + countermax = int(source_file.size() / file_byte_budget_ / 2) for line in source_file: - if file_byte_budget <= 0: - break - line = line.strip() - file_byte_budget -= len(line) - _ = tokenizer.encode(line) - - vocab = SubwordTextEncoder.build_to_target_size( - vocab_size, tokenizer.token_counts, vocab_filepath, 1, 1e3) - return vocab + if counter < countermax: + counter += 1 + else: + if file_byte_budget_ <= 0: + break + line = line.strip() + file_byte_budget_ -= len(line) + counter = 0 + yield line + + +def get_or_generate_tabbed_vocab(data_dir, tmp_dir, source_filename, + index, vocab_filename, vocab_size): + r"""Generate a vocabulary from a tabbed source file. + + The source is a file of source, target pairs, where each line contains + a source string and a target string, separated by a tab ('\t') character. + The index parameter specifies 0 for the source or 1 for the target. + + Args: + data_dir: path to the data directory. + tmp_dir: path to the temporary directory. 
+ source_filename: the name of the tab-separated source file. + index: index. + vocab_filename: the name of the vocabulary file. + vocab_size: vocabulary size. + + Returns: + The vocabulary. + """ + def generate(): + filepath = os.path.join(tmp_dir, source_filename) + tf.logging.info("Generating vocab from %s", filepath) + with tf.gfile.GFile(filepath, mode="r") as source_file: + for line in source_file: + line = line.strip() + if line and "\t" in line: + parts = line.split("\t", 1) + part = parts[index].strip() + yield part + + return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, + generate()) + + +def get_or_generate_txt_vocab(data_dir, vocab_filename, vocab_size, + filepatterns): + """Generate a vocabulary from txt files with example-per-line.""" + if isinstance(filepatterns, str): + filepatterns = [filepatterns] + + def generate(): + tf.logging.info("Generating vocab from %s", filepatterns) + for filepattern in filepatterns: + for filename in tf.gfile.Glob(filepattern): + with tf.gfile.GFile(filename, mode="r") as source_file: + for line in source_file: + yield line.strip() + + return get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, + generate()) def read_records(filename): @@ -250,7 +479,7 @@ def read_records(filename): records = [] for record in reader: records.append(record) - if len(records) % 10000 == 0: + if len(records) % 100000 == 0: tf.logging.info("read: %d", len(records)) return records @@ -259,6 +488,767 @@ def write_records(records, out_filename): writer = tf.python_io.TFRecordWriter(out_filename) for count, record in enumerate(records): writer.write(record) - if count % 10000 == 0: + if count > 0 and count % 100000 == 0: tf.logging.info("write: %d", count) writer.close() + + +def generate_dataset_and_shuffle(train_gen, + train_paths, + dev_gen, + dev_paths, + shuffle=True): + generate_files(train_gen, train_paths) + generate_files(dev_gen, dev_paths) + mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER) + if shuffle: + shuffle_dataset(train_paths + dev_paths) + + +def _shuffle_single(fname, extra_fn=None): + """Shuffle a single file of records. + + Args: + fname: a string + extra_fn: an optional function from list of TFRecords to list of TFRecords + to be called after shuffling. + """ + records = read_records(fname) + random.shuffle(records) + if extra_fn is not None: + records = extra_fn(records) + out_fname = fname.replace(UNSHUFFLED_SUFFIX, "") + write_records(records, out_fname) + tf.gfile.Remove(fname) + + +def shuffle_dataset(filenames, extra_fn=None): + """Shuffles the dataset. + + Args: + filenames: a list of strings + extra_fn: an optional function from list of records to list of records + to be called after shuffling a file. + """ + if outputs_exist(filenames): + tf.logging.info("Skipping shuffle because output files exist") + return + tf.logging.info("Shuffling data...") + for filename in filenames: + _shuffle_single(filename, extra_fn=extra_fn) + tf.logging.info("Data shuffled.") + + +class SequencePacker(object): + """Helper for constructing a packed example of sequence examples. 
+ + See comments to pack_examples() + """ + + def __init__(self, first_sequence, spacing=2): + self._spacing = spacing + self._ids = first_sequence[:] + self._segmentation = [1] * len(first_sequence) + self._position = list(range(len(first_sequence))) + + def add(self, ids): + padding = [0] * self._spacing + self._ids.extend(padding + ids) + next_segment_num = self._segmentation[-1] + 1 if self._segmentation else 1 + self._segmentation.extend(padding + [next_segment_num] * len(ids)) + self._position.extend(padding + list(range(len(ids)))) + + def can_fit(self, ids, packed_length): + return len(self._ids) + self._spacing + len(ids) <= packed_length + + def to_dict(self): + return {"inputs": [0], + "targets": self._ids, + "targets_segmentation": self._segmentation, + "targets_position": self._position} + + +class SequencePairPacker(object): + """Helper for packing sequence-to-sequence examples into bigger examples. + + See comments to pack_examples() + """ + + def __init__(self, first_sequence_pair, spacing=2): + self._inputs = SequencePacker(first_sequence_pair[0], spacing) + self._targets = SequencePacker(first_sequence_pair[1], spacing) + + def add(self, pair): + self._inputs.add(pair[0]) + self._targets.add(pair[1]) + + def can_fit(self, pair, packed_length): + return (self._inputs.can_fit(pair[0], packed_length) and + self._targets.can_fit(pair[1], packed_length)) + + def to_dict(self): + ret = self._targets.to_dict() + inputs_dict = self._inputs.to_dict() + ret["inputs"] = inputs_dict["targets"] + ret["inputs_segmentation"] = inputs_dict["targets_segmentation"] + ret["inputs_position"] = inputs_dict["targets_position"] + return ret + + +def pack_examples(examples, + has_inputs, + packed_length=256, + spacing=2, + queue_size=10, + chop_long_sequences=False): + """Pack examples into longer examples. + + If has_inputs=False, we are packing single-sequence examples with + targets only and no inputs. + + In this case, we concatenate the targets from several examples to form + each new example. We insert a number of zeros for spacing between the + original sequences. This is to help the sequences stay separate + under convolutions. If chop_long_sequences is set, then any input sequence + longer than packed_length gets chopped up into multiple examples. Otherwise, + long sequences are emitted as singletons. + + If has_inputs=True, then we are packing sequence-to-sequence + examples. We combine several examples by concatenating the inputs + (as above) and concatenating the targets (as above). Chopping of + long sequences is not supported. + + The packed examples are represented as dictionaries containing: + "inputs", "targets": the packed sequences described above + "inputs_segmentation", "targets_segmentation": + Sequences aligned with "inputs", "targets" specifying to which original + sequence each position belongs. Numbering starts from 1, and 0 is used + for spacing. This information is useful for preventing attention across + segments. + e.g. [1 1 1 1 1 1 0 0 2 2 2 0 0 3 3 3 3 3 0 0 4 4 4] + "inputs_position", "targets_position": + Sequences aligned with "inputs", "targets" specifying position within + the original sequence. This is useful for positional encodings. + e.g. [0 1 2 3 4 5 0 0 0 1 2 0 0 0 1 2 3 4 0 0 0 1 2] + + Args: + examples: a generator returning feature dictionaries. + has_inputs: a boolean + packed_length: an integer + spacing: an integer + queue_size: an integer + chop_long_sequences: a boolean + + Yields: + feature dictionaries. 
+ """ + packer = SequencePairPacker if has_inputs else SequencePacker + combined = [] + for example in examples: + x = ((example["inputs"], example["targets"]) + if has_inputs else example["targets"]) + if chop_long_sequences and len(x) > packed_length: + assert not has_inputs + num_fragments = len(x) // packed_length + for i in range(num_fragments): + yield packer( + x[packed_length * i:packed_length * (i + 1)], spacing).to_dict() + x = x[packed_length * num_fragments:] + added = False + for c in combined: + if c.can_fit(x, packed_length): + c.add(x) + added = True + break + if not added: + if len(combined) == queue_size: + yield combined[0].to_dict() + combined = combined[1:] + combined.append(packer(x, spacing)) + for c in combined: + yield c.to_dict() + + +def pack_dataset(dataset, length, keys=None, use_custom_ops=False): + """Creates a 'packed' version of a dataset on-the-fly. + + This is meant to replace the irritation of having to create a separate + "packed" version of a dataset to train efficiently on TPU. + + Each example in the output dataset represents several examples in the + input dataset. + + For each key in the input dataset, two additional keys are created: + _segmentation: an int32 tensor identifying the parts + representing the original example. + _position: an int32 tensor identifying the position within the original + example. + + Example: + Two input examples get combined to form an output example. + The input examples are: + {"inputs": [8, 7, 1, 0], "targets":[4, 1, 0]} + {"inputs": [2, 3, 4, 1], "targets":[5, 6, 1]} + The output example is: + { + "inputs": [8, 7, 1, 2, 3, 4, 1, 0, 0, 0] + "inputs_segmentation": [1, 1, 1, 2, 2, 2, 2, 0, 0, 0] + "inputs_position": [0, 1, 2, 0, 1, 2, 3, 0, 0, 0] + "targets": [4, 1, 5, 6, 1, 0, 0, 0, 0, 0] + "targets_segmentation": [1, 1, 2, 2, 2, 0, 0, 0, 0, 0] + "targets_position": [0, 1, 0, 1, 2, 0, 0, 0, 0, 0] + } + + 0 represents padding in both the inputs and the outputs. + + Sequences in the incoming examples are truncated to length "length", and the + sequences in the output examples all have fixed (padded) length "length". + + Args: + dataset: a tf.data.Dataset + length: an integer + keys: a list of strings (e.g. ["inputs", "targets"]) + use_custom_ops: use a custom c++ op not included in standard tf (faster) + + Returns: + a tf.data.Dataset + """ + shapes = dataset.output_shapes + if keys is None: + keys = shapes.keys() + + for k in keys: + if k not in shapes: + raise ValueError("Key %s not found in dataset. Available keys are %s" + % (k, shapes.keys())) + if not shapes[k].is_compatible_with(tf.TensorShape([None])): + raise ValueError("Tensors to be packed must be one-dimensional.") + + if use_custom_ops: + return _pack_with_custom_ops(dataset, keys, length) + else: + packer = SequenceDatasetPacker(length, spacing=0, queue_size=10) + return packer(dataset, cycle_length=10, keys=keys) + + +def _pack_with_custom_ops(dataset, keys, length): + """Helper-function for packing a dataset which has already been batched. + + See pack_dataset() + + Relies on custom ops which require a custom compiled binary. + Faster than _pack_with_tf_ops(), and denser packing. + + Args: + dataset: a dataset containing padded batches of examples. + keys: a list of strings (must have length 2) + length: an integer + + Returns: + a dataset. 
+  """
+  from tensor2tensor.data_generators.ops import pack_sequences_ops  # pylint: disable=g-import-not-at-top
+
+  # trim to length
+  dataset = dataset.map(lambda x: {k: x[k][:length] for k in keys})
+  # Setting batch_size=length ensures that the concatenated sequences (if they
+  # have length >=1) are sufficient to fill at least one packed example.
+  batch_size = length
+  dataset = dataset.padded_batch(
+      batch_size, padded_shapes={k: [-1] for k in keys})
+
+  # better packing (may be faster) but requires custom-built binary.
+  k1, k2 = keys
+  def map_fn_custom(x):
+    """Map-function."""
+    (k1_packed, k1_segmengation, k1_position,
+     k2_packed, k2_segmentation, k2_position) = (
+         pack_sequences_ops.pack_sequences2(x[k1], x[k2], length, length))
+    packed = {
+        k1: k1_packed,
+        k1 + "_segmentation": k1_segmengation,
+        k1 + "_position": k1_position,
+        k2: k2_packed,
+        k2 + "_segmentation": k2_segmentation,
+        k2 + "_position": k2_position,
+    }
+    return tf.data.Dataset.from_tensor_slices(packed)
+  dataset = dataset.flat_map(map_fn_custom)
+  return dataset
+
+
+INDEX_DTYPE = tf.int32
+
+
+class SequenceDatasetPacker(object):
+  """Helper class for packing a dataset of sequences in an online fashion.
+
+  The input sequence is expected to be a tuple of 1D Tensors which will be
+  converted to a dataset which produces a dict of packed examples, example
+  positions, and segment ids.
+
+  If `window_size` or `cycle_length` is specified, multiple packing operations
+  will be performed in parallel to increase throughput. A value of None will
+  select default parallelism parameters. If this dataset will be run on a TPU,
+  specifying a cycle_length > 10 is recommended.
+  """
+
+  def __init__(self, packed_length=256, spacing=0, queue_size=10,
+               chop_long_sequences=False):
+    self._packed_length = packed_length
+    self._spacing = spacing
+    self._queue_size = queue_size
+    self._chop_long_sequences = chop_long_sequences
+    self._num_sequences = None
+    self._token_dtype = None
+
+  def __call__(self, dataset, **kwargs):
+    if {"window_size", "cycle_length"}.intersection(kwargs):
+      return self._concurrent_pack(dataset, **kwargs)
+    return self._pack(dataset, **kwargs)
+
+  def _concurrent_pack(self, dataset, window_size=None, cycle_length=None,
+                       keys=None):
+    """Selects sensible default parallelism parameters for a task."""
+
+    if window_size is None:
+      # This is a heuristic to fill all of the queues 10 times, and should do a
+      # reasonable job balancing parallelism (which benefits from lower window
+      # size) with packing efficiency (which suffers from edge effects when the
+      # window size is too low.)
+      window_size = int(self._packed_length / 8 * self._queue_size * 10)
+
+    if cycle_length is None:
+      # Typically binning one stream will saturate about 3 cores.
+
+      # Note on TPUs:
+      #   cycle_length should still be explicitly set when training on TPUs,
+      #   since the cpu count will be the local CPU count (which could be quite
+      #   small), whereas the transforms will actually run on the TPU host
+      #   controller which has a very robust CPU.
+ cycle_length = max([int(multiprocessing.cpu_count() / 3), 1]) + return self._pack(dataset, window_size=window_size, + cycle_length=cycle_length, keys=keys) + + def _pack(self, dataset, window_size=None, cycle_length=None, + deterministic=False, keys=None): + """Main method for chaining together packing transformation steps.""" + (dataset, self._num_sequences, self._token_dtype, keys + ) = self._standardize(dataset, keys) + if window_size is None: + dataset = self._scanning_pack(dataset) + else: + # Dataset.window splits nested Tensors. + re_zip = lambda *x: tf.data.Dataset.zip(x) + dataset = dataset.window(window_size).map(re_zip).interleave( + self._scanning_pack, cycle_length=cycle_length, + block_length=window_size, + num_parallel_calls=tf.data.experimental.AUTOTUNE) + + if not deterministic: + # Sloppy interleave offers a marginal performance improvement. + options = tf.data.Options() + options.experimental_deterministic = False + dataset = dataset.with_options(options) + + dataset = dataset.map( + self._finalize, num_parallel_calls=tf.data.experimental.AUTOTUNE) + self._num_sequences, self._token_dtype = None, None + + if keys: + def dict_pack(example): + output = {} + for i, key in enumerate(keys): + output[key] = example["contents"][:, i] + output[key + "_segmentation"] = example["segment"][:, i] + output[key + "_position"] = example["position"][:, i] + return output + dataset = dataset.map(dict_pack) + return dataset + + def _standardize(self, dataset, keys): + """Force dataset structure into a tuple of Tensors.""" + shapes = tf.data.get_output_shapes(dataset) + + if isinstance(shapes, dict): + keys = keys or tuple(shapes.keys()) + dataset = dataset.map(lambda x: tuple(x[k] for k in keys)) + shapes = tf.data.get_output_shapes(dataset) + + if not all(isinstance(i, tf.TensorShape) for i in shapes): + # Internally this class expects tuples of Tensors, even for the degenerate + # case of a single sequence. + dataset = dataset.map(lambda x: (x,)) + shapes = tf.data.get_output_shapes(dataset) + + for s in shapes: + if not s.is_compatible_with(tf.TensorShape([None])): + raise ValueError("Tensors to be packed must be one-dimensional.") + + if not shapes: + raise ValueError("Expected sequence dataset.") + + if self._chop_long_sequences and len(shapes) != 1: + raise ValueError("chop_long_sequences expects a single sequence dataset.") + + token_types = tf.data.get_output_types(dataset) + if len(set(token_types)) > 1: + raise ValueError("Inconsistent dtypes: {}".format(token_types)) + + return dataset, len(shapes), token_types[0], keys + + def _eviction_fn(self, _): + return tuple(-tf.ones((self._packed_length,), dtype=self._token_dtype) + for _ in range(self._num_sequences)) + + def _scan_initial_state(self): + """Create TensorArrays and indices to track bin assignment. + + availability: TensorArray[queue_size, num_sequences] + This represents the number of tokens available in the ith bin. + See implementation note below. + + contents: TensorArray[queue_size, num_sequences * 2] + This holds the actual contents of the packed strings as well as a bit + mask indicating where sequences begin. It is stored in a flat vector and + is accessed in offsets of packed_length. + + top_index: scalar [0, queue_size) + Integer tensor indicating which index is the "top" bin. See implementation + note below. + + IMPLEMENTATION_NOTE: + The FFD algorithm periodically pops the topmost queue and pushes a new + one to replace it. 
In order to replicate those semantics with a fixed size + TensorArray, indexing operations are shifted by top_index. For example, + instead of: + `queue_available.read(i)` + + a read is instead performed as: + `queue_available.read((i - top_index) % queue_size)` + + to account for the fact that the "ith" logical FFD queue is stored at + position j. This means that the pop / push update can be performed by + simply incrementing top_index. (And zeroing the old top_index position.) + + Returns: + The state for the binning scan. + """ + + all_available = tf.ones((self._queue_size, self._num_sequences), + dtype=INDEX_DTYPE) * self._packed_length + total_size = self._packed_length * self._queue_size + total_size_range = tf.range(total_size, dtype=INDEX_DTYPE) + empty = tf.zeros((total_size, self._num_sequences * 2), + dtype=self._token_dtype) + + availability = tf.TensorArray( + dtype=INDEX_DTYPE, size=self._queue_size, dynamic_size=False, + clear_after_read=False, element_shape=(self._num_sequences,) + ).scatter(tf.range(self._queue_size, dtype=INDEX_DTYPE), all_available) + + contents = tf.TensorArray( + dtype=self._token_dtype, size=total_size, dynamic_size=False, + clear_after_read=False, element_shape=(self._num_sequences * 2,) + ).scatter(total_size_range, empty) + + # Which index should be considered the "top" bucket for the purpose of + # the first-fit descending algorithm. + top_index = tf.zeros((), dtype=INDEX_DTYPE) + + return availability, contents, top_index + + def _scanning_pack(self, dataset): + """Apply scan based pack to a dataset.""" + if self._chop_long_sequences: + dataset = dataset.map(lambda x: (x[:self._packed_length],)) + else: + dataset = dataset.filter(lambda *x: tf.reduce_max( # pylint: disable=g-long-lambda + tf.stack([tf.shape(i)[0] for i in x]), axis=0) <= self._packed_length) + + # In order to retrieve the sequences which are still in the queue when the + # dataset is exhausted, we feed dummy sequences which are guaranteed to + # displace the remaining elements. + dataset = dataset.concatenate( + tf.data.Dataset.range(self._queue_size).map(self._eviction_fn)) + + initial_state = self._scan_initial_state() + step_fn = functools.partial( + tf.autograph.to_graph(_scan_step_fn), packed_length=self._packed_length, + queue_size=self._queue_size, spacing=self._spacing, + num_sequences=self._num_sequences, token_dtype=self._token_dtype) + + dataset = dataset.apply(tf.data.experimental.scan(initial_state, step_fn)) + + is_real_sample = lambda valid_sample, _: valid_sample + return dataset.filter(is_real_sample) + + def _compute_auxiliary_structure(self, contents_and_mask): + """Compute segment and position metadata.""" + contents = contents_and_mask[:, :self._num_sequences] + start_mask = tf.cast(contents_and_mask[:, self._num_sequences:], + dtype=INDEX_DTYPE) + + segment = tf.cumsum(start_mask, axis=0) + uniform_count = tf.ones_like(segment[:, 0]) + position = [] + for i in range(self._num_sequences): + segment_slice = segment[:, i] + counts = tf.math.segment_sum(uniform_count, segment[:, i]) + position.append(tf.range(self._packed_length) - tf.cumsum( + tf.gather(counts, segment_slice - 1) * start_mask[:, i])) + position = tf.concat([i[:, tf.newaxis] for i in position], axis=1) + + # Correct for padding tokens. 
+ pad_mask = tf.cast(tf.not_equal(contents, 0), dtype=INDEX_DTYPE) + segment *= pad_mask + position *= pad_mask + + return segment, position + + def _finalize(self, _, contents): + """Structure output and compute segment and position metadata.""" + + # The output shape information is lost during the filter; however we can + # guarantee the shape. (That's the point of this exercise, after all!) + contents.set_shape((self._packed_length, self._num_sequences * 2)) + + # Both the dummy branch of the scan step function and the eviction dataset + # use vectors of minus one. The cost of this check is negligible and the + # leakage of such dummy sequences would be difficult to debug downstream. + check_leaks = tf.assert_none_equal(contents, -tf.ones_like(contents)) + with tf.control_dependencies([check_leaks]): + contents = tf.identity(contents) + + segment, position = self._compute_auxiliary_structure(contents) + return {"contents": contents[:, :self._num_sequences], + "segment": segment, "position": position} + + +def _scan_step_fn(state, example, packed_length, queue_size, spacing, + num_sequences, token_dtype): # pylint: disable=g-doc-args + """Transform function used by tf.data.experimental.scan to process an example. + + This is written as a stateless function rather than a class method because we + trace it with AutoGraph (in order to simplify the conditional), and this way + we don't have to worry about handling re-tracing semantics. + + Args: + See the SequenceDatasetPacker class. + + Returns: + The updated queue state, and either a packed example or a dummy sequence + which will be filtered out downstream. + """ + + # Convert TensorArray tuples to lists since we'll need to replace them. + availability, contents, top_index = state + + lengths = tf.concat([tf.shape(i) for i in example], axis=0) + start_availability = availability.stack() + can_fit = tf.reduce_all(tf.greater_equal(start_availability, lengths), axis=1) + any_can_fit = tf.reduce_any(can_fit, axis=0) + + # AutoGraph will convert this block to a tf.cond + if any_can_fit: + # This indicates where in the FFD queue rotation a given index sits + shifted_range = ( + tf.range(queue_size, dtype=INDEX_DTYPE) - top_index) % queue_size + + # Mark any indices which cannot accommodate the current example. + exclusion_mask = tf.cast(tf.logical_not(can_fit), INDEX_DTYPE) * queue_size + + # Index in [0, queue_size) in which to place the sample. Note, this index + # is the position in the actual TensorArray, not the index of the FFD queue. + queue_index = (tf.reduce_min(shifted_range + exclusion_mask) + + top_index) % queue_size + + # NOTE(taylorrobie): We emit a non-empty Tensor for downstream checks. + output_contents = -tf.ones((1, num_sequences), dtype=token_dtype) + + else: + index_range = top_index * packed_length + tf.range(packed_length) + output_contents = contents.gather(index_range) + + # Reset the queue state. 
+ availability = availability.write( + top_index, packed_length * tf.ones((num_sequences,), dtype=INDEX_DTYPE)) + empty_contents = tf.zeros((packed_length, num_sequences * 2), + dtype=token_dtype) + contents = contents.scatter(index_range, empty_contents) + + queue_index = top_index + top_index = (top_index + 1) % queue_size + + pre_assign_availability = availability.read(queue_index) + space_left = pre_assign_availability - lengths - spacing + availability = availability.write(queue_index, space_left) + + # ============================================================================ + # == Update contents ========================================================= + # ============================================================================ + # Consider the following case for a seq-to-seq packing: + # (padding is represented as underscores) + # + # Queue starting state: + # [1, 3, 2, 4, 6, 1, _, _, _, _, _, ...] + # [5, 9, _, _, _, _, _, _, _, _, _, ...] + # + # Examples: + # [4, 2, 4], [3] + # + # Desired new queue state: + # [1, 3, 2, 4, 6, 1, _, _, 4, 2, 4, _, _, ...] + # [5, 9, _, _, 3, _, _, _, _, _, _, _, _, ...] + # + # This could be acomplished by creating a TensorArray for each of the two + # sequences, and scattering into the respective arrays. However TensorArray + # writes are extremely expensive relative to other operations. So instead we + # store the contents in a single TensorArray of shape (packed_length, 2), and + # we pad and concatenate the examples such that they can be added in a single + # assign: + # + # [_, _, _, _, 4, 2, 4] + # [3, _, _, _, _, _, _] + # + + # [1, 3, 2, 4, 6, 1, _, _, _, _, _, ...] + # [5, 9, _, _, _, _, _, _, _, _, _, ...] + # + # And in practice, the extra work of padding is neglidgable compared to + # the gain from vectorizing the TensorArray assign. We also store a bit mask + # denoting where sequences start which is used to compute segment and + # position metadata: + # + # [_, _, _, _, 1, _, _] + # [1, _, _, _, _, _, _] + # + + # [1, _, _, _, _, _, _, _, _, _, _, ...] + # [1, _, _, _, _, _, _, _, _, _, _, ...] + # + # Both the contents and the mask are concatenated in the same TensorArray + # for performance. + + start_index = packed_length - pre_assign_availability + end_index = start_index + lengths + leftmost = tf.reduce_min(start_index, axis=0) + rightmost = tf.reduce_max(end_index, axis=0) + delta = rightmost - leftmost + pad_indices = [tf.stack((start_index[i] - leftmost, rightmost - end_index[i])) + for i in range(num_sequences)] + + padded_examples = [tf.pad(ex, padding[tf.newaxis, :]) + for ex, padding in zip(example, pad_indices)] + padded_examples = tf.transpose(tf.stack(padded_examples)) + mask_update = tf.one_hot(start_index - leftmost, delta, + dtype=contents.dtype, axis=0) + + content_update = tf.concat([padded_examples, mask_update], axis=1) + + index_range = (queue_index * packed_length + # Offset into the right section. 
+ tf.range(delta, dtype=INDEX_DTYPE) + leftmost) + contents = contents.scatter(index_range, contents.gather(index_range) + + content_update) + + state = (availability, contents, top_index) + return state, (tf.logical_not(any_can_fit), output_contents) + + +def make_tmp_dir(suffix="", prefix="tmp", dir=None): # pylint: disable=redefined-builtin + """Make a temporary directory.""" + if dir is None: + return tempfile.mkdtemp(suffix, prefix, dir) + else: + while True: + rand_term = random.randint(1, 9999) + tmp_dir = os.path.join(dir, "%s%d%s" % (prefix, rand_term, suffix)) + if tf.gfile.Exists(tmp_dir): + continue + tf.gfile.MakeDirs(tmp_dir) + break + return tmp_dir + + +def tfrecord_iterator_for_problem(problem, data_dir, + dataset_split=tf_estimator.ModeKeys.TRAIN): + """Iterate over the records on disk for the Problem.""" + filenames = tf.gfile.Glob(problem.filepattern(data_dir, mode=dataset_split)) + example_spec = problem.example_reading_spec()[0] + return tfrecord_iterator(filenames, example_spec=example_spec) + + +def tfrecord_iterator(filenames, gzipped=False, example_spec=None): + """Yields records from TFRecord files. + + Args: + filenames: list, list of TFRecord filenames to read from. + gzipped: bool, whether the TFRecord files are gzip-encoded. + example_spec: dict, + if provided, will parse each record as a tensorflow.Example proto. + + Yields: + Records (or parsed Examples, if example_spec is provided) from files. + """ + with tf.Graph().as_default(): + dataset = tf.data.Dataset.from_tensor_slices(filenames) + + def _load_records(filename): + return tf.data.TFRecordDataset( + filename, + compression_type=tf.constant("GZIP") if gzipped else None, + buffer_size=16 * 1000 * 1000) + + dataset = dataset.flat_map(_load_records) + + def _parse_example(ex_ser): + return tf.parse_single_example(ex_ser, example_spec) + + if example_spec: + dataset = dataset.map(_parse_example, num_parallel_calls=32) + dataset = dataset.prefetch(100) + record_it = dataset.make_one_shot_iterator().get_next() + + with tf.Session() as sess: + while True: + try: + ex = sess.run(record_it) + yield ex + except tf.errors.OutOfRangeError: + break + + +def random_deinterleave(text, separator_symbol="X"): + """Create a fill-in-the-blanks training example from text. + + Split on spaces, then cut into segments at random points. Alternate segments + are assigned to the two output strings. separator_symbol separates segments + within each of the outputs. + + example: + text="The quick brown fox jumps over the lazy dog." + returns: ("X quick brown X the lazy X", "The X fox jumps over X dog.") + + The two outputs can also be reversed to yield an instance of the same problem. 
+ + Args: + text: a string + separator_symbol: a string + Returns: + a pair of strings + """ + words = text.strip().split(" ") + n = len(words) + if n <= 1: + return text, "" + cut = [False] * n + cut[0] = True + num_cuts = int(math.exp(random.uniform(0, math.log(n)))) + for _ in range(num_cuts): + cut[random.randint(1, n -1)] = True + out = [[], []] + part = random.randint(0, 1) + for i in range(n): + if cut[i]: + out[part].append(separator_symbol) + part = 1 - part + out[part].append(words[i]) + return " ".join(out[0]), " ".join(out[1]) diff --git a/tensor2tensor/data_generators/generator_utils_test.py b/tensor2tensor/data_generators/generator_utils_test.py index 726763f7a..f8e1926e1 100644 --- a/tensor2tensor/data_generators/generator_utils_test.py +++ b/tensor2tensor/data_generators/generator_utils_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -22,12 +23,72 @@ import io import os import tempfile - -# Dependency imports +from builtins import bytes # pylint: disable=redefined-builtin from tensor2tensor.data_generators import generator_utils -import tensorflow as tf +import tensorflow.compat.v1 as tf + + +INPUTS = ( + (1, 2, 3), + (4, 5,), + (6,), +) +TARGETS = ( + (10,), + (20, 30, 40), + (50, 60,), +) +INPUTS_PACKED = ( + (1, 2, 3, 4, 5), + (6, 0, 0, 0, 0), +) +INPUTS_SEGMENTATION = ( + (1, 1, 1, 2, 2), + (1, 0, 0, 0, 0), +) +INPUTS_POSITION = ( + (0, 1, 2, 0, 1), + (0, 0, 0, 0, 0), +) +TARGETS_PACKED = ( + (10, 20, 30, 40, 0), + (50, 60, 0, 0, 0), +) +TARGETS_SEGMENTATION = ( + (1, 2, 2, 2, 0), + (1, 1, 0, 0, 0), +) +TARGETS_POSITION = ( + (0, 0, 1, 2, 0), + (0, 1, 0, 0, 0), +) + + +def example_generator(): + for i, t in zip(INPUTS, TARGETS): + yield {"inputs": list(i), "targets": list(t)} + + +def trim_right(x): + x = {k: list(v) for k, v in x.items()} + while all(x.values()) and not any(i[-1] for i in x.values()): + _ = [i.pop() for i in x.values()] + return x + + +def reference_packing(trim_fn=None): + no_trim = lambda x: {k: list(v) for k, v in x.items()} + trim_fn = trim_fn or no_trim + outputs = [INPUTS_PACKED, INPUTS_POSITION, INPUTS_SEGMENTATION, + TARGETS_PACKED, TARGETS_POSITION, TARGETS_SEGMENTATION] + for i, i_pos, i_seg, t, t_pos, t_seg in zip(*outputs): + output = trim_fn({"inputs": i, "inputs_position": i_pos, + "inputs_segmentation": i_seg}) + output.update(trim_fn({"targets": t, "targets_position": t_pos, + "targets_segmentation": t_seg})) + yield output class GeneratorUtilsTest(tf.test.TestCase): @@ -41,11 +102,12 @@ def testGenerateFiles(self): def test_generator(): yield {"inputs": [1], "target": [1]} - generator_utils.generate_files(test_generator(), tmp_file_name, tmp_dir) - self.assertTrue(tf.gfile.Exists(tmp_file_path + "-00000-of-00001")) + filenames = generator_utils.train_data_filenames(tmp_file_name, tmp_dir, 1) + generator_utils.generate_files(test_generator(), filenames) + self.assertTrue(tf.gfile.Exists(tmp_file_path + "-train-00000-of-00001")) # Clean up. 
- os.remove(tmp_file_path + "-00000-of-00001") + os.remove(tmp_file_path + "-train-00000-of-00001") os.remove(tmp_file_path) def testMaybeDownload(self): @@ -62,19 +124,33 @@ def testMaybeDownload(self): os.remove(tmp_file_path + ".http") os.remove(tmp_file_path) + def testMaybeDownloadFromDrive(self): + tmp_dir = self.get_temp_dir() + (_, tmp_file_path) = tempfile.mkstemp(dir=tmp_dir) + tmp_file_name = os.path.basename(tmp_file_path) + + # Download Google index to the temporary file.http. + res_path = generator_utils.maybe_download_from_drive( + tmp_dir, tmp_file_name + ".http", "http://drive.google.com/") + self.assertEqual(res_path, tmp_file_path + ".http") + + # Clean up. + os.remove(tmp_file_path + ".http") + os.remove(tmp_file_path) + def testGunzipFile(self): tmp_dir = self.get_temp_dir() (_, tmp_file_path) = tempfile.mkstemp(dir=tmp_dir) # Create a test zip file and unzip it. with gzip.open(tmp_file_path + ".gz", "wb") as gz_file: - gz_file.write("test line") + gz_file.write(bytes("test line", "utf-8")) generator_utils.gunzip_file(tmp_file_path + ".gz", tmp_file_path + ".txt") # Check that the unzipped result is as expected. lines = [] for line in io.open(tmp_file_path + ".txt", "rb"): - lines.append(line.strip()) + lines.append(line.decode("utf-8").strip()) self.assertEqual(len(lines), 1) self.assertEqual(lines[0], "test line") @@ -83,6 +159,55 @@ def testGunzipFile(self): os.remove(tmp_file_path + ".txt") os.remove(tmp_file_path) + def testGetOrGenerateTxtVocab(self): + data_dir = tempfile.mkdtemp(dir=self.get_temp_dir()) + test_file = os.path.join(self.get_temp_dir(), "test.txt") + with tf.gfile.Open(test_file, "w") as outfile: + outfile.write("a b c\n") + outfile.write("d e f\n") + # Create a vocab over the test file. + vocab1 = generator_utils.get_or_generate_txt_vocab( + data_dir, "test.voc", 20, test_file) + self.assertTrue(tf.gfile.Exists(os.path.join(data_dir, "test.voc"))) + self.assertIsNotNone(vocab1) + + # Append a new line to the test file which would change the vocab if + # the vocab were not being read from file. 
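+ # If the vocab file is re-read from disk, vocab2 below should equal vocab1.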
+ with tf.gfile.Open(test_file, "a") as outfile: + outfile.write("g h i\n") + vocab2 = generator_utils.get_or_generate_txt_vocab( + data_dir, "test.voc", 20, test_file) + self.assertTrue(tf.gfile.Exists(os.path.join(data_dir, "test.voc"))) + self.assertIsNotNone(vocab2) + self.assertEqual(vocab1.dump(), vocab2.dump()) + + def testPacking(self): + packed = generator_utils.pack_examples( + example_generator(), has_inputs=True, packed_length=5, queue_size=2, + spacing=0) + for example, reference in zip(packed, reference_packing(trim_right)): + self.assertAllEqual(set(example.keys()), set(reference.keys())) + for k in reference: + self.assertAllEqual(example[k], reference[k]) + + def testDatasetPacking(self): + dataset = tf.data.Dataset.from_generator( + example_generator, + output_types={"inputs": tf.int64, "targets": tf.int64}, + output_shapes={"inputs": tf.TensorShape((None,)), + "targets": tf.TensorShape((None,))} + ) + dataset = generator_utils.pack_dataset( + dataset, length=5, keys=("inputs", "targets"), use_custom_ops=False) + + with tf.Session().as_default() as sess: + batch = dataset.make_one_shot_iterator().get_next() + for reference in reference_packing(): + example = sess.run(batch) + self.assertAllEqual(set(example.keys()), set(reference.keys())) + for k in reference: + self.assertAllEqual(example[k], reference[k]) + if __name__ == "__main__": tf.test.main() diff --git a/tensor2tensor/data_generators/google_robot_pushing.py b/tensor2tensor/data_generators/google_robot_pushing.py new file mode 100644 index 000000000..a35670ef8 --- /dev/null +++ b/tensor2tensor/data_generators/google_robot_pushing.py @@ -0,0 +1,139 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Google robot pushing dataset. 
+
+Unsupervised Learning for Physical Interaction through Video Prediction
+Chelsea Finn, Ian Goodfellow, Sergey Levine
+https://arxiv.org/abs/1605.07157
+
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import io
+import os
+import numpy as np
+
+from tensor2tensor.data_generators import generator_utils
+from tensor2tensor.data_generators import problem
+from tensor2tensor.data_generators import video_utils
+from tensor2tensor.layers import modalities
+from tensor2tensor.utils import registry
+
+import tensorflow.compat.v1 as tf
+
+BASE_URL = "https://storage.googleapis.com/brain-robotics-data/push/"
+# Note: these paths must be relative so that os.path.join keeps BASE_URL.
+DATA_TRAIN = (264, "push_train/push_train.tfrecord-{:05d}-of-00264")
+DATA_TEST_SEEN = (5, "push_testseen/push_testseen.tfrecord-{:05d}-of-00005")
+DATA_TEST_NOVEL = (5, "push_testnovel/push_testnovel.tfrecord-{:05d}-of-00005")
+
+
+# Lazy load PIL.Image
+def PIL_Image(): # pylint: disable=invalid-name
+ from PIL import Image # pylint: disable=g-import-not-at-top
+ return Image
+
+
+@registry.register_problem
+class VideoGoogleRobotPushing(video_utils.VideoProblem):
+ """Google robot pushing dataset."""
+
+ @property
+ def num_channels(self):
+ return 3
+
+ @property
+ def frame_height(self):
+ return 64
+
+ @property
+ def frame_width(self):
+ return 64
+
+ @property
+ def total_number_of_frames(self):
+ # TODO(mbz): correct this number to be the real total number of frames.
+ return 50 * 10 * 1000
+
+ @property
+ def max_number_of_frames_per_video(self):
+ return 60
+
+ @property
+ def is_generate_per_split(self):
+ return True
+
+ def parse_frames(self, filename):
+ image_key = "move/{}/image/encoded"
+ action_key = "move/{}/commanded_pose/vec_pitch_yaw"
+ state_key = "move/{}/endeffector/vec_pitch_yaw"
+
+ for serialized_example in tf.python_io.tf_record_iterator(filename):
+ x = tf.train.Example()
+ x.ParseFromString(serialized_example)
+ # There are 6 features per frame.
+ nf = len(x.features.feature.keys()) // 6
+ # It seems features after 60 don't have any image.
+ nf = min(nf, self.max_number_of_frames_per_video)
+
+ for i in range(nf):
+ image_name = image_key.format(i)
+ action_name = action_key.format(i)
+ state_name = state_key.format(i)
+
+ byte_str = x.features.feature[image_name].bytes_list.value[0]
+ img = PIL_Image().open(io.BytesIO(byte_str))
+ # The original images are much bigger than 64x64.
+ img = img.resize((self.frame_width, self.frame_height),
+ resample=PIL_Image().BILINEAR)
+ arr = np.array(img.getdata())
+ frame = arr.reshape(
+ self.frame_width, self.frame_height, self.num_channels)
+
+ state = x.features.feature[state_name].float_list.value
+ action = x.features.feature[action_name].float_list.value
+
+ yield i, frame, state, action
+
+ def get_urls(self, count, url_part):
+ template = os.path.join(BASE_URL, url_part)
+ return [template.format(i) for i in range(count)]
+
+ def generate_samples(self, data_dir, tmp_dir, dataset_split):
+ if dataset_split == problem.DatasetSplit.TRAIN:
+ urls = self.get_urls(DATA_TRAIN[0], DATA_TRAIN[1])
+ else:
+ urls = self.get_urls(DATA_TEST_SEEN[0], DATA_TEST_SEEN[1])
+ urls += self.get_urls(DATA_TEST_NOVEL[0], DATA_TEST_NOVEL[1])
+
+ for url in urls:
+ path = generator_utils.maybe_download(tmp_dir, os.path.basename(url), url)
+ for frame_number, frame, state, action in self.parse_frames(path):
+ yield {
+ "frame_number": [frame_number],
+ "frame": frame,
+ "state": state,
+ "action": action,
+ }
+
+ def hparams(self, defaults,
unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.VIDEO, + "targets": modalities.ModalityType.VIDEO} + p.vocab_size = {"inputs": 256, + "targets": 256} diff --git a/tensor2tensor/data_generators/gym_env.py b/tensor2tensor/data_generators/gym_env.py new file mode 100644 index 000000000..b176ab857 --- /dev/null +++ b/tensor2tensor/data_generators/gym_env.py @@ -0,0 +1,909 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""RL environments.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import itertools +import random + +from gym.spaces import Box +import numpy as np + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import video_utils +from tensor2tensor.layers import modalities +from tensor2tensor.rl import gym_utils +from tensor2tensor.utils import contrib +from tensor2tensor.utils import metrics +from tensor2tensor.utils import misc_utils +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +Frame = collections.namedtuple( + # Order of elements reflects time progression within a frame. + "Frame", ("observation", "reward", "unclipped_reward", "done", "action") +) + + +# pylint: disable=g-complex-comprehension +class Observation(object): + """Encoded observations. + + Args: + data: Encoded observation. + decode_fn: Function for decoding observation. + """ + + def __init__(self, data, decode_fn): + self.data = data + self._decode = decode_fn + + def __eq__(self, other): + """Equality comparison based on encoded data.""" + if isinstance(other, Observation): + return self.data == other.data + else: + return False + + def __ne__(self, other): + """For consistency with __eq__.""" + return not self == other + + def decode(self): + """Decode the observation.""" + return self._decode(self.data) + + +class _Noncopyable(object): + + def __init__(self, obj): + self.obj = obj + + def __deepcopy__(self, memo): + return self + + +class EnvSimulationProblem(video_utils.VideoProblem): + """Base Problem class for use with world models. + + Attributes: + action_space: Gym action space. Should be overridden in derived classes. + reward_range: Tuple (min, max) representing the range of rewards. Limits + should be integer (discrete rewards). 
+ """ + + action_space = None + reward_range = (-1, 1) + + @property + def num_actions(self): + return self.action_space.n + + @property + def num_rewards(self): + (min_reward, max_reward) = self.reward_range + return max_reward - min_reward + 1 + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = { + "inputs": modalities.ModalityType.VIDEO, + "input_reward": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, + "input_action": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, + "targets": modalities.ModalityType.VIDEO, + "target_reward": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, + "target_action": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, + } + p.vocab_size = { + "inputs": 256, + "input_reward": self.num_rewards, + "input_action": self.num_actions, + "targets": 256, + "target_reward": self.num_rewards, + "target_action": self.num_actions, + } + p.input_space_id = problem.SpaceID.IMAGE + p.target_space_id = problem.SpaceID.IMAGE + + +class T2TEnv(EnvSimulationProblem): + """Abstract class representing a batch of environments. + + Attributes: + history: List of finished rollouts, where rollout is a list of Frames. + batch_size: Number of environments played simultaneously. + observation_space: Gym observation space. Should be overridden in derived + classes. + name: Problem name for generating filenames. Should be overridden in + derived classes. + + Args: + batch_size: Number of environments in a batch. + store_rollouts: Whether to store collected rollouts in memory and later on + disk. Defaults to True. + """ + + observation_space = None + name = None + + def __init__(self, batch_size, *args, **kwargs): + self._store_rollouts = kwargs.pop("store_rollouts", True) + + super(T2TEnv, self).__init__(*args, **kwargs) + + self.batch_size = batch_size + self._rollouts_by_epoch_and_split = collections.OrderedDict() + self.current_epoch = None + self._should_preprocess_on_reset = True + with tf.Graph().as_default() as tf_graph: + self._tf_graph = _Noncopyable(tf_graph) + self._decoded_image_p = _Noncopyable( + tf.placeholder(dtype=tf.uint8, shape=(None, None, None)) + ) + self._encoded_image_t = _Noncopyable( + tf.image.encode_png(self._decoded_image_p.obj) + ) + self._encoded_image_p = _Noncopyable(tf.placeholder(tf.string)) + self._decoded_image_t = _Noncopyable( + tf.image.decode_png(self._encoded_image_p.obj) + ) + self._session = _Noncopyable(tf.Session()) + + def __str__(self): + """Returns a string representation of the environment for debug purposes.""" + raise NotImplementedError + + def start_new_epoch(self, epoch, load_data_dir=None): + if not isinstance(epoch, int): + raise ValueError("Epoch should be integer, got {}".format(epoch)) + if epoch in self._rollouts_by_epoch_and_split: + raise ValueError("Epoch {} already registered".format(epoch)) + self.current_epoch = epoch + self._current_epoch_rollouts = [] + self._rollouts_by_epoch_and_split[epoch] = collections.defaultdict(list) + self._current_batch_frames = [None for _ in range(self.batch_size)] + self._current_batch_rollouts = [[] for _ in range(self.batch_size)] + if load_data_dir is not None: + self._load_epoch_data(load_data_dir) + + def current_epoch_rollouts(self, split=None, minimal_rollout_frames=0): + # TODO(kc): order of rollouts (by splits) is a bit uncontrolled + # (rollouts_by_split.values() reads dict values), is it a problem? 
+ rollouts_by_split = self._rollouts_by_epoch_and_split[self.current_epoch]
+ if not rollouts_by_split:
+ if split is not None:
+ raise ValueError(
+ "Data is not split into train/dev/test. If the data was created by "
+ "environment interaction (NOT loaded from disk), you should call "
+ "generate_data() first. Note that generate_data() will write to "
+ "disk and can corrupt your experiment data."
+ )
+ else:
+ rollouts = self._current_epoch_rollouts
+ else:
+ if split is not None:
+ rollouts = rollouts_by_split[split]
+ else:
+ rollouts = [
+ rollout
+ for rollouts in rollouts_by_split.values()
+ for rollout in rollouts
+ ]
+ return [rollout for rollout in rollouts
+ if len(rollout) >= minimal_rollout_frames]
+
+ def _preprocess_observations(self, obs):
+ """Transforms a batch of observations.
+
+ Can be overridden in derived classes.
+
+ Args:
+ obs: A batch of observations.
+
+ Returns:
+ Transformed batch of observations.
+ """
+ return obs
+
+ def _decode_png(self, encoded_observation):
+ """Decodes a single observation from PNG."""
+ return self._session.obj.run(
+ self._decoded_image_t.obj,
+ feed_dict={self._encoded_image_p.obj: encoded_observation}
+ )
+
+ def _encode_observations(self, observations):
+ """Encodes observations as PNG."""
+ return [
+ Observation(
+ self._session.obj.run(
+ self._encoded_image_t.obj,
+ feed_dict={self._decoded_image_p.obj: observation}
+ ),
+ self._decode_png
+ )
+ for observation in observations
+ ]
+
+ def _step(self, actions):
+ """Makes a step in all environments without recording history.
+
+ Should be overridden in derived classes.
+
+ Should not do any preprocessing of the observations and rewards; this
+ should be done in _preprocess_*.
+
+ Args:
+ actions: Batch of actions.
+
+ Returns:
+ (obs, rewards, dones) - batches of observations, rewards and done flags
+ respectively.
+ """
+ raise NotImplementedError
+
+ def step(self, actions):
+ """Makes a step in all environments.
+
+ Does any preprocessing and records frames.
+
+ Args:
+ actions: Batch of actions.
+
+ Returns:
+ (obs, rewards, dones) - batches of observations, rewards and done flags
+ respectively.
+
+ Raises:
+ ValueError: when the data for current epoch has already been loaded.
+ """
+ if self._store_rollouts and \
+ self._rollouts_by_epoch_and_split[self.current_epoch]:
+ raise ValueError(
+ "Data for current epoch has already been loaded from disk."
+ )
+ (obs, unclipped_rewards, dones) = self._step(actions)
+ obs = self._preprocess_observations(obs)
+ (min_reward, max_reward) = self.reward_range
+ rewards = np.around(np.clip(unclipped_rewards, min_reward, max_reward))
+ if self._store_rollouts:
+ unclipped_rewards = unclipped_rewards.astype(np.float64)
+ encoded_obs = self._encode_observations(obs)
+ for (rollout, frame, action) in zip(
+ self._current_batch_rollouts, self._current_batch_frames, actions
+ ):
+ rollout.append(frame._replace(action=action))
+
+ # orud = (observation, reward, unclipped_reward, done)
+ self._current_batch_frames = [
+ Frame(*orud, action=None)
+ for orud in zip(encoded_obs, rewards, unclipped_rewards, dones)
+ ]
+ return (obs, rewards, dones)
+
+ def _reset(self, indices):
+ """Resets environments at given indices without recording history.
+
+ Args:
+ indices: Indices of environments to reset.
+
+ Returns:
+ Batch of initial observations of reset environments.
+ """
+ raise NotImplementedError
+
+ def reset(self, indices=None):
+ """Resets environments at given indices.
+
+ Does any preprocessing and adds rollouts to history.
+ + Args: + indices: Indices of environments to reset. + + Returns: + Batch of initial observations of reset environments. + + Raises: + ValueError: when there's no current epoch. + """ + if self._store_rollouts and self.current_epoch is None: + raise ValueError( + "No current epoch. start_new_epoch() should first be called." + ) + + if indices is None: + indices = np.arange(self.batch_size) + new_obs = self._reset(indices) + if self._should_preprocess_on_reset: + new_obs = self._preprocess_observations(new_obs) + if self._store_rollouts: + encoded_obs = self._encode_observations(new_obs) + for (index, ob) in zip(indices, encoded_obs): + frame = self._current_batch_frames[index] + if frame is not None: + rollout = self._current_batch_rollouts[index] + rollout.append(frame._replace(action=0)) + self._current_epoch_rollouts.append(rollout) + self._current_batch_rollouts[index] = [] + self._current_batch_frames[index] = Frame( + observation=ob, reward=0, unclipped_reward=0, done=False, + action=None + ) + return new_obs + + def close(self): + """Cleanups any resources. + + Can be overridden in derived classes. + """ + self._session.obj.close() + + @property + def num_channels(self): + """Number of color channels in each frame.""" + raise NotImplementedError + + def eval_metrics(self): + eval_metrics = [ + metrics.Metrics.ACC, metrics.Metrics.ACC_PER_SEQ, + metrics.Metrics.IMAGE_RMSE + ] + return eval_metrics + + @property + def extra_reading_spec(self): + """Additional data fields to store on disk and their decoders.""" + field_names = ("frame_number", "action", "reward", "done") + data_fields = { + name: tf.FixedLenFeature([1], tf.int64) for name in field_names + } + decoders = { + name: contrib.slim().tfexample_decoder.Tensor(tensor_key=name) + for name in field_names + } + return (data_fields, decoders) + + @property + def frame_height(self): + return self.observation_space.shape[0] + + @property + def frame_width(self): + return self.observation_space.shape[1] + + @property + def only_keep_videos_from_0th_frame(self): + return False + + def _generate_frames(self, rollouts): + for rollout in rollouts: + for (frame_number, frame) in enumerate(rollout): + yield { + "frame_number": [frame_number], + "epoch": [self.current_epoch], + "image/encoded": [frame.observation.data], + "image/format": ["png"], + "image/height": [self.frame_height], + "image/width": [self.frame_width], + "action": [int(frame.action)], + "reward": [int(frame.reward - self.reward_range[0])], + "unclipped_reward": [float(frame.unclipped_reward)], + "done": [int(frame.done)] + } + + @staticmethod + def _calc_num_frames(rollouts): + return sum(len(rollout) for rollout in rollouts) + + def _split_current_epoch(self): + """Splits frames in the current epoch according to self.dataset_splits. + + Rollouts can be broken on shard boundary. This is desirable when we have + few long rollouts and we want to make sure we have data in the dev set. + """ + num_frames = self._calc_num_frames(self._current_epoch_rollouts) + num_shards = sum(split["shards"] for split in self.dataset_splits) + shard_size = num_frames // num_shards + + splits = self.dataset_splits + num_saved_frames = 0 + split_index = 0 + split_begin_index = 0 + rollouts_by_split = collections.defaultdict(list) + + def split_size(split_index): + return splits[split_index]["shards"] * shard_size + + for rollout in self._current_epoch_rollouts: + num_saved_frames_current_rollout = 0 + # Split the rollout into chunks corresponding to dataset splits. 
In most
+ # cases there should be only one chunk. On a dataset split boundary there
+ # will be two. If a rollout is longer than the size of a dataset split,
+ # there might be more.
+ while num_saved_frames_current_rollout < len(rollout):
+ max_chunk_length = (
+ split_begin_index + split_size(split_index) - num_saved_frames
+ )
+ if split_index == len(splits) - 1:
+ # Put the remainder in the last split to preserve the ordering.
+ max_chunk_length = len(rollout)
+ rollout_chunk = rollout[
+ num_saved_frames_current_rollout:
+ (num_saved_frames_current_rollout + max_chunk_length)
+ ]
+ rollouts_by_split[splits[split_index]["split"]].append(rollout_chunk)
+ num_saved_frames_current_rollout += len(rollout_chunk)
+ num_saved_frames += len(rollout_chunk)
+
+ if num_saved_frames == split_begin_index + split_size(split_index):
+ split_begin_index += split_size(split_index)
+ split_index = min(split_index + 1, len(splits) - 1)
+
+ self._rollouts_by_epoch_and_split[self.current_epoch] = rollouts_by_split
+ self._current_epoch_rollouts = []
+
+ def splits_and_paths(self, data_dir):
+ """List of pairs (split, paths) for the current epoch."""
+ filepath_fns = {
+ problem.DatasetSplit.TRAIN: self.training_filepaths,
+ problem.DatasetSplit.EVAL: self.dev_filepaths,
+ problem.DatasetSplit.TEST: self.test_filepaths,
+ }
+
+ def append_epoch(paths):
+ return [
+ "{}.{}".format(path, self.current_epoch)
+ for path in paths
+ ]
+
+ # We set shuffled=True as we don't want to shuffle on disk later.
+ return [
+ (split["split"], append_epoch(filepath_fns[split["split"]](
+ data_dir, split["shards"], shuffled=True
+ )))
+ for split in self.dataset_splits
+ ]
+
+ def filepattern(self, data_dir, mode, shard=None, only_last=False):
+ filepattern = super(T2TEnv, self).filepattern(
+ data_dir, mode, shard
+ )
+ if only_last:
+ filepattern += ".{}".format(self.current_epoch)
+ return filepattern
+
+ def generate_data(self, data_dir, tmp_dir=None, task_id=-1):
+ """Saves the current epoch rollouts to disk, split into train/dev sets."""
+ if not self._rollouts_by_epoch_and_split[self.current_epoch]:
+ # Data not loaded from disk.
+ self._split_current_epoch()
+
+ rollouts_by_split = self._rollouts_by_epoch_and_split[self.current_epoch]
+ splits_and_paths = self.splits_and_paths(data_dir)
+
+ for (split, paths) in splits_and_paths:
+ rollouts = rollouts_by_split[split]
+ num_frames = self._calc_num_frames(rollouts)
+ shard_size = num_frames // len(paths)
+
+ frame_gen = self._generate_frames(rollouts)
+ for (path_index, path) in enumerate(paths):
+ limit = shard_size
+ # Put the remainder in the last shard to preserve the ordering.
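+ # (itertools.islice with limit=None consumes all remaining frames.)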
+ if path_index == len(paths) - 1:
+ limit = None
+ generator_utils.generate_files(
+ itertools.islice(frame_gen, limit), [path],
+ cycle_every_n=float("inf")
+ )
+
+ def _load_epoch_data(self, data_dir):
+ any_files_found = False
+ all_files_found = True
+ any_shard_empty = False
+
+ for split, paths in self.splits_and_paths(data_dir):
+ try:
+ any_shard_empty |= self._load_epoch_split(split, paths)
+ any_files_found = True
+ except tf.errors.NotFoundError:
+ all_files_found = False
+ if any_shard_empty or (not all_files_found and any_files_found):
+ raise ValueError("Some data is missing; the experiment might have been "
+ "interrupted while generating data.")
+
+ def _load_epoch_split(self, split, paths):
+ epoch = self.current_epoch
+ last_frame_number = -1
+ any_shard_empty = False
+ current_rollout = []
+
+ for path in paths:
+ this_shard_empty = True
+ for example in tf.python_io.tf_record_iterator(path):
+ this_shard_empty = False
+
+ result = tf.train.Example.FromString(example)
+ feature = result.features.feature
+
+ def get_feature_value(key, list_name):
+ return getattr(feature[key], list_name).value[0] # pylint: disable=cell-var-from-loop
+
+ fields = {
+ key: get_feature_value(key, list_name)
+ for (key, list_name) in [
+ ("image/encoded", "bytes_list"), ("reward", "int64_list"),
+ ("unclipped_reward", "float_list"), ("done", "int64_list"),
+ ("action", "int64_list")
+ ]
+ }
+ fields["reward"] += self.reward_range[0]
+ fields["done"] = bool(fields["done"])
+ fields["observation"] = Observation(
+ fields["image/encoded"], self._decode_png
+ )
+ del fields["image/encoded"]
+
+ frame = Frame(**fields)
+ frame_number = get_feature_value("frame_number", "int64_list")
+ if frame_number == last_frame_number + 1:
+ current_rollout.append(frame)
+ else:
+ self._rollouts_by_epoch_and_split[epoch][split].append(
+ current_rollout)
+ current_rollout = [frame]
+ last_frame_number = frame_number
+
+ any_shard_empty |= this_shard_empty
+
+ self._rollouts_by_epoch_and_split[epoch][split].append(
+ current_rollout
+ )
+ return any_shard_empty
+
+
+class T2TGymEnv(T2TEnv):
+ """Class representing a batch of Gym environments.
+
+ Do not register it; instead create a subclass with hardcoded __init__
+ arguments and register that subclass.
+ """
+
+ noop_action = 0
+
+ def __init__(self, base_env_name=None, batch_size=1, grayscale=False,
+ resize_height_factor=2, resize_width_factor=2,
+ rl_env_max_episode_steps=-1, max_num_noops=0,
+ maxskip_envs=False, sticky_actions=False,
+ should_derive_observation_space=True,
+ **kwargs):
+ if base_env_name is None:
+ base_env_name = self.base_env_name
+ self._base_env_name = base_env_name
+ super(T2TGymEnv, self).__init__(batch_size, **kwargs)
+ # TODO(afrozm): Find a proper way of doing this. Refactor or cleanup.
+ self.should_derive_observation_space = should_derive_observation_space
+ self.grayscale = grayscale
+ self.resize_height_factor = resize_height_factor
+ self.resize_width_factor = resize_width_factor
+ self.rl_env_max_episode_steps = rl_env_max_episode_steps
+ self.maxskip_envs = maxskip_envs
+ self.sticky_actions = sticky_actions
+ self._initial_state = None
+ self._initial_frames = None
+ if not self.name:
+ # Set problem name if not registered.
+ self.name = "Gym%s" % base_env_name
+
+ self._envs = [
+ gym_utils.make_gym_env(
+ base_env_name, rl_env_max_episode_steps=rl_env_max_episode_steps,
+ maxskip_env=maxskip_envs, sticky_actions=sticky_actions)
+ for _ in range(self.batch_size)]
+
+ # max_num_noops works only with atari envs.
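+ # The assert below checks that action 0 really is NOOP in the underlying env.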
+ if max_num_noops > 0:
+ assert self._envs[0].unwrapped.get_action_meanings()[
+ self.noop_action
+ ] == "NOOP"
+ self.max_num_noops = max_num_noops
+
+ orig_observ_space = self._envs[0].observation_space
+ if not all(env.observation_space == orig_observ_space
+ for env in self._envs):
+ raise ValueError("All environments must use the same observation space.")
+
+ self.observation_space = orig_observ_space
+ if self.should_derive_observation_space:
+ self.observation_space = self._derive_observation_space(orig_observ_space)
+
+ self.action_space = self._envs[0].action_space
+ if not all(env.action_space == self.action_space for env in self._envs):
+ raise ValueError("All environments must use the same action space.")
+
+ if self.should_derive_observation_space:
+ with self._tf_graph.obj.as_default():
+ self._resize = {}
+ orig_height, orig_width = orig_observ_space.shape[:2]
+ self._img_batch_t = _Noncopyable(tf.placeholder(
+ dtype=tf.uint8, shape=(None, orig_height, orig_width, 3)))
+ height, width = self.observation_space.shape[:2]
+ resized = tf.image.resize_images(self._img_batch_t.obj,
+ [height, width],
+ tf.image.ResizeMethod.AREA)
+ resized = tf.cast(resized, tf.as_dtype(self.observation_space.dtype))
+ if self.grayscale:
+ resized = tf.image.rgb_to_grayscale(resized)
+ self._resized_img_batch_t = _Noncopyable(resized)
+
+ # TODO(afrozm): Find a place for this. Till then use self._envs[0]'s hparams.
+ def hparams(self, defaults, unused_model_hparams):
+ if hasattr(self._envs[0], "hparams"):
+ tf.logging.info("Returning the env's hparams from T2TGymEnv.")
+ return self._envs[0].hparams(defaults, unused_model_hparams)
+
+ # Otherwise just call the super-class' hparams.
+ tf.logging.info("Returning the T2TGymEnv's superclass' hparams.")
+ super(T2TGymEnv, self).hparams(defaults, unused_model_hparams)
+
+ def new_like(self, **kwargs):
+ env_kwargs = {
+ "base_env_name": self.base_env_name,
+ "batch_size": self.batch_size,
+ "grayscale": self.grayscale,
+ "resize_height_factor": self.resize_height_factor,
+ "resize_width_factor": self.resize_width_factor,
+ "rl_env_max_episode_steps": self.rl_env_max_episode_steps,
+ "max_num_noops": self.max_num_noops,
+ "maxskip_envs": self.maxskip_envs,
+ }
+ env_kwargs.update(kwargs)
+ return T2TGymEnv(**env_kwargs)
+
+ @property
+ def base_env_name(self):
+ return self._base_env_name
+
+ @property
+ def num_channels(self):
+ return self.observation_space.shape[2]
+
+ # TODO(afrozm): Why is this separated out from _preprocess_observations?
+ def _derive_observation_space(self, orig_observ_space):
+ height, width, channels = orig_observ_space.shape
+ if self.grayscale:
+ channels = 1
+ resized_height = height // self.resize_height_factor
+ resized_width = width // self.resize_width_factor
+ shape = (resized_height, resized_width, channels)
+ return Box(low=orig_observ_space.low.min(),
+ high=orig_observ_space.high.max(), shape=shape,
+ dtype=orig_observ_space.dtype)
+
+ def __str__(self):
+ return "T2TGymEnv(%s)" % ", ".join([str(env) for env in self._envs])
+
+ def _encode_observations(self, observations):
+ if not self.should_derive_observation_space:
+ return observations
+ return super(T2TGymEnv, self)._encode_observations(observations)
+
+ def _preprocess_observations(self, observations):
+ # TODO(afrozm): Clean this up.
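+ # Resizing (and optional grayscaling) runs through the TF graph built in __init__.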
+ if not self.should_derive_observation_space: + return observations + return self._session.obj.run( + self._resized_img_batch_t.obj, + feed_dict={self._img_batch_t.obj: observations}) + + @property + def state(self): + """Gets the current state.""" + return [env.unwrapped.clone_full_state() for env in self._envs] + + def set_initial_state(self, initial_state, initial_frames): + """Sets the state that will be used on next reset.""" + self._initial_state = initial_state + self._initial_frames = initial_frames[:, -1, ...] + self._should_preprocess_on_reset = False + + def _step(self, actions): + (obs, rewards, dones, _) = zip(*[ + env.step(action) for (env, action) in zip(self._envs, actions) + ]) + return tuple(map(np.stack, (obs, rewards, dones))) + + def _reset(self, indices): + def reset_with_initial_state(env, index): + """Resets environment taking self._initial_state into account.""" + obs = env.reset() + if self._initial_state is None: + return obs + else: + env.unwrapped.restore_full_state(self._initial_state[index]) + return self._initial_frames[index, ...] + + def reset_with_noops(env, index): + """Resets environment and applies random number of NOOP actions on it.""" + obs = reset_with_initial_state(env, index) + try: + num_noops = random.randint(1, self.max_num_noops) + except ValueError: + num_noops = 0 + + for _ in range(num_noops): + (obs, _, done, _) = env.step(self.noop_action) + if done: + obs = reset_with_initial_state(env, index) + + return obs + + return np.stack([ + reset_with_noops(self._envs[index], index) for index in indices + ]) + + def close(self): + for env in self._envs: + env.close() + + +class DummyWorldModelProblem(EnvSimulationProblem): + """Dummy Problem for world model prediction.""" + + def __init__(self, action_space, reward_range, frame_height, frame_width): + super(DummyWorldModelProblem, self).__init__() + self.action_space = action_space + self.reward_range = reward_range + self._frame_height = frame_height + self._frame_width = frame_width + + @property + def frame_height(self): + """Height of each frame.""" + return self._frame_height + + @property + def frame_width(self): + """Width of each frame.""" + return self._frame_width + + +# Atari registration. + +# Game list from our list of ROMs +# Removed because XDeterministic-v4 did not exist: +# * adventure +# * defender +# * kaboom +ATARI_GAMES = [ + "air_raid", "alien", "amidar", "assault", "asterix", "asteroids", + "atlantis", "bank_heist", "battle_zone", "beam_rider", "berzerk", "bowling", + "boxing", "breakout", "carnival", "centipede", "chopper_command", + "crazy_climber", "demon_attack", "double_dunk", "elevator_action", "enduro", + "fishing_derby", "freeway", "frostbite", "gopher", "gravitar", "hero", + "ice_hockey", "jamesbond", "journey_escape", "kangaroo", "krull", + "kung_fu_master", "montezuma_revenge", "ms_pacman", "name_this_game", + "phoenix", "pitfall", "pong", "pooyan", "private_eye", "qbert", "riverraid", + "road_runner", "robotank", "seaquest", "skiing", "solaris", + "space_invaders", "star_gunner", "tennis", "time_pilot", "tutankham", + "up_n_down", "venture", "video_pinball", "wizard_of_wor", "yars_revenge", + "zaxxon" +] + +# List from paper: +# https://arxiv.org/pdf/1805.11593.pdf +# plus frostbite. 
+ATARI_GAMES_WITH_HUMAN_SCORE = [ + "alien", "amidar", "assault", "asterix", "asteroids", + "atlantis", "bank_heist", "battle_zone", "beam_rider", "bowling", + "boxing", "breakout", "chopper_command", + "crazy_climber", "demon_attack", "double_dunk", "enduro", + "fishing_derby", "freeway", "frostbite", "gopher", "gravitar", "hero", + "ice_hockey", "jamesbond", "kangaroo", "krull", + "kung_fu_master", "montezuma_revenge", "ms_pacman", "name_this_game", + "pitfall", "pong", "private_eye", "qbert", "riverraid", + "road_runner", "seaquest", "solaris", + "up_n_down", "video_pinball", "yars_revenge", +] + + +# Blacklist a few games where it makes little sense to run on for now. +ATARI_GAMES_WITH_HUMAN_SCORE_NICE = [ + g for g in ATARI_GAMES_WITH_HUMAN_SCORE if g not in [ + "solaris", "pitfall", "montezuma_revenge", "enduro", + "video_pinball", "double_dunk"]] + + +ATARI_WHITELIST_GAMES = [ + "amidar", + "bank_heist", + "berzerk", + "boxing", + "crazy_climber", + "freeway", + "frostbite", + "gopher", + "kung_fu_master", + "ms_pacman", + "pong", + "qbert", + "seaquest", +] + + +# Games on which model-free does better than model-based at this point. +ATARI_CURIOUS_GAMES = [ + "bank_heist", + "boxing", + "enduro", + "kangaroo", + "road_runner", + "up_n_down", +] + + +# Games on which based should work. +ATARI_DEBUG_GAMES = [ + "crazy_climber", + "freeway", + "pong", +] + + +# Different ATARI game modes in OpenAI Gym. Full list here: +# https://github.com/openai/gym/blob/master/gym/envs/__init__.py +ATARI_GAME_MODES = [ + "Deterministic-v0", # 0.25 repeat action probability, 4 frame skip. + "Deterministic-v4", # 0.00 repeat action probability, 4 frame skip. + "NoFrameskip-v0", # 0.25 repeat action probability, 1 frame skip. + "NoFrameskip-v4", # 0.00 repeat action probability, 1 frame skip. + "-v0", # 0.25 repeat action probability, (2 to 5) frame skip. + "-v4" # 0.00 repeat action probability, (2 to 5) frame skip. +] + + +def register_game(game_name, game_mode="NoFrameskip-v4"): + """Create and register problems for the game. + + Args: + game_name: str, one of the games in ATARI_GAMES, e.g. "bank_heist". + game_mode: the frame skip and sticky keys config. + + Raises: + ValueError: if game_name or game_mode are wrong. + """ + if game_name not in ATARI_GAMES: + raise ValueError("Game %s not in ATARI_GAMES" % game_name) + if game_mode not in ATARI_GAME_MODES: + raise ValueError("Unknown ATARI game mode: %s." % game_mode) + camel_game_name = misc_utils.snakecase_to_camelcase(game_name) + game_mode + # Create and register the Problem + cls = type("Gym%sRandom" % camel_game_name, + (T2TGymEnv,), {"base_env_name": camel_game_name}) + registry.register_problem(cls) + + +# Register the atari games with all of the possible modes. +for atari_game in ATARI_GAMES: + for atari_game_mode in ATARI_GAME_MODES: + register_game(atari_game, game_mode=atari_game_mode) diff --git a/tensor2tensor/data_generators/gym_env_test.py b/tensor2tensor/data_generators/gym_env_test.py new file mode 100644 index 000000000..29d6afc68 --- /dev/null +++ b/tensor2tensor/data_generators/gym_env_test.py @@ -0,0 +1,288 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Gym env tests.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import shutil + +import gym +from gym.spaces import Box +from gym.spaces import Discrete +import numpy as np + +from tensor2tensor.data_generators import gym_env +from tensor2tensor.data_generators import problem +from tensor2tensor.rl.gym_utils import make_gym_env + +import tensorflow.compat.v1 as tf + + +class TestEnv(gym.Env): + """Test environment. + + Odd frames are "done". + """ + + action_space = Discrete(1) + # TODO(afrozm): Gym's Box has a bug for uint8 type, which doesn't allow + # sampling, send them a PR. Till that time let this be np.int64 + observation_space = Box( + low=0, high=255, shape=(2, 6, 3), dtype=np.int64 + ) + + def __init__(self): + self._counter = 0 + + def _generate_ob(self): + return self.observation_space.sample() + + def step(self, action): + done = self._counter % 2 == 1 + self._counter += 1 + reward = 5 if done else -5 + return (self._generate_ob(), reward, done, {}) + + def reset(self): + return self._generate_ob() + +TEST_ENV_NAME = "T2TTestEnv-v1" + +gym.envs.register(id=TEST_ENV_NAME, entry_point=TestEnv) + + +class GymEnvTest(tf.test.TestCase): + + splits = (problem.DatasetSplit.TRAIN, problem.DatasetSplit.EVAL) + + # TODO(koz4k): Tests for loading: + # - loaded epoch is read-only + # - partial write detection (should raise on loading) + + def setUp(self): + self.out_dir = tf.test.get_temp_dir() + shutil.rmtree(self.out_dir) + os.mkdir(self.out_dir) + np.random.seed(0) + + def init_batch_and_play(self, env_name, steps_per_epoch=1, epochs=(0,), + generate_data=False, batch_size=2, **kwargs): + env = gym_env.T2TGymEnv(env_name, batch_size=batch_size, **kwargs) + obs = [] + rewards = [] + num_dones = 0 + for epoch in epochs: + env.start_new_epoch(epoch, self.out_dir) + _, epoch_obs, epoch_rewards, epoch_num_dones = \ + self.play(env, steps_per_epoch) + epoch_obs.append(env.reset()) + if generate_data: + env.generate_data(self.out_dir) + obs.extend(epoch_obs) + rewards.extend(epoch_rewards) + num_dones += epoch_num_dones + return env, obs, rewards, num_dones + + def play(self, env, n_steps): + obs = [] + rewards = [] + obs.append(env.reset()) + num_dones = 0 + for _ in range(n_steps): + step_obs, step_rewards, dones = env.step(actions=[0, 0]) + obs.append(step_obs) + rewards.append(step_rewards) + for (i, done) in enumerate(dones): + if done: + env.reset([i]) + num_dones += 1 + return env, obs, rewards, num_dones + + def test_splits_dataset(self): + env, _, _, _ = self.init_batch_and_play( + TEST_ENV_NAME, steps_per_epoch=20, generate_data=True + ) + + for split in self.splits: + self.assertTrue(env.current_epoch_rollouts(split)) + + def test_split_preserves_number_of_rollouts(self): + batch_size = 2 + env, _, _, num_dones = self.init_batch_and_play( + TEST_ENV_NAME, steps_per_epoch=20, generate_data=True, + batch_size=batch_size + ) + + num_rollouts_after_split = sum( + len(env.current_epoch_rollouts(split)) for split in self.splits + ) + # After the end of epoch all environments are reset, which 
increases number + # of rollouts by batch size. Number of rollouts could be increased by one + # in case a rollout is broken on a boundary between the dataset splits. + self.assertGreaterEqual(num_rollouts_after_split, num_dones + batch_size) + self.assertLessEqual(num_rollouts_after_split, num_dones + batch_size + 1) + + def test_split_preserves_number_of_frames(self): + batch_size = 2 + env, _, _, num_dones = self.init_batch_and_play( + TEST_ENV_NAME, steps_per_epoch=20, generate_data=True, + batch_size=batch_size + ) + + num_frames = sum( + len(rollout) + for split in self.splits + for rollout in env.current_epoch_rollouts(split) + ) + # There are 3 frames in every rollout: the initial one and two returned by + # step(). Additionally there are batch_size observations coming from final + # reset at the end of epoch. + self.assertEqual(num_frames, 3 * num_dones + batch_size) + + def test_generates_data(self): + # This test needs base env which outputs done after two steps. + self.init_batch_and_play( + TEST_ENV_NAME, steps_per_epoch=20, generate_data=True + ) + + filenames = os.listdir(self.out_dir) + self.assertTrue(filenames) + for filename in filenames: + path = os.path.join(self.out_dir, filename) + records = list(tf.python_io.tf_record_iterator(path)) + self.assertTrue(records) + + def test_shards_per_epoch(self): + def num_ending_with(filenames, suffix): + return sum( + 1 for filename in filenames if filename.endswith(suffix) + ) + + env = gym_env.T2TGymEnv(TEST_ENV_NAME, batch_size=2) + env.start_new_epoch(0, self.out_dir) + self.play(env, n_steps=20) + env.generate_data(self.out_dir) + + filenames = os.listdir(self.out_dir) + num_shards_per_epoch = len(filenames) + self.assertEqual(num_ending_with(filenames, ".0"), num_shards_per_epoch) + + env.start_new_epoch(1, self.out_dir) + self.play(env, n_steps=20) + env.generate_data(self.out_dir) + + filenames = os.listdir(self.out_dir) + self.assertEqual(len(filenames), 2 * num_shards_per_epoch) + for suffix in (".0", ".1"): + self.assertEqual(num_ending_with(filenames, suffix), num_shards_per_epoch) + + def test_frame_numbers_are_continuous(self): + env, _, _, _ = self.init_batch_and_play( + TEST_ENV_NAME, steps_per_epoch=20, generate_data=True + ) + + frame_numbers = [ + tf.train.Example.FromString( + record + ).features.feature["frame_number"].int64_list.value[0] + for (_, paths) in env.splits_and_paths(self.out_dir) + for path in paths + for record in tf.python_io.tf_record_iterator(path) + ] + last_frame_number = -1 + for frame_number in frame_numbers: + # Every consecutive frame number should be either zero (first frame in + # a new rollout) or one bigger than the last one (next frame in the same + # rollout). 
+ if frame_number > 0: + self.assertEqual(frame_number, last_frame_number + 1) + last_frame_number = frame_number + + def test_clipping(self): + _, _, rewards, _ = self.init_batch_and_play(TEST_ENV_NAME, + steps_per_epoch=2) + self.assertTrue(np.max(rewards) == 1) + self.assertTrue(np.min(rewards) == -1) + + def test_resize(self): + env_name = TEST_ENV_NAME + orig_env = make_gym_env(env_name) + resize_height_factor = 2 + resize_width_factor = 3 + orig_height, orig_width = orig_env.observation_space.shape[:2] + env, obs, _, _ = self.init_batch_and_play( + env_name, steps_per_epoch=1, + resize_height_factor=resize_height_factor, + resize_width_factor=resize_width_factor) + for obs_batch in obs: + ob = obs_batch[0] + self.assertEqual(ob.shape, env.observation_space.shape) + height, width = ob.shape[:2] + self.assertEqual(height, orig_height // resize_height_factor) + self.assertEqual(width, orig_width // resize_width_factor) + + def test_no_resize_option(self): + env_name = TEST_ENV_NAME + orig_env = make_gym_env(env_name) + resize_height_factor = 2 + resize_width_factor = 3 + orig_height, orig_width = orig_env.observation_space.shape[:2] + env, obs, _, _ = self.init_batch_and_play( + env_name, steps_per_epoch=1, + resize_height_factor=resize_height_factor, + resize_width_factor=resize_width_factor, + should_derive_observation_space=False) + for obs_batch in obs: + ob = obs_batch[0] + self.assertEqual(ob.shape, env.observation_space.shape) + height, width = ob.shape[:2] + self.assertEqual(height, orig_height) + self.assertEqual(width, orig_width) + + def assert_channels(self, env, obs, n_channels): + self.assertEqual(env.observation_space.shape[2], n_channels) + self.assertEqual(env.num_channels, n_channels) + for obs_batch in obs: + ob = obs_batch[0] + self.assertEqual(ob.shape[2], n_channels) + + def test_channels(self): + env_name = TEST_ENV_NAME + env, obs, _, _ = self.init_batch_and_play(env_name, grayscale=True) + self.assert_channels(env, obs, n_channels=1) + + env, obs, _, _ = self.init_batch_and_play(env_name, grayscale=False) + self.assert_channels(env, obs, n_channels=3) + + def test_generating_and_loading_preserves_rollouts(self): + env_name = TEST_ENV_NAME + from_env = gym_env.T2TGymEnv(env_name, batch_size=1) + from_env.start_new_epoch(0, self.out_dir) + self.play(from_env, n_steps=20) + from_env.generate_data(self.out_dir) + + to_env = gym_env.T2TGymEnv(env_name, batch_size=1) + to_env.start_new_epoch(0, self.out_dir) + + self.assertEqual( + from_env.current_epoch_rollouts(), to_env.current_epoch_rollouts() + ) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/ice_parsing.py b/tensor2tensor/data_generators/ice_parsing.py new file mode 100644 index 000000000..1f7380515 --- /dev/null +++ b/tensor2tensor/data_generators/ice_parsing.py @@ -0,0 +1,117 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""This module implements the ice_parsing_* problems.""" + +# These parse plain text into flattened parse trees and POS tags. +# The training data is stored in files named `parsing_train.pairs` +# and `parsing_dev.pairs`. These files are UTF-8 text files where +# each line contains an input sentence and a target parse tree, +# separated by a tab character. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.layers import modalities +from tensor2tensor.utils import registry + + +def tabbed_parsing_token_generator(data_dir, tmp_dir, train, prefix, + source_vocab_size, target_vocab_size): + """Generate source and target data from a single file.""" + filename = "parsing_{0}.pairs".format("train" if train else "dev") + source_vocab = generator_utils.get_or_generate_tabbed_vocab( + data_dir, tmp_dir, filename, 0, + prefix + "_source.tokens.vocab.%d" % source_vocab_size, source_vocab_size) + target_vocab = generator_utils.get_or_generate_tabbed_vocab( + data_dir, tmp_dir, filename, 1, + prefix + "_target.tokens.vocab.%d" % target_vocab_size, target_vocab_size) + pair_filepath = os.path.join(tmp_dir, filename) + return text_problems.text2text_generate_encoded( + text_problems.text2text_txt_tab_iterator(pair_filepath), source_vocab, + target_vocab) + + +def tabbed_parsing_character_generator(tmp_dir, train): + """Generate source and target data from a single file.""" + character_vocab = text_encoder.ByteTextEncoder() + filename = "parsing_{0}.pairs".format("train" if train else "dev") + pair_filepath = os.path.join(tmp_dir, filename) + return text_problems.text2text_generate_encoded( + text_problems.text2text_txt_tab_iterator(pair_filepath), character_vocab) + + +@registry.register_problem +class ParsingIcelandic16k(problem.Problem): + """Problem spec for parsing tokenized Icelandic text to constituency trees.""" + + @property + def source_vocab_size(self): + return 2**14 # 16384 + + @property + def targeted_vocab_size(self): + return 2**8 # 256 + + @property + def input_space_id(self): + return problem.SpaceID.ICE_TOK + + @property + def target_space_id(self): + return problem.SpaceID.ICE_PARSE_TOK + + @property + def num_shards(self): + return 10 + + def feature_encoders(self, data_dir): + source_vocab_filename = os.path.join( + data_dir, "ice_source.tokens.vocab.%d" % self.source_vocab_size) + target_vocab_filename = os.path.join( + data_dir, "ice_target.tokens.vocab.%d" % self.targeted_vocab_size) + source_subtokenizer = text_encoder.SubwordTextEncoder(source_vocab_filename) + target_subtokenizer = text_encoder.SubwordTextEncoder(target_vocab_filename) + return { + "inputs": source_subtokenizer, + "targets": target_subtokenizer, + } + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + generator_utils.generate_dataset_and_shuffle( + tabbed_parsing_token_generator(data_dir, tmp_dir, True, "ice", + self.source_vocab_size, + self.targeted_vocab_size), + self.training_filepaths(data_dir, self.num_shards, shuffled=False), + tabbed_parsing_token_generator(data_dir, tmp_dir, False, "ice", + self.source_vocab_size, + self.targeted_vocab_size), + self.dev_filepaths(data_dir, 1, shuffled=False)) + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = 
{"inputs": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.SYMBOL} + p.vocab_size = {"inputs": self._encoders["inputs"].vocab_size, + "targets": self.targeted_vocab_size} + p.input_space_id = self.input_space_id + p.target_space_id = self.target_space_id + p.loss_multiplier = 2.5 # Rough estimate of avg number of tokens per word diff --git a/tensor2tensor/data_generators/image.py b/tensor2tensor/data_generators/image.py deleted file mode 100644 index 55b5f2fc7..000000000 --- a/tensor2tensor/data_generators/image.py +++ /dev/null @@ -1,306 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Data generators for image data-sets.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import cPickle -import gzip -import io -import json -import os -import random -import tarfile -import zipfile - -# Dependency imports - -import numpy as np -from six.moves import xrange # pylint: disable=redefined-builtin -from six.moves import zip # pylint: disable=redefined-builtin -from tensor2tensor.data_generators import generator_utils - -import tensorflow as tf - - -def image_generator(images, labels): - """Generator for images that takes image and labels lists and creates pngs. - - Args: - images: list of images given as [width x height x channels] numpy arrays. - labels: list of ints, same length as images. - - Yields: - A dictionary representing the images with the following fields: - * image/encoded: the string encoding the image as PNG, - * image/format: the string "png" representing image format, - * image/class/label: an integer representing the label, - * image/height: an integer representing the height, - * image/width: an integer representing the width. - Every field is actually a singleton list of the corresponding type. - - Raises: - ValueError: if images is an empty list. - """ - if not images: - raise ValueError("Must provide some images for the generator.") - (width, height, channels) = images[0].shape - with tf.Graph().as_default(): - image_t = tf.placeholder(dtype=tf.uint8, shape=(width, height, channels)) - encoded_image_t = tf.image.encode_png(image_t) - with tf.Session() as sess: - for (image, label) in zip(images, labels): - enc_string = sess.run(encoded_image_t, feed_dict={image_t: image}) - yield { - "image/encoded": [enc_string], - "image/format": ["png"], - "image/class/label": [label], - "image/height": [height], - "image/width": [width] - } - - -# URLs and filenames for MNIST data. 
-_MNIST_URL = "http://yann.lecun.com/exdb/mnist/" -_MNIST_TRAIN_DATA_FILENAME = "train-images-idx3-ubyte.gz" -_MNIST_TRAIN_LABELS_FILENAME = "train-labels-idx1-ubyte.gz" -_MNIST_TEST_DATA_FILENAME = "t10k-images-idx3-ubyte.gz" -_MNIST_TEST_LABELS_FILENAME = "t10k-labels-idx1-ubyte.gz" -_MNIST_IMAGE_SIZE = 28 - - -def _get_mnist(directory): - """Download all MNIST files to directory unless they are there.""" - for filename in [ - _MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME, - _MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME - ]: - generator_utils.maybe_download(directory, filename, _MNIST_URL + filename) - - -def _extract_mnist_images(filename, num_images): - """Extract images from an MNIST file into a numpy array. - - Args: - filename: The path to an MNIST images file. - num_images: The number of images in the file. - - Returns: - A numpy array of shape [number_of_images, height, width, channels]. - """ - with gzip.open(filename) as bytestream: - bytestream.read(16) - buf = bytestream.read(_MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE * num_images) - data = np.frombuffer(buf, dtype=np.uint8) - data = data.reshape(num_images, _MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1) - return data - - -def _extract_mnist_labels(filename, num_labels): - """Extract labels from an MNIST file into integers. - - Args: - filename: The path to an MNIST labels file. - num_labels: The number of labels in the file. - - Returns: - A int64 numpy array of shape [num_labels] - """ - with gzip.open(filename) as bytestream: - bytestream.read(8) - buf = bytestream.read(num_labels) - labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) - return labels - - -def mnist_generator(tmp_dir, training, how_many, start_from=0): - """Image generator for MNIST. - - Args: - tmp_dir: path to temporary storage directory. - training: a Boolean; if true, we use the train set, otherwise the test set. - how_many: how many images and labels to generate. - start_from: from which image to start. - - Returns: - An instance of image_generator that produces MNIST images. - """ - _get_mnist(tmp_dir) - d = _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME - l = _MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME - data_path = os.path.join(tmp_dir, d) - labels_path = os.path.join(tmp_dir, l) - images = _extract_mnist_images(data_path, 60000 if training else 10000) - labels = _extract_mnist_labels(labels_path, 60000 if training else 10000) - # Shuffle the data to make sure classes are well distributed. - data = list(zip(images, labels)) - random.shuffle(data) - images, labels = list(zip(*data)) - return image_generator(images[start_from:start_from + how_many], - labels[start_from:start_from + how_many]) - - -# URLs and filenames for CIFAR data. -_CIFAR10_URL = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz" -_CIFAR10_PREFIX = "cifar-10-batches-py/" -_CIFAR10_TRAIN_FILES = [ - "data_batch_1", "data_batch_2", "data_batch_3", "data_batch_4", - "data_batch_5" -] -_CIFAR10_TEST_FILES = ["test_batch"] -_CIFAR10_IMAGE_SIZE = 32 - - -def _get_cifar10(directory): - """Download and extract CIFAR to directory unless it is there.""" - filename = os.path.basename(_CIFAR10_URL) - path = generator_utils.maybe_download(directory, filename, _CIFAR10_URL) - tarfile.open(path, "r:gz").extractall(directory) - - -def cifar10_generator(tmp_dir, training, how_many, start_from=0): - """Image generator for CIFAR-10. - - Args: - tmp_dir: path to temporary storage directory. 
- training: a Boolean; if true, we use the train set, otherwise the test set. - how_many: how many images and labels to generate. - start_from: from which image to start. - - Returns: - An instance of image_generator that produces CIFAR-10 images and labels. - """ - _get_cifar10(tmp_dir) - data_files = _CIFAR10_TRAIN_FILES if training else _CIFAR10_TEST_FILES - all_images, all_labels = [], [] - for filename in data_files: - path = os.path.join(tmp_dir, _CIFAR10_PREFIX, filename) - with tf.gfile.Open(path, "r") as f: - data = cPickle.load(f) - images = data["data"] - num_images = images.shape[0] - images = images.reshape((num_images, 3, _CIFAR10_IMAGE_SIZE, - _CIFAR10_IMAGE_SIZE)) - all_images.extend([ - np.squeeze(images[j]).transpose((1, 2, 0)) for j in xrange(num_images) - ]) - labels = data["labels"] - all_labels.extend([labels[j] for j in xrange(num_images)]) - # Shuffle the data to make sure classes are well distributed. - data = zip(all_images, all_labels) - random.shuffle(data) - all_images, all_labels = zip(*data) - return image_generator(all_images[start_from:start_from + how_many], - all_labels[start_from:start_from + how_many]) - - -# URLs and filenames for MSCOCO data. -_MSCOCO_ROOT_URL = "http://msvocds.blob.core.windows.net/" -_MSCOCO_URLS = [ - "coco2014/train2014.zip", "coco2014/val2014.zip", "coco2014/test2014.zip", - "annotations-1-0-3/captions_train-val2014.zip" -] -_MSCOCO_TRAIN_PREFIX = "train2014" -_MSCOCO_EVAL_PREFIX = "val2014" -_MSCOCO_TRAIN_CAPTION_FILE = "annotations/captions_train2014.json" -_MSCOCO_EVAL_CAPTION_FILE = "annotations/captions_val2014.json" - - -def _get_mscoco(directory): - """Download and extract MSCOCO datasets to directory unless it is there.""" - for url in _MSCOCO_URLS: - filename = os.path.basename(url) - download_url = os.path.join(_MSCOCO_ROOT_URL, url) - path = generator_utils.maybe_download(directory, filename, download_url) - unzip_dir = os.path.join(directory, filename.strip(".zip")) - if not tf.gfile.Exists(unzip_dir): - zipfile.ZipFile(path, "r").extractall(directory) - - -def mscoco_generator(tmp_dir, - training, - how_many, - start_from=0, - eos_list=None, - vocab_filename=None, - vocab_size=0): - """Image generator for MSCOCO captioning problem with token-wise captions. - - Args: - tmp_dir: path to temporary storage directory. - training: a Boolean; if true, we use the train set, otherwise the test set. - how_many: how many images and labels to generate. - start_from: from which image to start. - eos_list: optional list of end of sentence tokens, otherwise use default - value `1`. - vocab_filename: file within `tmp_dir` to read vocabulary from. - vocab_size: integer target to generate vocabulary size to. - - Yields: - A dictionary representing the images with the following fields: - * image/encoded: the string encoding the image as JPEG, - * image/format: the string "jpeg" representing image format, - * image/class/label: a list of integers representing the caption, - * image/height: an integer representing the height, - * image/width: an integer representing the width. - Every field is actually a list of the corresponding type. 
- """ - eos_list = [1] if eos_list is None else eos_list - if vocab_filename is not None: - vocab_symbolizer = generator_utils.get_or_generate_vocab( - tmp_dir, vocab_filename, vocab_size) - _get_mscoco(tmp_dir) - caption_filepath = (_MSCOCO_TRAIN_CAPTION_FILE - if training else _MSCOCO_EVAL_CAPTION_FILE) - caption_filepath = os.path.join(tmp_dir, caption_filepath) - prefix = _MSCOCO_TRAIN_PREFIX if training else _MSCOCO_EVAL_PREFIX - caption_file = io.open(caption_filepath) - caption_json = json.load(caption_file) - # Dictionary from image_id to ((filename, height, width), captions). - image_dict = dict() - for image in caption_json["images"]: - image_dict[image["id"]] = [(image["file_name"], image["height"], - image["width"]), []] - annotations = caption_json["annotations"] - annotation_count = len(annotations) - image_count = len(image_dict) - tf.logging.info("Processing %d images and %d labels\n" % (image_count, - annotation_count)) - for annotation in annotations: - image_id = annotation["image_id"] - image_dict[image_id][1].append(annotation["caption"]) - - data = list(image_dict.values())[start_from:start_from + how_many] - random.shuffle(data) - for image_info, labels in data: - image_filename = image_info[0] - image_filepath = os.path.join(tmp_dir, prefix, image_filename) - with tf.gfile.Open(image_filepath, "r") as f: - encoded_image_data = f.read() - height, width = image_info[1], image_info[2] - for label in labels: - if vocab_filename is None: - label = [ord(c) for c in label] + eos_list - else: - label = vocab_symbolizer.encode(label) + eos_list - yield { - "image/encoded": [encoded_image_data], - "image/format": ["jpeg"], - "image/class/label": label, - "image/height": [height], - "image/width": [width] - } diff --git a/tensor2tensor/data_generators/image_lsun.py b/tensor2tensor/data_generators/image_lsun.py new file mode 100644 index 000000000..7ad16d897 --- /dev/null +++ b/tensor2tensor/data_generators/image_lsun.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""LSUN datasets (bedrooms only for now).""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import io +import os +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + + +_LSUN_URL = "http://lsun.cs.princeton.edu/htbin/download.cgi?tag=latest&category=%s&set=%s" +_LSUN_DATA_FILENAME = "lsun-%s-%s.zip" + + +def pil_image(): + import PIL # pylint: disable=g-import-not-at-top + return PIL.Image + + +def _get_lsun(directory, category, split_name): + """Downloads all lsun files to directory unless they are there.""" + generator_utils.maybe_download(directory, + _LSUN_DATA_FILENAME % (category, split_name), + _LSUN_URL % (category, split_name)) + + +@registry.register_problem +class ImageLsunBedrooms(image_utils.ImageProblem): + """LSUN Bedrooms.""" + + @property + def num_channels(self): + """Number of color channels.""" + return 3 + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + """Generates LSUN bedrooms dataset and writes it in data_dir.""" + generator_utils.generate_dataset_and_shuffle( + self.read_and_convert_to_png(tmp_dir, "train"), + self.training_filepaths(data_dir, 100, shuffled=False), + self.read_and_convert_to_png(tmp_dir, "val"), + self.dev_filepaths(data_dir, 1, shuffled=False)) + + def read_and_convert_to_png(self, tmp_dir, split_name): + """Downloads the datasets, extracts from zip and yields in PNG format.""" + category = "bedroom" + _get_lsun(tmp_dir, category, split_name) + filename = _LSUN_DATA_FILENAME % (category, split_name) + data_path = os.path.join(tmp_dir, filename) + print("Extracting zip file.") + zip_ref = zipfile.ZipFile(data_path, "r") + zip_ref.extractall(tmp_dir) + zip_ref.close() + + print("Opening database.") + data_file = os.path.join(tmp_dir, + "%s_%s_lmdb/data.mdb" % (category, split_name)) + + filename_queue = tf.train.string_input_producer([data_file], num_epochs=1) + reader = tf.LMDBReader() + _, webp_image_tensor = reader.read(filename_queue) + + object_count = 0 + with tf.train.MonitoredTrainingSession() as session: + while True: + webp_image = session.run(webp_image_tensor) + object_count += 1 + if object_count % 1000 == 0: + print("Extracted %d objects." % object_count) + # Unfortunately Tensorflow doesn't support reading or parsing + # WebP images, so we have to do it via Image PIL library. + image = pil_image().open(io.BytesIO(webp_image)) + buf = io.BytesIO() + width, height = image.size + image.save(buf, "PNG") + yield { + "image/encoded": [buf.getvalue()], + "image/format": ["png"], + "image/class/label": [0], + "image/height": [height], + "image/width": [width] + } diff --git a/tensor2tensor/data_generators/image_test.py b/tensor2tensor/data_generators/image_test.py deleted file mode 100644 index c5b4f14be..000000000 --- a/tensor2tensor/data_generators/image_test.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -"""Image generators test.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -import numpy as np -from tensor2tensor.data_generators import image - -import tensorflow as tf - - -class ImageTest(tf.test.TestCase): - - def testImageGenerator(self): - # 2 random images - np.random.seed(1111) # To avoid any flakiness. - image1 = np.random.randint(0, 255, size=(10, 12, 3)) - image2 = np.random.randint(0, 255, size=(10, 12, 3)) - # Call image generator on the 2 images with labels [1, 2]. - encoded_imgs, labels = [], [] - for dictionary in image.image_generator([image1, image2], [1, 2]): - self.assertEqual( - sorted(list(dictionary)), [ - "image/class/label", "image/encoded", "image/format", - "image/height", "image/width" - ]) - self.assertEqual(dictionary["image/format"], ["png"]) - self.assertEqual(dictionary["image/height"], [12]) - self.assertEqual(dictionary["image/width"], [10]) - encoded_imgs.append(dictionary["image/encoded"]) - labels.append(dictionary["image/class/label"]) - - # Check that the result labels match the inputs. - self.assertEqual(len(labels), 2) - self.assertEqual(labels[0], [1]) - self.assertEqual(labels[1], [2]) - - # Decode images and check that they match the inputs. - self.assertEqual(len(encoded_imgs), 2) - image_t = tf.placeholder(dtype=tf.string) - decoded_png_t = tf.image.decode_png(image_t) - with self.test_session() as sess: - encoded_img1 = encoded_imgs[0] - self.assertEqual(len(encoded_img1), 1) - decoded1 = sess.run(decoded_png_t, feed_dict={image_t: encoded_img1[0]}) - self.assertAllClose(decoded1, image1) - encoded_img2 = encoded_imgs[1] - self.assertEqual(len(encoded_img2), 1) - decoded2 = sess.run(decoded_png_t, feed_dict={image_t: encoded_img2[0]}) - self.assertAllClose(decoded2, image2) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensor2tensor/data_generators/image_utils.py b/tensor2tensor/data_generators/image_utils.py new file mode 100644 index 000000000..be402162d --- /dev/null +++ b/tensor2tensor/data_generators/image_utils.py @@ -0,0 +1,426 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Base classes and utilities for image datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import io +import os +import numpy as np +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import modalities +from tensor2tensor.utils import contrib +from tensor2tensor.utils import metrics + +import tensorflow.compat.v1 as tf + + +def matplotlib_pyplot(): + import matplotlib # pylint: disable=g-import-not-at-top + matplotlib.use("agg") + import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top + return plt + + +def image_to_tf_summary_value(image, tag): + """Converts a NumPy image to a tf.Summary.Value object. + + Args: + image: 3-D NumPy array. + tag: name for tf.Summary.Value for display in tensorboard. + Returns: + image_summary: A tf.Summary.Value object. + """ + curr_image = np.asarray(image, dtype=np.uint8) + height, width, n_channels = curr_image.shape + # If monochrome image, then reshape to [height, width] + if n_channels == 1: + curr_image = np.reshape(curr_image, [height, width]) + s = io.BytesIO() + matplotlib_pyplot().imsave(s, curr_image, format="png") + img_sum = tf.Summary.Image(encoded_image_string=s.getvalue(), + height=height, width=width, + colorspace=n_channels) + return tf.Summary.Value(tag=tag, image=img_sum) + + +def convert_predictions_to_image_summaries(hook_args): + """Optionally converts images from hooks_args to image summaries. + + Args: + hook_args: DecodeHookArgs namedtuple + Returns: + summaries: list of tf.Summary values if hook_args.decode_hpara + """ + decode_hparams = hook_args.decode_hparams + if not decode_hparams.display_decoded_images: + return [] + predictions = hook_args.predictions[0] + + # Display ten random inputs and outputs so that tensorboard does not hang. + all_summaries = [] + rand_predictions = np.random.choice(predictions, size=10) + for ind, prediction in enumerate(rand_predictions): + output_summary = image_to_tf_summary_value( + prediction["outputs"], tag="%d_output" % ind) + input_summary = image_to_tf_summary_value( + prediction["inputs"], tag="%d_input" % ind) + all_summaries.append(input_summary) + all_summaries.append(output_summary) + return all_summaries + + +def resize_by_area(img, size): + """image resize function used by quite a few image problems.""" + return tf.to_int64( + tf.image.resize_images(img, [size, size], tf.image.ResizeMethod.AREA)) + + +def make_multiscale(image, resolutions, + resize_method=tf.image.ResizeMethod.BICUBIC, + num_channels=3): + """Returns list of scaled images, one for each resolution. + + Args: + image: Tensor of shape [height, height, num_channels]. + resolutions: List of heights that image's height is resized to. + resize_method: tf.image.ResizeMethod. + num_channels: Number of channels in image. + + Returns: + List of Tensors, one for each resolution with shape given by + [resolutions[i], resolutions[i], num_channels]. 
+ """ + scaled_images = [] + for height in resolutions: + scaled_image = tf.image.resize_images( + image, + size=[height, height], # assuming that height = width + method=resize_method) + scaled_image = tf.to_int64(scaled_image) + scaled_image.set_shape([height, height, num_channels]) + scaled_images.append(scaled_image) + + return scaled_images + + +def make_multiscale_dilated(image, resolutions, num_channels=3): + """Returns list of scaled images, one for each resolution. + + Resizes by skipping every nth pixel. + + Args: + image: Tensor of shape [height, height, num_channels]. + resolutions: List of heights that image's height is resized to. The function + assumes VALID padding, so the original image's height must be divisible + by each resolution's height to return the exact resolution size. + num_channels: Number of channels in image. + + Returns: + List of Tensors, one for each resolution with shape given by + [resolutions[i], resolutions[i], num_channels] if resolutions properly + divide the original image's height; otherwise shape height and width is up + to valid skips. + """ + image_height = common_layers.shape_list(image)[0] + scaled_images = [] + for height in resolutions: + dilation_rate = image_height // height # assuming height = width + scaled_image = image[::dilation_rate, ::dilation_rate] + scaled_image = tf.to_int64(scaled_image) + scaled_image.set_shape([None, None, num_channels]) + scaled_images.append(scaled_image) + return scaled_images + + +class ImageProblem(problem.Problem): + """Base class for problems with images.""" + + @property + def num_channels(self): + """Number of color channels.""" + return 3 + + @property + def vocab_size(self): + """Number of pixel values.""" + return 256 + + def example_reading_spec(self): + data_fields = { + "image/encoded": tf.FixedLenFeature((), tf.string), + "image/format": tf.FixedLenFeature((), tf.string), + } + + data_items_to_decoders = { + "inputs": + contrib.slim().tfexample_decoder.Image( + image_key="image/encoded", + format_key="image/format", + channels=self.num_channels), + } + + return data_fields, data_items_to_decoders + + def preprocess_example(self, example, mode, hparams): + if not self._was_reversed: + example["inputs"] = tf.image.per_image_standardization(example["inputs"]) + return example + + def eval_metrics(self): + eval_metrics = [ + metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5, + metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY + ] + if self._was_reversed: + eval_metrics += [metrics.Metrics.IMAGE_SUMMARY] + return eval_metrics + + @property + def decode_hooks(self): + return [convert_predictions_to_image_summaries] + + +class Image2ClassProblem(ImageProblem): + """Base class for image classification problems.""" + + @property + def is_small(self): + raise NotImplementedError() + + @property + def num_classes(self): + raise NotImplementedError() + + @property + def train_shards(self): + raise NotImplementedError() + + @property + def dev_shards(self): + return 1 + + @property + def class_labels(self): + return ["ID_%d" % i for i in range(self.num_classes)] + + def feature_encoders(self, data_dir): + del data_dir + return { + "inputs": text_encoder.ImageEncoder(channels=self.num_channels), + "targets": text_encoder.ClassLabelEncoder(self.class_labels) + } + + def generator(self, data_dir, tmp_dir, is_training): + raise NotImplementedError() + + def example_reading_spec(self): + label_key = "image/class/label" + data_fields, data_items_to_decoders = ( + super(Image2ClassProblem, 
self).example_reading_spec()) + data_fields[label_key] = tf.FixedLenFeature((1,), tf.int64) + + data_items_to_decoders["targets"] = contrib.slim().tfexample_decoder.Tensor( + label_key) + return data_fields, data_items_to_decoders + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.IMAGE, + "targets": modalities.ModalityType.CLASS_LABEL} + p.vocab_size = {"inputs": 256, + "targets": self.num_classes} + p.batch_size_multiplier = 4 if self.is_small else 256 + p.loss_multiplier = 3.0 if self.is_small else 1.0 + if self._was_reversed: + p.loss_multiplier = 1.0 + p.input_space_id = problem.SpaceID.IMAGE + p.target_space_id = problem.SpaceID.IMAGE_LABEL + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + generator_utils.generate_dataset_and_shuffle( + self.generator(data_dir, tmp_dir, True), + self.training_filepaths(data_dir, self.train_shards, shuffled=False), + self.generator(data_dir, tmp_dir, False), + self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)) + + +def encode_images_as_png(images): + """Yield images encoded as pngs.""" + if tf.executing_eagerly(): + for image in images: + yield tf.image.encode_png(image).numpy() + else: + (height, width, channels) = images[0].shape + with tf.Graph().as_default(): + image_t = tf.placeholder(dtype=tf.uint8, shape=(height, width, channels)) + encoded_image_t = tf.image.encode_png(image_t) + with tf.Session() as sess: + for image in images: + enc_string = sess.run(encoded_image_t, feed_dict={image_t: image}) + yield enc_string + + +def image_generator(images, labels): + """Generator for images that takes image and labels lists and creates pngs. + + Args: + images: list of images given as [width x height x channels] numpy arrays. + labels: list of ints, same length as images. + + Yields: + A dictionary representing the images with the following fields: + * image/encoded: the string encoding the image as PNG, + * image/format: the string "png" representing image format, + * image/class/label: an integer representing the label, + * image/height: an integer representing the height, + * image/width: an integer representing the width. + Every field is actually a singleton list of the corresponding type. + + Raises: + ValueError: if images is an empty list. + """ + if not images: + raise ValueError("Must provide some images for the generator.") + width, height, _ = images[0].shape + for (enc_image, label) in zip(encode_images_as_png(images), labels): + yield { + "image/encoded": [enc_image], + "image/format": ["png"], + "image/class/label": [int(label)], + "image/height": [height], + "image/width": [width] + } + + +class Image2TextProblem(ImageProblem): + """Base class for image-to-text problems.""" + + @property + def is_character_level(self): + raise NotImplementedError() + + @property + def vocab_problem(self): + raise NotImplementedError() # Not needed if self.is_character_level. 
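A minimal sketch of driving the `image_generator` helper defined above, assuming two synthetic 8x8 images; the image sizes and labels here are placeholders rather than data from any real problem:

```python
import numpy as np

from tensor2tensor.data_generators import image_utils

# Two tiny random images with arbitrary integer labels, for illustration only.
images = [np.random.randint(0, 256, size=(8, 8, 3), dtype=np.uint8)
          for _ in range(2)]
labels = [7, 3]

for example in image_utils.image_generator(images, labels):
  # Every field is a singleton list; the image bytes are PNG-encoded.
  assert example["image/format"] == ["png"]
  assert len(example["image/encoded"]) == 1
  assert example["image/class/label"] in ([7], [3])
```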
+ + @property + def target_space_id(self): + raise NotImplementedError() + + @property + def train_shards(self): + raise NotImplementedError() + + @property + def dev_shards(self): + raise NotImplementedError() + + def generator(self, data_dir, tmp_dir, is_training): + raise NotImplementedError() + + def example_reading_spec(self): + label_key = "image/class/label" + data_fields, data_items_to_decoders = ( + super(Image2TextProblem, self).example_reading_spec()) + data_fields[label_key] = tf.VarLenFeature(tf.int64) + data_items_to_decoders["targets"] = contrib.slim().tfexample_decoder.Tensor( + label_key) + return data_fields, data_items_to_decoders + + def feature_encoders(self, data_dir): + if self.is_character_level: + encoder = text_encoder.ByteTextEncoder() + else: + vocab_filename = os.path.join( + data_dir, self.vocab_problem.vocab_filename) + encoder = text_encoder.SubwordTextEncoder(vocab_filename) + input_encoder = text_encoder.ImageEncoder(channels=self.num_channels) + return {"inputs": input_encoder, "targets": encoder} + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.IMAGE, + "targets": modalities.ModalityType.SYMBOL} + p.vocab_size = {"inputs": 256, + "targets": self._encoders["targets"].vocab_size} + p.batch_size_multiplier = 256 + p.loss_multiplier = 1.0 + p.input_space_id = problem.SpaceID.IMAGE + p.target_space_id = self.target_space_id + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + generator_utils.generate_dataset_and_shuffle( + self.generator(data_dir, tmp_dir, True), + self.training_filepaths(data_dir, self.train_shards, shuffled=False), + self.generator(data_dir, tmp_dir, False), + self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)) + + +def image_augmentation(images, do_colors=False, crop_size=None): + """Image augmentation: cropping, flipping, and color transforms.""" + if crop_size is None: + crop_size = [299, 299] + images = tf.random_crop(images, crop_size + [3]) + images = tf.image.random_flip_left_right(images) + if do_colors: # More augmentation, but might be slow. + images = tf.image.random_brightness(images, max_delta=32. / 255.) + images = tf.image.random_saturation(images, lower=0.5, upper=1.5) + images = tf.image.random_hue(images, max_delta=0.2) + images = tf.image.random_contrast(images, lower=0.5, upper=1.5) + return images + + +def cifar_image_augmentation(images): + """Image augmentation suitable for CIFAR-10/100. + + As described in https://arxiv.org/pdf/1608.06993v3.pdf (page 5). + + Args: + images: a Tensor. + Returns: + Tensor of the same shape as images. + """ + images = tf.image.resize_image_with_crop_or_pad(images, 40, 40) + images = tf.random_crop(images, [32, 32, 3]) + images = tf.image.random_flip_left_right(images) + return images + + +def random_shift(image, wsr=0.1, hsr=0.1): + """Apply random horizontal and vertical shift to images. + + This is the default data-augmentation strategy used on CIFAR in Glow. + + Args: + image: a 3-D Tensor + wsr: Width shift range, as a float fraction of the width. + hsr: Height shift range, as a float fraction of the width. + Returns: + images: images translated by the provided wsr and hsr. 
+ """ + height, width, _ = common_layers.shape_list(image) + width_range, height_range = wsr*width, hsr*height + height_translations = tf.random_uniform((1,), -height_range, height_range) + width_translations = tf.random_uniform((1,), -width_range, width_range) + translations = tf.concat((height_translations, width_translations), axis=0) + return contrib.image().translate(image, translations=translations) diff --git a/tensor2tensor/data_generators/image_utils_test.py b/tensor2tensor/data_generators/image_utils_test.py new file mode 100644 index 000000000..d275f2844 --- /dev/null +++ b/tensor2tensor/data_generators/image_utils_test.py @@ -0,0 +1,151 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""image_utils test.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from tensor2tensor.data_generators import image_utils +from tensor2tensor.utils import decoding + +import tensorflow.compat.v1 as tf + + +class ImageTest(tf.test.TestCase): + + def testImageAugmentation(self): + x = np.random.rand(500, 500, 3) + with self.test_session() as session: + y = image_utils.image_augmentation(tf.constant(x)) + res = session.run(y) + self.assertEqual(res.shape, (299, 299, 3)) + + def testImageGenerator(self): + # 2 random images + np.random.seed(1111) # To avoid any flakiness. + image1 = np.random.randint(0, 255, size=(10, 12, 3)) + image2 = np.random.randint(0, 255, size=(10, 12, 3)) + # Call image generator on the 2 images with labels [1, 2]. + encoded_imgs, labels = [], [] + for dictionary in image_utils.image_generator([image1, image2], [1, 2]): + self.assertEqual( + sorted(list(dictionary)), [ + "image/class/label", "image/encoded", "image/format", + "image/height", "image/width" + ]) + self.assertEqual(dictionary["image/format"], ["png"]) + self.assertEqual(dictionary["image/height"], [12]) + self.assertEqual(dictionary["image/width"], [10]) + encoded_imgs.append(dictionary["image/encoded"]) + labels.append(dictionary["image/class/label"]) + + # Check that the result labels match the inputs. + self.assertEqual(len(labels), 2) + self.assertEqual(labels[0], [1]) + self.assertEqual(labels[1], [2]) + + # Decode images and check that they match the inputs. 
+ self.assertEqual(len(encoded_imgs), 2) + image_t = tf.placeholder(dtype=tf.string) + decoded_png_t = tf.image.decode_png(image_t) + with self.test_session() as sess: + encoded_img1 = encoded_imgs[0] + self.assertEqual(len(encoded_img1), 1) + decoded1 = sess.run(decoded_png_t, feed_dict={image_t: encoded_img1[0]}) + self.assertAllClose(decoded1, image1) + encoded_img2 = encoded_imgs[1] + self.assertEqual(len(encoded_img2), 1) + decoded2 = sess.run(decoded_png_t, feed_dict={image_t: encoded_img2[0]}) + self.assertAllClose(decoded2, image2) + + def testMakeMultiscaleDivisible(self): + image = tf.random_normal([256, 256, 3]) + resolutions = [8, 16, 64, 256] + scaled_images = image_utils.make_multiscale(image, resolutions) + self.assertEqual(scaled_images[0].shape, (8, 8, 3)) + self.assertEqual(scaled_images[1].shape, (16, 16, 3)) + self.assertEqual(scaled_images[2].shape, (64, 64, 3)) + self.assertEqual(scaled_images[3].shape, (256, 256, 3)) + + def testMakeMultiscaleIndivisible(self): + image = tf.random_normal([256, 256, 3]) + resolutions = [255] + scaled_images = image_utils.make_multiscale(image, resolutions) + self.assertEqual(scaled_images[0].shape, (255, 255, 3)) + + def testMakeMultiscaleLarger(self): + image = tf.random_normal([256, 256, 3]) + resolutions = [257] + scaled_images = image_utils.make_multiscale(image, resolutions) + self.assertEqual(scaled_images[0].shape, (257, 257, 3)) + + def testMakeMultiscaleDilatedDivisible(self): + image = tf.random_normal([256, 256, 3]) + resolutions = [8, 16, 64, 256] + scaled_images = image_utils.make_multiscale_dilated(image, resolutions) + self.assertEqual(scaled_images[0].shape, (8, 8, 3)) + self.assertEqual(scaled_images[1].shape, (16, 16, 3)) + self.assertEqual(scaled_images[2].shape, (64, 64, 3)) + self.assertEqual(scaled_images[3].shape, (256, 256, 3)) + + def testMakeMultiscaleDilatedIndivisible(self): + image = tf.random_normal([256, 256, 3]) + resolutions = [255] + scaled_images = image_utils.make_multiscale_dilated(image, resolutions) + self.assertEqual(scaled_images[0].shape, (256, 256, 3)) + + def testMakeMultiscaleDilatedLarger(self): + image = tf.random_normal([256, 256, 3]) + resolutions = [257] + with self.assertRaisesRegexp(ValueError, "strides.* must be non-zero"): + _ = image_utils.make_multiscale_dilated(image, resolutions) + + def testRandomShift(self): + image = tf.random_normal([256, 256, 3]) + image_shift = image_utils.random_shift(image, wsr=0.1, hsr=0.1) + self.assertEqual(image_shift.shape, [256, 256, 3]) + + def testImageToSummaryValue(self): + rng = np.random.RandomState(0) + x = rng.randint(0, 255, (32, 32, 3)) + x_summary = image_utils.image_to_tf_summary_value(x, "X_image") + self.assertEqual(x_summary.tag, "X_image") + + def testConvertPredictionsToImageSummaries(self): + # Initialize predictions. + rng = np.random.RandomState(0) + x = rng.randint(0, 255, (32, 32, 3)) + predictions = [[{"outputs": x, "inputs": x}] * 50] + + decode_hparams = decoding.decode_hparams() + # should return 20 summaries of images, 10 outputs and 10 inputs if + # display_decoded_images is set to True. 
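+    # convert_predictions_to_image_summaries returns an empty list when
+    # display_decoded_images is False, hence the expected lengths 20 and 0.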
+ for display, summaries_length in zip([True, False], [20, 0]): + decode_hparams.display_decoded_images = display + decode_hooks = decoding.DecodeHookArgs( + estimator=None, problem=None, output_dirs=None, + hparams=decode_hparams, decode_hparams=decode_hparams, + predictions=predictions) + summaries = image_utils.convert_predictions_to_image_summaries( + decode_hooks) + self.assertEqual(len(summaries), summaries_length) + if summaries: + self.assertIsInstance(summaries[0], tf.Summary.Value) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/imagenet.py b/tensor2tensor/data_generators/imagenet.py new file mode 100644 index 000000000..175016b86 --- /dev/null +++ b/tensor2tensor/data_generators/imagenet.py @@ -0,0 +1,627 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""ImageNet.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.layers import modalities +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +# URLs and filenames for IMAGENET 32x32 data from +# https://arxiv.org/abs/1601.06759. +_IMAGENET_SMALL_ROOT_URL = "http://image-net.org/small/" +_IMAGENET_SMALL_URLS = [ + "train_32x32.tar", "valid_32x32.tar"] +_IMAGENET_SMALL_TRAIN_PREFIX = "train_32x32" +_IMAGENET_SMALL_EVAL_PREFIX = "valid_32x32" +_IMAGENET_SMALL_IMAGE_SIZE = 32 + + +# URLs and filenames for IMAGENET 64x64 data. +_IMAGENET_MEDIUM_ROOT_URL = "http://image-net.org/small/" +_IMAGENET_MEDIUM_URLS = [ + "train_64x64.tar", "valid_64x64.tar"] +_IMAGENET_MEDIUM_TRAIN_PREFIX = "train_64x64" +_IMAGENET_MEDIUM_EVAL_PREFIX = "valid_64x64" +_IMAGENET_MEDIUM_IMAGE_SIZE = 64 + + +# Derived from ImageNet data +MEAN_RGB = [0.485, 0.456, 0.406] +STDDEV_RGB = [0.229, 0.224, 0.225] + + +def imagenet_pixelrnn_generator(tmp_dir, + training, + size=_IMAGENET_SMALL_IMAGE_SIZE): + """Image generator for Imagenet 64x64 downsampled images. + + It assumes that the data has been downloaded from + http://image-net.org/small/*_32x32.tar or + http://image-net.org/small/*_64x64.tar into tmp_dir. + Args: + tmp_dir: path to temporary storage directory. + training: a Boolean; if true, we use the train set, otherwise the test set. + size: image size (assumes height and width are same) + + Yields: + A dictionary representing the images with the following fields: + * image/encoded: the string encoding the image as JPEG, + * image/format: the string "jpeg" representing image format, + * image/height: an integer representing the height, + * image/width: an integer representing the width. + Every field is actually a list of the corresponding type. 
+ """ + if size == _IMAGENET_SMALL_IMAGE_SIZE: + train_prefix = _IMAGENET_SMALL_TRAIN_PREFIX + eval_prefix = _IMAGENET_SMALL_EVAL_PREFIX + else: + train_prefix = _IMAGENET_MEDIUM_TRAIN_PREFIX + eval_prefix = _IMAGENET_MEDIUM_EVAL_PREFIX + prefix = train_prefix if training else eval_prefix + images_filepath = os.path.join(tmp_dir, prefix) + image_files = tf.gfile.Glob(images_filepath + "/*") + height = size + width = size + const_label = 0 + for filename in image_files: + with tf.gfile.Open(filename, "r") as f: + encoded_image = f.read() + yield { + "image/encoded": [encoded_image], + "image/format": ["png"], + "image/class/label": [const_label], + "image/height": [height], + "image/width": [width] + } + + +def imagenet_preprocess_example(example, mode, resize_size=None, + normalize=True): + """Preprocessing used for Imagenet and similar problems.""" + resize_size = resize_size or [299, 299] + assert resize_size[0] == resize_size[1] + + image = example["inputs"] + if mode == tf_estimator.ModeKeys.TRAIN: + image = preprocess_for_train(image, image_size=resize_size[0], + normalize=normalize) + else: + image = preprocess_for_eval(image, image_size=resize_size[0], + normalize=normalize) + + example["inputs"] = image + return example + + +@registry.register_problem +class ImageImagenet(image_utils.Image2ClassProblem): + """Imagenet.""" + + @property + def is_small(self): + return False + + @property + def num_classes(self): + return 1000 + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + # TODO(lukaszkaiser): find a better way than printing this. + print("To generate the ImageNet dataset in the proper format, follow " + "instructions at https://github.com/tensorflow/models/tree/master" + "/research/inception/README.md#getting-started") + + def preprocess_example(self, example, mode, _): + return imagenet_preprocess_example(example, mode) + + +class ImageImagenetRescaled(ImageImagenet): + """Imagenet rescaled to rescale_size.""" + + @property + def rescale_size(self): + # return [224, 224] + raise NotImplementedError() + + @property + def normalize_image(self): + """Whether the image should be normalized in preprocessing.""" + return True + + def dataset_filename(self): + return "image_imagenet" # Reuse Imagenet data. + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + tf.logging.warning( + "Generate data for rescaled ImageNet problems with image_imagenet") + + def preprocess_example(self, example, mode, _): + return imagenet_preprocess_example( + example, mode, resize_size=self.rescale_size, + normalize=self.normalize_image) + + +@registry.register_problem +class ImageImagenet224(ImageImagenetRescaled): + """Imagenet rescaled to 224x224.""" + + @property + def rescale_size(self): + return [224, 224] + + +@registry.register_problem +class ImageImagenet224NoNormalization(ImageImagenet224): + """Imagenet rescaled to 224x224 without normalization.""" + + @property + def normalize_image(self): + """Whether the image should be normalized in preprocessing.""" + return False + + +@registry.register_problem +class ImageImagenet256(ImageImagenetRescaled): + """Imagenet rescaled to 256x256.""" + + @property + def rescale_size(self): + return [256, 256] + + +@registry.register_problem +class ImageImagenet32(ImageImagenetRescaled): + """Imagenet rescaled to 32x32.""" + + @property + def rescale_size(self): + return [32, 32] + + @property + def is_small(self): + return True # Modalities like for CIFAR. + + def preprocess_example(self, example, mode, _): + # Just resize with area. 
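+    # For the reversed (image generation) direction, keep the raw pixels and
+    # only downsample with AREA resizing; otherwise run the standard ImageNet
+    # preprocessing first and then resize to rescale_size.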
+ if self._was_reversed: + example["inputs"] = tf.to_int64( + tf.image.resize_images(example["inputs"], self.rescale_size, + tf.image.ResizeMethod.AREA)) + else: + example = imagenet_preprocess_example(example, mode) + example["inputs"] = tf.to_int64( + tf.image.resize_images(example["inputs"], self.rescale_size)) + return example + + +@registry.register_problem +class ImageImagenet32Gen(ImageImagenet): + """Imagenet 32 from the pixen cnn paper.""" + + @property + def train_shards(self): + return 1024 + + @property + def dev_shards(self): + return 10 + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + generator_utils.generate_dataset_and_shuffle( + self.generator(data_dir, tmp_dir, True), + self.training_filepaths(data_dir, self.train_shards, shuffled=True), + self.generator(data_dir, tmp_dir, False), + self.dev_filepaths(data_dir, self.dev_shards, shuffled=True)) + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return imagenet_pixelrnn_generator( + tmp_dir, int(True), size=_IMAGENET_SMALL_IMAGE_SIZE) + else: + return imagenet_pixelrnn_generator( + tmp_dir, int(is_training), size=_IMAGENET_SMALL_IMAGE_SIZE) + + def preprocess_example(self, example, mode, unused_hparams): + example["inputs"].set_shape([_IMAGENET_SMALL_IMAGE_SIZE, + _IMAGENET_SMALL_IMAGE_SIZE, 3]) + example["inputs"] = tf.to_int64(example["inputs"]) + return example + + +@registry.register_problem +class ImageImagenet64Gen(ImageImagenet): + """Imagenet 64 from the pixen cnn paper.""" + + @property + def train_shards(self): + return 1024 + + @property + def dev_shards(self): + return 10 + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + generator_utils.generate_dataset_and_shuffle( + self.generator(data_dir, tmp_dir, True), + self.training_filepaths(data_dir, self.train_shards, shuffled=True), + self.generator(data_dir, tmp_dir, False), + self.dev_filepaths(data_dir, self.dev_shards, shuffled=True)) + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return imagenet_pixelrnn_generator( + tmp_dir, int(True), size=_IMAGENET_MEDIUM_IMAGE_SIZE) + else: + return imagenet_pixelrnn_generator( + tmp_dir, int(False), size=_IMAGENET_MEDIUM_IMAGE_SIZE) + + def preprocess_example(self, example, mode, unused_hparams): + example["inputs"].set_shape([_IMAGENET_MEDIUM_IMAGE_SIZE, + _IMAGENET_MEDIUM_IMAGE_SIZE, 3]) + example["inputs"] = tf.to_int64(example["inputs"]) + return example + + +@registry.register_problem +class ImageImagenetMultiResolutionGen(ImageImagenet64Gen): + """ImageNet at multiple resolutions. + + The resolutions are specified as a hyperparameter during preprocessing. + """ + + def dataset_filename(self): + return "image_imagenet64_gen" + + @property + def train_shards(self): + return 1024 + + @property + def dev_shards(self): + return 10 + + def preprocess_example(self, example, mode, hparams): + image = example["inputs"] + # Get resize method. Include a default if not specified, or if it's not in + # TensorFlow's collection of pre-implemented resize methods. + resize_method = getattr(hparams, "resize_method", "BICUBIC") + resize_method = getattr(tf.image.ResizeMethod, resize_method, resize_method) + + if resize_method == "DILATED": + scaled_images = image_utils.make_multiscale_dilated( + image, hparams.resolutions, num_channels=self.num_channels) + else: + scaled_images = image_utils.make_multiscale( + image, hparams.resolutions, + resize_method=resize_method, num_channels=self.num_channels) + + # Pack tuple of scaled images into one tensor. 
We do this by enforcing the + # columns to match for every resolution. + # TODO(avaswani, trandustin): We should create tuples because this will not + # work if height*width of low res < width of high res + highest_res = hparams.resolutions[-1] + example["inputs"] = tf.concat([ + tf.reshape(scaled_image, + [res**2 // highest_res, highest_res, self.num_channels]) + for scaled_image, res in zip(scaled_images, hparams.resolutions)], + axis=0) + return example + + +@registry.register_problem +class ImageImagenet64GenFlat(ImageImagenet64Gen): + """Imagenet 64 from the pixen cnn paper, as a flat array.""" + + def dataset_filename(self): + return "image_imagenet64_gen" # Reuse data. + + def preprocess_example(self, example, mode, unused_hparams): + example["inputs"].set_shape( + [_IMAGENET_MEDIUM_IMAGE_SIZE, _IMAGENET_MEDIUM_IMAGE_SIZE, 3]) + example["inputs"] = tf.to_int64(example["inputs"]) + example["inputs"] = tf.reshape(example["inputs"], (-1,)) + + del example["targets"] # Ensure unconditional generation + + return example + + def hparams(self, defaults, model_hparams): + super(ImageImagenet64GenFlat, self).hparams(defaults, model_hparams) + # Switch to symbol modality + p = defaults + p.modality["inputs"] = modalities.ModalityType.SYMBOL_WEIGHTS_ALL + p.input_space_id = problem.SpaceID.GENERIC + + +@registry.register_problem +class ImageImagenet32Small(ImageImagenet): + """Imagenet small from the pixel cnn paper.""" + + @property + def is_small(self): + return False # Modalities like for CIFAR. + + @property + def num_classes(self): + return 1000 + + @property + def train_shards(self): + return 1024 + + @property + def dev_shards(self): + return 10 + + def preprocess_example(self, example, mode, unused_hparams): + example["inputs"].set_shape([_IMAGENET_SMALL_IMAGE_SIZE, + _IMAGENET_SMALL_IMAGE_SIZE, 3]) + example["inputs"] = tf.to_int64(example["inputs"]) + return example + + +@registry.register_problem +class ImageImagenet64(ImageImagenet32): + """Imagenet rescaled to 64x64.""" + + @property + def rescale_size(self): + return [64, 64] + + +@registry.register_problem +class Img2imgImagenet(image_utils.ImageProblem): + """Imagenet rescaled to 8x8 for input and 32x32 for output.""" + + def dataset_filename(self): + return "image_imagenet" # Reuse Imagenet data. + + def preprocess_example(self, example, unused_mode, unused_hparams): + + inputs = example["inputs"] + # For Img2Img resize input and output images as desired. + example["inputs"] = image_utils.resize_by_area(inputs, 8) + example["targets"] = image_utils.resize_by_area(inputs, 32) + return example + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + tf.logging.warning("Generate data for img2img_imagenet with image_imagenet") + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.IDENTITY, + "targets": modalities.ModalityType.IDENTITY} + p.vocab_size = {"inputs": 256, + "targets": 256} + p.batch_size_multiplier = 256 + p.input_space_id = 1 + p.target_space_id = 1 + + +# The following preprocessing functions were taken from +# cloud_tpu/models/resnet/resnet_preprocessing.py +# ============================================================================== +def _crop(image, offset_height, offset_width, crop_height, crop_width): + """Crops the given image using the provided offsets and sizes. + + Note that the method doesn't assume we know the input image size but it does + assume we know the input image rank. 
+ + Args: + image: `Tensor` image of shape [height, width, channels]. + offset_height: `Tensor` indicating the height offset. + offset_width: `Tensor` indicating the width offset. + crop_height: the height of the cropped image. + crop_width: the width of the cropped image. + + Returns: + the cropped (and resized) image. + + Raises: + InvalidArgumentError: if the rank is not 3 or if the image dimensions are + less than the crop size. + """ + original_shape = tf.shape(image) + + rank_assertion = tf.Assert( + tf.equal(tf.rank(image), 3), ["Rank of image must be equal to 3."]) + with tf.control_dependencies([rank_assertion]): + cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]]) + + size_assertion = tf.Assert( + tf.logical_and( + tf.greater_equal(original_shape[0], crop_height), + tf.greater_equal(original_shape[1], crop_width)), + ["Crop size greater than the image size."]) + + offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0])) + + # Use tf.slice instead of crop_to_bounding box as it accepts tensors to + # define the crop size. + with tf.control_dependencies([size_assertion]): + image = tf.slice(image, offsets, cropped_shape) + return tf.reshape(image, cropped_shape) + + +def distorted_bounding_box_crop(image, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.05, 1.0), + max_attempts=100, + scope=None): + """Generates cropped_image using a one of the bboxes randomly distorted. + + See `tf.image.sample_distorted_bounding_box` for more documentation. + + Args: + image: `Tensor` of image (it will be converted to floats in [0, 1]). + bbox: `Tensor` of bounding boxes arranged `[1, num_boxes, coords]` + where each coordinate is [0, 1) and the coordinates are arranged + as `[ymin, xmin, ymax, xmax]`. If num_boxes is 0 then use the whole + image. + min_object_covered: An optional `float`. Defaults to `0.1`. The cropped + area of the image must contain at least this fraction of any bounding + box supplied. + aspect_ratio_range: An optional list of `float`s. The cropped area of the + image must have an aspect ratio = width / height within this range. + area_range: An optional list of `float`s. The cropped area of the image + must contain a fraction of the supplied image within in this range. + max_attempts: An optional `int`. Number of attempts at generating a cropped + region of the image of the specified constraints. After `max_attempts` + failures, return the entire image. + scope: Optional `str` for name scope. + Returns: + (cropped image `Tensor`, distorted bbox `Tensor`). + """ + with tf.name_scope(scope, default_name="distorted_bounding_box_crop", + values=[image, bbox]): + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. + + # A large fraction of image datasets contain a human-annotated bounding + # box delineating the region of the image containing the object of interest. + # We choose to create a new bounding box for the object which is a randomly + # distorted version of the human-annotated bounding box that obeys an + # allowed range of aspect ratios, sizes and overlap with the human-annotated + # bounding box. If no box is supplied, then we assume the bounding box is + # the entire image. 
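+    # The sampled box is returned as begin/size tensors that feed tf.slice
+    # below; use_image_if_no_bounding_boxes=True falls back to the whole
+    # image when bbox is empty.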
+ sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + max_attempts=max_attempts, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box + + # Crop the image to the specified bounding box. + cropped_image = tf.slice(image, bbox_begin, bbox_size) + return cropped_image, distort_bbox + + +def _random_crop(image, size): + """Make a random crop of (`size` x `size`).""" + bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) + random_image, bbox = distorted_bounding_box_crop( + image, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(3. / 4, 4. / 3.), + area_range=(0.08, 1.0), + max_attempts=1, + scope=None) + bad = _at_least_x_are_true(tf.shape(image), tf.shape(random_image), 3) + + image = tf.cond( + bad, lambda: _center_crop(_do_scale(image, size), size), + lambda: tf.image.resize_bicubic([random_image], [size, size])[0]) + return image + + +def _flip(image): + """Random horizontal image flip.""" + image = tf.image.random_flip_left_right(image) + return image + + +def _at_least_x_are_true(a, b, x): + """At least `x` of `a` and `b` `Tensors` are true.""" + match = tf.equal(a, b) + match = tf.cast(match, tf.int32) + return tf.greater_equal(tf.reduce_sum(match), x) + + +def _do_scale(image, size): + """Rescale the image by scaling the smaller spatial dimension to `size`.""" + shape = tf.cast(tf.shape(image), tf.float32) + w_greater = tf.greater(shape[0], shape[1]) + shape = tf.cond(w_greater, + lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32), + lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32)) + + return tf.image.resize_bicubic([image], shape)[0] + + +def _center_crop(image, size): + """Crops to center of image with specified `size`.""" + image_height = tf.shape(image)[0] + image_width = tf.shape(image)[1] + + offset_height = ((image_height - size) + 1) / 2 + offset_width = ((image_width - size) + 1) / 2 + image = _crop(image, offset_height, offset_width, size, size) + return image + + +def _normalize(image): + """Normalize the image to zero mean and unit variance.""" + offset = tf.constant(MEAN_RGB, shape=[1, 1, 3]) + image -= offset + + scale = tf.constant(STDDEV_RGB, shape=[1, 1, 3]) + image /= scale + return image + + +def preprocess_for_train(image, image_size=224, normalize=True): + """Preprocesses the given image for evaluation. + + Args: + image: `Tensor` representing an image of arbitrary size. + image_size: int, how large the output image should be. + normalize: bool, if True the image is normalized. + + Returns: + A preprocessed image `Tensor`. + """ + if normalize: image = tf.to_float(image) / 255.0 + image = _random_crop(image, image_size) + if normalize: image = _normalize(image) + image = _flip(image) + image = tf.reshape(image, [image_size, image_size, 3]) + return image + + +def preprocess_for_eval(image, image_size=224, normalize=True): + """Preprocesses the given image for evaluation. + + Args: + image: `Tensor` representing an image of arbitrary size. + image_size: int, how large the output image should be. + normalize: bool, if True the image is normalized. + + Returns: + A preprocessed image `Tensor`. 
+ """ + if normalize: image = tf.to_float(image) / 255.0 + image = _do_scale(image, image_size + 32) + if normalize: image = _normalize(image) + image = _center_crop(image, image_size) + image = tf.reshape(image, [image_size, image_size, 3]) + return image diff --git a/tensor2tensor/data_generators/imagenet_test.py b/tensor2tensor/data_generators/imagenet_test.py new file mode 100644 index 000000000..688153590 --- /dev/null +++ b/tensor2tensor/data_generators/imagenet_test.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for ImageNet.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +from tensor2tensor.data_generators import imagenet +from tensor2tensor.utils import hparam + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class ImagenetTest(parameterized.TestCase, tf.test.TestCase): + + @parameterized.named_parameters( + ("Default", None), + ("Area", "AREA"), + ("Dilated", "DILATED")) + def testImagenetMultiResolutionPreprocessExample(self, resize_method): + example = {"inputs": tf.random_uniform([64, 64, 3], minval=-1.)} + mode = tf_estimator.ModeKeys.TRAIN + hparams = hparam.HParams(resolutions=[8, 16, 32]) + if resize_method is not None: + hparams.resize_method = resize_method + + problem = imagenet.ImageImagenetMultiResolutionGen() + preprocessed_example = problem.preprocess_example(example, mode, hparams) + self.assertLen(preprocessed_example, 1) + self.assertEqual(preprocessed_example["inputs"].shape, (42, 32, 3)) + + def testImagenetIsNormalized(self): + problem = imagenet.ImageImagenet224() + self.assertTrue(problem.normalize_image) + problem = imagenet.ImageImagenet224NoNormalization() + self.assertFalse(problem.normalize_image) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/imdb.py b/tensor2tensor/data_generators/imdb.py new file mode 100644 index 000000000..b7c62b4e3 --- /dev/null +++ b/tensor2tensor/data_generators/imdb.py @@ -0,0 +1,106 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
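The `(42, 32, 3)` shape asserted in `testImagenetMultiResolutionPreprocessExample` above follows from how `ImageImagenetMultiResolutionGen.preprocess_example` packs the scaled images: each image of side `r` is reshaped to `[r**2 // highest_res, highest_res, num_channels]` and the pieces are concatenated along the first axis. A quick check of that arithmetic for the test's resolutions:

```python
# Reproduces the (42, 32, 3) shape asserted in the multi-resolution test.
resolutions = [8, 16, 32]
highest_res = resolutions[-1]
num_channels = 3

rows = sum(r * r // highest_res for r in resolutions)  # 2 + 8 + 32
print((rows, highest_res, num_channels))  # (42, 32, 3)
```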
+ +"""IMDB Sentiment Classification Problem.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tarfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_problem +class SentimentIMDB(text_problems.Text2ClassProblem): + """IMDB sentiment classification.""" + URL = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz" + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2**13 # 8k vocab suffices for this small dataset. + + @property + def num_classes(self): + return 2 + + def class_labels(self, data_dir): + del data_dir + return ["neg", "pos"] + + def doc_generator(self, imdb_dir, dataset, include_label=False): + dirs = [(os.path.join(imdb_dir, dataset, "pos"), True), (os.path.join( + imdb_dir, dataset, "neg"), False)] + + for d, label in dirs: + for filename in os.listdir(d): + with tf.gfile.Open(os.path.join(d, filename)) as imdb_f: + doc = imdb_f.read().strip() + if include_label: + yield doc, label + else: + yield doc + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generate examples.""" + # Download and extract + compressed_filename = os.path.basename(self.URL) + download_path = generator_utils.maybe_download(tmp_dir, compressed_filename, + self.URL) + imdb_dir = os.path.join(tmp_dir, "aclImdb") + if not tf.gfile.Exists(imdb_dir): + with tarfile.open(download_path, "r:gz") as tar: + tar.extractall(tmp_dir) + + # Generate examples + train = dataset_split == problem.DatasetSplit.TRAIN + dataset = "train" if train else "test" + for doc, label in self.doc_generator(imdb_dir, dataset, include_label=True): + yield { + "inputs": doc, + "label": int(label), + } + + +@registry.register_problem +class SentimentIMDBCharacters(SentimentIMDB): + """IMDB sentiment classification, character level.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.EN_CHR_SENT diff --git a/tensor2tensor/data_generators/inspect_tfrecord.py b/tensor2tensor/data_generators/inspect_tfrecord.py new file mode 100644 index 000000000..592215bc4 --- /dev/null +++ b/tensor2tensor/data_generators/inspect_tfrecord.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Inspect a TFRecord file of tensorflow.Example and show tokenizations. 
+ +python data_generators/inspect_tfrecord.py \ + --logtostderr \ + --print_targets \ + --subword_text_encoder_filename=$DATA_DIR/vocab.endefr.8192 \ + --input_filename=$DATA_DIR/wmt_ende_tokens_8k-train-00000-of-00100 +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import six + +from tensor2tensor.data_generators import text_encoder + +import tensorflow.compat.v1 as tf + + +tf.flags.DEFINE_string("subword_text_encoder_filename", "", + "SubwordTextEncoder vocabulary file") +tf.flags.DEFINE_string("token_text_encoder_filename", "", + "TokenTextEncoder vocabulary file") +tf.flags.DEFINE_bool("byte_text_encoder", False, "use a ByteTextEncoder") +tf.flags.DEFINE_string("input_filename", "", "input filename") +tf.flags.DEFINE_bool("print_inputs", False, "Print decoded inputs to stdout") +tf.flags.DEFINE_bool("print_targets", False, "Print decoded targets to stdout") +tf.flags.DEFINE_bool("print_all", False, "Print all fields") + +FLAGS = tf.flags.FLAGS + + +def main(_): + """Convert a file to examples.""" + if FLAGS.subword_text_encoder_filename: + encoder = text_encoder.SubwordTextEncoder( + FLAGS.subword_text_encoder_filename) + elif FLAGS.token_text_encoder_filename: + encoder = text_encoder.TokenTextEncoder(FLAGS.token_text_encoder_filename) + elif FLAGS.byte_text_encoder: + encoder = text_encoder.ByteTextEncoder() + else: + encoder = None + reader = tf.python_io.tf_record_iterator(FLAGS.input_filename) + total_sequences = 0 + total_input_tokens = 0 + total_target_tokens = 0 + nonpadding_input_tokens = 0 + nonpadding_target_tokens = 0 + max_input_length = 0 + max_target_length = 0 + for record in reader: + x = tf.train.Example() + x.ParseFromString(record) + inputs = [int(i) for i in x.features.feature["inputs"].int64_list.value] + targets = [int(i) for i in x.features.feature["targets"].int64_list.value] + if FLAGS.print_inputs: + print("INPUTS:\n" + encoder.decode(inputs) if encoder else inputs) + if FLAGS.print_targets: + print("TARGETS:\n" + encoder.decode(targets) if encoder else targets) + nonpadding_input_tokens += len(inputs) - inputs.count(0) + nonpadding_target_tokens += len(targets) - targets.count(0) + total_input_tokens += len(inputs) + total_target_tokens += len(targets) + total_sequences += 1 + max_input_length = max(max_input_length, len(inputs)) + max_target_length = max(max_target_length, len(targets)) + if FLAGS.print_all: + for k, v in six.iteritems(x.features.feature): + print("%s: %s" % (k, v.int64_list.value)) + + print("total_sequences: %d" % total_sequences) + print("total_input_tokens: %d" % total_input_tokens) + print("total_target_tokens: %d" % total_target_tokens) + print("nonpadding_input_tokens: %d" % nonpadding_input_tokens) + print("nonpadding_target_tokens: %d" % nonpadding_target_tokens) + print("max_input_length: %d" % max_input_length) + print("max_target_length: %d" % max_target_length) + + +if __name__ == "__main__": + tf.app.run() diff --git a/tensor2tensor/data_generators/lambada.py b/tensor2tensor/data_generators/lambada.py new file mode 100644 index 000000000..8b00f6a4e --- /dev/null +++ b/tensor2tensor/data_generators/lambada.py @@ -0,0 +1,374 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
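A hypothetical end-to-end sketch that ties the two new modules above together: build the IMDB problem's TFRecords through the T2T registry, then peek at one record much as `inspect_tfrecord.py` does. The `sentiment_imdb` registry key, the directories, and the shard filename are assumptions rather than values taken from this patch:

```python
import tensorflow.compat.v1 as tf
from tensor2tensor import problems  # registers the bundled problems

imdb = problems.problem("sentiment_imdb")            # assumed registry key
imdb.generate_data("/tmp/t2t-data", "/tmp/t2t-tmp")  # downloads aclImdb first

# Peek at a single serialized example, as inspect_tfrecord.py would.
path = "/tmp/t2t-data/sentiment_imdb-dev-00000-of-00001"  # assumed shard name
for record in tf.python_io.tf_record_iterator(path):
  ex = tf.train.Example()
  ex.ParseFromString(record)
  print(sorted(ex.features.feature.keys()))  # e.g. ['inputs', 'targets']
  break
```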
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Data generators for LAMBADA data-sets.
+
+
+LAMBADA as a language modeling task:
+  https://arxiv.org/abs/1606.06031
+
+LAMBADA as a reading comprehension task:
+  https://arxiv.org/abs/1610.08431
+  For LAMBADA as a reading comprehension task, one can use the dataset
+  provided here:
+  http://ttic.uchicago.edu/~kgimpel/data/lambada-train-valid.tar.gz
+  In this dataset, samples for which the target word is not in the context
+  are removed from the training data.
+
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import csv
+import os
+import tarfile
+from tensor2tensor.data_generators import generator_utils
+from tensor2tensor.data_generators import problem
+from tensor2tensor.data_generators import text_encoder
+from tensor2tensor.data_generators import text_problems
+from tensor2tensor.layers import modalities
+from tensor2tensor.utils import registry
+
+import tensorflow.compat.v1 as tf
+
+
+_UNK = "<UNK>"
+
+
+_TAR = "lambada-dataset.tar.gz"
+_URL = "http" "://clic.cimec.unitn.it/lambada/" + _TAR
+_VOCAB = "lambada-vocab-2.txt"
+
+
+def _prepare_lambada_data(tmp_dir, data_dir, vocab_size, vocab_filename):
+  """Downloads and prepares the LAMBADA dataset.
+
+  Args:
+    tmp_dir: temp directory
+    data_dir: data directory
+    vocab_size: size of vocabulary
+    vocab_filename: name of vocab file
+
+  """
+
+  if not tf.gfile.Exists(data_dir):
+    tf.gfile.MakeDirs(data_dir)
+
+  file_path = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
+  tar_all = tarfile.open(file_path)
+  tar_all.extractall(tmp_dir)
+  tar_all.close()
+  tar_train = tarfile.open(os.path.join(tmp_dir, "train-novels.tar"))
+  tar_train.extractall(tmp_dir)
+  tar_train.close()
+
+  vocab_path = os.path.join(data_dir, vocab_filename)
+  if not tf.gfile.Exists(vocab_path):
+    with tf.gfile.GFile(os.path.join(tmp_dir, _VOCAB), "r") as infile:
+      reader = csv.reader(infile, delimiter="\t")
+      words = [row[0] for row in reader]
+      words = [_UNK] + words[:vocab_size]
+    with tf.gfile.GFile(vocab_path, "w") as outfile:
+      outfile.write("\n".join(words))
+
+
+def get_dataset_split(tmp_dir, split, use_control_set):
+  """Gives the file paths with regards to the given split.
+
+  Args:
+    tmp_dir: temp directory
+    split: dataset split
+    use_control_set: uses control dataset if true.
+
+  Returns:
+    list of file paths.
+ + """ + if not use_control_set: + dataset_split = { + problem.DatasetSplit.TRAIN: [ + f for f in tf.gfile.Glob( + os.path.join(tmp_dir, "train-novels/*/*.txt")) + ], + problem.DatasetSplit.EVAL: [ + os.path.join(tmp_dir, "lambada_development_plain_text.txt") + ], + problem.DatasetSplit.TEST: [ + os.path.join(tmp_dir, "lambada_test_plain_text.txt") + ] + } + + else: + dataset_split = { + problem.DatasetSplit.TRAIN: [ + f for f in tf.gfile.Glob( + os.path.join(tmp_dir, "train-novels/*/*.txt")) + ], + problem.DatasetSplit.EVAL: [ + os.path.join(tmp_dir, "lambada_control_test_data_plain_text.txt") + ], + } + + return dataset_split[split] + + +@registry.register_problem +class LambadaLm(text_problems.Text2SelfProblem): + """Lambada as language modeling task.""" + + @property + def is_generate_per_split(self): + """If true, a single call to generate_samples generates for a single split. + + Returns: + Boolean. + """ + return True + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each. + + Returns: + A dict containing splits information. + """ + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }, { + "split": problem.DatasetSplit.TEST, + "shards": 1, + }] + + @property + def vocab_type(self): + return text_problems.VocabType.TOKEN + + @property + def vocab_size(self): + # Similar to the setup of the main paper + return 60000 + + @property + def oov_token(self): + return _UNK + + @property + def use_control_set(self): + """If evaluate on control set.""" + return False + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generates samples. + + Args: + data_dir: data directory + tmp_dir: temp directory + dataset_split: dataset split + + Returns: + sample generator + + """ + _prepare_lambada_data(tmp_dir, data_dir, self.vocab_size, + self.vocab_filename) + files = get_dataset_split(tmp_dir, dataset_split, self.use_control_set) + + def _generate_samples(): + """sample generator. + + Yields: + A dict. + + """ + for filepath in files: + with tf.gfile.GFile(filepath, "r") as f: + for line in f: + line = " ".join(line.split()) + yield {"targets": line} + + return _generate_samples() + + +@registry.register_problem +class LambadaLmControl(LambadaLm): + """Lambada as language modeling task on control dataset.""" + + @property + def control_set(self): + """If test on control set.""" + return False + + +@registry.register_problem +class LambadaRc(text_problems.Text2ClassProblem): + """Lambada as reading comprehension task.""" + + @property + def is_generate_per_split(self): + """If true, a single call to generate_samples generates for a single split. + + Returns: + Boolean. + """ + return True + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each. + + Returns: + A dict containing splits information. + """ + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }, { + "split": problem.DatasetSplit.TEST, + "shards": 1, + }] + + @property + def vocab_type(self): + return text_problems.VocabType.TOKEN + + @property + def vocab_size(self): + # Similar to the setup of the main paper + return 60000 + + @property + def oov_token(self): + return _UNK + + @property + def use_control_set(self): + """If test on control set.""" + return False + + def get_labels_encoder(self, data_dir): + """Builds encoder for the given class labels. 
+ + Args: + data_dir: data directory + + Returns: + An encoder for class labels. + """ + label_filepath = os.path.join(data_dir, self.vocab_filename) + return text_encoder.TokenTextEncoder( + label_filepath, replace_oov=self.oov_token) + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generates samples. + + Args: + data_dir: data directory + tmp_dir: temp directory + dataset_split: dataset split + + Returns: + sample generator + + """ + _prepare_lambada_data(tmp_dir, data_dir, self.vocab_size, + self.vocab_filename) + files = get_dataset_split(tmp_dir, dataset_split, self.use_control_set) + + def _generate_samples(): + """sample generator. + + Yields: + A dict. + + """ + for filepath in files: + with tf.gfile.GFile(filepath, "r") as f: + for line in f: + input_target = line.split() + yield { + "inputs": " ".join(input_target[:-1]), + "label": input_target[-1] + } + + return _generate_samples() + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + """A generator that generates samples that are encoded. + + Args: + data_dir: data directory + tmp_dir: temp directory + dataset_split: dataset split + + Yields: + A dict. + + """ + generator = self.generate_samples(data_dir, tmp_dir, dataset_split) + txt_encoder = self.get_or_create_vocab(data_dir, tmp_dir) + label_encoder = self.get_labels_encoder(data_dir) + for sample in generator: + inputs = txt_encoder.encode(sample["inputs"]) + inputs.append(text_encoder.EOS_ID) + targets = label_encoder.encode(sample["label"]) + yield {"inputs": inputs, "targets": targets} + + def feature_encoders(self, data_dir): + """Return a dict for encoding and decoding inference input/output. + + Args: + data_dir: data directory + + Returns: + A dict of . + + """ + txt_encoder = self.get_or_create_vocab(data_dir, None, force_get=True) + label_encoder = self.get_labels_encoder(data_dir) + return {"inputs": txt_encoder, "targets": label_encoder} + + def hparams(self, defaults, unused_model_hparams): + """Returns problem_hparams. + + Args: + defaults: default hyperparameters + unused_model_hparams: model hyperparameters + + """ + + p = defaults + p.modality = {"inputs": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.CLASS_LABEL} + p.vocab_size = {"inputs": self._encoders["inputs"].vocab_size, + "targets": self._encoders["targets"].vocab_size} + + +@registry.register_problem +class LambadaRcControl(LambadaRc): + """Lambada as reading comprehension task on control dataset.""" + + @property + def control_set(self): + """If test on control set.""" + return True diff --git a/tensor2tensor/data_generators/librispeech.py b/tensor2tensor/data_generators/librispeech.py new file mode 100644 index 000000000..90d073222 --- /dev/null +++ b/tensor2tensor/data_generators/librispeech.py @@ -0,0 +1,328 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
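A minimal illustration of how `LambadaRc.generate_samples` above turns a raw passage into a reading-comprehension pair: the final token becomes the label and everything before it is the input context (the `LambadaLm` variant instead yields the whole line as `targets`). The sentence is made up:

```python
line = "he handed her the keys to the old family car"
tokens = line.split()
rc_sample = {"inputs": " ".join(tokens[:-1]), "label": tokens[-1]}
lm_sample = {"targets": " ".join(tokens)}
print(rc_sample["label"])   # car
print(rc_sample["inputs"])  # he handed her the keys to the old family
```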
+ +"""Librispeech dataset.""" + +import os +import tarfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import speech_recognition +from tensor2tensor.utils import registry + +from tensorflow.compat.v1 import estimator as tf_estimator + +_LIBRISPEECH_TRAIN_DATASETS = [ + [ + "http://www.openslr.org/resources/12/train-clean-100.tar.gz", # pylint: disable=line-too-long + "train-clean-100" + ], + [ + "http://www.openslr.org/resources/12/train-clean-360.tar.gz", + "train-clean-360" + ], + [ + "http://www.openslr.org/resources/12/train-other-500.tar.gz", + "train-other-500" + ], +] +_LIBRISPEECH_DEV_DATASETS = [ + [ + "http://www.openslr.org/resources/12/dev-clean.tar.gz", + "dev-clean" + ], + [ + "http://www.openslr.org/resources/12/dev-other.tar.gz", + "dev-other" + ], +] +_LIBRISPEECH_TEST_DATASETS = [ + [ + "http://www.openslr.org/resources/12/test-clean.tar.gz", + "test-clean" + ], + [ + "http://www.openslr.org/resources/12/test-other.tar.gz", + "test-other" + ], +] + + +def _collect_data(directory, input_ext, transcription_ext): + """Traverses directory collecting input and target files.""" + # Directory from string to tuple pair of strings + # key: the filepath to a datafile including the datafile's basename. Example, + # if the datafile was "/path/to/datafile.wav" then the key would be + # "/path/to/datafile" + # value: a pair of strings (media_filepath, label) + data_files = {} + for root, _, filenames in os.walk(directory): + transcripts = [filename for filename in filenames + if transcription_ext in filename] + for transcript in transcripts: + transcript_path = os.path.join(root, transcript) + with open(transcript_path, "r") as transcript_file: + for transcript_line in transcript_file: + line_contents = transcript_line.strip().split(" ", 1) + media_base, label = line_contents + key = os.path.join(root, media_base) + assert key not in data_files + media_name = "%s.%s"%(media_base, input_ext) + media_path = os.path.join(root, media_name) + data_files[key] = (media_base, media_path, label) + return data_files + + +@registry.register_problem() +class Librispeech(speech_recognition.SpeechRecognitionProblem): + """Problem spec for Librispeech using clean and noisy data.""" + + # Select only the clean data + TRAIN_DATASETS = _LIBRISPEECH_TRAIN_DATASETS + DEV_DATASETS = _LIBRISPEECH_DEV_DATASETS + TEST_DATASETS = _LIBRISPEECH_TEST_DATASETS + + @property + def num_shards(self): + return 100 + + @property + def use_subword_tokenizer(self): + return False + + @property + def num_dev_shards(self): + return 1 + + @property + def num_test_shards(self): + return 1 + + @property + def use_train_shards_for_dev(self): + """If true, we only generate training data and hold out shards for dev.""" + return False + + def generator(self, data_dir, tmp_dir, datasets, + eos_list=None, start_from=0, how_many=0): + del eos_list + i = 0 + for url, subdir in datasets: + filename = os.path.basename(url) + compressed_file = generator_utils.maybe_download(tmp_dir, filename, url) + + read_type = "r:gz" if filename.endswith("tgz") else "r" + with tarfile.open(compressed_file, read_type) as corpus_tar: + # Create a subset of files that don't already exist. 
+ # tarfile.extractall errors when encountering an existing file + # and tarfile.extract is extremely slow + members = [] + for f in corpus_tar: + if not os.path.isfile(os.path.join(tmp_dir, f.name)): + members.append(f) + corpus_tar.extractall(tmp_dir, members=members) + + raw_data_dir = os.path.join(tmp_dir, "LibriSpeech", subdir) + data_files = _collect_data(raw_data_dir, "flac", "txt") + data_pairs = data_files.values() + + encoders = self.feature_encoders(data_dir) + audio_encoder = encoders["waveforms"] + text_encoder = encoders["targets"] + + for utt_id, media_file, text_data in sorted(data_pairs)[start_from:]: + if how_many > 0 and i == how_many: + return + i += 1 + wav_data = audio_encoder.encode(media_file) + spk_id, unused_book_id, _ = utt_id.split("-") + yield { + "waveforms": wav_data, + "waveform_lens": [len(wav_data)], + "targets": text_encoder.encode(text_data), + "raw_transcript": [text_data], + "utt_id": [utt_id], + "spk_id": [spk_id], + } + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + train_paths = self.training_filepaths( + data_dir, self.num_shards, shuffled=False) + dev_paths = self.dev_filepaths( + data_dir, self.num_dev_shards, shuffled=False) + test_paths = self.test_filepaths( + data_dir, self.num_test_shards, shuffled=True) + + generator_utils.generate_files( + self.generator(data_dir, tmp_dir, self.TEST_DATASETS), test_paths) + + if self.use_train_shards_for_dev: + all_paths = train_paths + dev_paths + generator_utils.generate_files( + self.generator(data_dir, tmp_dir, self.TRAIN_DATASETS), all_paths) + generator_utils.shuffle_dataset(all_paths) + else: + generator_utils.generate_dataset_and_shuffle( + self.generator(data_dir, tmp_dir, self.TRAIN_DATASETS), train_paths, + self.generator(data_dir, tmp_dir, self.DEV_DATASETS), dev_paths) + + +@registry.register_problem() +class LibrispeechTrainFullTestClean(Librispeech): + """Problem to train on full 960h, but evaluate on clean data only.""" + + def training_filepaths(self, data_dir, num_shards, shuffled): + return Librispeech.training_filepaths(self, data_dir, num_shards, shuffled) + + def dev_filepaths(self, data_dir, num_shards, shuffled): + return LibrispeechClean.dev_filepaths(self, data_dir, num_shards, shuffled) + + def test_filepaths(self, data_dir, num_shards, shuffled): + return LibrispeechClean.test_filepaths(self, data_dir, num_shards, shuffled) + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + raise Exception("Generate librispeech and librispeech_clean data.") + + def filepattern(self, data_dir, mode, shard=None): + """Get filepattern for data files for mode. + + Matches mode to a suffix. + * DatasetSplit.TRAIN: train + * DatasetSplit.EVAL: dev + * DatasetSplit.TEST: test + * tf.estimator.ModeKeys.PREDICT: dev + + Args: + data_dir: str, data directory. + mode: DatasetSplit + shard: int, if provided, will only read data from the specified shard. 
+
+    Returns:
+      filepattern str
+    """
+    shard_str = "-%05d" % shard if shard is not None else ""
+    if mode == problem.DatasetSplit.TRAIN:
+      path = os.path.join(data_dir, "librispeech")
+      suffix = "train"
+    elif mode in [problem.DatasetSplit.EVAL, tf_estimator.ModeKeys.PREDICT]:
+      path = os.path.join(data_dir, "librispeech_clean")
+      suffix = "dev"
+    else:
+      assert mode == problem.DatasetSplit.TEST
+      path = os.path.join(data_dir, "librispeech_clean")
+      suffix = "test"
+
+    return "%s-%s%s*" % (path, suffix, shard_str)
+
+
+@registry.register_problem()
+class LibrispeechTrainFullTestOther(Librispeech):
+  """Problem to train on full 960h, but evaluate on noisy data only."""
+
+  def training_filepaths(self, data_dir, num_shards, shuffled):
+    return Librispeech.training_filepaths(self, data_dir, num_shards, shuffled)
+
+  def dev_filepaths(self, data_dir, num_shards, shuffled):
+    return LibrispeechNoisy.dev_filepaths(self, data_dir, num_shards, shuffled)
+
+  def test_filepaths(self, data_dir, num_shards, shuffled):
+    return LibrispeechNoisy.test_filepaths(self, data_dir, num_shards, shuffled)
+
+  def generate_data(self, data_dir, tmp_dir, task_id=-1):
+    raise Exception("Generate librispeech and librispeech_noisy data.")
+
+  def filepattern(self, data_dir, mode, shard=None):
+    """Get filepattern for data files for mode.
+
+    Matches mode to a suffix.
+    * DatasetSplit.TRAIN: train
+    * DatasetSplit.EVAL: dev
+    * DatasetSplit.TEST: test
+    * tf.estimator.ModeKeys.PREDICT: dev
+
+    Args:
+      data_dir: str, data directory.
+      mode: DatasetSplit
+      shard: int, if provided, will only read data from the specified shard.
+
+    Returns:
+      filepattern str
+    """
+    shard_str = "-%05d" % shard if shard is not None else ""
+    if mode == problem.DatasetSplit.TRAIN:
+      path = os.path.join(data_dir, "librispeech")
+      suffix = "train"
+    elif mode in [problem.DatasetSplit.EVAL, tf_estimator.ModeKeys.PREDICT]:
+      path = os.path.join(data_dir, "librispeech_noisy")
+      suffix = "dev"
+    else:
+      assert mode == problem.DatasetSplit.TEST
+      path = os.path.join(data_dir, "librispeech_noisy")
+      suffix = "test"
+
+    return "%s-%s%s*" % (path, suffix, shard_str)
+
+
+@registry.register_problem()
+class LibrispeechCleanSmall(Librispeech):
+  """Problem spec for Librispeech using 100h clean train and clean eval data."""
+
+  # Select only the clean data
+  TRAIN_DATASETS = _LIBRISPEECH_TRAIN_DATASETS[:1]
+  DEV_DATASETS = _LIBRISPEECH_DEV_DATASETS[:1]
+  TEST_DATASETS = _LIBRISPEECH_TEST_DATASETS[:1]
+
+
+@registry.register_problem()
+class LibrispeechClean(Librispeech):
+  """Problem spec for Librispeech using 460h clean train and clean eval data."""
+
+  # Select only the clean data
+  TRAIN_DATASETS = _LIBRISPEECH_TRAIN_DATASETS[:2]
+  DEV_DATASETS = _LIBRISPEECH_DEV_DATASETS[:1]
+  TEST_DATASETS = _LIBRISPEECH_TEST_DATASETS[:1]
+
+
+@registry.register_problem()
+class LibrispeechNoisy(Librispeech):
+  """Problem spec for Librispeech using 500h noisy train and noisy eval data."""
+
+  # Select only the noisy data
+  TRAIN_DATASETS = _LIBRISPEECH_TRAIN_DATASETS[2:]
+  DEV_DATASETS = _LIBRISPEECH_DEV_DATASETS[1:]
+  TEST_DATASETS = _LIBRISPEECH_TEST_DATASETS[1:]
+
+
+# TODO(lukaszkaiser): clean up hparams or remove from here.
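To make the routing in the two `filepattern` overrides above easier to scan, here is a plain-Python paraphrase (hypothetical helper; only the string formatting is mirrored):

```python
import os

def filepattern(data_dir, mode, shard=None, eval_corpus="librispeech_clean"):
  # Training always reads the mixed "librispeech" shards; dev/test read the
  # clean (or noisy) corpus, optionally restricted to a single shard.
  shard_str = "-%05d" % shard if shard is not None else ""
  if mode == "train":
    path, suffix = os.path.join(data_dir, "librispeech"), "train"
  elif mode == "dev":
    path, suffix = os.path.join(data_dir, eval_corpus), "dev"
  else:
    path, suffix = os.path.join(data_dir, eval_corpus), "test"
  return "%s-%s%s*" % (path, suffix, shard_str)

print(filepattern("/data", "train"))         # /data/librispeech-train*
print(filepattern("/data", "dev", shard=3))  # /data/librispeech_clean-dev-00003*
```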
+def add_librispeech_hparams(hparams): + """Adding to base hparams the attributes for for librispeech.""" + hparams.batch_size = 36 + hparams.audio_compression = 8 + hparams.hidden_size = 2048 + hparams.max_input_seq_length = 600000 + hparams.max_target_seq_length = 350 + hparams.max_length = hparams.max_input_seq_length + hparams.min_length_bucket = hparams.max_input_seq_length // 2 + hparams.learning_rate = 0.05 + hparams.train_steps = 5000000 + hparams.num_hidden_layers = 4 + return hparams + + +def set_librispeech_length_hparams(hparams): + hparams.max_length = 1650 * 80 # this limits inputs[1] * inputs[2] + hparams.max_input_seq_length = 1650 + hparams.max_target_seq_length = 350 + return hparams diff --git a/tensor2tensor/data_generators/lm1b.py b/tensor2tensor/data_generators/lm1b.py new file mode 100644 index 000000000..8f209ed47 --- /dev/null +++ b/tensor2tensor/data_generators/lm1b.py @@ -0,0 +1,205 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for LM1B data-set.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tarfile +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +def _original_vocab(tmp_dir): + """Returns a set containing the original vocabulary. + + This is important for comparing with published results. + + Args: + tmp_dir: directory containing dataset. + + Returns: + a set of strings + """ + vocab_url = ("http://download.tensorflow.org/models/LM_LSTM_CNN/" + "vocab-2016-09-10.txt") + vocab_filename = os.path.basename(vocab_url + ".en") + vocab_filepath = os.path.join(tmp_dir, vocab_filename) + if not os.path.exists(vocab_filepath): + generator_utils.maybe_download(tmp_dir, vocab_filename, vocab_url) + return set([ + text_encoder.native_to_unicode(l.strip()) + for l in tf.gfile.Open(vocab_filepath) + ]) + + +def _replace_oov(original_vocab, line): + """Replace out-of-vocab words with "UNK". + + This maintains compatibility with published results. + + Args: + original_vocab: a set of strings (The standard vocabulary for the dataset) + line: a unicode string - a space-delimited sequence of words. + + Returns: + a unicode string - a space-delimited sequence of words. 
+ """ + return u" ".join( + [word if word in original_vocab else u"UNK" for word in line.split()]) + + +def _train_data_filenames(tmp_dir): + return [ + os.path.join(tmp_dir, + "1-billion-word-language-modeling-benchmark-r13output", + "training-monolingual.tokenized.shuffled", + "news.en-%05d-of-00100" % i) for i in range(1, 100) + ] + + +def _dev_data_filenames(tmp_dir): + return [os.path.join(tmp_dir, + "1-billion-word-language-modeling-benchmark-r13output", + "heldout-monolingual.tokenized.shuffled", + "news.en.heldout-00000-of-00050")] + + +def _maybe_download_corpus(tmp_dir): + """Download and unpack the corpus. + + Args: + tmp_dir: directory containing dataset. + """ + corpus_url = ("http://www.statmt.org/lm-benchmark/" + "1-billion-word-language-modeling-benchmark-r13output.tar.gz") + corpus_filename = os.path.basename(corpus_url) + corpus_filepath = os.path.join(tmp_dir, corpus_filename) + if not os.path.exists(corpus_filepath): + generator_utils.maybe_download(tmp_dir, corpus_filename, corpus_url) + with tarfile.open(corpus_filepath, "r:gz") as corpus_tar: + corpus_tar.extractall(tmp_dir) + + +@registry.register_problem +class LanguagemodelLm1b32k(text_problems.Text2SelfProblem): + """A language model on the 1B words corpus. + + Ratio of dev tokens (including eos) to dev words (including eos) + 176923 / 159658 = 1.108137; multiply log_ppl by this to compare results. + """ + + @property + def approx_vocab_size(self): + return 2**15 # 32768 + + @property + def max_samples_for_vocab(self): + return 63000 + + def is_generate_per_split(self): + return True + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + del data_dir + split_files = { + problem.DatasetSplit.TRAIN: _train_data_filenames(tmp_dir), + problem.DatasetSplit.EVAL: _dev_data_filenames(tmp_dir), + } + _maybe_download_corpus(tmp_dir) + original_vocab = _original_vocab(tmp_dir) + files = split_files[dataset_split] + for filepath in files: + tf.logging.info("filepath = %s", filepath) + for line in tf.gfile.Open(filepath): + txt = _replace_oov(original_vocab, text_encoder.native_to_unicode(line)) + yield {"targets": txt} + + +@registry.register_problem +class LanguagemodelLm1b8k(LanguagemodelLm1b32k): + + @property + def approx_vocab_size(self): + return 2**13 # 8192 + + +@registry.register_problem +class LanguagemodelLm1b32kPacked(LanguagemodelLm1b32k): + """Packed version for TPU training.""" + + @property + def packed_length(self): + return 256 + + @property + def vocab_filename(self): + return LanguagemodelLm1b32k().vocab_filename + + +@registry.register_problem +class LanguagemodelLm1b8kPacked(LanguagemodelLm1b8k): + """Packed version, 8k vocabulary. + + Ratio of dev tokens (including eos) to dev words (including eos) + 207351 / 159658 = 1.29872; multiply log-ppl by this to compare results. + """ + + @property + def packed_length(self): + return 256 + + @property + def vocab_filename(self): + return LanguagemodelLm1b8k().vocab_filename + + +@registry.register_problem +class LanguagemodelLm1bCharacters(LanguagemodelLm1b32k): + """A language model on the 1B words corpus, character level. + + Ratio of dev chars (including eos) to dev words (including eos) + 826189 / 159658 = 5.174742; multiply log-ppl by this to compare results. + """ + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.EN_CHR + + +@registry.register_problem +class LanguagemodelLm1bCharactersPacked(LanguagemodelLm1bCharacters): + """Packed version. 
+ + Ratio of dev chars (including eos) to dev words (including eos) + 826189 / 159658 = 5.174742; multiply log-ppl by this to compare results. + """ + + @property + def packed_length(self): + return 1024 diff --git a/tensor2tensor/data_generators/lm1b_imdb.py b/tensor2tensor/data_generators/lm1b_imdb.py new file mode 100644 index 000000000..e4789e822 --- /dev/null +++ b/tensor2tensor/data_generators/lm1b_imdb.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for LM1B and IMDb combined data-set.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators import imdb +from tensor2tensor.data_generators import lm1b +from tensor2tensor.data_generators import multi_problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + + +@registry.register_problem +class LanguagemodelLm1bSentimentIMDB(multi_problem.MultiProblem): + """LM1b and IMDb mixed problem class for multitask learning.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelLm1bSentimentIMDB, self).__init__(was_reversed, was_copy) + self.task_list.append(lm1b.LanguagemodelLm1bCharacters()) + self.task_list.append(imdb.SentimentIMDBCharacters()) + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER diff --git a/tensor2tensor/data_generators/lm1b_mnli.py b/tensor2tensor/data_generators/lm1b_mnli.py new file mode 100644 index 000000000..6e7b385f7 --- /dev/null +++ b/tensor2tensor/data_generators/lm1b_mnli.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
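The LM1b problem docstrings above report dev-set token-to-word ratios so that per-token log-perplexities can be compared with word-level numbers. A quick worked example of that conversion (the per-token log-perplexity is a made-up value):

```python
dev_subword_tokens = 176923  # from the LanguagemodelLm1b32k docstring
dev_words = 159658
ratio = dev_subword_tokens / dev_words
log_ppl_per_token = 3.0      # assumed model output, per subword token
log_ppl_per_word = log_ppl_per_token * ratio
print(round(ratio, 6), round(log_ppl_per_word, 4))  # 1.108137 3.3244
```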
+ +"""Data generators for LM1B and MNLI combined datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators import lm1b +from tensor2tensor.data_generators import multi_problem +from tensor2tensor.data_generators import multinli +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + + +@registry.register_problem +class LanguagemodelLm1bMultiNLISubwords(multi_problem.MultiProblem): + """LM1b and MNLI mixed problem class for multitask learning.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelLm1bMultiNLISubwords, self).__init__( + was_reversed, was_copy) + self.task_list.append(lm1b.LanguagemodelLm1b32k()) + self.task_list.append(multinli.MultiNLISharedVocab()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelLm1bMultiNLI(multi_problem.MultiProblem): + """LM1b and MNLI mixed problem class for multitask learning.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelLm1bMultiNLI, self).__init__(was_reversed, was_copy) + self.task_list.append(lm1b.LanguagemodelLm1bCharacters()) + self.task_list.append(multinli.MultiNLICharacters()) + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER diff --git a/tensor2tensor/data_generators/lm_example.py b/tensor2tensor/data_generators/lm_example.py deleted file mode 100644 index 9cf930afc..000000000 --- a/tensor2tensor/data_generators/lm_example.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -r"""Convert language modeling data to tf.Example format. - -Uses SubwordTextEncoder. - -For each line, we generate a tf.Example, with "targets" equal to a sequence -of subtokens (integers), ending in subtoken id 1 for end-of-sequence. We add -a dummy feature "inputs"=[0] for compatability with seq-to-seq models. - -If FLAGS.combine_to_length is nonzero, then we combine multiple sequences into -examples of a constant length, possibly with some padding at the end. - -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -from tensor2tensor.data_generators import generator_utils -from tensor2tensor.data_generators import text_encoder - -import tensorflow as tf - -tf.app.flags.DEFINE_string( - "vocab_file", "", "SubwordTextEncoder vocabulary file") - -tf.app.flags.DEFINE_integer( - "combine_to_length", 0, - "If positive, concatenate documents to form examples with length exactly" - " equal to this value. Documents are still suffixed with subtoken id=1. 
" - " Examples are padded with subtoken id=0.") - -tf.app.flags.DEFINE_string("in_filepattern", "", "input filename") - -tf.app.flags.DEFINE_string( - "out_prefix", "", "The output filename is equal to out_prefix plus " - "the last 15 characters of in_file. (e.g. -00001-of-00100)") - -FLAGS = tf.app.flags.FLAGS - - -def _make_example(ids, raw_num_bytes): - if FLAGS.combine_to_length > 0: - ids += [0] * (FLAGS.combine_to_length - len(ids)) - return generator_utils.to_example({ - "targets": ids, - "inputs": [0], - "raw_num_bytes": [raw_num_bytes] - }).SerializeToString() - - -def convert_file(in_file, encoder): - """Convert a file to examples.""" - total_bytes = 0 - total_subtokens = 0 - total_documents = 0 - dropped_documents = 0 - - combined_subtokens = [] - combined_num_bytes = 0 - - out_file = FLAGS.out_prefix + in_file[-15:] - writer = tf.python_io.TFRecordWriter(out_file) - out_file = FLAGS.out_prefix + in_file[-15:] - print ("in_file", in_file, "out_file", out_file) - for line in tf.gfile.Open(in_file): - total_documents += 1 - assert line[-1] == "\n" - num_bytes = len(line) - total_bytes += num_bytes - line = line[:-1] - subtokens = encoder.encode(line) + [1] - total_subtokens += len(subtokens) - if FLAGS.combine_to_length: - if len(combined_subtokens) + len(subtokens) > FLAGS.combine_to_length: - writer.write(_make_example(combined_subtokens, combined_num_bytes)) - combined_subtokens = [] - combined_num_bytes = 0 - if len(subtokens) <= FLAGS.combine_to_length: - combined_subtokens.extend(subtokens) - combined_num_bytes += num_bytes - else: - dropped_documents += 1 - else: - writer.write(_make_example(subtokens, num_bytes)) - if combined_subtokens: - writer.write(_make_example(combined_subtokens, combined_num_bytes)) - writer.close() - - tf.logging.info("total bytes: %d", total_bytes) - tf.logging.info("total subtokens: %d", total_subtokens) - tf.logging.info("bytes per subtoken: %f", total_bytes / total_subtokens) - tf.logging.info("total documents: %d", total_documents) - tf.logging.info("dropped documents: %d", dropped_documents) - - -def main(_): - """Convert a file to examples.""" - encoder = text_encoder.SubwordTextEncoder(FLAGS.vocab_file) - - in_files = tf.gfile.Glob(FLAGS.in_filepattern) - assert in_files, "No matching input files" - for in_file in in_files: - convert_file(in_file, encoder) - - -if __name__ == "__main__": - tf.app.run() diff --git a/tensor2tensor/data_generators/mnist.py b/tensor2tensor/data_generators/mnist.py new file mode 100644 index 000000000..ba5f85b4b --- /dev/null +++ b/tensor2tensor/data_generators/mnist.py @@ -0,0 +1,258 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""MNIST.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import gzip +import os +import random +import numpy as np + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +# URLs and filenames for MNIST data. +_MNIST_URL = "http://yann.lecun.com/exdb/mnist/" +_MNIST_TRAIN_DATA_FILENAME = "train-images-idx3-ubyte.gz" +_MNIST_TRAIN_LABELS_FILENAME = "train-labels-idx1-ubyte.gz" +_MNIST_TEST_DATA_FILENAME = "t10k-images-idx3-ubyte.gz" +_MNIST_TEST_LABELS_FILENAME = "t10k-labels-idx1-ubyte.gz" +_MNIST_IMAGE_SIZE = 28 + + +def _get_mnist(directory): + """Download all MNIST files to directory unless they are there.""" + for filename in [ + _MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME, + _MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME + ]: + generator_utils.maybe_download(directory, filename, _MNIST_URL + filename) + + +def _extract_mnist_images(filename, num_images): + """Extract images from an MNIST file into a numpy array. + + Args: + filename: The path to an MNIST images file. + num_images: The number of images in the file. + + Returns: + A numpy array of shape [number_of_images, height, width, channels]. + """ + with gzip.open(filename) as bytestream: + bytestream.read(16) + buf = bytestream.read(_MNIST_IMAGE_SIZE * _MNIST_IMAGE_SIZE * num_images) + data = np.frombuffer(buf, dtype=np.uint8) + data = data.reshape(num_images, _MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1) + return data + + +def _extract_mnist_labels(filename, num_labels): + """Extract labels from an MNIST file into integers. + + Args: + filename: The path to an MNIST labels file. + num_labels: The number of labels in the file. + + Returns: + A int64 numpy array of shape [num_labels] + """ + with gzip.open(filename) as bytestream: + bytestream.read(8) + buf = bytestream.read(num_labels) + labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64) + return labels + + +def mnist_common_generator(tmp_dir, + training, + how_many, + data_filename, + label_filename, + start_from=0): + """Image generator for MNIST. + + Args: + tmp_dir: path to temporary storage directory. + training: a Boolean; if true, we use the train set, otherwise the test set. + how_many: how many images and labels to generate. + data_filename: file that contains features data. + label_filename: file that contains labels. + start_from: from which image to start. + + Returns: + An instance of image_generator that produces MNIST images. + """ + data_path = os.path.join(tmp_dir, data_filename) + labels_path = os.path.join(tmp_dir, label_filename) + images = _extract_mnist_images(data_path, 60000 if training else 10000) + labels = _extract_mnist_labels(labels_path, 60000 if training else 10000) + # Shuffle the data to make sure classes are well distributed. + data = list(zip(images, labels)) + random.shuffle(data) + images, labels = list(zip(*data)) + return image_utils.image_generator(images[start_from:start_from + how_many], + labels[start_from:start_from + how_many]) + + +def mnist_generator(tmp_dir, training, how_many, start_from=0): + """Image generator for MNIST. + + Args: + tmp_dir: path to temporary storage directory. + training: a Boolean; if true, we use the train set, otherwise the test set. + how_many: how many images and labels to generate. + start_from: from which image to start. 
+ + Returns: + An instance of image_generator that produces MNIST images. + """ + _get_mnist(tmp_dir) + d = _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME + l = _MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME + return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from) + + +@registry.register_problem +class ImageMnistTune(image_utils.Image2ClassProblem): + """MNIST, tuning data.""" + + @property + def num_channels(self): + return 1 + + @property + def is_small(self): + return True + + @property + def num_classes(self): + return 10 + + @property + def class_labels(self): + return [str(c) for c in range(self.num_classes)] + + @property + def train_shards(self): + return 10 + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image.set_shape([_MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1]) + if not self._was_reversed: + image = tf.image.per_image_standardization(image) + example["inputs"] = image + return example + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return mnist_generator(tmp_dir, True, 55000) + else: + return mnist_generator(tmp_dir, True, 5000, 55000) + + +@registry.register_problem +class ImageMnist(ImageMnistTune): + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return mnist_generator(tmp_dir, True, 60000) + else: + return mnist_generator(tmp_dir, False, 10000) + + +# URLs and filenames for MNIST data. +_FASHION_MNIST_URL = ("http://fashion-mnist.s3-website.eu-central-1/" + ".amazonaws.com/") +_FASHION_MNIST_LOCAL_FILE_PREFIX = "fashion-" +_FASHION_MNIST_IMAGE_SIZE = 28 + + +def _get_fashion_mnist(directory): + """Download all FashionMNIST files to directory unless they are there.""" + # Fashion mnist files have the same names as MNIST. + # We must choose a separate name (by adding 'fashion-' prefix) in the tmp_dir. + for filename in [ + _MNIST_TRAIN_DATA_FILENAME, _MNIST_TRAIN_LABELS_FILENAME, + _MNIST_TEST_DATA_FILENAME, _MNIST_TEST_LABELS_FILENAME + ]: + generator_utils.maybe_download(directory, + _FASHION_MNIST_LOCAL_FILE_PREFIX + filename, + _FASHION_MNIST_URL + filename) + + +def fashion_mnist_generator(tmp_dir, training, how_many, start_from=0): + """Image generator for FashionMNIST. + + Args: + tmp_dir: path to temporary storage directory. + training: a Boolean; if true, we use the train set, otherwise the test set. + how_many: how many images and labels to generate. + start_from: from which image to start. + + Returns: + An instance of image_generator that produces MNIST images. 
+ """ + _get_fashion_mnist(tmp_dir) + d = _FASHION_MNIST_LOCAL_FILE_PREFIX + ( + _MNIST_TRAIN_DATA_FILENAME if training else _MNIST_TEST_DATA_FILENAME) + l = _FASHION_MNIST_LOCAL_FILE_PREFIX + ( + _MNIST_TRAIN_LABELS_FILENAME if training else _MNIST_TEST_LABELS_FILENAME) + return mnist_common_generator(tmp_dir, training, how_many, d, l, start_from) + + +@registry.register_problem +class ImageFashionMnist(image_utils.Image2ClassProblem): + """Fashion MNIST.""" + + @property + def is_small(self): + return True + + @property + def num_channels(self): + return 1 + + @property + def num_classes(self): + return 10 + + @property + def class_labels(self): + return [str(c) for c in range(self.num_classes)] + + @property + def train_shards(self): + return 10 + + def preprocess_example(self, example, mode, unused_hparams): + image = example["inputs"] + image.set_shape([_MNIST_IMAGE_SIZE, _MNIST_IMAGE_SIZE, 1]) + example["inputs"] = image + return example + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return fashion_mnist_generator(tmp_dir, True, 60000) + else: + return fashion_mnist_generator(tmp_dir, False, 10000) diff --git a/tensor2tensor/data_generators/moving_mnist.py b/tensor2tensor/data_generators/moving_mnist.py new file mode 100644 index 000000000..445b45b8e --- /dev/null +++ b/tensor2tensor/data_generators/moving_mnist.py @@ -0,0 +1,153 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Moving MNIST dataset. 
+ +Unsupervised Learning of Video Representations using LSTMs +Nitish Srivastava, Elman Mansimov, Ruslan Salakhutdinov +https://arxiv.org/abs/1502.04681 + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import video_utils +from tensor2tensor.layers import modalities +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf +import tensorflow_datasets as tfds +from tensorflow_datasets.video import moving_sequence + + +DATA_URL = ( + "http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy") +SPLIT_TO_SIZE = { + problem.DatasetSplit.TRAIN: 100000, + problem.DatasetSplit.EVAL: 10000, + problem.DatasetSplit.TEST: 10000} + + +@registry.register_problem +class VideoMovingMnist(video_utils.VideoProblem): + """MovingMnist Dataset.""" + + @property + def num_channels(self): + return 1 + + @property + def frame_height(self): + return 64 + + @property + def frame_width(self): + return 64 + + @property + def is_generate_per_split(self): + return True + + # num_videos * num_frames + @property + def total_number_of_frames(self): + return 100000 * 20 + + def max_frames_per_video(self, hparams): + return 20 + + @property + def random_skip(self): + return False + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + return [ + {"split": problem.DatasetSplit.TRAIN, "shards": 10}, + {"split": problem.DatasetSplit.EVAL, "shards": 1}, + {"split": problem.DatasetSplit.TEST, "shards": 1}] + + @property + def extra_reading_spec(self): + """Additional data fields to store on disk and their decoders.""" + data_fields = { + "frame_number": tf.FixedLenFeature([1], tf.int64), + } + decoders = { + "frame_number": + contrib.slim().tfexample_decoder.Tensor(tensor_key="frame_number"), + } + return data_fields, decoders + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.VIDEO, + "targets": modalities.ModalityType.VIDEO} + p.vocab_size = {"inputs": 256, + "targets": 256} + + def get_test_iterator(self, tmp_dir): + path = generator_utils.maybe_download( + tmp_dir, os.path.basename(DATA_URL), DATA_URL) + with tf.io.gfile.GFile(path, "rb") as fp: + mnist_test = np.load(fp) + mnist_test = np.transpose(mnist_test, (1, 0, 2, 3)) + mnist_test = np.expand_dims(mnist_test, axis=-1) + mnist_test = tf.data.Dataset.from_tensor_slices(mnist_test) + return mnist_test.make_initializable_iterator() + + def map_fn(self, image, label): + sequence = moving_sequence.image_as_moving_sequence( + image, sequence_length=20) + return sequence.image_sequence + + def get_train_iterator(self): + mnist_ds = tfds.load("mnist:3.*.*", split=tfds.Split.TRAIN, + as_supervised=True) + mnist_ds = mnist_ds.repeat() + moving_mnist_ds = mnist_ds.map(self.map_fn).batch(2) + moving_mnist_ds = moving_mnist_ds.map(lambda x: tf.reduce_max(x, axis=0)) + return moving_mnist_ds.make_initializable_iterator() + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + with tf.Graph().as_default(): + # train and eval set are generated on-the-fly. + # test set is the official test-set. 
+ if dataset_split == problem.DatasetSplit.TEST: + moving_ds = self.get_test_iterator(tmp_dir) + else: + moving_ds = self.get_train_iterator() + + next_video = moving_ds.get_next() + with tf.Session() as sess: + sess.run(moving_ds.initializer) + + n_samples = SPLIT_TO_SIZE[dataset_split] + for _ in range(n_samples): + next_video_np = sess.run(next_video) + for frame_number, frame in enumerate(next_video_np): + yield { + "frame_number": [frame_number], + "frame": frame, + } diff --git a/tensor2tensor/data_generators/mrpc.py b/tensor2tensor/data_generators/mrpc.py new file mode 100644 index 000000000..bdf4a3b5c --- /dev/null +++ b/tensor2tensor/data_generators/mrpc.py @@ -0,0 +1,136 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for the MSR Paraphrase Corpus.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + +EOS = text_encoder.EOS + + +@registry.register_problem +class MSRParaphraseCorpus(text_problems.TextConcat2ClassProblem): + """MSR Paraphrase Identification problems.""" + + # Link to data from GLUE: https://gluebenchmark.com/tasks + DEV_IDS = ("https://firebasestorage.googleapis.com/v0/b/" + "mtl-sentence-representations.appspot.com/o/" + "data%2Fmrpc_dev_ids.tsv?alt=media&token=ec5c0836-31d5-" + "48f4-b431-7480817f1adc") + MRPC_TRAIN = ("https://s3.amazonaws.com/senteval/senteval_data/" + "msr_paraphrase_train.txt") + MRPC_TEST = ("https://s3.amazonaws.com/senteval/senteval_data/" + "msr_paraphrase_test.txt") + DATA_DIR = "MRPC" + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }, { + "split": problem.DatasetSplit.TEST, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2**13 # 8k vocab suffices for this small dataset. 
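A quick shape sanity-check for the `VideoMovingMnist` test path above: the published `mnist_test_seq.npy` file is commonly laid out as (time, batch, 64, 64); the generator transposes it to (batch, time, 64, 64) and appends a channel axis, so each yielded video is 20 frames of 64x64x1. The zero array below is a stand-in for the real file:

```python
import numpy as np

fake = np.zeros((20, 10000, 64, 64), dtype=np.uint8)  # stand-in for the .npy
fake = np.transpose(fake, (1, 0, 2, 3))
fake = np.expand_dims(fake, axis=-1)
print(fake.shape)  # (10000, 20, 64, 64, 1)
```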
+ + @property + def num_classes(self): + return 2 + + def class_labels(self, data_dir): + del data_dir + return ["not_paraphrase", "paraphrase"] + + def _maybe_download_corpora(self, tmp_dir): + mrpc_dir = os.path.join(tmp_dir, self.DATA_DIR) + tf.gfile.MakeDirs(mrpc_dir) + mrpc_train_finalpath = os.path.join(mrpc_dir, "msr_paraphrase_train.txt") + mrpc_test_finalpath = os.path.join(mrpc_dir, "msr_paraphrase_test.txt") + mrpc_dev_ids_finalpath = os.path.join(mrpc_dir, "dev_ids.tsv") + + def download_file(tdir, filepath, url): + if not tf.gfile.Exists(filepath): + generator_utils.maybe_download(tdir, filepath, url) + + download_file(mrpc_dir, mrpc_train_finalpath, self.MRPC_TRAIN) + download_file(mrpc_dir, mrpc_test_finalpath, self.MRPC_TEST) + download_file(mrpc_dir, mrpc_dev_ids_finalpath, self.DEV_IDS) + + return mrpc_dir + + def example_generator(self, filename, dev_ids, dataset_split): + for idx, line in enumerate(tf.gfile.Open(filename, "rb")): + if idx == 0: continue # skip header + line = text_encoder.to_unicode_utf8(line.strip()) + l, id1, id2, s1, s2 = line.split("\t") + is_dev = [id1, id2] in dev_ids + if dataset_split == problem.DatasetSplit.TRAIN and is_dev: + continue + if dataset_split == problem.DatasetSplit.EVAL and not is_dev: + continue + inputs = [[s1, s2], [s2, s1]] + for inp in inputs: + yield { + "inputs": inp, + "label": int(l) + } + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + mrpc_dir = self._maybe_download_corpora(tmp_dir) + if dataset_split != problem.DatasetSplit.TEST: + filesplit = "msr_paraphrase_train.txt" + else: + filesplit = "msr_paraphrase_test.txt" + dev_ids = [] + if dataset_split != problem.DatasetSplit.TEST: + for row in tf.gfile.Open(os.path.join(mrpc_dir, "dev_ids.tsv")): + dev_ids.append(row.strip().split("\t")) + + filename = os.path.join(mrpc_dir, filesplit) + for example in self.example_generator(filename, dev_ids, dataset_split): + yield example + + +@registry.register_problem +class MSRParaphraseCorpusCharacters(MSRParaphraseCorpus): + """MSR Paraphrase Identification problems, character level""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.EN_SIM diff --git a/tensor2tensor/data_generators/mscoco.py b/tensor2tensor/data_generators/mscoco.py new file mode 100644 index 000000000..8e8ed8686 --- /dev/null +++ b/tensor2tensor/data_generators/mscoco.py @@ -0,0 +1,267 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
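A tiny illustration of `MSRParaphraseCorpus.example_generator` above: every tab-separated row yields two examples, one per sentence order, so the classifier sees both (s1, s2) and (s2, s1). The row content here is invented:

```python
line = "1\t101\t202\tA man is playing a guitar.\tA person plays the guitar."
label, id1, id2, s1, s2 = line.split("\t")
examples = [{"inputs": [s1, s2], "label": int(label)},
            {"inputs": [s2, s1], "label": int(label)}]
print(len(examples), examples[0]["label"])  # 2 1
```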
+ +"""MS COCO.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import io +import json +import os +import random +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.data_generators import imagenet +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import translate_ende +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +# URLs and filenames for MSCOCO data. +_MSCOCO_ROOT_URL = "http://msvocds.blob.core.windows.net/" +_MSCOCO_URLS = [ + "coco2014/train2014.zip", "coco2014/val2014.zip", "coco2014/test2014.zip", + "annotations-1-0-3/captions_train-val2014.zip" +] +_MSCOCO_TRAIN_PREFIX = "train2014" +_MSCOCO_EVAL_PREFIX = "val2014" +_MSCOCO_TRAIN_CAPTION_FILE = "annotations/captions_train2014.json" +_MSCOCO_EVAL_CAPTION_FILE = "annotations/captions_val2014.json" + + +def _get_mscoco(directory): + """Download and extract MSCOCO datasets to directory unless it is there.""" + for url in _MSCOCO_URLS: + filename = os.path.basename(url) + download_url = os.path.join(_MSCOCO_ROOT_URL, url) + path = generator_utils.maybe_download(directory, filename, download_url) + unzip_dir = os.path.join(directory, filename.strip(".zip")) + if not tf.gfile.Exists(unzip_dir): + zipfile.ZipFile(path, "r").extractall(directory) + + +def mscoco_generator(data_dir, + tmp_dir, + training, + how_many, + start_from=0, + eos_list=None, + vocab_filename=None): + """Image generator for MSCOCO captioning problem with token-wise captions. + + Args: + data_dir: path to the data directory. + tmp_dir: path to temporary storage directory. + training: a Boolean; if true, we use the train set, otherwise the test set. + how_many: how many images and labels to generate. + start_from: from which image to start. + eos_list: optional list of end of sentence tokens, otherwise use default + value `1`. + vocab_filename: file within `tmp_dir` to read vocabulary from. + + Yields: + A dictionary representing the images with the following fields: + * image/encoded: the string encoding the image as JPEG, + * image/format: the string "jpeg" representing image format, + * image/class/label: a list of integers representing the caption, + * image/height: an integer representing the height, + * image/width: an integer representing the width. + Every field is actually a list of the corresponding type. + """ + eos_list = [1] if eos_list is None else eos_list + def get_vocab(): + """Get vocab for caption text encoder.""" + if data_dir is not None and vocab_filename is not None: + vocab_filepath = os.path.join(data_dir, vocab_filename) + if tf.gfile.Exists(vocab_filepath): + tf.logging.info("Found vocab file: %s", vocab_filepath) + vocab_symbolizer = text_encoder.SubwordTextEncoder(vocab_filepath) + return vocab_symbolizer + else: + raise ValueError("Vocab file does not exist: %s" % vocab_filepath) + return None + + vocab_symbolizer = get_vocab() + _get_mscoco(tmp_dir) + caption_filepath = ( + _MSCOCO_TRAIN_CAPTION_FILE if training else _MSCOCO_EVAL_CAPTION_FILE) + caption_filepath = os.path.join(tmp_dir, caption_filepath) + prefix = _MSCOCO_TRAIN_PREFIX if training else _MSCOCO_EVAL_PREFIX + caption_file = io.open(caption_filepath) + caption_json = json.load(caption_file) + # Dictionary from image_id to ((filename, height, width), captions). 
+ image_dict = {} + for image in caption_json["images"]: + image_dict[image["id"]] = [(image["file_name"], image["height"], + image["width"]), []] + annotations = caption_json["annotations"] + annotation_count = len(annotations) + image_count = len(image_dict) + tf.logging.info("Processing %d images and %d labels\n" % (image_count, + annotation_count)) + for annotation in annotations: + image_id = annotation["image_id"] + image_dict[image_id][1].append(annotation["caption"]) + + data = list(image_dict.values())[start_from:start_from + how_many] + random.shuffle(data) + for image_info, labels in data: + image_filename = image_info[0] + image_filepath = os.path.join(tmp_dir, prefix, image_filename) + with tf.gfile.Open(image_filepath, "rb") as f: + encoded_image_data = f.read() + height, width = image_info[1], image_info[2] + for label in labels: + if vocab_filename is None or vocab_symbolizer is None: + label = [ord(c) for c in label] + eos_list + else: + label = vocab_symbolizer.encode(label) + eos_list + yield { + "image/encoded": [encoded_image_data], + "image/format": ["jpeg"], + "image/class/label": label, + "image/height": [height], + "image/width": [width] + } + + +@registry.register_problem +class ImageMsCocoCharacters(image_utils.Image2TextProblem): + """MSCOCO, character level.""" + + @property + def is_character_level(self): + return True + + @property + def target_space_id(self): + return problem.SpaceID.EN_CHR + + @property + def train_shards(self): + return 100 + + @property + def dev_shards(self): + return 10 + + def preprocess_example(self, example, mode, _): + return imagenet.imagenet_preprocess_example(example, mode) + + def generator(self, data_dir, tmp_dir, is_training): + if is_training: + return mscoco_generator(data_dir, tmp_dir, True, 80000) + else: + return mscoco_generator(data_dir, tmp_dir, False, 40000) + raise NotImplementedError() + + +@registry.register_problem +class ImageMsCocoTokens32k(ImageMsCocoCharacters): + """MSCOCO, 8k tokens vocab.""" + + @property + def is_character_level(self): + return False + + @property + def vocab_problem(self): + return translate_ende.TranslateEndeWmt32k() + + @property + def target_space_id(self): + return problem.SpaceID.EN_TOK + + @property + def train_shards(self): + return 100 + + @property + def dev_shards(self): + return 10 + + def generator(self, data_dir, tmp_dir, is_training): + # We use the translate vocab file as the vocabulary for captions. + # This requires having the vocab file present in the data_dir for the + # generation pipeline to succeed. + vocab_filename = self.vocab_problem.vocab_filename + if is_training: + return mscoco_generator( + data_dir, + tmp_dir, + True, + 80000, + vocab_filename=vocab_filename) + else: + return mscoco_generator( + data_dir, + tmp_dir, + False, + 40000, + vocab_filename=vocab_filename) + + +@registry.register_problem +class ImageTextMsCocoMultiResolution(ImageMsCocoTokens32k): + """MSCoCo at multiple resolutions.""" + + def dataset_filename(self): + return "image_ms_coco_tokens32k" + + def preprocess_example(self, example, mode, hparams): + image = example["inputs"] + # Get resize method. Include a default if not specified, or if it's not in + # TensorFlow's collection of pre-implemented resize methods. 
+ resize_method = getattr(hparams, "resize_method", "BICUBIC") + resize_method = getattr(tf.image.ResizeMethod, resize_method, resize_method) + + highest_res = hparams.resolutions[-1] + if resize_method == "DILATED": + # Resize image so that dilated subsampling is properly divisible. + scaled_image = image_utils.resize_by_area(image, highest_res) + scaled_images = image_utils.make_multiscale_dilated( + scaled_image, hparams.resolutions, num_channels=self.num_channels) + else: + scaled_images = image_utils.make_multiscale( + image, hparams.resolutions, + resize_method=resize_method, num_channels=self.num_channels) + + # Pack tuple of scaled images into one tensor. We do this by enforcing the + # columns to match for every resolution. + example["inputs"] = tf.concat([ + tf.reshape(scaled_image, + [res**2 // highest_res, highest_res, self.num_channels]) + for scaled_image, res in zip(scaled_images, hparams.resolutions)], + axis=0) + return example + + +@registry.register_problem +class ImageTextMsCoco(ImageMsCocoTokens32k): + """Problem for using MsCoco for generating images from text.""" + _MSCOCO_IMAGE_SIZE = 32 + + def dataset_filename(self): + return "image_ms_coco_tokens32k" + + def preprocess_example(self, example, mode, unused_hparams): + example["inputs"] = image_utils.resize_by_area( + example["inputs"], self._MSCOCO_IMAGE_SIZE) + return example diff --git a/tensor2tensor/data_generators/mscoco_test.py b/tensor2tensor/data_generators/mscoco_test.py new file mode 100644 index 000000000..dc2e74d40 --- /dev/null +++ b/tensor2tensor/data_generators/mscoco_test.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
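A quick arithmetic check (illustration only) of the packed shape produced by `ImageTextMsCocoMultiResolution.preprocess_example` above: each resolution `r` contributes `r**2 // highest_res` rows of width `highest_res`, which is what the test below asserts for resolutions `[8, 16, 32]`:

```python
# Each scaled image is reshaped to [r**2 // highest_res, highest_res, channels]
# and the pieces are concatenated along the first axis.
resolutions = [8, 16, 32]
highest_res = resolutions[-1]
num_channels = 3

rows = sum(r ** 2 // highest_res for r in resolutions)  # 2 + 8 + 32 = 42
print((rows, highest_res, num_channels))  # (42, 32, 3)
```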
+ +"""Tests for MS COCO.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +from tensor2tensor.data_generators import mscoco +from tensor2tensor.utils import hparam + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class MscocoTest(parameterized.TestCase, tf.test.TestCase): + + @parameterized.named_parameters( + ("Default", None), + ("Area", "AREA"), + ("Dilated", "DILATED")) + def testMsCocoMultiResolutionPreprocessExample(self, resize_method): + example = {"inputs": tf.random_uniform([400, 400, 3], minval=-1.)} + mode = tf_estimator.ModeKeys.TRAIN + hparams = hparam.HParams(resolutions=[8, 16, 32]) + if resize_method is not None: + hparams.resize_method = resize_method + + problem = mscoco.ImageTextMsCocoMultiResolution() + preprocessed_example = problem.preprocess_example(example, mode, hparams) + self.assertLen(preprocessed_example, 1) + self.assertEqual(preprocessed_example["inputs"].shape, (42, 32, 3)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/multi_problem.py b/tensor2tensor/data_generators/multi_problem.py new file mode 100644 index 000000000..b5cb4e638 --- /dev/null +++ b/tensor2tensor/data_generators/multi_problem.py @@ -0,0 +1,554 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base class for combining multiple problems for multitask learning.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import discretization +from tensor2tensor.layers import modalities +from tensor2tensor.utils import metrics +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class MixingSchedule(object): + """Available schedules for mixing datasets.""" + EXPONENTIAL = "exponential" + CONSTANT = "constant" + PRETRAIN = "pretrain" + + +def normalize_example_nlp(task, example, is_infer, vocab_type, vocab_offset, + max_input_length, max_target_length, + fixed_train_length): + """Normalize the examples from different tasks so they can be merged. + + This function is specific to NLP tasks and normalizes them so that in the + end the example only has "targets" and "task_id". For tasks that originally + have inputs, this is done by appending task_id to the inputs and prepending + targets, so normalized_targets = inputs task_id targets. For classification + tasks, targets are constructed by spelling out the class. + + Args: + task: the Problem class of the task we are normalizing. + example: a dictionary of tensors, the example to normalize. + is_infer: bool, whether we are performing inference or not. + vocab_type: the type of vocabulary in use. 
+ vocab_offset: integer, offset index for subword vocabularies. + max_input_length: maximum length to cut inputs to. + max_target_length: maximum length to cut targets to. + fixed_train_length: set length to this size if > 0. + + Returns: + a dictionary of tensors, like example, after normalizing, which in this + case means that it only has "targets" and "task_id" as feature. + """ + if task.has_inputs: + example["inputs"] = example["inputs"][:-1] # remove EOS token + + if hasattr(task, "class_labels"): + if vocab_type == text_problems.VocabType.CHARACTER: + # TODO(urvashik): handle the case where num_labels > 9 + example["targets"] = tf.cast(discretization.int_to_bit( + example["targets"], 1, base=10) + 50, tf.int64) + example["targets"] = tf.squeeze(example["targets"], axis=[-1]) + elif vocab_type == text_problems.VocabType.SUBWORD: + example["targets"] = vocab_offset + example["targets"] + else: + # sequence with inputs and targets eg: summarization + if task.has_inputs: + if max_input_length > 0: + example["inputs"] = example["inputs"][:max_input_length] + # Do not truncate targets during inference with beam decoding. + if max_target_length > 0 and not is_infer: + example["targets"] = example["targets"][:max_target_length] + + def make_constant_shape(x, size): + x = x[:size] + xlen = tf.shape(x)[0] + x = tf.pad(x, [[0, size - xlen]]) + return tf.reshape(x, [size]) + + if task.has_inputs: + if is_infer: + concat_list = [example["inputs"], [task.task_id]] + example["inputs"] = tf.concat(concat_list, axis=0) + else: + inputs = example.pop("inputs") + concat_list = [inputs, [task.task_id], example["targets"]] + example["targets"] = tf.concat(concat_list, axis=0) + if fixed_train_length > 0: + example["targets"] = make_constant_shape( + example["targets"], fixed_train_length) + else: + concat_list = [[task.task_id], example["targets"]] + example["targets"] = tf.concat(concat_list, axis=0) + if not is_infer and fixed_train_length > 0: + example["targets"] = make_constant_shape( + example["targets"], fixed_train_length) + + example["task_id"] = tf.constant([task.task_id], dtype=tf.int64) + return example + + +def flatten_zip_dataset(*args): + """A list of examples to a dataset containing mixed examples. + + Given a list of `n` dataset examples, flatten them by converting + each element into a dataset and concatenating them to convert into a + single dataset. + + Args: + *args: A list containing one example each from `n` different datasets. + + Returns: + flattened: A new dataset containing the examples from the list as part + of a single dataset. + """ + flattened = tf.data.Dataset.from_tensors(args[0]) + for ex in args[1:]: + flattened = flattened.concatenate(tf.data.Dataset.from_tensors(ex)) + return flattened + + +class MultiProblem(problem.Problem): + """MultiProblem base class.""" + + _ADDED_EVAL_COUNT = 20000 + + def __init__(self, was_reversed=False, was_copy=False): + super(MultiProblem, self).__init__(was_reversed, was_copy) + self.task_list = [] + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + assert len(self.task_list) > 1 + for task in self.task_list: + task.generate_data(data_dir, tmp_dir, task_id) + + def normalize_example(self, task, example, encoder, hparams, is_infer): + """Normalize the examples from different tasks so they can be merged.""" + # Here we use the default function for NLP tasks that makes everything + # a part of "targets" feature. Override in your subclasses for other uses. 
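A worked example (plain Python, not part of the patch) of the sequence branch of `normalize_example_nlp`: inputs and targets are merged into a single `targets` feature as `[inputs..., task_id, targets...]` and padded to a fixed length. The token ids and the task id below are made up; in practice the task id is assigned past the end of the text vocabulary by `update_task_ids`:

```python
inputs = [17, 42, 9, 1]   # made-up token ids; the trailing 1 is EOS, stripped first
targets = [5, 6, 7, 1]
task_id = 8003            # made-up id beyond the text vocab
fixed_train_length = 12

merged = inputs[:-1] + [task_id] + targets
merged = merged[:fixed_train_length]                         # truncate, as in make_constant_shape
merged = merged + [0] * (fixed_train_length - len(merged))   # then zero-pad
print(merged)  # [17, 42, 9, 8003, 5, 6, 7, 1, 0, 0, 0, 0]
```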
+ vocab_offset = encoder.vocab_size + len(self.task_list) + return normalize_example_nlp( + task, example, is_infer, self.vocab_type, vocab_offset, + hparams.multiproblem_max_input_length, + hparams.multiproblem_max_target_length, + hparams.multiproblem_fixed_train_length) + + def filepattern(self, data_dir, mode, shard=None): + tf.logging.info("Generating multi problem filepattern") + return [task.filepattern(data_dir, mode, shard) for task in self.task_list] + + def get_hparams(self, model_hparams=None): + if self._hparams is not None: + return self._hparams + self._hparams = self.task_list[0].get_hparams(model_hparams) + # Increase the vocab size to account for task ids and modify the modality. + vocab_size_inc = len(self.task_list) + vocab_size_inc += self.get_max_num_classes() + vocab_size = self._hparams.vocabulary["targets"].vocab_size + new_vocab_size = vocab_size + vocab_size_inc + if model_hparams.multiproblem_vocab_size > new_vocab_size: + new_vocab_size = model_hparams.multiproblem_vocab_size + tf.logging.info("Old vocabulary size: %d" % vocab_size) + self.update_task_ids(vocab_size) + tf.logging.info("New vocabulary size: %d" % new_vocab_size) + self._hparams.vocab_size["targets"] = new_vocab_size + self._hparams.modality["targets"] = modalities.ModalityType.SYMBOL + return self._hparams + + def dataset(self, + mode, + data_dir=None, + num_threads=None, + output_buffer_size=None, + shuffle_files=None, + hparams=None, + preprocess=True, + dataset_split=None, + shard=None, + partition_id=0, + num_partitions=1, + shuffle_buffer_size=1024, + max_records=-1): + # A list of datasets corresponding to the tasks in the task_list object + # that need to be mixed. + datasets = [] + is_training = mode == tf_estimator.ModeKeys.TRAIN + is_infer = mode == tf_estimator.ModeKeys.PREDICT + enc = self.task_list[0].feature_encoders(data_dir=data_dir)["targets"] + self.update_task_ids(enc.vocab_size) + + for task in self.task_list: + task_dataset = task.dataset(mode=mode, + data_dir=data_dir, + num_threads=num_threads, + output_buffer_size=output_buffer_size, + shuffle_files=shuffle_files, + hparams=hparams, + preprocess=preprocess, + dataset_split=dataset_split, + shard=shard, + partition_id=partition_id, + num_partitions=num_partitions, + shuffle_buffer_size=shuffle_buffer_size, + max_records=max_records) + + if is_training: + task_dataset = task_dataset.repeat() + + # pylint: disable=cell-var-from-loop + task_dataset = task_dataset.map( + lambda x: self.normalize_example(task, x, enc, hparams, is_infer)) + # pylint: enable=cell-var-from-loop + + # To run evaluation, we want to zip datasets from different tasks, + # but zipping will cut off at the shortest dataset in tf.Datasets. + # For this reason, we add zero padding to the shorter datasets as + # it will be ignored in metrics but it provides space for larger data. + if not is_training and not is_infer: + zeros = tf.zeros([self._ADDED_EVAL_COUNT, 1], dtype=tf.int64) + pad_data = tf.data.Dataset.from_tensor_slices({ + "targets": zeros, + "batch_prediction_key": zeros, + "task_id": zeros, + }) + task_dataset = task_dataset.concatenate(pad_data) + + datasets.append(task_dataset) + + # Setup the problem hparams by setting them to the LM task hparams. + self.get_hparams(model_hparams=hparams) + + if is_training: + # Using tf.Variable instead of get_variable to work around issues with + # queues on multiple hosts. 
Note that this will separately count steps + # on each host that's feeding the data, so in a large-scale setting you + # may need to adjust hparams for that. For example, a 4x4 slice of a TPU + # pod may use 2 data hosts, so we'll be only adding 1 here once for 2 + # examples -- divide the corresponding hparams by 2 to compensate. + problem_step = tf.Variable(tf.constant(0, dtype=tf.int64), + trainable=False, use_resource=True, + dtype=tf.int64, name="problem_step") + dataset_iterators = [d.make_one_shot_iterator() for d in datasets] + + def get_next_from_dataset(dataset_iter): + return dataset_iter.get_next() + + def get_exp_sched_prob(): + """Inverse decay exponential to mix datasets.""" + with tf.control_dependencies([problem_step.assign_add(1)]): + inv_exp_decay = common_layers.inverse_exp_decay( + max_step=hparams.multiproblem_schedule_max_examples, + min_value=1e-4, + step=tf.to_float(problem_step) + ) + # inv_exp_decay is bounded above by 1.0 + return inv_exp_decay * hparams.multiproblem_schedule_threshold + + def get_const_sched_prob(): + return hparams.multiproblem_schedule_threshold + + def get_pretrain_sched_prob(): + """Pretrain the primary tasks for max examples.""" + with tf.control_dependencies([problem_step.assign_add(1)]): + return tf.cond( + tf.greater(problem_step, + tf.cast(hparams.multiproblem_schedule_max_examples, + dtype=tf.int64)), + lambda: 1.0, lambda: 0.0) + + def mix_data(example): + """Function to mix the different datasets according to a schedule.""" + del example + # This block computes the probability of mixing the primary task with + # the secondary tasks. 0 = only the primary task, 1 = only the secondary + # tasks. + if hparams.multiproblem_mixing_schedule == MixingSchedule.EXPONENTIAL: + prob = get_exp_sched_prob() + prob = tf.cond( + tf.equal(tf.floormod( + problem_step, tf.cast(5e6, dtype=tf.int64)), 0), + lambda: tf.Print(prob, [prob], message="Probability"), + lambda: prob) + elif hparams.multiproblem_mixing_schedule == MixingSchedule.CONSTANT: + prob = get_const_sched_prob() + elif hparams.multiproblem_mixing_schedule == MixingSchedule.PRETRAIN: + prob = get_pretrain_sched_prob() + else: + raise ValueError("Unknown schedule %s" % str( + hparams.multiproblem_mixing_schedule)) + tf.logging.info("Using the %s schedule to " + "train the MultiProblem." % str( + hparams.multiproblem_mixing_schedule)) + tf.logging.info("Schedule mixing threshold " + "%.2f" % hparams.multiproblem_schedule_threshold) + + # If per-task thresholds are specified, use them. + thresholds = None + if hparams.multiproblem_per_task_threshold: + thresholds = hparams.multiproblem_per_task_threshold.split(",") + thresholds = [float(t) for t in thresholds] # Convert to floats. + thresholds_sum = sum(thresholds) + tf.logging.info("Per-task thresholds: %s." % str(thresholds)) + thresholds = [t / thresholds_sum for t in thresholds] # Normalize. + thresholds = [sum(thresholds[:i+1]) for i in range(len(thresholds))] + tf.logging.info("Per-task threshold sums: %s." % str(thresholds)) + if len(thresholds) != len(self.task_list): + tf.logging.warn("Specified %d thresholds but encountered %d tasks." + % (len(thresholds), len(self.task_list))) + thresholds = None + + def sample_task(curr_task, num_tasks_left, randnum): + """A recursive function to sample a task. + + This function treats the probability as the threshold for the primary + task and divides the remaining probability mass across the other + tasks. + + Args: + curr_task: The index of the task being considered for sampling. 
+ num_tasks_left: Number of tasks remaining to possibly sample from. + randnum: The random number used to select the dataset. + + Returns: + A Tensor representing an example from the task that was sampled + from. + """ + if num_tasks_left == 0: + return get_next_from_dataset(dataset_iterators[curr_task]) + + if thresholds is not None: # Use per-task thresholds if specified. + prob_sum = thresholds[curr_task] + return tf.cond( + randnum < prob_sum, + lambda: get_next_from_dataset(dataset_iterators[curr_task]), + lambda: sample_task(curr_task+1, num_tasks_left-1, randnum) + ) + + # When curr_task is 0, the primary task, the new prob is the same as + # the original probability. `tf.greater` indicates that the primary + # task receives (1-prob) of the probability mass. + # Otherwise, `prob` is divided equally amongst all the secondary + # tasks. + new_prob = prob - (curr_task * prob / (len(self.task_list)-1)) + return tf.cond( + tf.greater(randnum, new_prob), + lambda: get_next_from_dataset(dataset_iterators[curr_task]), + lambda: sample_task(curr_task+1, num_tasks_left-1, randnum) + ) + + return tf.data.Dataset.from_tensors( + sample_task(0, len(self.task_list)-1, tf.random_uniform([]))) + + single_mtl_dataset = tf.data.Dataset.from_tensors(tf.zeros([1])).repeat() + single_mtl_dataset = single_mtl_dataset.flat_map(mix_data) + + else: + if hparams.multiproblem_target_eval_only: + single_mtl_dataset = datasets[1] + else: + single_mtl_dataset = tf.data.Dataset.zip(tuple(datasets)).flat_map( + flatten_zip_dataset) + + return single_mtl_dataset + + def eval_metrics(self): + for task in self.task_list: + if "summarize" in task.name: + return [ + metrics.Metrics.ACC, metrics.Metrics.NEG_LOG_PERPLEXITY, + metrics.Metrics.ROUGE_2_F, metrics.Metrics.ROUGE_L_F + ] + return [ + metrics.Metrics.ACC, metrics.Metrics.NEG_LOG_PERPLEXITY, + ] + + def update_task_ids(self, encoder_vocab_size): + """Generate task_ids for each problem. + + These ids correspond to the index of the task in the task_list. + + Args: + encoder_vocab_size: the size of the vocab which is used to compute + the index offset. + """ + for idx, task in enumerate(self.task_list): + task.set_task_id(idx + encoder_vocab_size) + tf.logging.info("Task %d (%s) has id %d." % + (idx, task.name, task.task_id)) + + def get_max_num_classes(self): + """Compute the maximum number of classes any subtask has. + + This is useful for modifying the size of the softmax to include the output + labels for the classification tasks. Currently, labels from different tasks + are overloaded. + + Returns: + num: Highest number of output classes in any text classification sub-task + within this MultiProblem. + """ + num = 0 + for task in self.task_list: + if hasattr(task, "num_classes"): + if num < task.num_classes: + num = task.num_classes + + return num + + +def aggregate_task_losses(hparams, + problem_hparams, + logits, + feature_name, + feature): + """Multiproblem loss function.""" + + # If no reweighting, we want the default loss to mimic the LM loss. 
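To make the per-task threshold logic in `mix_data` above concrete, a small sketch (illustration only; the threshold string is invented) of how `multiproblem_per_task_threshold` is normalized into cumulative bounds and how a uniform random number then picks a task:

```python
per_task_threshold = "1,2,1"  # made-up hparam value

thresholds = [float(t) for t in per_task_threshold.split(",")]
total = sum(thresholds)
thresholds = [t / total for t in thresholds]                             # [0.25, 0.5, 0.25]
cumulative = [sum(thresholds[:i + 1]) for i in range(len(thresholds))]   # [0.25, 0.75, 1.0]


def pick_task(randnum):
  # Mirrors sample_task: take the first task whose cumulative bound exceeds randnum.
  for task_index, bound in enumerate(cumulative):
    if randnum < bound:
      return task_index
  return len(cumulative) - 1


print(pick_task(0.1), pick_task(0.5), pick_task(0.9))  # 0 1 2
```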
+ if not hparams.multiproblem_reweight_label_loss: + return aggregate_task_lm_losses(hparams=hparams, + problem_hparams=problem_hparams, + logits=logits, + feature_name=feature_name, + feature=feature) + + summaries = [] + main_task_id = hparams.problem.task_list[0].task_id + vocab_size = problem_hparams.vocab_size[feature_name] + if vocab_size is not None and hasattr(hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % hparams.vocab_divisor + modality = problem_hparams.modality[feature_name] + loss = hparams.loss.get(feature_name, modalities.get_loss(modality)) + weights_fn = hparams.weights_fn.get( + feature_name, modalities.get_weights_fn(modality)) + # Primary task loss + loss_num, loss_den = loss( + logits, feature, + lambda x: common_layers.weights_multi_problem_all(x, main_task_id), + hparams, vocab_size, weights_fn) + + loss_val = loss_num / tf.maximum(1.0, loss_den) + summaries.append([hparams.problem.task_list[0].name+"_loss", loss_val]) + + # Since the losses may undergo rescaling, they cannot exist as separate + # numerators and denominators. Set the denominators to 1 in order to faciliate + # loss averaging. + loss_num = loss_val + loss_den = tf.minimum(tf.convert_to_tensor(1, dtype=tf.float32), loss_den) + + for task in hparams.problem.task_list[1:]: + # Loss only from the input sequence -- the auxiliary LM loss. + seq_loss_num, seq_loss_den = loss( + logits, feature, + lambda x: common_layers.weights_multi_problem_input(x, task.task_id), # pylint: disable=cell-var-from-loop + hparams, vocab_size) + seq_loss_num *= problem_hparams.loss_multiplier + + # Unscaled sequence loss. + seq_loss = seq_loss_num / tf.maximum(1.0, seq_loss_den) + summaries.append([task.name+"_seq_loss", seq_loss]) + + if hasattr(task, "num_classes"): + # Loss only from the classification label. + label_loss_num, label_loss_den = loss( + logits, feature, + lambda x: common_layers.weights_multi_problem(x, task.task_id), # pylint: disable=cell-var-from-loop + hparams, vocab_size) + label_loss_num *= problem_hparams.loss_multiplier + + # Unscaled classification label loss. + label_loss = label_loss_num / tf.maximum(1.0, label_loss_den) + summaries.append([task.name+"_label_loss", label_loss]) + + # Scaling. + if hparams.multiproblem_reweight_label_loss: + label_loss *= hparams.multiproblem_label_weight + seq_loss *= (1 - hparams.multiproblem_label_weight) + + # This is the training loss for the optimizer after scaling. + task_loss_val = seq_loss + label_loss + + loss_den_ = label_loss_den + + else: + # Loss only from the target sequence. + target_loss_num, target_loss_den = loss( + logits, feature, + lambda x: common_layers.weights_multi_problem(x, task.task_id), # pylint: disable=cell-var-from-loop + hparams, vocab_size) + target_loss_num *= problem_hparams.loss_multiplier + + # Unscaled target sequence loss. + target_loss = target_loss_num / tf.maximum(1.0, target_loss_den) + summaries.append([task.name+"_target_loss", target_loss]) + + # Scaling. + if hparams.multiproblem_reweight_label_loss: + target_loss *= hparams.multiproblem_label_weight + seq_loss *= (1 - hparams.multiproblem_label_weight) + + # This is the training loss for the optimizer after all the scaling. + task_loss_val = seq_loss + target_loss + + loss_den_ = target_loss_den + + summaries.append([task.name+"_loss", task_loss_val]) + # Adding 1 to the loss den for each task leads to averaging task losses. + # TODO(urvashik): Fix combination with other task losses - weighted + # average based on the number of examples from that task. 
+ loss_num += task_loss_val + loss_den += tf.minimum(tf.convert_to_tensor(1, dtype=tf.float32), + loss_den_) + + return loss_num, loss_den, summaries + + +def aggregate_task_lm_losses(hparams, + problem_hparams, + logits, + feature_name, + feature): + """LM loss for multiproblems.""" + summaries = [] + vocab_size = problem_hparams.vocab_size[feature_name] + if vocab_size is not None and hasattr(hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % hparams.vocab_divisor + modality = problem_hparams.modality[feature_name] + loss = hparams.loss.get(feature_name, modalities.get_loss(modality)) + weights_fn = hparams.weights_fn.get( + feature_name, modalities.get_weights_fn(modality)) + loss_num = 0. + loss_den = 0. + for task in hparams.problem.task_list: + loss_num_, loss_den_ = loss( + logits, feature, + lambda x: common_layers.weights_multi_problem_all(x, task.task_id), # pylint: disable=cell-var-from-loop + hparams, vocab_size, weights_fn) + + loss_num += loss_num_ + loss_den += loss_den_ + + loss_val = loss_num_ / tf.maximum(1.0, loss_den_) + summaries.append([task.name+"_loss", loss_val]) + + return loss_num, loss_den, summaries diff --git a/tensor2tensor/data_generators/multi_problem_v2.py b/tensor2tensor/data_generators/multi_problem_v2.py new file mode 100644 index 000000000..dfdf66ece --- /dev/null +++ b/tensor2tensor/data_generators/multi_problem_v2.py @@ -0,0 +1,427 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Multi-problem scheduling in T2T. + +Data sampling schedules are specified by an interpolation method i and a +sequence of tuples (t, pmf), where i can either be 'linear' or 'step', +t is the global_step at training, and pmf is the distribution from which +training examples from each problem are sampled. + +Linear interpolation constructs a piecewise linear training schedule, connecting +pmfs with linear segments. Step interpolation abruptly shifts the sampling +distribution to pmf at global_step t. Both interpolation methods can approximate +any continuous sampling process with sufficient points of interpolation. + +Continuation of the interpolant is constant outside the domain specified by +the schedule. That is, we sample from pmfs[0] for global_step < ts[0] and +pmfs[-1] for global_step > ts[-1]. + +Examples of schedule strings include: + +(1) 'step @0 0.7, 0.3': Sample from problem 0 w.p. 0.7 and problem 1 w.p. 0.3 + for the entirety of training. Since there is only one point, the choice of + interpolation method and global_step does not matter. + +(2) 'step @0 1.0 0.0 @100 0.0 1.0': Train on problem 0 for the first 100 steps + then train on problem 1 for the rest of training. + +(3) 'step @0 0.5 0.5 0.0 @100 1.0 0.0 0.0': Pretrain on problems 0 and 1 for the + first 100 steps then fine tune on problem 2 for the rest of training. + +(4) 'linear @0 1.0 0.0 @100 0.0 1.0' Linear transition from training on problem + 0 to problem 1 over 100 steps, then train on problem 1 for the rest of + training. 
+ +(5) 'linear @0 1.0 0.0 @100 0.9 0.1 @200 0.4 0.6 @300 0.0 1.0': Approximate + inverse exponential decay from problem 0 to problem 1 over 300 steps, then + train on problem 1 for the rest of training. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import os + +import numpy as np + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +import tensorflow.compat.v1 as tf + + +class MultiProblemV2(problem.Problem): + """Dataset scheduling for multiple problems.""" + + def __init__(self, problems, schedule, **kwargs): + """Creates a MultiProblem object. + + Args: + problems: A list of problem.Problem objects. + schedule: A schedule tuple, see encode_schedule for details. + **kwargs: Keywords for problem.Problem.__init__. + """ + super(MultiProblemV2, self).__init__(**kwargs) + self.problems = problems + self.schedule = schedule + + def filepattern(self, *args, **kwargs): + """Returns a list of filepatterns, one for each problem.""" + return [p.filepattern(*args, **kwargs) for p in self.problems] + + def generate_data(self, *args, **kwargs): + """Generates data for each problem.""" + for p in self.problems: + p.generate_data(*args, **kwargs) + + @property + def only_eval_first_problem(self): + """Only run validation on examples from the first problem.""" + return False + + def normalize_example(self, example, hparams): + """Preprocesses examples from different problems before mixing.""" + del hparams # Unused. + return example + + def dataset(self, mode, hparams=None, global_step=None, **kwargs): + """Returns a dataset containing examples from multiple problems. + + Args: + mode: A member of problem.DatasetSplit. + hparams: A tf.HParams object, the model hparams. + global_step: A scalar tensor used to compute the sampling distribution. + If global_step is None, we call tf.train.get_or_create_global_step by + default. + **kwargs: Keywords for problem.Problem.Dataset. + + Returns: + A dataset containing examples from multiple problems. + """ + datasets = [p.dataset(mode, **kwargs) for p in self.problems] + datasets = [ + d.map(lambda x, i=j: self.normalize_example( # pylint: disable=g-long-lambda + dict(x, problem_id=tf.constant([i])), hparams)) + for j, d in enumerate(datasets) # Tag examples with a problem_id. 
+ ] + if mode is problem.DatasetSplit.TRAIN: + if global_step is None: + global_step = tf.train.get_or_create_global_step() + pmf = get_schedule_distribution(self.schedule, global_step) + return get_multi_dataset(datasets, pmf) + elif self.only_eval_first_problem: + return datasets[0] + else: + datasets = [d.repeat() for d in datasets] + return tf.data.Dataset.zip(tuple(datasets)).flat_map( + lambda *x: functools.reduce( # pylint: disable=g-long-lambda + tf.data.Dataset.concatenate, + map(tf.data.Dataset.from_tensors, x))) + + +class MultiText2TextProblem(MultiProblemV2, text_problems.Text2TextProblem): + """Dataset scheduling for multiple text-to-text problems.""" + + def normalize_example(self, example, hparams): + """Assumes that example contains both inputs and targets.""" + + length = self.max_length(hparams) + def _to_constant_shape(tensor): + tensor = tensor[:length] + tensor = tf.pad(tensor, [(0, length - tf.shape(tensor)[0])]) + return tf.reshape(tensor, [length]) + + if self.has_inputs: + example['inputs'] = _to_constant_shape(example['inputs']) + example['targets'] = _to_constant_shape(example['targets']) + elif 'inputs' in example: + if self.packed_length: + raise ValueError('cannot concatenate packed examples on the fly.') + inputs = example.pop('inputs')[:-1] # Remove EOS token. + targets = tf.concat([inputs, example['targets']], 0) + example['targets'] = _to_constant_shape(targets) + else: + example['targets'] = _to_constant_shape(example['targets']) + if self.packed_length: + if self.has_inputs: + if 'inputs_segmentation' in example: + example['inputs_segmentation'] = _to_constant_shape( + example['inputs_segmentation']) + example['inputs_position'] = _to_constant_shape( + example['inputs_position']) + else: + example['inputs_segmentation'] = tf.to_int64( + tf.not_equal(example['inputs'], 0)) + example['inputs_position'] = ( + example['inputs_segmentation'] * tf.range(length, dtype=tf.int64)) + if 'targets_segmentation' in example: + example['targets_segmentation'] = _to_constant_shape( + example['targets_segmentation']) + example['targets_position'] = _to_constant_shape( + example['targets_position']) + else: + example['targets_segmentation'] = tf.to_int64( + tf.not_equal(example['targets'], 0)) + example['targets_position'] = ( + example['targets_segmentation'] * tf.range(length, dtype=tf.int64)) + return example + + def generate_data_with_shared_vocab(self, data_dir, tmp_dir, task_id=-1): + """Generates TF-Records for problems using a global vocabulary file.""" + global_vocab_filename = os.path.join(data_dir, self.vocab_filename) + if not tf.gfile.Exists(global_vocab_filename): + raise ValueError( + 'Global vocabulary file: %s does not exist, ' + 'please create one using build_vocab.py' % global_vocab_filename) + # Before generating data, we copy the global vocabulary file to the children + # locations. Although this is not the most disk efficient strategy, it + # imposes the fewest changes to the text-to-text API. + for p in self.problems: + local_vocab_filename = os.path.join(data_dir, p.vocab_filename) + if not tf.gfile.Exists(local_vocab_filename): + tf.gfile.Copy(global_vocab_filename, local_vocab_filename) + p.generate_data(data_dir, tmp_dir, task_id) + + @property + def packed_length(self): + """Set this to a positive integer if some of the problems are packed.""" + return None + + +def get_multi_dataset(datasets, pmf=None): + """Returns a Dataset that samples records from one or more Datasets. 
+ + Args: + datasets: A list of one or more Dataset objects to sample from. + pmf: A tensor of shape [len(datasets)], the probabilities to sample each + dataset with. This tensor is often constructed with the global_step. If + this is None, we sample from the datasets uniformly at random. + + Returns: + A Dataset object containing records from multiple datasets. Note that + because this dataset iterates through other datasets it is stateful, thus + you will need to call make_initializable_iterator instead of + make_one_shot_iterator. + """ + pmf = tf.fill([len(datasets)], 1.0 / len(datasets)) if pmf is None else pmf + samplers = [d.repeat().make_one_shot_iterator().get_next for d in datasets] + sample = lambda _: categorical_case(pmf, samplers) + return tf.data.Dataset.from_tensors([]).repeat().map(sample) + + +def get_schedule_distribution(schedule, global_step=None): + """Computes the pmf of a schedule given the global_step. + + Args: + schedule: A schedule tuple, see encode_schedule for details. + global_step: A scalar tensor, the step to query the schedule. + + Returns: + A 1-D tensor of probs, the sampling distribution of the global_step. + """ + interpolation, steps, pmfs = schedule + if len(pmfs) == 1: + # py_func doesn't seem to work on TPU - at least get the constant case to + # run. + # TODO(noam): get the general case working. + return pmfs[0] + if global_step is None: + global_step = tf.train.get_or_create_global_step() + if interpolation == 'step': + interpolation_fn = step_interpolation + elif interpolation == 'linear': + interpolation_fn = linear_interpolation + else: + raise ValueError('Invalid interpolation strategy: %s' % interpolation) + return tf.reshape( + tf.py_func( + func=lambda x: interpolation_fn(x, np.array(steps), np.array(pmfs)), + inp=[global_step], Tout=tf.float32), [len(pmfs[0])]) + + +def categorical_case(pmf, fns, rand=None): + """Returns the outputs of fns[i] with probability pmf[i]. + + Args: + pmf: A 1-D tensor of probabilities, the probability mass function. + fns: A list of callables that return tensors, same length as pmf. + rand: An optional scalar between 0.0 and 1.0, the output of an RNG. + + Returns: + A tensor, the output of fns[i] with probability pmf[i]. + """ + rand = tf.random_uniform([]) if rand is None else rand + cmf = tf.pad(tf.cumsum(pmf), [(1, 0)]) + cmf = [cmf[i] for i in range(len(fns) + 1)] + preds = [(rand >= a) & (rand < b) for a, b in zip(cmf[:-1], cmf[1:])] + return tf.case(list(zip(preds, fns)), exclusive=True) + + +def linear_interpolation(x, xp, fp, **kwargs): + """Multi-dimensional linear interpolation. + + Returns the multi-dimensional piecewise linear interpolant to a function with + given discrete data points (xp, fp), evaluated at x. + + Note that *N and *M indicate zero or more dimensions. + + Args: + x: An array of shape [*N], the x-coordinates of the interpolated values. + xp: An np.array of shape [D], the x-coordinates of the data points, must be + increasing. + fp: An np.array of shape [D, *M], the y-coordinates of the data points. + **kwargs: Keywords for np.interp. + + Returns: + An array of shape [*N, *M], the interpolated values. + """ + yp = fp.reshape([fp.shape[0], -1]).transpose() + y = np.stack([np.interp(x, xp, zp, **kwargs) for zp in yp]).transpose() + return y.reshape(x.shape[:1] + fp.shape[1:]).astype(np.float32) + + +def step_interpolation(x, xp, fp, **kwargs): + """Multi-dimensional step interpolation. 
+ + Returns the multi-dimensional step interpolant to a function with + given discrete data points (xp, fp), evaluated at x. + + Note that *N and *M indicate zero or more dimensions. + + Args: + x: An array of shape [*N], the x-coordinates of the interpolated values. + xp: An np.array of shape [D], the x-coordinates of the data points, must be + increasing. + fp: An np.array of shape [D, *M], the y-coordinates of the data points. + **kwargs: Unused. + + Returns: + An array of shape [*N, *M], the interpolated values. + """ + del kwargs # Unused. + xp = np.expand_dims(xp, -1) + lower, upper = xp[:-1], xp[1:] + conditions = (x >= lower) & (x < upper) + # Underflow and overflow conditions and values. Values default to fp[0] and + # fp[-1] respectively. + conditions = np.concatenate([[x < xp[0]], conditions, [x >= xp[-1]]]) + values = np.concatenate([[fp[0]], fp]) + assert np.all(np.sum(conditions, 0) == 1), 'xp must be increasing.' + indices = np.argmax(conditions, 0) + return values[indices].astype(np.float32) + + +def constant_schedule(pmf): + """Returns a schedule tuple for constant sampling distribution. + + Args: + pmf: An array of shape [N] of probabilities. The sampling distribution to + use throughout training. Probabilities must sum to one. + + Returns: + A schedule tuple, see encode_schedule for details. + """ + return ('step', (0,), (tuplize(pmf),)) + + +def example_rates_to_pmf(example_rates): + """Creates a probability-mass-function based on relative example rates. + + Args: + example_rates: a list or tuple + Returns: + a list of floats + """ + total = sum(example_rates) + return [r / total for r in example_rates] + + +def epoch_rates_to_pmf(problems, epoch_rates=None): + """Create a probability-mass-function based on relative epoch rates. + + if epoch_rates=None, then we use uniform epoch rates [1.0] * len(problems) + i.e. it takes each problem the same time to go through one epoch. + + If epoch_rates is given, then these are the relative numbers of epochs + of each problem to go through in a given amount of time. + + Each must have problem.num_training_examples implemented. + + Args: + problems: a list of Problem instances. + epoch_rates: an optional list of float + + Returns: + a list of floating point values. + """ + if epoch_rates is None: + epoch_rates = [1.0] * len(problems) + example_rates = [epoch_rate * p.num_training_examples + for p, epoch_rate in zip(problems, epoch_rates)] + return example_rates_to_pmf(example_rates) + + +def encode_schedule(schedule): + """Encodes a schedule tuple into a string. + + Args: + schedule: A tuple containing (interpolation, steps, pmfs), where + interpolation is a string specifying the interpolation strategy, steps + is an int array_like of shape [N] specifying the global steps, and pmfs is + an array_like of shape [N, M] where pmf[i] is the sampling distribution + at global step steps[i]. N is the number of schedule requirements to + interpolate and M is the size of the probability space. + + Returns: + The string encoding of the schedule tuple. + """ + interpolation, steps, pmfs = schedule + return interpolation + ' ' + ' '.join( + '@' + str(s) + ' ' + ' '.join(map(str, p)) for s, p in zip(steps, pmfs)) + + +def decode_schedule(string): + """Decodes a string into a schedule tuple. + + Args: + string: The string encoding of a schedule tuple. + + Returns: + A schedule tuple, see encode_schedule for details. 
+ """ + splits = string.split() + steps = [int(x[1:]) for x in splits[1:] if x[0] == '@'] + pmfs = np.reshape( + [float(x) for x in splits[1:] if x[0] != '@'], [len(steps), -1]) + return splits[0], tuplize(steps), tuplize(pmfs) + + +def tuplize(nested): + """Recursively converts iterables into tuples. + + Args: + nested: A nested structure of items and iterables. + + Returns: + A nested structure of items and tuples. + """ + if isinstance(nested, str): + return nested + try: + return tuple(map(tuplize, nested)) + except TypeError: + return nested diff --git a/tensor2tensor/data_generators/multi_problem_v2_test.py b/tensor2tensor/data_generators/multi_problem_v2_test.py new file mode 100644 index 000000000..d21b98ecb --- /dev/null +++ b/tensor2tensor/data_generators/multi_problem_v2_test.py @@ -0,0 +1,207 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.data_generators.multi_problem.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np + +from tensor2tensor.data_generators import multi_problem_v2 +from tensor2tensor.data_generators import problem +import tensorflow.compat.v1 as tf + + +class MultiProblemV2Test(parameterized.TestCase, tf.test.TestCase): + + @parameterized.parameters( + { + 'inputs': [(0.0, ['string', 12]), np.array([12, 10])], + 'targets': ((0.0, ('string', 12)), (12, 10)), + }, + { + 'inputs': [1.0, np.ones([2, 3])], + 'targets': (1.0, ((1.0, 1.0, 1.0), (1.0, 1.0, 1.0))), + }, + ) + def test_tuplize(self, inputs, targets): + self.assertEqual(multi_problem_v2.tuplize(inputs), targets) + + @parameterized.parameters( + { + 'schedule': ('step', (100,), ((0.25, 0.75),)), + 'string': 'step @100 0.25 0.75', + }, + { + 'schedule': ('step', (100, 200), ((0.25, 0.75), (0.62, 0.38))), + 'string': 'step @100 0.25 0.75 @200 0.62 0.38', + }, + { + 'schedule': ('linear', (100, 200), ((0.25, 0.75), (0.62, 0.38))), + 'string': 'linear @100 0.25 0.75 @200 0.62 0.38', + }, + ) + def test_encode_decode_schedule(self, schedule, string): + self.assertEqual(multi_problem_v2.encode_schedule(schedule), string) + self.assertEqual(multi_problem_v2.decode_schedule(string), schedule) + + @parameterized.parameters( + { + 'x': np.array([-1.0, 0.0, 0.25, 0.5, 0.75, 1.0, 2.0]), + 'xp': np.array([0.0, 1.0]), + 'fp': np.array([0.2, 0.4]), + 'y': np.array([0.2, 0.2, 0.25, 0.3, 0.35, 0.4, 0.4]), + }, + { + 'x': np.array([-1.0, 0.0, 0.5, 1.0, 2.0]), + 'xp': np.array([0.0, 1.0]), + 'fp': np.array([[0.2, 0.4], [0.4, 0.2]]), + 'y': np.array( + [[0.2, 0.4], [0.2, 0.4], [0.3, 0.3], [0.4, 0.2], [0.4, 0.2]]), + }, + ) + def test_linear_interpolation(self, x, xp, fp, y): + self.assertAllClose(multi_problem_v2.linear_interpolation(x, xp, fp), y) + + @parameterized.parameters( + { + 'x': np.array([-1.0, 0.0, 0.25, 0.5, 0.75, 1.0, 2.0]), + 'xp': np.array([0.0, 0.6, 0.9]), + 'fp': np.array([0.1, 0.9, 0.6]), + 
'y': np.array([0.1, 0.1, 0.1, 0.1, 0.9, 0.6, 0.6]), + }, + { + 'x': np.array([-1.0, 0.0, 0.5, 1.0, 2.0]), + 'xp': np.array([0.0, 0.6, 0.9]), + 'fp': np.array([[0.1, 0.4], [0.9, 0.2], [0.6, 0.9]]), + 'y': np.array( + [[0.1, 0.4], [0.1, 0.4], [0.1, 0.4], [0.6, 0.9], [0.6, 0.9]]), + }, + ) + def test_step_interpolation(self, x, xp, fp, y): + self.assertAllClose(multi_problem_v2.step_interpolation(x, xp, fp), y) + + @parameterized.parameters( + { + 'schedule': ('linear', (100, 200), ((0.25, 0.75), (0.62, 0.38))), + 'steps': np.array([50, 100, 150, 200, 250]), + 'pmfs': np.array( + [[0.25, 0.75], [0.25, 0.75], [0.435, 0.565], [0.62, 0.38], + [0.62, 0.38]]), + }, + { + 'schedule': ('step', (100, 200), ((0.25, 0.75), (0.62, 0.38))), + 'steps': np.array([50, 100, 150, 200, 250]), + 'pmfs': np.array( + [[0.25, 0.75], [0.25, 0.75], [0.25, 0.75], [0.62, 0.38], + [0.62, 0.38]]), + }, + ) + def test_get_schedule_distribution(self, schedule, steps, pmfs): + with self.test_session() as sess: + global_step = tf.train.get_or_create_global_step() + output = multi_problem_v2.get_schedule_distribution(schedule, global_step) + sess.run(global_step.initializer) + for step, pmf in zip(steps, pmfs): + sess.run(global_step.assign(step)) + self.assertAllClose(sess.run(output), pmf) + + @parameterized.parameters( + { + 'pmf': np.array([1.0, 0.0], np.float32), + 'fns': [lambda: 0, lambda: 1], + 'rands': np.array([0.1, 0.4, 0.6, 0.9], np.float32), + 'targets': np.array([0, 0, 0, 0], np.float32), + }, + { + 'pmf': np.array([0.2, 0.6, 0.2], np.float32), + 'fns': [lambda: 0, lambda: 1, lambda: 2], + 'rands': np.array([0.1, 0.4, 0.6, 0.9], np.float32), + 'targets': np.array([0, 1, 1, 2], np.float32), + }, + ) + def test_categorical_case(self, pmf, fns, rands, targets): + with self.test_session() as sess: + for rand, target in zip(rands, targets): + output = multi_problem_v2.categorical_case(pmf, fns, rand) + self.assertEqual(sess.run(output), target) + + @parameterized.parameters( + { + 'pmf': np.array([1.0, 0.0], np.float32), + 'num_datasets': 2, + 'sample_size': 10, + }, + { + 'pmf': np.array([0.3, 0.7], np.float32), + 'num_datasets': 2, + 'sample_size': 400, + }, + { + 'pmf': None, + 'num_datasets': 2, + 'sample_size': 400, + }, + ) + def test_get_multi_dataset(self, pmf, num_datasets, sample_size): + with self.test_session() as sess: + datasets = [tf.data.Dataset.from_tensors(i) for i in range(num_datasets)] + multi_dataset = multi_problem_v2.get_multi_dataset(datasets, pmf) + multi_dataset = multi_dataset.batch(sample_size) + iterator = multi_dataset.make_initializable_iterator() + sess.run(iterator.initializer) + sample_pmf = tf.reduce_mean( + tf.one_hot(iterator.get_next(), num_datasets), 0) + if pmf is None: + pmf = np.array([1.0 / num_datasets] * num_datasets, np.float32) + self.assertAllClose(sess.run(sample_pmf), pmf, rtol=0.1, atol=0.1) + + @parameterized.parameters( + { + 'schedule': ('step', (100, 200), ((1.0, 0.0), (0.0, 1.0))), + 'num_datasets': 2, + 'sample_size': 20, + }, + { + 'schedule': ('linear', (100, 200), ((0.6, 0.4), (0.1, 0.9))), + 'num_datasets': 2, + 'sample_size': 400, + }, + ) + def test_multi_problem_v2(self, schedule, num_datasets, sample_size): + + class DummyProblem(problem.Problem): + + def dataset(self, *args, **kwargs): + return tf.data.Dataset.from_tensors({'targets': 0.0}) + + with self.test_session() as sess: + for mode in [problem.DatasetSplit.TRAIN, problem.DatasetSplit.EVAL]: + p = multi_problem_v2.MultiProblemV2( + [DummyProblem() for _ in range(num_datasets)], schedule) + 
global_step = tf.train.get_or_create_global_step() + dataset = p.dataset(mode, global_step).batch(sample_size) + iterator = dataset.make_initializable_iterator() + features = iterator.get_next() + sess.run(global_step.initializer) + sess.run(iterator.initializer) + sess.run(features) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/data_generators/multinli.py b/tensor2tensor/data_generators/multinli.py new file mode 100644 index 000000000..d784157ae --- /dev/null +++ b/tensor2tensor/data_generators/multinli.py @@ -0,0 +1,223 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for MultiNLI.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import lm1b +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import wiki_lm +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + +EOS = text_encoder.EOS + +# Link to data from GLUE: https://gluebenchmark.com/tasks +_MNLI_URL = ("https://firebasestorage.googleapis.com/v0/b/" + "mtl-sentence-representations.appspot.com/o/" + "data%2FMNLI.zip?alt=media&token=50329ea1-e339-" + "40e2-809c-10c40afff3ce") + + +def _maybe_download_corpora(tmp_dir): + """Download corpora for multinli. + + Args: + tmp_dir: a string + Returns: + a string + """ + mnli_filename = "MNLI.zip" + mnli_finalpath = os.path.join(tmp_dir, "MNLI") + if not tf.gfile.Exists(mnli_finalpath): + zip_filepath = generator_utils.maybe_download( + tmp_dir, mnli_filename, _MNLI_URL) + zip_ref = zipfile.ZipFile(zip_filepath, "r") + zip_ref.extractall(tmp_dir) + zip_ref.close() + + return mnli_finalpath + + +def _example_generator(filename): + """Generate mnli examples. + + Args: + filename: a string + Yields: + dictionaries containing "premise", "hypothesis" and "label" strings + """ + for idx, line in enumerate(tf.gfile.Open(filename, "rb")): + if idx == 0: continue # skip header + line = text_encoder.to_unicode_utf8(line.strip()) + split_line = line.split("\t") + # Works for both splits even though dev has some extra human labels. 
+ yield { + "premise": split_line[8], + "hypothesis": split_line[9], + "label": split_line[-1] + } + + +@registry.register_problem +class MultiNLI(text_problems.TextConcat2ClassProblem): + """MultiNLI classification problems.""" + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 100, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2**15 + + @property + def num_classes(self): + return 3 + + def class_labels(self, data_dir): + del data_dir + # Note this binary classification is different from usual MNLI. + return ["contradiction", "entailment", "neutral"] + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + mnli_dir = _maybe_download_corpora(tmp_dir) + if dataset_split == problem.DatasetSplit.TRAIN: + filesplit = ["train.tsv"] + else: + # Using dev matched as the default for eval. Can also switch this to + # dev_mismatched.tsv + filesplit = ["dev_matched.tsv"] + label_list = self.class_labels(data_dir=None) + for fs in filesplit: + filename = os.path.join(mnli_dir, fs) + for example in _example_generator(filename): + yield { + "inputs": [example["premise"], example["hypothesis"]], + "label": label_list.index(example["label"]) + } + + +@registry.register_problem +class MultiNLIText2text(text_problems.Text2TextProblem): + """MultiNLI classification problems.""" + + @property + def is_generate_per_split(self): + return True + + @property + def approx_vocab_size(self): + return 2**15 + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + mnli_dir = _maybe_download_corpora(tmp_dir) + if dataset_split == problem.DatasetSplit.TRAIN: + filesplit = ["train.tsv"] + else: + # Using dev matched as the default for eval. 
Can also switch this to + # dev_mismatched.tsv + filesplit = ["dev_matched.tsv"] + for fs in filesplit: + filename = os.path.join(mnli_dir, fs) + for example in _example_generator(filename): + yield { + "inputs": "multinli premise: %s hypothesis: %s" % ( + example["premise"], example["hypothesis"]), + "targets": example["label"] + } + + +@registry.register_problem +class MultiNLIText2textMulti64kPacked1k(MultiNLIText2text): + """MultiNLI classification problems with the multi-lingual vocabulary.""" + + @property + def packed_length(self): + return 1024 + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelDeEnFrRoWiki64k() + + @property + def num_training_examples(self): + return 18300 + + +@registry.register_problem +class MultiNLICharacters(MultiNLI): + """MultiNLI classification problems, character level.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.THREE_CL_NLI + + +@registry.register_problem +class MultiNLISharedVocab(MultiNLI): + """MultiNLI classification problems with the LM1b vocabulary.""" + + @property + def use_vocab_from_other_problem(self): + return lm1b.LanguagemodelLm1b32k() + + +@registry.register_problem +class MultiNLIWikiLMSharedVocab(MultiNLI): + """MultiNLI classification problems with the Wiki vocabulary.""" + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelEnWiki32k() + + +@registry.register_problem +class MultiNLIWikiLMSharedVocab64k(MultiNLIWikiLMSharedVocab): + """MultiNLI classification problems with the Wiki vocabulary.""" + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelEnWiki64k() + + +@registry.register_problem +class MultiNLIWikiLMMultiVocab64k(MultiNLIWikiLMSharedVocab): + """MultiNLI classification problems with the multi-lingual vocabulary.""" + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelDeEnFrRoWiki64k() diff --git a/tensor2tensor/data_generators/ocr.py b/tensor2tensor/data_generators/ocr.py new file mode 100644 index 000000000..395bbf7e6 --- /dev/null +++ b/tensor2tensor/data_generators/ocr.py @@ -0,0 +1,84 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""OCR.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import struct +from tensor2tensor.data_generators import image_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_problem +class OcrTest(image_utils.Image2TextProblem): + """OCR test problem.""" + + @property + def is_small(self): + return True + + @property + def is_character_level(self): + return True + + @property + def target_space_id(self): + return problem.SpaceID.EN_CHR + + @property + def train_shards(self): + return 1 + + @property + def dev_shards(self): + return 1 + + def preprocess_example(self, example, mode, _): + # Resize from usual size ~1350x60 to 90x4 in this test. + img = example["inputs"] + img = tf.to_int64( + tf.image.resize_images(img, [90, 4], tf.image.ResizeMethod.AREA)) + img = tf.image.per_image_standardization(img) + example["inputs"] = img + return example + + def generator(self, data_dir, tmp_dir, is_training): + # In this test problem, we assume that the data is in tmp_dir/ocr/ in + # files names 0.png, 0.txt, 1.png, 1.txt and so on until num_examples. + num_examples = 2 + ocr_dir = os.path.join(tmp_dir, "ocr/") + tf.logging.info("Looking for OCR data in %s." % ocr_dir) + for i in range(num_examples): + image_filepath = os.path.join(ocr_dir, "%d.png" % i) + text_filepath = os.path.join(ocr_dir, "%d.txt" % i) + with tf.gfile.Open(text_filepath, "rb") as f: + label = f.read() + with tf.gfile.Open(image_filepath, "rb") as f: + encoded_image_data = f.read() + # In PNG files width and height are stored in these bytes. + width, height = struct.unpack(">ii", encoded_image_data[16:24]) + yield { + "image/encoded": [encoded_image_data], + "image/format": ["png"], + "image/class/label": label.strip(), + "image/height": [height], + "image/width": [width] + } diff --git a/tensor2tensor/data_generators/ops/pack_sequences_ops.cc b/tensor2tensor/data_generators/ops/pack_sequences_ops.cc new file mode 100644 index 000000000..211f18d2a --- /dev/null +++ b/tensor2tensor/data_generators/ops/pack_sequences_ops.cc @@ -0,0 +1,594 @@ +#include "base/integral_types.h" +#include "third_party/tensorflow/core/framework/op_kernel.h" +#include "third_party/tensorflow/core/framework/shape_inference.h" +#include "third_party/tensorflow/core/framework/tensor.h" +#include "third_party/tensorflow/core/framework/types.h" +#include "third_party/tensorflow/core/framework/types.proto.h" +#include "third_party/tensorflow/core/platform/errors.h" + +namespace tensor2tensor { +namespace { + +using ::tensorflow::bfloat16; +using ::tensorflow::DataTypeVector; +using ::tensorflow::DEVICE_CPU; +using ::tensorflow::OpInputList; +using ::tensorflow::OpKernel; +using ::tensorflow::OpKernelConstruction; +using ::tensorflow::OpKernelContext; +using ::tensorflow::OpOutputList; +using ::tensorflow::Status; +using ::tensorflow::Tensor; +using ::tensorflow::TensorShape; +using ::tensorflow::TTypes; +using ::tensorflow::errors::InvalidArgument; +using ::tensorflow::shape_inference::DimensionHandle; +using ::tensorflow::shape_inference::InferenceContext; +using ::tensorflow::shape_inference::ShapeHandle; + +REGISTER_OP("PackSequences2") + .Input("inputs: int64") + .Input("targets: int64") + .Input("inputs_max_length: int32") + .Input("targets_max_length: int32") + .Output("inputs_packed: int64") + .Output("inputs_segmentation: int32") + .Output("inputs_position: int32") 
+ .Output("targets_packed: int64") + .Output("targets_segmentation: int32") + .Output("targets_position: int32") + .SetShapeFn([](InferenceContext* ctx) { + for (int i=0; i < ctx->num_outputs(); i++) { + ctx->set_output(i, ctx->Matrix(ctx->UnknownDim(), + ctx->UnknownDim())); + } + return tensorflow::Status(); + }); + +// Given a collection of examples, each of which consists of two sequences +// ('inputs' and 'targets') this op packs them into as few packed/combined +// examples as possible, to try to minimize padding. +class PackSequences2Op : public OpKernel { + public: + explicit PackSequences2Op( + OpKernelConstruction* ctx) : OpKernel(ctx) { + } + + void Compute(OpKernelContext* ctx) override { + auto inputs = ctx->input(0).matrix(); + auto targets = ctx->input(1).matrix(); + int inputs_max_length = ctx->input(2).scalar()(); + int targets_max_length = ctx->input(3).scalar()(); + int n = inputs.dimension(0); // Number of examples in the input. + std::vector inputs_lengths(n); + std::vector targets_lengths(n); + // Calculate, in 'inputs_lengths', the actual length of each input sequence + // in "inputs", ignoring padding: + int padded_inputs_length = + std::min(static_cast(inputs.dimension(1)), inputs_max_length); + for (int i = 0; i < n; i++) { + for (int j = 0; j < padded_inputs_length; j++) { + if (inputs(i, j) != 0) + inputs_lengths[i]++; + } + } + // Calculate, in 'targets_lengths', the actual length of each target + // sequence in "targets", ignoring padding: + int padded_targets_length = + std::min(static_cast(targets.dimension(1)), targets_max_length); + for (int i = 0; i < n; i++) { + for (int j = 0; j < padded_targets_length; j++) { + if (targets(i, j) != 0) + targets_lengths[i]++; + } + } + int num_combined = 0; // Number of combined examples currently generated. + std::vector combined_inputs_length; + std::vector combined_targets_length; + std::vector > combined_sequence_ids; + for (int seq_id = 0; seq_id < n; seq_id++) { + int inputs_length = inputs_lengths[seq_id]; + int targets_length = targets_lengths[seq_id]; + // Try to fit the current example, 'seq_id', into one of the existing + // packed examples. The code checks to see if the current example fits in + // any of the last 1000 packed examples already generated. If it fits in + // any, then the example if packed there. 
Otherwise, a new packed example + // is generated with the new example, and 'num_combined' is increased to + // reflect this: + for (int combined_id = std::max(0, num_combined - 1000); true; + combined_id++) { + if (combined_id == num_combined) { + // The current example, 'seq_id', did not fit in any of the current + // packed examples, so, we generate a new packed example: + combined_inputs_length.push_back(inputs_length); + combined_targets_length.push_back(targets_length); + combined_sequence_ids.push_back(std::vector(1, seq_id)); + num_combined++; + break; + } else if ( + (combined_inputs_length[combined_id] + inputs_length + <= inputs_max_length) && + (combined_targets_length[combined_id] + targets_length + <= targets_max_length)) { + // The current example, 'seq_id', fits in one of the current packed + // examples, 'combined_id', so, we just add it there, + combined_inputs_length[combined_id] += inputs_length; + combined_targets_length[combined_id] += targets_length; + combined_sequence_ids[combined_id].push_back(seq_id); + break; + } + } + } + + auto output_shape_inputs = + TensorShape({static_cast(num_combined), + static_cast(inputs_max_length)}); + auto output_shape_targets = + TensorShape({static_cast(num_combined), + static_cast(targets_max_length)}); + + Tensor* inputs_packed; + OP_REQUIRES_OK(ctx, ctx->allocate_output( + 0, output_shape_inputs, &inputs_packed)); + auto inputs_packed_m = inputs_packed->matrix(); + inputs_packed_m.setZero(); + + Tensor* inputs_segmentation; + OP_REQUIRES_OK( + ctx, ctx->allocate_output( + 1, output_shape_inputs, &inputs_segmentation)); + auto inputs_segmentation_m = inputs_segmentation->matrix(); + inputs_segmentation_m.setZero(); + + Tensor* inputs_position; + OP_REQUIRES_OK( + ctx, ctx->allocate_output(2, output_shape_inputs, &inputs_position)); + auto inputs_position_m = inputs_position->matrix(); + inputs_position_m.setZero(); + + Tensor* targets_packed; + OP_REQUIRES_OK(ctx, ctx->allocate_output( + 3, output_shape_targets, &targets_packed)); + auto targets_packed_m = targets_packed->matrix(); + targets_packed_m.setZero(); + + Tensor* targets_segmentation; + OP_REQUIRES_OK( + ctx, ctx->allocate_output( + 4, output_shape_targets, &targets_segmentation)); + auto targets_segmentation_m = targets_segmentation->matrix(); + targets_segmentation_m.setZero(); + + Tensor* targets_position; + OP_REQUIRES_OK( + ctx, ctx->allocate_output(5, output_shape_targets, &targets_position)); + auto targets_position_m = targets_position->matrix(); + targets_position_m.setZero(); + + // Copy the actual sequences from 'inputs' and 'targets' into the + // packed/combined examples: + for (int combined_id = 0; combined_id < num_combined; combined_id++) { + int inputs_pos = 0; + int targets_pos = 0; + for (int i=0; i < combined_sequence_ids[combined_id].size(); i++) { + int seq_id = combined_sequence_ids[combined_id][i]; + for (int j=0; j < inputs_lengths[seq_id]; j++) { + inputs_packed_m(combined_id, inputs_pos) = inputs(seq_id, j); + inputs_segmentation_m(combined_id, inputs_pos) = i + 1; + inputs_position_m(combined_id, inputs_pos) = j; + inputs_pos++; + } + for (int j=0; j < targets_lengths[seq_id]; j++) { + targets_packed_m(combined_id, targets_pos) = targets(seq_id, j); + targets_segmentation_m(combined_id, targets_pos) = i + 1; + targets_position_m(combined_id, targets_pos) = j; + targets_pos++; + } + } + } + } +}; + +REGISTER_OP("PackSequencesK") + .Input("inputs: Tinput_types") + .Input("max_lengths: Tinput_count * int32") + .Attr("Tinput_types: list(type)") + 
.Attr("Tinput_count: int") + .Output("outputs_packed: Tinput_types") + .Output("outputs_segmentation: Tinput_count * int32") + .Output("outputs_position: Tinput_count * int32") + .SetShapeFn([](InferenceContext* ctx) { + DataTypeVector input_types; + int input_count; + TF_RETURN_IF_ERROR(ctx->GetAttr("Tinput_types", &input_types)); + TF_RETURN_IF_ERROR(ctx->GetAttr("Tinput_count", &input_count)); + if (input_types.size() != input_count) { + return InvalidArgument( + "`inputs` and `max_lengths` had different numbers of elements"); + } + std::vector input_shapes; + TF_RETURN_IF_ERROR(ctx->input("inputs", &input_shapes)); + std::vector output_shapes; + std::vector segmentation_shapes; + std::vector position_shapes; + for (int i = 0; i < input_shapes.size(); i++) { + const auto& input_shape = input_shapes.at(i); + int rank = ctx->Rank(input_shape); + segmentation_shapes.push_back( + ctx->Matrix(ctx->UnknownDim(), ctx->UnknownDim())); + position_shapes.push_back( + ctx->Matrix(ctx->UnknownDim(), ctx->UnknownDim())); + if (rank == 2) { + output_shapes.push_back( + ctx->MakeShape({ctx->UnknownDim(), ctx->UnknownDim()})); + } else if (rank == 3) { + output_shapes.push_back( + ctx->MakeShape({ctx->UnknownDim(), ctx->UnknownDim(), + ctx->Value(ctx->Dim(input_shape, 2))})); + } else { + return InvalidArgument( + "Only rank 2 and rank 3 inputs are supported"); + } + } + TF_RETURN_IF_ERROR(ctx->set_output("outputs_packed", output_shapes)); + TF_RETURN_IF_ERROR( + ctx->set_output("outputs_segmentation", segmentation_shapes)); + TF_RETURN_IF_ERROR(ctx->set_output("outputs_position", position_shapes)); + return tensorflow::Status(); + }); + +typedef int InputIndex; +typedef int BatchIndex; +typedef int SeqIndex; + +struct PackingSpec { + SeqIndex seq_id; + BatchIndex batch_pos; + int seq_length; + int offset; + int segment_id; +}; + +// This op generalizes PackSequences2Op to examples that contain an arbitrary +// number of sequences (rather than assuming there are just inputs and targets). +// The packing logic is the same. 
+class PackSequencesKOp : public OpKernel { + public: + explicit PackSequencesKOp(OpKernelConstruction* ctx) : OpKernel(ctx) { + OP_REQUIRES_OK(ctx, ctx->GetAttr("Tinput_types", &input_types_)); + OP_REQUIRES_OK(ctx, ctx->GetAttr("Tinput_count", &input_count_)); + OP_REQUIRES( + ctx, input_types_.size() == input_count_, + InvalidArgument( + "`inputs` and `max_lengths` had different numbers of elements")); + } + + void Compute(OpKernelContext* ctx) override { + OpInputList inputs; + OpInputList max_lengths_list; + + OP_REQUIRES_OK(ctx, ctx->input_list("inputs", &inputs)); + OP_REQUIRES_OK(ctx, ctx->input_list("max_lengths", &max_lengths_list)); + OP_REQUIRES( + ctx, inputs.size() == max_lengths_list.size(), + InvalidArgument( + "`inputs` and `max_lengths` had different numbers of elements")); + + std::map max_lengths; + for (InputIndex i = 0; i < max_lengths_list.size(); i++) { + max_lengths[i] = max_lengths_list[i].scalar()(); + } + + int n = inputs.begin()->dim_size(0); + for (const auto& input : inputs) { + OP_REQUIRES(ctx, input.dim_size(0) == n, + InvalidArgument("`inputs` had different batch sizes")); + } + + std::map padded_inputs_lengths; + for (InputIndex i = 0; i < inputs.size(); i++) { + padded_inputs_lengths[i] = + std::min(static_cast(inputs[i].dim_size(1)), max_lengths[i]); + } + + std::map> inputs_lengths; + for (InputIndex i = 0; i < inputs.size(); i++) { + inputs_lengths[i] = + GetInputLengths(ctx, inputs[i], padded_inputs_lengths[i]); + } + + int num_combined = 0; + std::map> combined_inputs_lengths; + std::map> packing_specs; + std::map segment_counter; + + for (SeqIndex seq_id = 0; seq_id < n; seq_id++) { + for (BatchIndex b = std::max(0, num_combined - 1000); b < n; b++) { + bool enough_room = true; + for (InputIndex i = 0; i < inputs.size(); i++) { + int cur_seq_len = combined_inputs_lengths[i][b]; + if (cur_seq_len + inputs_lengths[i][seq_id] > max_lengths[i]) { + enough_room = false; + break; + } + } + if (enough_room) { + num_combined = std::max(num_combined, b + 1); + for (InputIndex i = 0; i < inputs.size(); i++) { + packing_specs[i][seq_id] = { + .seq_id = seq_id, + .batch_pos = b, + .seq_length = inputs_lengths[i][seq_id], + .offset = combined_inputs_lengths[i][b], + .segment_id = (segment_counter[b] + 1) // Add 1 because zero=pad + }; + combined_inputs_lengths[i][b] += inputs_lengths[i][seq_id]; + } + segment_counter[b]++; + break; + } + } + for (InputIndex i = 0; i < inputs.size(); i++) { + if (packing_specs[i].find(seq_id) == packing_specs[i].end()) { + ctx->CtxFailure(InvalidArgument(tensorflow::strings::StrCat( + "failed to pack example=", seq_id, " into input=", i))); + } + } + } + + OpOutputList outputs_packed; + OpOutputList outputs_segmentation; + OpOutputList outputs_position; + + OP_REQUIRES_OK( + ctx, ctx->output_list("outputs_packed", &outputs_packed)); + OP_REQUIRES_OK( + ctx, ctx->output_list("outputs_segmentation", &outputs_segmentation)); + OP_REQUIRES_OK( + ctx, ctx->output_list("outputs_position", &outputs_position)); + + for (InputIndex i = 0; i < inputs.size(); i++) { + TensorShape output_shape_2d = {static_cast(num_combined), + static_cast(max_lengths[i])}; + + TensorShape output_shape = output_shape_2d; + if (inputs[i].dims() == 3) { + output_shape.AddDim(inputs[i].dim_size(2)); + } else if (inputs[i].dims() != 2) { + ctx->CtxFailure(InvalidArgument("invalid rank")); + } + + Tensor* packed; + Tensor* segmentation; + Tensor* position; + + OP_REQUIRES_OK(ctx, outputs_packed.allocate(i, output_shape, &packed)); + OP_REQUIRES_OK(ctx, 
outputs_segmentation.allocate(i, output_shape_2d, + &segmentation)); + OP_REQUIRES_OK(ctx, + outputs_position.allocate(i, output_shape_2d, &position)); + + auto segmentation_eigen = segmentation->matrix(); + auto position_eigen = position->matrix(); + + SetZero(ctx, packed); + segmentation_eigen.setZero(); + position_eigen.setZero(); + + for (const auto& pair : packing_specs.at(i)) { + PackSequence(ctx, inputs[i], packed, segmentation_eigen, + position_eigen, pair.second); + } + } + } + + private: + std::vector GetInputLengths( + OpKernelContext* ctx, + const Tensor& input, + const int padded_input_length) { + switch (input.dtype()) { + case tensorflow::DT_BFLOAT16: + return GetInputLengths(ctx, input, padded_input_length); + case tensorflow::DT_FLOAT: + return GetInputLengths(ctx, input, padded_input_length); + case tensorflow::DT_INT32: + return GetInputLengths(ctx, input, padded_input_length); + case tensorflow::DT_INT64: + return GetInputLengths(ctx, input, padded_input_length); + default: + ctx->CtxFailure( + tensorflow::errors::InvalidArgument("unsupported input dtype")); + return {}; + } + } + + template + std::vector GetInputLengths( + OpKernelContext* ctx, + const Tensor& input, + const int padded_input_length) { + if (input.dims() == 2) { + return GetInputLengths( + input.tensor(), padded_input_length); + } else if (input.dims() == 3) { + return GetInputLengths( + input.tensor(), padded_input_length); + } else { + ctx->CtxFailure( + tensorflow::errors::InvalidArgument("unsupported input rank")); + return {}; + } + } + + template + std::vector GetInputLengths( + const typename TTypes::Tensor& input, + const int padded_input_length) { + std::vector input_lengths; + for (int i = 0; i < input.dimension(0); i++) { + int input_length = 0; + for (int j = 0; j < padded_input_length; j++) { + if (input(i, j) != 0) { + input_length++; + } + } + input_lengths.push_back(input_length); + } + return input_lengths; + } + + template + std::vector GetInputLengths( + const typename TTypes::Tensor& input, + const int padded_input_length) { + std::vector input_lengths; + for (int i = 0; i < input.dimension(0); i++) { + int input_length = 0; + for (int j = 0; j < padded_input_length; j++) { + for (int k = 0; k < input.dimension(2); k++) { + if (input(i, j, k) != 0) { + input_length++; + break; + } + } + } + input_lengths.push_back(input_length); + } + return input_lengths; + } + + void SetZero(OpKernelContext* ctx, Tensor* inputs) { + switch (inputs->dtype()) { + case tensorflow::DT_BFLOAT16: + SetZero(ctx, inputs); + break; + case tensorflow::DT_FLOAT: + SetZero(ctx, inputs); + break; + case tensorflow::DT_INT32: + SetZero(ctx, inputs); + break; + case tensorflow::DT_INT64: + SetZero(ctx, inputs); + break; + default: + ctx->CtxFailure( + tensorflow::errors::InvalidArgument("unsupported input dtype")); + } + } + + template + void SetZero(OpKernelContext* ctx, Tensor* inputs) { + switch (inputs->dims()) { + case 2: + inputs->tensor().setZero(); + break; + case 3: + inputs->tensor().setZero(); + break; + default: + ctx->CtxFailure( + tensorflow::errors::InvalidArgument("unsupported input rank")); + } + } + + void PackSequence(OpKernelContext* ctx, const Tensor& inputs, Tensor* packed, + TTypes::Tensor segmentation, + TTypes::Tensor position, + const PackingSpec& spec) { + switch (inputs.dtype()) { + case tensorflow::DT_FLOAT: + PackSequence( + ctx, inputs, packed, segmentation, position, spec); + break; + case tensorflow::DT_BFLOAT16: + PackSequence( + ctx, inputs, packed, segmentation, position, 
spec); + break; + case tensorflow::DT_INT32: + PackSequence(ctx, inputs, packed, segmentation, position, + spec); + break; + case tensorflow::DT_INT64: + PackSequence(ctx, inputs, packed, segmentation, position, + spec); + break; + default: + ctx->CtxFailure( + tensorflow::errors::InvalidArgument("unsupported input dtype")); + } + } + + template + void PackSequence(OpKernelContext* ctx, const Tensor& inputs, Tensor* packed, + TTypes::Tensor segmentation, + TTypes::Tensor position, + const PackingSpec& spec) { + switch (inputs.dims()) { + case 2: + PackSequence( + ctx, + inputs.tensor(), + packed->tensor(), // TensorMap is pass-by-ref. + segmentation, + position, + spec); + break; + case 3: + PackSequence( + ctx, + inputs.tensor(), + packed->tensor(), // TensorMap is pass-by-ref. + segmentation, + position, + spec); + break; + default: + ctx->CtxFailure( + tensorflow::errors::InvalidArgument("unsupported input rank")); + } + } + + template + void PackSequence(OpKernelContext* ctx, + const typename TTypes::Tensor& inputs, + typename TTypes::Tensor packed, + TTypes::Tensor segmentation, + TTypes::Tensor position, + const PackingSpec& spec) { + for (int i = 0; i < spec.seq_length; i++) { + packed(spec.batch_pos, spec.offset + i) = inputs(spec.seq_id, i); + segmentation(spec.batch_pos, spec.offset + i) = spec.segment_id; + position(spec.batch_pos, spec.offset + i) = i; + } + } + + template + void PackSequence(OpKernelContext* ctx, + const typename TTypes::Tensor& inputs, + typename TTypes::Tensor packed, + TTypes::Tensor segmentation, + TTypes::Tensor position, + const PackingSpec& spec) { + for (int i = 0; i < spec.seq_length; i++) { + for (int k = 0; k < inputs.dimension(2); k++) { + packed(spec.batch_pos, spec.offset + i, k) = inputs(spec.seq_id, i, k); + } + segmentation(spec.batch_pos, spec.offset + i) = spec.segment_id; + position(spec.batch_pos, spec.offset + i) = i; + } + } + + DataTypeVector input_types_; + int input_count_; +}; + +REGISTER_KERNEL_BUILDER(Name("PackSequences2").Device(DEVICE_CPU), + PackSequences2Op); + +REGISTER_KERNEL_BUILDER(Name("PackSequencesK").Device(DEVICE_CPU), + PackSequencesKOp); + +} // namespace +} // namespace tensor2tensor diff --git a/tensor2tensor/data_generators/ops/pack_sequences_ops_test.py b/tensor2tensor/data_generators/ops/pack_sequences_ops_test.py new file mode 100644 index 000000000..48b676f4e --- /dev/null +++ b/tensor2tensor/data_generators/ops/pack_sequences_ops_test.py @@ -0,0 +1,480 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
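For intuition about the outputs being tested here: the segmentation output marks which original example each packed position came from (segment ids 1..k, with 0 for padding), and the position output restarts counting inside every segment. A minimal NumPy sketch, not part of the op's interface, of inverting one packed row:

```python
import numpy as np

def unpack_row(packed_row, segmentation_row):
  """Recovers the original (unpadded) sequences from one packed row."""
  packed = np.asarray(packed_row)
  seg = np.asarray(segmentation_row)
  # Segment ids are 1..k for the k examples packed into this row; 0 is padding.
  return [packed[seg == s].tolist() for s in range(1, seg.max() + 1)]

# With the length-5 case from the tests below:
# unpack_row([1, 2, 3, 4, 5], [1, 1, 1, 2, 2]) -> [[1, 2, 3], [4, 5]]
```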
+ +"""Tests for pack_sequences_ops.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from tensor2tensor.data_generators.ops import pack_sequences_ops +import tensorflow.compat.v1 as tf + + +def _pack_sequences_k(inputs, targets, input_max_length, target_max_length): + """Wrapper for pack_sequences_k with same interface as pack_sequences_2.""" + inputs = tf.convert_to_tensor(inputs, tf.int32) + targets = tf.convert_to_tensor(targets, tf.int32) + input_max_length = tf.convert_to_tensor(input_max_length, dtype=tf.int32) + target_max_length = tf.convert_to_tensor(target_max_length, dtype=tf.int32) + (packed, segmentation, position) = pack_sequences_ops.pack_sequences_k( + [inputs, targets], [input_max_length, target_max_length]) + (inputs_packed, targets_packed) = packed + (inputs_segmentation, targets_segmentation) = segmentation + (inputs_position, targets_position) = position + return (inputs_packed, inputs_segmentation, inputs_position, targets_packed, + targets_segmentation, targets_position) + + +class PackSequencesOpsTest(tf.test.TestCase): + + def do_test_pack_sequences_length3(self, pack_fn): + inputs = [ + [1, 2, 3], + [4, 5, 0], + [6, 0, 0], + ] + targets = [ + [10, 0, 0], + [20, 30, 40], + [50, 60, 0], + ] + inputs_max_length = 3 + targets_max_length = 3 + (inputs_packed, inputs_segmentation, inputs_position, targets_packed, + targets_segmentation, targets_position) = ( + pack_fn(inputs, targets, inputs_max_length, targets_max_length)) + self.assertAllEqual(inputs_packed, [ + [1, 2, 3], + [4, 5, 0], + [6, 0, 0], + ]) + self.assertAllEqual(inputs_segmentation, [ + [1, 1, 1], + [1, 1, 0], + [1, 0, 0], + ]) + self.assertAllEqual(inputs_position, [ + [0, 1, 2], + [0, 1, 0], + [0, 0, 0], + ]) + self.assertAllEqual(targets_packed, [ + [10, 0, 0], + [20, 30, 40], + [50, 60, 0], + ]) + self.assertAllEqual(targets_segmentation, [ + [1, 0, 0], + [1, 1, 1], + [1, 1, 0], + ]) + self.assertAllEqual(targets_position, [ + [0, 0, 0], + [0, 1, 2], + [0, 1, 0], + ]) + + def do_test_pack_sequences_length4(self, pack_fn): + inputs = [ + [1, 2, 3], + [4, 5, 0], + [6, 0, 0], + ] + targets = [ + [10, 0, 0], + [20, 30, 40], + [50, 60, 0], + ] + inputs_max_length = 4 + targets_max_length = 4 + (inputs_packed, inputs_segmentation, inputs_position, targets_packed, + targets_segmentation, targets_position) = ( + pack_fn(inputs, targets, inputs_max_length, targets_max_length)) + self.assertAllEqual(inputs_packed, [ + [1, 2, 3, 6], + [4, 5, 0, 0], + ]) + self.assertAllEqual(inputs_segmentation, [ + [1, 1, 1, 2], + [1, 1, 0, 0], + ]) + self.assertAllEqual(inputs_position, [ + [0, 1, 2, 0], + [0, 1, 0, 0], + ]) + self.assertAllEqual(targets_packed, [ + [10, 50, 60, 0], + [20, 30, 40, 0], + ]) + self.assertAllEqual(targets_segmentation, [ + [1, 2, 2, 0], + [1, 1, 1, 0], + ]) + self.assertAllEqual(targets_position, [ + [0, 0, 1, 0], + [0, 1, 2, 0], + ]) + + def do_test_pack_sequences_length5(self, pack_fn): + inputs = [ + [1, 2, 3], + [4, 5, 0], + [6, 0, 0], + ] + targets = [ + [10, 0, 0], + [20, 30, 40], + [50, 60, 0], + ] + max_length = 5 + (inputs_packed, inputs_segmentation, inputs_position, targets_packed, + targets_segmentation, targets_position) = ( + pack_fn(inputs, targets, max_length, max_length)) + self.assertAllEqual( + inputs_packed, [ + [1, 2, 3, 4, 5], + [6, 0, 0, 0, 0], + ]) + self.assertAllEqual( + inputs_segmentation, [ + [1, 1, 1, 2, 2], + [1, 0, 0, 0, 0], + ]) + self.assertAllEqual( + inputs_position, [ + 
[0, 1, 2, 0, 1], + [0, 0, 0, 0, 0], + ]) + self.assertAllEqual( + targets_packed, [ + [10, 20, 30, 40, 0], + [50, 60, 0, 0, 0], + ]) + self.assertAllEqual( + targets_segmentation, [ + [1, 2, 2, 2, 0], + [1, 1, 0, 0, 0], + ]) + self.assertAllEqual( + targets_position, [ + [0, 0, 1, 2, 0], + [0, 1, 0, 0, 0], + ]) + + def do_test_pack_sequences_length6(self, pack_fn): + inputs = [ + [1, 2, 3], + [4, 5, 0], + [6, 0, 0], + ] + targets = [ + [10, 0, 0], + [20, 30, 40], + [50, 60, 0], + ] + max_length = 6 + (inputs_packed, inputs_segmentation, inputs_position, targets_packed, + targets_segmentation, targets_position) = ( + pack_fn(inputs, targets, max_length, max_length)) + self.assertAllEqual(inputs_packed, [ + [1, 2, 3, 4, 5, 6], + ]) + self.assertAllEqual(inputs_segmentation, [ + [1, 1, 1, 2, 2, 3], + ]) + self.assertAllEqual(inputs_position, [ + [0, 1, 2, 0, 1, 0], + ]) + self.assertAllEqual(targets_packed, [ + [10, 20, 30, 40, 50, 60], + ]) + self.assertAllEqual(targets_segmentation, [ + [1, 2, 2, 2, 3, 3], + ]) + self.assertAllEqual(targets_position, [ + [0, 0, 1, 2, 0, 1], + ]) + + def do_test_pack_sequences_length7(self, pack_fn): + inputs = [ + [1, 2, 3], + [4, 5, 0], + [6, 0, 0], + ] + targets = [ + [10, 0, 0], + [20, 30, 40], + [50, 60, 0], + ] + max_length = 7 + (inputs_packed, inputs_segmentation, inputs_position, targets_packed, + targets_segmentation, targets_position) = ( + pack_fn(inputs, targets, max_length, max_length)) + self.assertAllEqual(inputs_packed, [ + [1, 2, 3, 4, 5, 6, 0], + ]) + self.assertAllEqual(inputs_segmentation, [ + [1, 1, 1, 2, 2, 3, 0], + ]) + self.assertAllEqual(inputs_position, [ + [0, 1, 2, 0, 1, 0, 0], + ]) + self.assertAllEqual(targets_packed, [ + [10, 20, 30, 40, 50, 60, 0], + ]) + self.assertAllEqual(targets_segmentation, [ + [1, 2, 2, 2, 3, 3, 0], + ]) + self.assertAllEqual(targets_position, [ + [0, 0, 1, 2, 0, 1, 0], + ]) + + def do_test_pack_sequences_length_different_lengths(self, pack_fn): + inputs = [ + [1, 2, 3], + [4, 5, 0], + [6, 0, 0], + ] + targets = [ + [10, 0, 0], + [20, 30, 40], + [50, 60, 0], + ] + input_max_length = 3 + target_max_length = 4 + (inputs_packed, inputs_segmentation, inputs_position, targets_packed, + targets_segmentation, targets_position) = ( + pack_fn(inputs, targets, input_max_length, target_max_length)) + self.assertAllEqual(inputs_packed, [ + [1, 2, 3], + [4, 5, 0], + [6, 0, 0], + ]) + self.assertAllEqual(inputs_segmentation, [ + [1, 1, 1], + [1, 1, 0], + [1, 0, 0], + ]) + self.assertAllEqual(inputs_position, [ + [0, 1, 2], + [0, 1, 0], + [0, 0, 0], + ]) + self.assertAllEqual(targets_packed, [ + [10, 0, 0, 0], + [20, 30, 40, 0], + [50, 60, 0, 0], + ]) + self.assertAllEqual(targets_segmentation, [ + [1, 0, 0, 0], + [1, 1, 1, 0], + [1, 1, 0, 0], + ]) + self.assertAllEqual(targets_position, [ + [0, 0, 0, 0], + [0, 1, 2, 0], + [0, 1, 0, 0], + ]) + + def test_pack_sequences2(self): + self.do_test_pack_sequences_length3(pack_sequences_ops.pack_sequences2) + self.do_test_pack_sequences_length4(pack_sequences_ops.pack_sequences2) + self.do_test_pack_sequences_length5(pack_sequences_ops.pack_sequences2) + self.do_test_pack_sequences_length6(pack_sequences_ops.pack_sequences2) + self.do_test_pack_sequences_length7(pack_sequences_ops.pack_sequences2) + self.do_test_pack_sequences_length_different_lengths( + pack_sequences_ops.pack_sequences2) + + def test_pack_sequences_k(self): + self.do_test_pack_sequences_length3(_pack_sequences_k) + self.do_test_pack_sequences_length4(_pack_sequences_k) + 
self.do_test_pack_sequences_length5(_pack_sequences_k) + self.do_test_pack_sequences_length6(_pack_sequences_k) + self.do_test_pack_sequences_length7(_pack_sequences_k) + self.do_test_pack_sequences_length_different_lengths(_pack_sequences_k) + + def test_random_inputs(self): + for _ in range(10): + batch_size = np.random.randint(900, 1100, size=[]) + input_seqlen = np.random.randint(1, 10, size=[]) + target_seqlen = np.random.randint(1, 10, size=[]) + inputs_list = [] + targets_list = [] + for _ in range(batch_size): + input_num_pads = np.random.randint(0, input_seqlen, size=[]) + input_pads = np.full([input_num_pads], 0, dtype=np.int32) + inputs = np.random.randint(1, 10, size=[input_seqlen - input_num_pads]) + inputs = np.concatenate([inputs, input_pads], axis=0) + + target_num_pads = np.random.randint(0, target_seqlen, size=[]) + target_pads = np.full([target_num_pads], 0, dtype=np.int32) + targets = np.random.randint( + 1, 10, size=[target_seqlen - target_num_pads]) + targets = np.concatenate([targets, target_pads], axis=0) + + inputs_list.append(inputs) + targets_list.append(targets) + input_maxlen = np.random.randint(input_seqlen, input_seqlen + 10, size=[]) + target_maxlen = np.random.randint( + target_seqlen, target_seqlen + 10, size=[]) + (inputs_packed2, inputs_segmentation2, inputs_positions2, targets_packed2, + targets_segmentation2, targets_positions2) = ( + pack_sequences_ops.pack_sequences2(inputs_list, targets_list, + input_maxlen, target_maxlen)) + (inputs_packed_k, inputs_segmentation_k, inputs_positions_k, + targets_packed_k, targets_segmentation_k, targets_positions_k) = ( + _pack_sequences_k(inputs_list, targets_list, input_maxlen, + target_maxlen)) + + self.assertAllEqual(inputs_packed2, inputs_packed_k) + self.assertAllEqual(inputs_segmentation2, inputs_segmentation_k) + self.assertAllEqual(inputs_positions2, inputs_positions_k) + self.assertAllEqual(targets_packed2, targets_packed_k) + self.assertAllEqual(targets_segmentation2, targets_segmentation_k) + self.assertAllEqual(targets_positions2, targets_positions_k) + + def test_pack_sequences_k_multi_input(self): + input_tokens = tf.convert_to_tensor([ + [1, 2, 3], + [4, 5, 0], + [6, 0, 0], + ], dtype=tf.int32) + input_vectors = tf.convert_to_tensor([ + [[0, 1, 2], [1, 2, 3], [2, 3, 4]], + [[3, 4, 5], [4, 5, 6], [0, 0, 0]], + [[5, 6, 7], [0, 0, 0], [0, 0, 0]], + ], dtype=tf.float32) + targets = tf.convert_to_tensor([ + [10, 0, 0], + [20, 30, 40], + [50, 60, 0], + ], dtype=tf.int32) + (packed, segmentation, position) = pack_sequences_ops.pack_sequences_k( + [input_tokens, input_vectors, targets], + [5, 3, 5]) + (input_tokens_packed, input_vectors_packed, targets_packed) = packed + (input_tokens_segmentation, input_vectors_segmentation, + targets_segmentation) = segmentation + (input_tokens_position, input_vectors_position, targets_position) = position + self.assertAllEqual( + input_tokens_packed, [ + [1, 2, 3, 0, 0], + [4, 5, 6, 0, 0], + ]) + self.assertAllEqual( + input_vectors_packed, [ + [[0, 1, 2], [1, 2, 3], [2, 3, 4]], + [[3, 4, 5], [4, 5, 6], [5, 6, 7]], + ]) + self.assertAllEqual( + input_tokens_segmentation, [ + [1, 1, 1, 0, 0], + [1, 1, 2, 0, 0], + ]) + self.assertAllEqual( + input_vectors_segmentation, [ + [1, 1, 1], + [1, 1, 2], + ]) + self.assertAllEqual( + input_tokens_position, [ + [0, 1, 2, 0, 0], + [0, 1, 0, 0, 0], + ]) + self.assertAllEqual( + input_vectors_position, [ + [0, 1, 2], + [0, 1, 0], + ]) + self.assertAllEqual( + targets_packed, [ + [10, 0, 0, 0, 0], + [20, 30, 40, 50, 60], + ]) + 
self.assertAllEqual( + targets_segmentation, [ + [1, 0, 0, 0, 0], + [1, 1, 1, 2, 2], + ]) + self.assertAllEqual( + targets_position, [ + [0, 0, 0, 0, 0], + [0, 1, 2, 0, 1], + ]) + + def test_pack_sequences_k_int64(self): + inputs = tf.convert_to_tensor([ + [1, 2, 3], + [4, 5, 0], + [6, 0, 0], + ], dtype=tf.int64) + max_length = tf.convert_to_tensor(5, dtype=tf.int32) + (packed, segmentation, position) = pack_sequences_ops.pack_sequences_k( + [inputs], [max_length]) + (inputs_packed,) = packed + (inputs_segmentation,) = segmentation + (inputs_position,) = position + self.assertAllEqual( + inputs_packed, [ + [1, 2, 3, 4, 5], + [6, 0, 0, 0, 0], + ]) + self.assertEqual(inputs_packed.dtype, tf.int64) + self.assertAllEqual( + inputs_segmentation, [ + [1, 1, 1, 2, 2], + [1, 0, 0, 0, 0], + ]) + self.assertAllEqual( + inputs_position, [ + [0, 1, 2, 0, 1], + [0, 0, 0, 0, 0], + ]) + + def test_pack_sequences_k_bfloat16(self): + inputs = tf.convert_to_tensor([ + [1, 2, 3], + [4, 5, 0], + [6, 0, 0], + ], dtype=tf.bfloat16) + max_length = tf.convert_to_tensor(5, dtype=tf.int32) + (packed, segmentation, position) = pack_sequences_ops.pack_sequences_k( + [inputs], [max_length]) + (inputs_packed,) = packed + (inputs_segmentation,) = segmentation + (inputs_position,) = position + self.assertAllEqual( + inputs_packed, [ + [1, 2, 3, 4, 5], + [6, 0, 0, 0, 0], + ]) + self.assertEqual(inputs_packed.dtype, tf.bfloat16) + self.assertAllEqual( + inputs_segmentation, [ + [1, 1, 1, 2, 2], + [1, 0, 0, 0, 0], + ]) + self.assertAllEqual( + inputs_position, [ + [0, 1, 2, 0, 1], + [0, 0, 0, 0, 0], + ]) + + +if __name__ == "__main__": + tf.enable_eager_execution() + tf.test.main() diff --git a/tensor2tensor/data_generators/ops/subword_text_encoder.cc b/tensor2tensor/data_generators/ops/subword_text_encoder.cc new file mode 100644 index 000000000..fba7ba2a4 --- /dev/null +++ b/tensor2tensor/data_generators/ops/subword_text_encoder.cc @@ -0,0 +1,134 @@ +#include "third_party/py/tensor2tensor/data_generators/ops/subword_text_encoder.h" + +#include "third_party/absl/strings/str_cat.h" +#include "third_party/absl/strings/str_split.h" +#include "third_party/absl/strings/string_view.h" +#include "third_party/icu/include/unicode/uchar.h" +#include "third_party/icu/include/unicode/utf8.h" +#include "third_party/tensorflow/core/framework/tensor.h" +#include "third_party/tensorflow/core/platform/env.h" + +namespace tensor2tensor { +namespace { + +using ::tensorflow::Env; + +// End of Sequence token ID to insert at end of encoded text. +constexpr int64_t kEosTokenId = 1; + +} // namespace + +SubwordTextEncoder::SubwordTextEncoder(const std::string& vocab_filename) { + // TODO(ormandi): Add a unified vocabulary reader function. + std::string vocab_contents; + TF_CHECK_OK( + ReadFileToString(Env::Default(), vocab_filename, &vocab_contents)); + std::vector vocab_list = + absl::StrSplit(vocab_contents, '\n'); + // Strip trailing newline by skipping last element, then strip the first and + // last chars to remove enclosing quotes. + auto vocab_size = vocab_list.size() - vocab_list.back().empty(); + for (auto i = 0; i < vocab_size; ++i) { + absl::string_view token = + vocab_list[i].substr(1, vocab_list[i].length() - 2); + int char_index = 0; + do { + // Note throughout that these strings are unicode so we iterate over utf-8 + // code points, which may be between 8-32 bits long, using U8_NEXT. It is + // important never to iterate directly over ascii characters or models + // will fail to handle non-ascii alphabets properly. 
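+      // U8_NEXT advances char_index past one UTF-8 code point (1-4 bytes)
+      // and writes the decoded code point to c; a negative c signals
+      // malformed UTF-8, which the CHECK below rejects.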
+ UChar32 c; + U8_NEXT(token, char_index, token.length(), c); + CHECK_GE(c, 0); + alphabet_.insert(c); + } while (char_index < token.length()); + vocab_.insert({std::string(token), i}); + } +} + +void SubwordTextEncoder::Encode(absl::string_view text, std::vector* ids) { + // Subsequent code can read characters beyond the bound of the string_view + // in "text". For example, U8_NEXT requires that the offset should be + // strictly smaller than the length, but this is possible with the code + // below. Ideally, this should not happen, but work around this issue by + // using the pointer to circumvent bounds checking until the code or tests + // are fixed. + const char* ptr = text.data(); + + ids->clear(); + int token_start = 0; + int token_end = 0; + UChar32 c; + UChar32 next_c; + U8_NEXT(ptr, token_end, text.length(), c); + CHECK_GE(c, 0); + while (token_end <= text.length()) { + int next_end = token_end; + U8_NEXT(ptr, next_end, text.length(), next_c); + CHECK_GE(next_c, 0); + // Subtoken break when switching from non-alphanum to alphanum, or when + // reaching the end of the original token. + if (u_isalnum(next_c) != u_isalnum(c) || token_end >= text.length()) { + absl::string_view next_token(ptr + token_start, token_end - token_start); + if (next_token != " ") { + EncodeSubtokens(next_token, ids); + } + token_start = token_end; + } + token_end = next_end; + c = next_c; + } + ids->push_back(kEosTokenId); +} + +void SubwordTextEncoder::EncodeSubtokens( + absl::string_view token, std::vector *ids) { + std::string token_s = EscapeToken(token); + token = token_s; + int subtoken_start = 0; + // TODO(noam): this algorithm is quadratic in the length of the token. + // We should instead start with a length equal to the maximum subtoken + // length in the vocabulary. + int subtoken_end = token.length(); + while (subtoken_start < token.length()) { + absl::string_view subtoken = + token.substr(subtoken_start, subtoken_end - subtoken_start); + auto iter = vocab_.find(subtoken); + if (iter != vocab_.end()) { + ids->push_back(iter->second); + subtoken_start = subtoken_end; + // TODO(noam): again, set subtoken_end forward only enough to catch + // the longest subtoken in the vocabulary. 
+ subtoken_end = token.length(); + } else { + U8_BACK_1((const uint8_t*)token_s.data(), 0, subtoken_end); + if (subtoken_end <= subtoken_start) { + LOG(FATAL) << "Unencodable tokens found."; + } + } + } +} + +std::string SubwordTextEncoder::EscapeToken(absl::string_view token) { + std::string token_s; + int i = 0; + do { + int prev = i; + UChar32 c; + U8_NEXT(token, i, token.length(), c); + CHECK_GE(c, 0); + if (c == '_') { + absl::StrAppend(&token_s, "\\u"); + } else if (c == '\\') { + absl::StrAppend(&token_s, "\\\\"); + } else if (c == '\n' || alphabet_.find(c) == alphabet_.end()) { + absl::StrAppend(&token_s, "\\", c, ";"); + } else { + absl::StrAppend(&token_s, token.substr(prev, i - prev)); + } + } while (i < token.length()); + absl::StrAppend(&token_s, "_"); + return token_s; +} + +} // namespace tensor2tensor diff --git a/tensor2tensor/data_generators/ops/subword_text_encoder.h b/tensor2tensor/data_generators/ops/subword_text_encoder.h new file mode 100644 index 000000000..17cb2bf66 --- /dev/null +++ b/tensor2tensor/data_generators/ops/subword_text_encoder.h @@ -0,0 +1,44 @@ +#ifndef TENSOR2TESNOR_DATA_GENERATORS_OPS_SUBWORD_TEXT_ENCODER_H_ +#define TENSOR2TESNOR_DATA_GENERATORS_OPS_SUBWORD_TEXT_ENCODER_H_ + +#include "third_party/absl/container/flat_hash_map.h" +#include "third_party/absl/container/flat_hash_set.h" +#include "third_party/absl/strings/string_view.h" +#include "third_party/icu/include/unicode/uchar.h" +#include "third_party/tensorflow/core/framework/tensor.h" + +namespace tensor2tensor { + +// A subword text encoder with built in tokenizer. +// +// Equivalent to tensor2tensor's subword text +// https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/text_encoder.py, +// This code (or a suitable replacement) should eventually move into tfds +// and should be deleted from tensor2tensor. + +class SubwordTextEncoder { + public: + explicit SubwordTextEncoder(const std::string& vocab_filename); + virtual ~SubwordTextEncoder() {} + + // Breaks up input text into subtokens. + void Encode(absl::string_view text, std::vector* ids); + + private: + // Given a full token as input, breaks the token up into subtokens and appends + // corresponding IDs to the ids vector. + void EncodeSubtokens(absl::string_view token, std::vector* ids); + + // Escapes a token so unencodable characters are replaced by escape sequences. + std::string EscapeToken(absl::string_view token); + + // Maps subword tokens to IDs. + absl::flat_hash_map vocab_; + // A set containing all valid unicode code points that can be encoded without + // being escaped. 
+ absl::flat_hash_set alphabet_; +}; + +} // namespace tensor2tensor + +#endif // TENSOR2TESNOR_DATA_GENERATORS_OPS_SUBWORD_TEXT_ENCODER_H_ diff --git a/tensor2tensor/data_generators/ops/subword_text_encoder_ops.cc b/tensor2tensor/data_generators/ops/subword_text_encoder_ops.cc new file mode 100644 index 000000000..7d89b7d65 --- /dev/null +++ b/tensor2tensor/data_generators/ops/subword_text_encoder_ops.cc @@ -0,0 +1,68 @@ +#include + +#include "third_party/py/tensor2tensor/data_generators/ops/subword_text_encoder.h" +#include "third_party/tensorflow/core/framework/op_kernel.h" +#include "third_party/tensorflow/core/framework/shape_inference.h" +#include "third_party/tensorflow/core/framework/tensor.h" +#include "third_party/tensorflow/core/framework/types.h" + +namespace tensor2tensor { +namespace { + +using ::tensorflow::DEVICE_CPU; +using ::tensorflow::OpKernel; +using ::tensorflow::OpKernelConstruction; +using ::tensorflow::OpKernelContext; +using ::tensorflow::Status; +using ::tensorflow::Tensor; +using ::tensorflow::TensorShape; +using ::tensorflow::tstring; +using ::tensorflow::shape_inference::InferenceContext; + +REGISTER_OP("SubwordTextEncoderEncode") + .Input("s: string") + .Output("encoded: int64") + .Attr("vocab_filename: string") + .SetShapeFn([](InferenceContext* ctx) { + ctx->set_output(0, ctx->Vector(ctx->UnknownDim())); + return tensorflow::Status(); + }); + +class SubwordTextEncoderEncodeOp : public OpKernel { + public: + explicit SubwordTextEncoderEncodeOp( + OpKernelConstruction* ctx) : OpKernel(ctx) { + std::string vocab_filename; + OP_REQUIRES_OK(ctx, ctx->GetAttr("vocab_filename", &vocab_filename)); + encoder_ = std::make_unique(vocab_filename); + } + + void Compute(OpKernelContext* ctx) override { + // Get input string and deserialize into ArticleExample proto. + absl::string_view s = ctx->input(0).scalar()(); + + // Construct encoded output tensors. + std::vector encoded_ids; + encoder_->Encode(s, &encoded_ids); + Tensor* encoded; + OP_REQUIRES_OK( + ctx, ctx->allocate_output( + 0, TensorShape({static_cast(encoded_ids.size())}), + &encoded)); + auto encoded_vec = encoded->vec(); + // TODO(noam): find someone who remembers c++ eigen and ask the proper way + // to copy a std::Vector to an Eigen whatever-this-is + for (int i = 0; i < encoded_ids.size(); i++) { + encoded_vec(i) = encoded_ids[i]; + } + } + + private: + std::unique_ptr encoder_; +}; + +REGISTER_KERNEL_BUILDER(Name("SubwordTextEncoderEncode").Device(DEVICE_CPU), + SubwordTextEncoderEncodeOp); + +} // namespace +} // namespace tensor2tensor diff --git a/tensor2tensor/data_generators/ops/subword_text_encoder_ops_test.py b/tensor2tensor/data_generators/ops/subword_text_encoder_ops_test.py new file mode 100644 index 000000000..952838f3a --- /dev/null +++ b/tensor2tensor/data_generators/ops/subword_text_encoder_ops_test.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
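As a reading aid for the C++ above (not code from this change), the greedy longest-prefix-match loop in EncodeSubtokens can be sketched in Python roughly as follows; `vocab` stands in for the id map built in the constructor, and byte-versus-code-point details are glossed over.

```python
def encode_subtokens(escaped_token, vocab):
  """Greedy longest-match segmentation of an already-escaped token."""
  ids = []
  start, end = 0, len(escaped_token)
  while start < len(escaped_token):
    subtoken = escaped_token[start:end]
    if subtoken in vocab:
      ids.append(vocab[subtoken])
      start, end = end, len(escaped_token)  # restart on the remainder
    else:
      end -= 1                              # try a shorter prefix
      if end <= start:
        raise ValueError("unencodable token: %r" % escaped_token)
  return ids
```

Restarting from the full remaining length after every match is what makes the loop quadratic in the token length, which is what the TODO above calls out.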
+ +"""Tests for subword_text_encoder_ops.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators.ops import subword_text_encoder_ops +import tensorflow.compat.v1 as tf + +vocab_file = ( + "third_party/py/tensor2tensor/data_generators/ops/testdata/subwords") + + +class SubwordTextEncoderOpsTest(tf.test.TestCase): + + def test_subword_text_encoder_encode(self): + s = "the quick brown fox jumps over the lazy dog" + encoded = subword_text_encoder_ops.subword_text_encoder_encode( + s, vocab_file) + self.assertAllEqual(encoded, [2, 3, 4, 5, 6, 7, 8, 9, 2, 11, 12, 1]) + + +if __name__ == "__main__": + tf.enable_eager_execution() + tf.test.main() diff --git a/tensor2tensor/data_generators/ops/subword_text_encoder_test.cc b/tensor2tensor/data_generators/ops/subword_text_encoder_test.cc new file mode 100644 index 000000000..9bad1523d --- /dev/null +++ b/tensor2tensor/data_generators/ops/subword_text_encoder_test.cc @@ -0,0 +1,44 @@ +#include "third_party/py/tensor2tensor/data_generators/ops/subword_text_encoder.h" + +#include "testing/base/public/gunit.h" +#include "third_party/tensorflow/core/framework/tensor.h" +#include "third_party/tensorflow/core/framework/tensor_testutil.h" + +namespace tensor2tensor { +namespace { + +TEST(SubwordTextEncoderTest, EncodesSubTokens) { + SubwordTextEncoder encoder("third_party/py/tensor2tensor/" + "data_generators/ops/testdata/subwords"); + std::vector t; + encoder.Encode("the quick brown fox jumps over the lazy dog", &t); + EXPECT_EQ(t, std::vector({2, 3, 4, 5, 6, 7, 8, 9, 2, 11, 12, 1})); +} + +TEST(SubwordTextEncoderTest, EncodesUnicodeSubTokens) { + SubwordTextEncoder encoder("third_party/py/tensor2tensor/" + "data_generators/ops/testdata/subwords"); + std::vector t; + encoder.Encode("ɧęĻĽÒ", &t); + EXPECT_EQ(t, std::vector({13, 14, 1})); +} + +TEST(SubwordTextEncoderTest, EncodesUnicodeCodePoints) { + SubwordTextEncoder encoder("third_party/py/tensor2tensor/" + "data_generators/ops/testdata/subwords"); + std::vector t; + encoder.Encode("⻦ ⻭", &t); + EXPECT_EQ(t, std::vector({15, 18, 16, 17, 1})); +} + +TEST(SubwordTextEncoderTest, EncodesCharactersNotInAlphabet) { + SubwordTextEncoder encoder("third_party/py/tensor2tensor/" + "data_generators/ops/testdata/subwords"); + std::vector t; + encoder.Encode("!", &t); + // Subtokens: '\', '3', '3', ';', '_', '', ''. + EXPECT_EQ(t, std::vector({19, 23, 23, 30, 17, 1})); +} + +} // namespace +} // namespace tensor2tensor diff --git a/tensor2tensor/data_generators/ops/testdata/subwords b/tensor2tensor/data_generators/ops/testdata/subwords new file mode 100644 index 000000000..2591acac1 --- /dev/null +++ b/tensor2tensor/data_generators/ops/testdata/subwords @@ -0,0 +1,31 @@ +'' +'' +'the_' +'quick_' +'brow' +'n_' +'fox_' +'jump' +'s_' +'over_' +'the_' +'lazy_' +'dog_' +'ɧę' +'ĻĽÒ_' +'⻦' +'⻭' +'_' +' ' +'\' +'0' +'1' +'2' +'3' +'4' +'5' +'6' +'7' +'8' +'9' +';' \ No newline at end of file diff --git a/tensor2tensor/data_generators/paraphrase_ms_coco.py b/tensor2tensor/data_generators/paraphrase_ms_coco.py new file mode 100644 index 000000000..84de12859 --- /dev/null +++ b/tensor2tensor/data_generators/paraphrase_ms_coco.py @@ -0,0 +1,196 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base classes for paraphrase generation problems.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import io +import json +import os +import zipfile + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +_MS_COCO_DOWNLOAD_URL = "http://msvocds.blob.core.windows.net/annotations-1-0-3" +_MS_COCO_ZIPPED_FILE = "captions_train-val2014.zip" + +_MS_COCO_TRAIN_FILE = "captions_train2014.json" +_MS_COCO_DEV_FILE = "captions_val2014.json" + + +def create_combination(list_of_sentences): + """Generates all possible pair combinations for the input list of sentences. + + For example: + + input = ["paraphrase1", "paraphrase2", "paraphrase3"] + + output = [("paraphrase1", "paraphrase2"), + ("paraphrase1", "paraphrase3"), + ("paraphrase2", "paraphrase3")] + + Args: + list_of_sentences: the list of input sentences. + Returns: + the list of all possible sentence pairs. + """ + num_sentences = len(list_of_sentences) - 1 + combinations = [] + for i, _ in enumerate(list_of_sentences): + if i == num_sentences: + break + num_pairs = num_sentences - i + populated = num_pairs * [list_of_sentences[i]] + zipped = list(zip(populated, list_of_sentences[i + 1:])) + combinations += zipped + return combinations + + +class ParaphraseGenerationProblem(text_problems.Text2TextProblem): + """Paraphrase problem.""" + + @property + def bidirectional(self): + """If set to true, generates data in the following way. 
+ + sentence1 -> sentence2 + sentence2 -> sentence1 + """ + raise NotImplementedError() + + def prepare_data(self, data_dir, tmp_dir, dataset_split): + raise NotImplementedError() + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + paraphrase_pairs = self.prepare_data(data_dir, tmp_dir, dataset_split) + for (caption1, caption2) in paraphrase_pairs: + caption_pairs = [(caption1, caption2)] + if self.bidirectional: + caption_pairs += [(caption2, caption1)] + for caption_pair in caption_pairs: + yield { + "inputs": caption_pair[0], + "targets": caption_pair[1] + } + + +class ParaphraseGenerationMsCocoProblem(ParaphraseGenerationProblem): + """Paraphrase problem.""" + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2 ** 13 + + def prepare_data(self, data_dir, tmp_dir, dataset_split): + ms_coco_path = self._maybe_download(tmp_dir, dataset_split) + captions = self._get_captions(ms_coco_path) + tf.logging.info("Retrieved %d captions\n" % (len(captions))) + paraphrase_pairs = [] + + tf.logging.info("Generating input combinations...") + for captions_for_image in captions: + combinations_of_captions = create_combination(captions_for_image) + paraphrase_pairs += combinations_of_captions + + tf.logging.info("Created %d combinations pairs." % (len(paraphrase_pairs))) + return paraphrase_pairs + + def _maybe_download(self, tmp_dir, dataset_split): + filename = os.path.basename(_MS_COCO_ZIPPED_FILE) + download_url = os.path.join(_MS_COCO_DOWNLOAD_URL, filename) + path = generator_utils.maybe_download(tmp_dir, filename, download_url) + unzip_dir = os.path.join(tmp_dir, filename.strip(".zip")) + if not tf.gfile.Exists(unzip_dir): + tf.logging.info("Unzipping data to {}".format(unzip_dir)) + zipfile.ZipFile(path, "r").extractall(unzip_dir) + + if dataset_split == problem.DatasetSplit.TRAIN: + ms_coco_file = _MS_COCO_TRAIN_FILE + else: + ms_coco_file = _MS_COCO_DEV_FILE + ms_coco_path = os.path.join(unzip_dir, "annotations", ms_coco_file) + return ms_coco_path + + def _get_captions(self, ms_coco_path): + caption_file = io.open(ms_coco_path) + caption_json = json.load(caption_file) + annotations = caption_json["annotations"] + captions_for_image = collections.defaultdict(list) + + for annotation in annotations: + image_id = annotation["image_id"] + captions_for_image[image_id].append(annotation["caption"]) + + captions = list(captions_for_image.values()) + return captions + + +@registry.register_problem +class ParaphraseGenerationMsCocoProblem2d( + ParaphraseGenerationMsCocoProblem): + + @property + def bidirectional(self): + return True + + +@registry.register_problem +class ParaphraseGenerationMsCocoProblem1d( + ParaphraseGenerationMsCocoProblem): + + @property + def bidirectional(self): + return False + + +@registry.register_problem +class ParaphraseGenerationMsCocoProblem2dCharacters( + ParaphraseGenerationMsCocoProblem2d): + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + +@registry.register_problem +class ParaphraseGenerationMsCocoProblem1dCharacters( + ParaphraseGenerationMsCocoProblem1d): + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER diff --git a/tensor2tensor/data_generators/paraphrase_ms_coco_test.py b/tensor2tensor/data_generators/paraphrase_ms_coco_test.py new file mode 100644 
index 000000000..95649d80c --- /dev/null +++ b/tensor2tensor/data_generators/paraphrase_ms_coco_test.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.data_generators.paraphrase_ms_coco.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import mock + +from tensor2tensor.data_generators import paraphrase_ms_coco + +import tensorflow.compat.v1 as tf + + +class ParaphraseGenerationProblemTest(tf.test.TestCase): + + def testCombinationPairs(self): + inputs = ["A", "B", "C"] + expected_combination = [("A", "B"), ("A", "C"), ("B", "C")] + actual_combination = paraphrase_ms_coco.create_combination(inputs) + self.assertEqual(actual_combination, expected_combination) + + @mock.patch("tensor2tensor.data_generators" + ".paraphrase_ms_coco.ParaphraseGenerationProblem.prepare_data", + return_value=[("sentence1", "sentence2")]) + @mock.patch("tensor2tensor.data_generators" + ".paraphrase_ms_coco.ParaphraseGenerationProblem.bidirectional") + def testBidirectionalTrue(self, data, bidirectional): + paraphrase_problem = paraphrase_ms_coco.ParaphraseGenerationProblem() + paraphrase_problem.bidirectional = True + + expected_generated_data = [{"inputs": "sentence1", "targets": "sentence2"}, + {"inputs": "sentence2", "targets": "sentence1"}] + actual_generated_data = list(paraphrase_problem + .generate_samples("data_dir", + "tmp_dir", + "dataset_split")) + self.assertEqual(actual_generated_data, expected_generated_data) + + @mock.patch("tensor2tensor.data_generators" + ".paraphrase_ms_coco.ParaphraseGenerationProblem.prepare_data", + return_value=[("sentence1", "sentence2")]) + @mock.patch("tensor2tensor.data_generators" + ".paraphrase_ms_coco.ParaphraseGenerationProblem.bidirectional") + def testBidirectionalFalse(self, data, bidirectional): + paraphrase_problem = paraphrase_ms_coco.ParaphraseGenerationProblem() + paraphrase_problem.bidirectional = False + + expected_generated_data = [{"inputs": "sentence1", "targets": "sentence2"}] + actual_generated_data = list(paraphrase_problem + .generate_samples("data_dir", + "tmp_dir", + "dataset_split")) + self.assertEqual(actual_generated_data, expected_generated_data) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/pointer_generator_word.py b/tensor2tensor/data_generators/pointer_generator_word.py new file mode 100644 index 000000000..257b3d7bb --- /dev/null +++ b/tensor2tensor/data_generators/pointer_generator_word.py @@ -0,0 +1,198 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generator for pointer-generator for word transformer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + + +@registry.register_problem +class Text2textCopyableTokens(text_problems.Text2textTmpdirTokens): + """Allows training a variant of Text2textTmpdirTokens that supports copying. + + Handling the case where the input contains OOV tokens. Store a temporary vocab + ID for source OOV, so that the decoder can directly copy from the input. + Uses TokenTextEncoderOov as the vocab encoder. + """ + + def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False): + vocab_filename = os.path.join(data_dir, self.vocab_filename) + encoder = TokenTextEncoderOov( + vocab_filename, replace_oov=self.oov_token) + return encoder + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + generator = self.generate_samples(data_dir, tmp_dir, dataset_split) + encoder = self.get_or_create_vocab(data_dir, tmp_dir) + return self.text2text_generate_encoded_oovs( + generator, encoder, has_inputs=self.has_inputs) + + def text2text_generate_encoded_oovs(self, + sample_generator, + vocab, + targets_vocab=None, + has_inputs=True): + """Encode Text2Text samples from the generator with the vocab.""" + targets_vocab = targets_vocab or vocab + for sample in sample_generator: + if has_inputs: + (sample["inputs"], sample["inputs_extend"], source_oovs, + _) = vocab.encode(sample["inputs"]) + sample["inputs"].append(text_encoder.EOS_ID) + sample["inputs_extend"].append(text_encoder.EOS_ID) + # need to pass the source OOV tokens to the target encoder + sample["targets"], sample["targets_extend"] = targets_vocab.encode_target( + sample["targets"], source_oovs) + sample["targets"].append(text_encoder.EOS_ID) + sample["targets_extend"].append(text_encoder.EOS_ID) + yield sample + + def example_reading_spec(self): + data_fields = { + "inputs": tf.VarLenFeature(tf.int64), + "inputs_extend": tf.VarLenFeature(tf.int64), + "targets": tf.VarLenFeature(tf.int64), + "targets_extend": tf.VarLenFeature(tf.int64) + } + data_items_to_decoders = None + return (data_fields, data_items_to_decoders) + + +class TokenTextEncoderOov(text_encoder.TokenTextEncoder): + """Encoder based on a user-supplied vocabulary (file or list). + + This encoder extends over TokenTextEncoder by additionally assigning distinct + temporary IDs to OOV tokens appearing in the source sequence. This facilitates + decoding with the pointer-generator mechanism using word-based tokenization. + + NOTE: TokenTextEncoderOov does not conform to the TextEncoder API; it changes + the signature of encode and decode. + """ + + def encode(self, s): + """Converts a space-separated string of tokens to lists of ids. + + Also store temporary vocabulary IDs for source OOV tokens. OOVs are + represented by their temporary OOV number. 
E.g., if the vocabulary size + is 50k and the source has 3 OOVs, then these temporary OOV numbers will + be 50000, 50001, 50002. + + Args: + s: human-readable string to be converted. + + Returns: + ids: list of integers + ids_extend: list of integers including extended temporary vocab IDs for + source OOVs. + oovs: A dict storing source OOV words, used for the decoder to copy. The + key is OOV word, and the value is the order they appear in the source, + starting from 0. + source_oov_id_to_token: a list of source OOV tokens, in the same order as + they appear in the source. + """ + sentence = s + tokens = sentence.strip().split() + ids = [] + ids_extend = [] + oovs = {} + for t in tokens: + if t in self._token_to_id: + ids.append(self._token_to_id[t]) + ids_extend.append(self._token_to_id[t]) + else: + next_oov_id = len(oovs) + oov_num = oovs.get(t, next_oov_id) + if oov_num == next_oov_id: + oovs[t] = oov_num + ids_extend.append(self.vocab_size + oov_num) + ids.append(self._token_to_id[self._replace_oov]) + source_oov_id_to_token = [""] * len(oovs) + for oov in oovs: + source_oov_id_to_token[oovs[oov]] = oov + if self._reverse: + return ids[::-1], ids_extend[::-1], oovs, source_oov_id_to_token + else: + return ids, ids_extend, oovs, source_oov_id_to_token + + def encode_target(self, target, source_oovs): + """Converts a space-separated string of tokens to lists of ids. + + Also store a version of extened vocabulary IDs. + For target OOVs that are in the source, encode them using the temporary + vocab IDs. + For target OOVs not in the source, encode them as + + Args: + target: target string + source_oovs: source OOV words stored in dict, key is the word, value is + the order in which they appear in the source starting from 0 + + Returns: + ids: list of integers + ids_extend: list of integers including extended vocabulary IDs. + """ + tokens = target.strip().split() + ids = [] + ids_extend = [] + for t in tokens: + if t in self._token_to_id: + i = self._token_to_id[t] + ids.append(i) + ids_extend.append(i) + else: + ids.append(self._token_to_id[self._replace_oov]) + if t in source_oovs: + vocab_idx = self.vocab_size + source_oovs[t] + ids_extend.append(vocab_idx) + else: + ids_extend.append(self._token_to_id[self._replace_oov]) + if self._reverse: + return ids[::-1], ids_extend[::-1] + else: + return ids, ids_extend + + def decode_oov(self, ids, source_oov): + return " ".join(self.decode_list_oov(ids, source_oov)) + + def decode_list_oov(self, ids, source_oov_id_to_token): + """decode ids back to tokens, considering OOVs temporary IDs. + + Args: + ids: vocab ids. Could possibly include source temporary OOV ID starting + from vocab_size. + source_oov_id_to_token: a list of source OOV tokens, with the order the + same as they appear in the source. + + Returns: + decoded tokens, possibly including source OOV tokens. + + """ + seq = reversed(ids) if self._reverse else ids + tokens = [] + for cur_id in seq: + if cur_id in self._id_to_token: + tokens.append(self._id_to_token[cur_id]) + else: + tokens.append(source_oov_id_to_token[cur_id - self.vocab_size]) + return tokens diff --git a/tensor2tensor/data_generators/problem.py b/tensor2tensor/data_generators/problem.py new file mode 100644 index 000000000..20c421ef9 --- /dev/null +++ b/tensor2tensor/data_generators/problem.py @@ -0,0 +1,1086 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base class for problem/dataset definitions.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import functools +import os +import random +import six + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.utils import contrib +from tensor2tensor.utils import data_reader +from tensor2tensor.utils import hparam +from tensor2tensor.utils import metrics +from tensor2tensor.utils import mlperf_log + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +# pylint: disable=g-import-not-at-top +try: + from tensorflow.contrib.tpu.python.tpu import tpu_config +except ImportError: + # TF 2.0 doesn't ship with contrib. + tpu_config = None +# pylint: enable=g-import-not-at-top + + + +class DatasetSplit(object): + TRAIN = tf_estimator.ModeKeys.TRAIN + EVAL = tf_estimator.ModeKeys.EVAL + TEST = "test" + + +class SpaceID(object): + """Input and target space ids. Add more as needed.""" + # Generic / unknown output space (default) + GENERIC = 0 + # Image labels + IMAGE_LABEL = 1 + # English characters + EN_CHR = 2 + # English tokens + EN_TOK = 3 + # English bpe tokens + EN_BPE_TOK = 4 + # French characters + FR_CHR = 5 + # French tokens + FR_TOK = 6 + # German characters + DE_CHR = 7 + # German tokens + DE_TOK = 8 + # German bpe tokens + DE_BPE_TOK = 9 + # Digit cipher lexicon 0 + DIGIT_0 = 10 + # Digit cipher lexicon 1 + DIGIT_1 = 11 + # Audio waveform domain + AUDIO_WAV = 12 + # Audio spectral domain + AUDIO_SPECTRAL = 13 + # Parse characters + PARSE_CHR = 14 + # Parse tokens + PARSE_TOK = 15 + # Chinese tokens + ZH_TOK = 16 + # Icelandic characters + ICE_CHAR = 17 + # Icelandic tokens + ICE_TOK = 18 + # Icelandic parse tokens + ICE_PARSE_TOK = 19 + # Macedonian tokens + MK_TOK = 20 + # Czech tokens + CS_TOK = 21 + # Czech characters + CS_CHR = 22 + # Genetic bases (ACTG) + DNA = 23 + # Real numbers + REAL = 24 + # Images + IMAGE = 25 + # Peptide + PEPTIDE = 26 + # Python + PY_TOK = 27 + # C++ + CPP_TOK = 28 + # Strokes + STROKES = 29 + # Pickled Python + PICKLED_PYTHON = 30 + + +class TaskID(object): + """Problem specific task ids. 
Add more as needed.""" + # English characters + EN_CHR = 2 + # English characters sentiment + EN_CHR_SENT = 3 + # English Premise Hypothesis pair + EN_PR_HYP = 4 + # English NLI + EN_NLI = 5 + # COLA + COLA = 6 + # Enligh Question Context pair + EN_Q_CONT = 7 + # English similarity task + EN_SIM = 8 + # English sentence pair + EN_SENT_PAIR = 9 + # 3 class NLI + THREE_CL_NLI = 10 + + +def default_model_hparams(): + return hparam.HParams( + max_input_seq_length=0, + max_target_seq_length=0, + prepend_mode="none", + split_to_length=0, + data_dir=None) + + +def preprocess_example_common(example, mode, hparams): + """Preprocessing steps common to all models.""" + if "inputs" in example and hparams.max_input_seq_length > 0: + example["inputs"] = example["inputs"][:hparams.max_input_seq_length] + if hparams.prepend_mode != "none": + if mode == tf_estimator.ModeKeys.PREDICT: + example["partial_targets"] = tf.concat([example["inputs"], [0]], 0) + else: + example["targets"] = tf.concat( + [example["inputs"], [0], example["targets"]], 0) + if "targets" in example and hparams.max_target_seq_length > 0: + example["targets"] = example["targets"][:hparams.max_target_seq_length] + if hparams.split_to_length: + new_example = {} + for k, v in six.iteritems(example): + if k == "targets" or k == "inputs": + new_example[k] = tf.reshape(v, [-1, hparams.split_to_length, 1, 1]) + else: + tf.logging.warning("Dropping feature %s" % k) + return tf.data.Dataset.from_tensor_slices(new_example) + return example + + +class Problem(object): + """Problem base class. Specifies a T2T problem. + + Problems unify the specification of a problem for data generation, training, + and inference. + + New problems are specified by the following methods: + + Data generation: + * generate_data(data_dir, tmp_dir) + - Generate training and dev datasets into data_dir. + - Additional files, e.g. vocabulary files, should also be written to + data_dir. Vocab files are newline-separated files with each line + containing a token. The standard convention for the filename is to + set it to be + ${Problem.vocab_filename}.${Problem.targeted_vocab_size} + - Downloads and other files can be written to tmp_dir + - If you have a training and dev generator, you can generate the + training and dev datasets with + generator_utils.generate_dataset_and_shuffle. + - Use the self.training_filepaths and self.dev_filepaths functions to + get sharded filenames. If shuffled=False, the filenames will contain + an "unshuffled" suffix; you should then shuffle the data + shard-by-shard with generator_utils.shuffle_dataset. + - Allows to specify the number of shards, optionally (can be omitted). + - Subclasses must override + * dataset_filename() + - Base filename for problem. + - Defaults to registered name (self.name). + + Training: + * hparams(defaults, model_hparams) + - Specify the problem hyperparameters (see _default_hparams) + - Mutate defaults as needed + * example_reading_spec + - Specify the names and types of the features on disk. + - Specify tf.contrib.slim.tfexample_decoder + * preprocess_example(example, mode, hparams) + - Preprocess the example feature dict from feature name to Tensor or + SparseTensor. + - Used in training, eval, and inference (specified by mode). + + Eval: + * eval_metrics + - Specify the set of evaluation metrics for this problem. + * eval_hooks + - Specify the set of evalueation hooks for this problem. + + Inference: + * feature_encoders(data_dir) + - Return a dict of for encoding and decoding + inference input/output. 
+ - Defaults to TextEncoder for inputs and targets. + """ + + # ============================================================================ + # BEGIN SUBCLASS INTERFACE + # ============================================================================ + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + raise NotImplementedError() + + @property + def multiprocess_generate(self): + """Whether to generate the data in multiple parallel processes.""" + return False + + @property + def num_generate_tasks(self): + """Needed if multiprocess_generate is True.""" + raise NotImplementedError() + + @property + def num_training_examples(self): + """Used when mixing problems - how many examples are in the dataset.""" + raise NotImplementedError() + + def prepare_to_generate(self, data_dir, tmp_dir): + """Prepare to generate data in parallel on different processes. + + This function is called if multiprocess_generate is True. + + Some things that might need to be done once are downloading the data + if it is not yet downloaded, and building the vocabulary. + + Args: + data_dir: a string + tmp_dir: a string + """ + raise NotImplementedError() + + def hparams(self, defaults, model_hparams): + pass + + def max_length(self, model_hparams): + """Maximum sequence length. + + Problems with fixed length should override. + + Args: + model_hparams: model hyperparameters + Returns: + an integer + """ + return (model_hparams.split_to_length or model_hparams.max_length or + model_hparams.batch_size) + + def tpu_batch_size_per_shard(self, model_hparams): + """Batch size in examples per TPU core. + + Args: + model_hparams: model hyperparameters + Returns: + an integer + """ + if self.batch_size_means_tokens and not model_hparams.use_fixed_batch_size: + return model_hparams.batch_size // self.max_length(model_hparams) + else: + return model_hparams.batch_size + + @property + def batch_size_means_tokens(self): + """Do we specify hparams.batch_size in tokens per datashard per batch. + + This is generally done for text problems. + + If False, we assume that batch sizes are specified in examples per + datashard per batch. + + TODO(noam): we should be more explicit and replace the hyperparameter + batch size with two hyperparameters: + hparams.examples_per_batch_per_datashard + hparams.tokens_per_batch_per_datashard + + Returns: + a boolean + """ + return False + + @property + def skip_random_fraction_when_training(self): + """Skip a random number of examples at the beginning of training.""" + # Skip a random fraction at the beginning of the stream. The skip is + # essential for synchronous highly-parallel training to avoid multiple + # replicas reading the same data in lock-step. So keep this true unless + # you have a very specific setting in which it needs to be turned off. + return True + + def dataset_filename(self): + return self.name + + def feature_encoders(self, data_dir): + del data_dir + return { + "inputs": text_encoder.TextEncoder(), + "targets": text_encoder.TextEncoder() + } + + def example_reading_spec(self): + """Define how data is serialized to file and read back. + + Returns: + data_fields: A dictionary mapping data names to its feature type. + data_items_to_decoders: A dictionary mapping data names to TF Example + decoders, to be used when reading back TF examples from disk. 
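+
+      When data_items_to_decoders is None (the default below), decode_example
+      builds a plain Tensor decoder for each entry in data_fields.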
+ """ + data_fields = { + "inputs": tf.VarLenFeature(tf.int64), + "targets": tf.VarLenFeature(tf.int64) + } + data_items_to_decoders = None + return (data_fields, data_items_to_decoders) + + def preprocess_example(self, example, mode, hparams): + """Runtime preprocessing. + + Return a dict or a tf.data.Dataset.from_tensor_slices (if you want each + example to turn into multiple). + + Args: + example: dict, features + mode: tf.estimator.ModeKeys + hparams: HParams, model hyperparameters + + Returns: + dict or Dataset + """ + return preprocess_example_common(example, mode, hparams) + + def eval_metrics(self): + return [ + metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5, + metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY + ] + + @property + def all_metrics_fns(self): + return metrics.METRICS_FNS + + def eval_metric_fns(self, model_hparams): + del model_hparams + metric_names = self.eval_metrics() + if not all([m in self.all_metrics_fns for m in metric_names]): + error_str = ("Unrecognized metric. Problem %s specified metrics " + "%s. Recognized metrics are %s.") + raise ValueError(error_str % (self.name, + metric_names, + list(self.all_metrics_fns.keys()))) + return { + metric_name: self.all_metrics_fns[metric_name] + for metric_name in metric_names + } + + def eval_hooks(self, features, logits, hparams): + del features, logits, hparams + return [] + + @property + def task_id(self): + if self._task_id == -1 and hasattr(self, "global_task_id"): + self._task_id = self.global_task_id() + return self._task_id + + def set_task_id(self, new_task_id): + self._task_id = new_task_id + + # ============================================================================ + # END SUBCLASS INTERFACE + # ============================================================================ + + @tf.autograph.experimental.do_not_convert() + def preprocess(self, dataset, mode, hparams, interleave=True): + """Runtime preprocessing on the whole dataset. + + Return a tf.data.Dataset -- the preprocessed version of the given one. + By default this function calls preprocess_example. + + Args: + dataset: the Dataset of already decoded but not yet preprocessed features. + mode: tf.estimator.ModeKeys + hparams: HParams, model hyperparameters + interleave: bool, whether to use parallel_interleave, which is faster + but will alter the order of samples non-deterministically, or flat_map, + which is slower but will preserve the sample order. 
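+        Interleaving (or flat_map) is applied here because preprocess_example
+        may expand a single example into a small Dataset of several examples.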
+ + Returns: + a Dataset + """ + def _preprocess(example): + examples = self.preprocess_example(example, mode, hparams) + if not isinstance(examples, tf.data.Dataset): + examples = tf.data.Dataset.from_tensors(examples) + return examples + + if interleave: + dataset = dataset.apply( + tf.data.experimental.parallel_interleave( + _preprocess, sloppy=True, cycle_length=8)) + else: + dataset = dataset.flat_map(_preprocess) + + return dataset + + def training_filepaths(self, data_dir, num_shards, shuffled): + file_basename = self.dataset_filename() + if not shuffled: + file_basename += generator_utils.UNSHUFFLED_SUFFIX + return generator_utils.train_data_filenames(file_basename, data_dir, + num_shards) + + def dev_filepaths(self, data_dir, num_shards, shuffled): + file_basename = self.dataset_filename() + if not shuffled: + file_basename += generator_utils.UNSHUFFLED_SUFFIX + return generator_utils.dev_data_filenames(file_basename, data_dir, + num_shards) + + def test_filepaths(self, data_dir, num_shards, shuffled): + file_basename = self.dataset_filename() + if not shuffled: + file_basename += generator_utils.UNSHUFFLED_SUFFIX + return generator_utils.test_data_filenames(file_basename, data_dir, + num_shards) + + def data_filepaths(self, split, output_dir, num_shards, shuffled): + if split == DatasetSplit.TRAIN: + return self.training_filepaths(output_dir, num_shards, shuffled) + elif split == DatasetSplit.EVAL: + return self.dev_filepaths(output_dir, num_shards, shuffled) + elif split == DatasetSplit.TEST: + return self.test_filepaths(output_dir, num_shards, shuffled) + else: + raise ValueError("Unknown value for split: %s" % split) + + def filepattern(self, data_dir, mode, shard=None): + """Get filepattern for data files for mode. + + Matches mode to a suffix. + * DatasetSplit.TRAIN: train + * DatasetSplit.EVAL: dev + * DatasetSplit.TEST: test + * tf.estimator.ModeKeys.PREDICT: dev + + Args: + data_dir: str, data directory. + mode: DatasetSplit + shard: int, if provided, will only read data from the specified shard. + + Returns: + filepattern str + """ + path = os.path.join(data_dir, self.dataset_filename()) + shard_str = "-%05d" % shard if shard is not None else "" + if mode == DatasetSplit.TRAIN: + suffix = "train" + elif mode in [DatasetSplit.EVAL, tf_estimator.ModeKeys.PREDICT]: + suffix = "dev" + else: + assert mode == DatasetSplit.TEST + suffix = "test" + + return "%s-%s%s*" % (path, suffix, shard_str) + + def __init__(self, was_reversed=False, was_copy=False): + """Create a Problem. + + Args: + was_reversed: bool, whether to reverse inputs and targets. + was_copy: bool, whether to copy inputs to targets. Can be composed with + was_reversed so that if both are true, the targets become the inputs, + which are then copied to targets so that the task is targets->targets. 
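+        (Illustrative: problems requested from the registry under names ending
+        in "_rev" or "_copy" are typically constructed with these flags set.)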
+ """ + self._was_reversed = was_reversed + self._was_copy = was_copy + self._encoders = None + self._hparams = None + self._feature_info = None + self._task_id = -1 + + @property + def was_reversed(self): + """Whether the problem was reversed.""" + return self._was_reversed + + def get_feature_encoders(self, data_dir=None): + if self._encoders is None: + self._encoders = self.feature_encoders(data_dir) + return self._encoders + + def get_hparams(self, model_hparams=None): + """Returns problem_hparams.""" + if self._hparams is not None: + return self._hparams + + if model_hparams is None: + model_hparams = default_model_hparams() + + if self._encoders is None: + data_dir = (model_hparams and hasattr(model_hparams, "data_dir") and + model_hparams.data_dir) or None + self.get_feature_encoders(data_dir) + + hp = _default_hparams() + ret = self.hparams(hp, model_hparams) + if ret is not None: + raise ValueError("The Problem subclass hparams function should mutate " + "the defaults passed in and return None.") + + hp.add_hparam("vocabulary", self._encoders) + hp.add_hparam("was_reversed", self._was_reversed) + hp.add_hparam("was_copy", self._was_copy) + + if self._was_reversed: + _reverse_problem_hparams(hp) + if self._was_copy: + _copy_problem_hparams(hp) + + self._hparams = hp + return self._hparams + + def maybe_reverse_features(self, feature_map): + """Reverse features between inputs and targets if the problem is '_rev'.""" + if not self._was_reversed: + return + inputs = feature_map.pop("inputs", None) + targets = feature_map.pop("targets", None) + inputs_seg = feature_map.pop("inputs_segmentation", None) + targets_seg = feature_map.pop("targets_segmentation", None) + inputs_pos = feature_map.pop("inputs_position", None) + targets_pos = feature_map.pop("targets_position", None) + if inputs is not None: + feature_map["targets"] = inputs + if targets is not None: + feature_map["inputs"] = targets + if inputs_seg is not None: + feature_map["targets_segmentation"] = inputs_seg + if targets_seg is not None: + feature_map["inputs_segmentation"] = targets_seg + if inputs_pos is not None: + feature_map["targets_position"] = inputs_pos + if targets_pos is not None: + feature_map["inputs_position"] = targets_pos + + def maybe_copy_features(self, feature_map): + if not self._was_copy: + return + feature_map["targets"] = feature_map["inputs"] + if ("inputs_segmentation" in feature_map and + "targets_segmentation" not in feature_map): + feature_map["targets_segmentation"] = feature_map["inputs_segmentation"] + if ("inputs_position" in feature_map and + "targets_position" not in feature_map): + feature_map["targets_position"] = feature_map["inputs_position"] + + def maybe_reverse_and_copy(self, example): + self.maybe_reverse_features(example) + self.maybe_copy_features(example) + return example + + @tf.autograph.experimental.do_not_convert() + def dataset(self, + mode, + data_dir=None, + num_threads=None, + output_buffer_size=None, + shuffle_files=None, + hparams=None, + preprocess=True, + dataset_split=None, + shard=None, + partition_id=0, + num_partitions=1, + shuffle_buffer_size=1024, + max_records=-1): + """Build a Dataset for this problem. + + Args: + mode: tf.estimator.ModeKeys; determines which files to read from. + data_dir: directory that contains data files. + num_threads: int, number of threads to use for decode and preprocess + Dataset.map calls. + output_buffer_size: int, how many elements to prefetch at end of pipeline. + shuffle_files: whether to shuffle input files. 
Default behavior (i.e. when + shuffle_files=None) is to shuffle if mode == TRAIN. + hparams: HParams; hparams to be passed to + Problem.preprocess_example and Problem.hparams. If None, will use a + default set that is a no-op. + preprocess: bool, whether to map the Dataset through + Problem.preprocess_example. + dataset_split: DatasetSplit, which split to read data + from (TRAIN:"-train", EVAL:"-dev", "test":"-test"). Defaults to mode. + shard: int, if provided, will only read data from the specified shard. + partition_id: integer - which partition of the dataset to read from + num_partitions: how many partitions in the dataset + shuffle_buffer_size: if shuffle_files is True, this is the buffer size + used to shuffle records. + max_records: int, number of records to truncate to. + + Returns: + Dataset containing dict. + + Raises: + ValueError: if num_partitions is greater than the number of data files. + """ + is_training = mode == tf_estimator.ModeKeys.TRAIN + shuffle_files = shuffle_files or shuffle_files is None and is_training + + dataset_split = dataset_split or mode + assert data_dir + + if hparams is None: + hparams = default_model_hparams() + + if not hasattr(hparams, "data_dir"): + hparams.add_hparam("data_dir", data_dir) + if not hparams.data_dir: + hparams.data_dir = data_dir + # Construct the Problem's hparams so that items within it are accessible + _ = self.get_hparams(hparams) + + data_filepattern = self.filepattern(data_dir, dataset_split, shard=shard) + tf.logging.info("Reading data files from %s", data_filepattern) + data_files = sorted( + contrib.slim().parallel_reader.get_data_files(data_filepattern)) + + # Functions used in dataset transforms below. `filenames` can be either a + # `tf.string` tensor or `tf.data.Dataset` containing one or more filenames. + def _load_records_and_preprocess(filenames): + """Reads files from a string tensor or a dataset of filenames.""" + # Load records from file(s) with an 8MiB read buffer. + dataset = tf.data.TFRecordDataset(filenames, buffer_size=8 * 1024 * 1024) + # Decode. + dataset = dataset.map(self.decode_example, num_parallel_calls=num_threads) + # Preprocess if requested. + # Note that preprocessing should happen per-file as order may matter. + if preprocess: + dataset = self.preprocess(dataset, mode, hparams, + interleave=shuffle_files) + return dataset + + if len(data_files) < num_partitions: + raise ValueError( + "number of data files (%d) must be at least the number of hosts (%d)" + % (len(data_files), num_partitions)) + data_files = [f for (i, f) in enumerate(data_files) + if i % num_partitions == partition_id] + tf.logging.info( + "partition: %d num_data_files: %d" % (partition_id, len(data_files))) + if shuffle_files: + mlperf_log.transformer_print(key=mlperf_log.INPUT_ORDER) + random.shuffle(data_files) + + dataset = tf.data.Dataset.from_tensor_slices(tf.constant(data_files)) + # Create data-set from files by parsing, pre-processing and interleaving. + if shuffle_files: + dataset = dataset.apply( + tf.data.experimental.parallel_interleave( + _load_records_and_preprocess, sloppy=True, cycle_length=8)) + else: + dataset = _load_records_and_preprocess(dataset) + + dataset = dataset.map( + self.maybe_reverse_and_copy, num_parallel_calls=num_threads) + dataset = dataset.take(max_records) + + ## Shuffle records only for training examples. 
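+    # When file-level shuffling is on, this additionally shuffles records
+    # within a bounded buffer of shuffle_buffer_size elements.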
+ if shuffle_files and is_training: + dataset = dataset.shuffle(shuffle_buffer_size) + if hparams.get("pack_dataset", False): + dataset = generator_utils.pack_dataset( + dataset, hparams.max_length, keys=["inputs", "targets"], + use_custom_ops=hparams.get("use_custom_ops", False)) + if output_buffer_size: + dataset = dataset.prefetch(output_buffer_size) + + return dataset + + def decode_example(self, serialized_example): + """Return a dict of Tensors from a serialized tensorflow.Example.""" + data_fields, data_items_to_decoders = self.example_reading_spec() + # Necessary to rejoin examples in the correct order with the Cloud ML Engine + # batch prediction API. + data_fields["batch_prediction_key"] = tf.FixedLenFeature([1], tf.int64, 0) + + if getattr(self._hparams, "sampling_method", "") == "random_per_example": + data_fields["sampling_temp"] = tf.FixedLenFeature( + [1], tf.float32, getattr(self._hparams, "sampling_temp", 1.0)) + data_fields["sampling_keep_top_k"] = tf.FixedLenFeature( + [1], tf.int64, getattr(self._hparams, "sampling_keep_top_k", -1)) + + if data_items_to_decoders is None: + data_items_to_decoders = {} + for field in data_fields: + if data_fields[field].dtype is tf.string: + default_value = b"" + else: + default_value = 0 + data_items_to_decoders[field] = contrib.slim().tfexample_decoder.Tensor( + field, default_value=default_value) + + decoder = contrib.slim().tfexample_decoder.TFExampleDecoder( + data_fields, data_items_to_decoders) + + decode_items = list(sorted(data_items_to_decoders)) + decoded = decoder.decode(serialized_example, items=decode_items) + return dict(zip(decode_items, decoded)) + + @property + def decode_hooks(self): + """List of functions to be run after full decodes have been produced. + + Returns: + List of functions. Each function should expect a single argument, an + instance of decoding.DecodeHookArgs and optionally return a list of + tf.Summary.Value objects. + """ + return [] + + @property + def has_inputs(self): + return "inputs" in self.get_feature_encoders() + + @property + def feature_info(self): + """Retrieve dict. + + Must first call Problem.get_hparams or Problem.dataset to have the problem's + internal hparams already constructed. + + Returns: + dict + """ + if self._feature_info is not None: + return self._feature_info + + assert self._hparams is not None + + hp = self.get_hparams() + if self.has_inputs: + in_id = hp.input_space_id + out_id = hp.target_space_id + + features = collections.defaultdict(FeatureInfo) + for feature_name, modality_cls in six.iteritems(hp.modality): + finfo = features[feature_name] + finfo.modality = modality_cls + finfo.vocab_size = hp.vocab_size[feature_name] + + vocabs = hp.vocabulary + for name, encoder in six.iteritems(vocabs): + features[name].encoder = encoder + + if self.has_inputs: + features["inputs"].space_id = in_id + features["targets"].space_id = out_id + + self._feature_info = features + return features + + def make_estimator_input_fn(self, + mode, + hparams, + data_dir=None, + force_repeat=False, + prevent_repeat=False, + dataset_kwargs=None): + """Return input_fn wrapped for Estimator.""" + + def estimator_input_fn(params, config): + return self.input_fn( + mode, + hparams, + data_dir=data_dir, + params=params, + config=config, + force_repeat=force_repeat, + prevent_repeat=prevent_repeat, + dataset_kwargs=dataset_kwargs) + + return estimator_input_fn + + def _dataset_partition(self, mode, config, params): + """Which part of the training data to read. 
+ + If there are multiple parallel calls to input_fn (multiple TPU hosts), + then we want each one to read from a separate partition of the training + data. + + Args: + mode: tf.estimator.ModeKeys + config: RunConfig + params: A dict that contains parameters. + Returns: + partition_id: an integer + num_partitions: an integer + """ + if mode != tf_estimator.ModeKeys.TRAIN or not hasattr(config, "tpu_config"): + # Reset in the case when using TPU but alternating TRAIN and EVAL. + self._next_partition_id = 0 + return 0, 1 + phift = config.tpu_config.per_host_input_for_training + # This is the mesh-tensorflow case. + if (hasattr(tpu_config.InputPipelineConfig, "BROADCAST") and + phift == tpu_config.InputPipelineConfig.BROADCAST): + return 0, 1 + if phift: + num_hosts = (params["context"].num_hosts if "context" in params + else config.tpu_config.num_shards // 8) + num_partitions = max(num_hosts, 1) + else: + num_partitions = config.tpu_config.num_shards + partition_id = getattr(self, "_next_partition_id", 0) + self._next_partition_id = partition_id + 1 + tf.logging.info("num_partitions = %d partition_id = %d" % + (num_partitions, partition_id)) + assert partition_id < num_partitions + return partition_id, num_partitions + + def input_fn(self, + mode, + hparams, + data_dir=None, + params=None, + config=None, + force_repeat=False, + prevent_repeat=False, + dataset_kwargs=None): + """Builds input pipeline for problem. + + Args: + mode: tf.estimator.ModeKeys + hparams: HParams, model hparams + data_dir: str, data directory; if None, will use hparams.data_dir + params: dict, may include "batch_size" + config: RunConfig; should have the data_parallelism attribute if not using + TPU + force_repeat: bool, whether to repeat the data even if not training + prevent_repeat: bool, whether to not repeat when in training mode. + Overrides force_repeat. + dataset_kwargs: dict, if passed, will pass as kwargs to self.dataset + method when called + + Returns: + (features_dict, Tensor targets) + """ + partition_id, num_partitions = self._dataset_partition(mode, config, params) + is_training = mode == tf_estimator.ModeKeys.TRAIN + if config and config.use_tpu: + num_threads = 64 + else: + num_threads = data_reader.cpu_count() if is_training else 1 + data_dir = data_dir or (hasattr(hparams, "data_dir") and hparams.data_dir) + dataset_kwargs = dataset_kwargs or {} + dataset_kwargs.update({ + "mode": mode, + "data_dir": data_dir, + "num_threads": num_threads, + "hparams": hparams, + "partition_id": partition_id, + "num_partitions": num_partitions, + }) + return data_reader.input_fn( + self.dataset(**dataset_kwargs), + self.filepattern(data_dir, mode), + self.skip_random_fraction_when_training, + self.batch_size_means_tokens, + self.get_hparams().batch_size_multiplier, + self.max_length(hparams), + mode, + hparams, + data_dir=data_dir, + params=params, + config=config, + force_repeat=force_repeat, + prevent_repeat=prevent_repeat) + + @property + def export_assets(self): + """Assets to export with the model. + + This property contains a dictionary of assets, such as vocabulary files, + that should be exported together with the model, or None if no assets + are needed. 
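+
+    Illustrative example (not part of the base implementation): a subclass
+    could return {"vocab.subwords": "/path/to/vocab.subwords"}.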
+ """ + + return None + + def serving_input_fn(self, hparams, decode_hparams=None, use_tpu=False): + """Input fn for serving export, starting from serialized example.""" + self._hparams = hparams + mode = tf_estimator.ModeKeys.PREDICT + serialized_example = tf.placeholder( + dtype=tf.string, shape=[None], name="serialized_example") + dataset = tf.data.Dataset.from_tensor_slices(serialized_example) + dataset = dataset.map(self.decode_example) + dataset = dataset.map(lambda ex: self.preprocess_example(ex, mode, hparams)) + dataset = dataset.map(data_reader.cast_ints_to_int32) + + if use_tpu: + padded_shapes = data_reader.pad_for_tpu(dataset.output_shapes, hparams, + hparams.max_length) + batch_size = 1 if not decode_hparams else getattr(decode_hparams, + "batch_size", 1) + dataset = dataset.padded_batch( + batch_size, padded_shapes, drop_remainder=False) + dataset = dataset.map( + functools.partial(data_reader.pad_batch, batch_multiple=batch_size)) + else: + dataset = dataset.padded_batch( + tf.shape(serialized_example, out_type=tf.int64)[0], + dataset.output_shapes) + + dataset = dataset.map(data_reader.standardize_shapes) + features = tf.data.experimental.get_single_element(dataset) + + if self.has_inputs: + features.pop("targets", None) + + return tf_estimator.export.ServingInputReceiver( + features=features, receiver_tensors=serialized_example) + + +class FeatureInfo(object): + """Encapsulates information about a feature.""" + + def __init__(self, + encoder=None, + modality=None, + vocab_size=None, + space_id=None): + self.encoder = encoder + self.modality = modality + self.vocab_size = vocab_size + self.space_id = space_id + + +def _copy_problem_hparams(p_hparams): + """Use input modality, vocab, and space id for target.""" + p = p_hparams + # Duplicate input modality. + p.modality["targets"] = p.modality["inputs"] + # Duplicate input vocab size. + p.vocab_size["targets"] = p.vocab_size["inputs"] + # Duplicate input vocabulary. + p.vocabulary["targets"] = p.vocabulary["inputs"] + # Duplicate input space ids. + p.target_space_id = p.input_space_id + # Mark that p was reversed. + p.was_copy = True + + +def _reverse_problem_hparams(p_hparams): + """Swap input/output modalities, vocab, and space ids.""" + p = p_hparams + + # Swap modalities. + # TODO(trandustin): Note this assumes target modalities have feature name + # 'target', and each intended feature to swap has feature name 'input'. + # In the future, remove need for this behavior. + reversed_modality = {} + for feature_name in p.modality: + # Copy feature as-is. + if "target" not in feature_name and "input" not in feature_name: + reversed_modality[feature_name] = p.modality[feature_name] + else: + # Change "target" to "input" and vice-versa for this feature. + if "target" in feature_name: + reversed_feature_name = feature_name.replace("target", "input") + else: + assert "input" in feature_name, feature_name + reversed_feature_name = feature_name.replace("input", "target") + reversed_modality[reversed_feature_name] = p.modality[feature_name] + + p.modality = reversed_modality + + # Swap vocab sizes. + reversed_vocab_size = {} + for feature_name in p.vocab_size: + reversed_feature_name = feature_name.replace("target", "input") + if "target" in feature_name and reversed_feature_name in p.vocab_size: + reversed_vocab_size[feature_name] = p.vocab_size[reversed_feature_name] + reversed_vocab_size[reversed_feature_name] = p.vocab_size[feature_name] + + p.vocab_size = reversed_vocab_size + + # Swap vocabularies. 
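+  # (pop with a None default so a problem that defines only one of "inputs" /
+  # "targets" still reverses without raising.)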
+ input_vocabulary = p.vocabulary.pop("inputs", None) + target_vocabulary = p.vocabulary.pop("targets", None) + if input_vocabulary is not None: + p.vocabulary["targets"] = input_vocabulary + if target_vocabulary is not None: + p.vocabulary["inputs"] = target_vocabulary + + # Swap input/target space ids. + input_space_id = p.input_space_id + target_space_id = p.target_space_id + if input_space_id is not None: + p.target_space_id = input_space_id + else: + p.target_space_id = SpaceID.GENERIC + if target_space_id is not None: + p.input_space_id = target_space_id + else: + p.input_space_id = SpaceID.GENERIC + + # Mark that p was reversed. + p.was_reversed = True + + +def _default_hparams(): + """A set of basic model hyperparameters.""" + return hparam.HParams( + # Use this parameter to get comparable perplexity numbers with different + # tokenizations. This value should be set to the ratio of the number of + # tokens in the test set according to the tokenization used to the number + # of tokens in the test set in the "official" tokenization. For + # example, if we are using a word-piece based model and we want to + # compute per-word perplexity, then we set loss_multiplier to the number + # of wordpieces per word in the test set. + loss_multiplier=1.0, + + # Use this parameter to allow for larger sequences in the batch. Without + # the use of this parameter, the size of the inner two dimensions will + # be used to judge the sequence length. + batch_size_multiplier=1, + + # During inference for autoregressive problems, if the batch_size is 1, + # the inference will stop when the model predict a text_encoder.EOS_ID + # token. + stop_at_eos=False, + + # Modalities used to map from features to a space compatible with + # chosen model architecture. It comprises key-value pairs of a feature + # name (str) and its modality type. + modality={}, + vocab_size={}, + + # Identifiers used to tell the model which input/target space will be + # expected. For example, it can tell that we expect French as characters + # as output, or Spanish as sound. Spaces defined as constants in SpaceID + # class. + input_space_id=SpaceID.GENERIC, + target_space_id=SpaceID.GENERIC) + + +def problem_hparams_to_features(problem_hparams): + input_space_id, target_space_id = 0, 0 + if problem_hparams: + input_space_id = problem_hparams.input_space_id + target_space_id = problem_hparams.target_space_id + return { + "input_space_id": input_space_id, + "target_space_id": target_space_id, + } diff --git a/tensor2tensor/data_generators/problem_hparams.py b/tensor2tensor/data_generators/problem_hparams.py index 26249d2bc..6ad656cbd 100644 --- a/tensor2tensor/data_generators/problem_hparams.py +++ b/tensor2tensor/data_generators/problem_hparams.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,683 +21,188 @@ from __future__ import print_function import os - -# Dependency imports - +from tensor2tensor.data_generators import problem from tensor2tensor.data_generators import text_encoder -from tensor2tensor.utils import modality - -import tensorflow as tf - - -def default_problem_hparams(): - """A set of basic model hyperparameters.""" - return tf.contrib.training.HParams( - # Use this parameter to get comparable perplexity numbers with different - # tokenizations. 
This value should be set to the ratio of the number of - # tokens in the test set according to the tokeization used to the number - # of tokens in the test set in the "official" tokenization. For example, - # if we are using a word-piece based model and we want to compute - # per-word perplexity, then we set loss_multiplier to the number of - # wordpieces per word in the test set. - loss_multiplier=1.0, - - # Use this parameter to allow for larger sequences in the batch. Without - # the use of this parameter, the size of the inner two dimensions will be - # used to judge the sequence length. - batch_size_multiplier=1, - - # To make queues of the right capacity, it's good to know the maximal - # expected batch size, as it can vary a lot. It only affects performance - # of input readers and memory use. The defaults should be safe and fast, - # but decrease if your reader uses a lot of memory and increase if slow. - max_expected_batch_size_per_shard=64, - - # Modalities used to map from input features to a space compatible with - # chosen model architecture. One modality per feature key. - input_modality={}, - - # Modality used to map from hidden representation to the target space. - target_modality=None, - - # Identifiers used to tell the model which input/target space will be - # expected. For example, it can tell that we expect French as characters - # as output, or Spanish as sound. An integer with the following semantics: - # 0: Generic / unknown output space (default) - # 1: Image labels - # 2: English characters - # 3: English tokens - # 4: English bpe tokens - # 5: French characters - # 6: French tokens - # 7: German characters - # 8: German tokens - # 9: German bpe tokens - # 10: Digit cipher lexicon 0 - # 11: Digit cipher lexicon 1 - # 12: Audio waveform domain - # 13: Audio spectral domain - # 14: Parse characters - # 15: Parse tokens - # Add more above if needed. - input_space_id=0, - target_space_id=0, - - # Vocabulary per feature key. - # a vocabulary converts to/from human-readable strings. - # E.g. {"inputs": text_encoder.ByteTextEncoder(), - # "targets": wordpiece.WordpieceVocab("vocab_filename.txt")} - vocabulary={ - "inputs": text_encoder.TextEncoder(), - "targets": text_encoder.TextEncoder() - }, - - # This is a marker to keep track if the problem was reversed or copied. - # Only set automatically, do not override the default. - # - # These tags can be combined in order to perform copies of the input or - # the targets. For instance `problem_copy` will copy the inputs, but - # `problem_rev_copy` will copy the targets. - was_reversed=False, - was_copy=False,) - - -def parse_problem_name(problem_name): - """Determines if problem_name specifies a copy and/or reversal. - - Args: - problem_name: A string containing a single problem name from FLAGS.problems. - - Returns: - base_name: A string with the base problem name. - was_reversed: A boolean. - was_copy: A boolean. - """ - # Recursively strip tags until we reach a base name. - if len(problem_name) > 4 and problem_name[-4:] == "_rev": - base, _, was_copy = parse_problem_name(problem_name[:-4]) - return base, True, was_copy - elif len(problem_name) > 5 and problem_name[-5:] == "_copy": - base, was_reversed, _ = parse_problem_name(problem_name[:-5]) - return base, was_reversed, True - else: - return problem_name, False, False - - -def problem_hparams(problem_name, model_hparams): - """Generate problem hyperparameters based on problem name. 
- - Args: - problem_name: a string - model_hparams: a tf.contrib.training.HParams - - Returns: - a tf.contrib.training.HParams - - Raises: - ValueError: if problem_name is unknown. - """ - base_name, was_reversed, was_copy = parse_problem_name(problem_name) - if base_name not in _problem_hparams_map: - map_str = "\n* ".join(_problem_hparams_map.keys()) - error_msg = "%s not in the supported set of problems:\n%s" % (base_name, - map_str) - raise ValueError(error_msg) - p = _problem_hparams_map.get(base_name)(model_hparams) - if was_reversed: - # Swap modalities. - input_modality = p.input_modality["inputs"] - target_modality = p.target_modality - p.input_modality["inputs"] = target_modality - p.target_modality = input_modality - # Swap vocabularies. - input_vocabulary = p.vocabulary["inputs"] - target_vocabulary = p.vocabulary["targets"] - p.vocabulary["inputs"] = target_vocabulary - p.vocabulary["targets"] = input_vocabulary - # Swap input/target space ids. - input_space_id = p.input_space_id - target_space_id = p.target_space_id - p.input_space_id = target_space_id - p.target_space_id = input_space_id - # Mark that p was reversed. - p.was_reversed = True - if was_copy: - # Duplicate input modality. - p.target_modality = p.input_modality["inputs"] - # Duplicate input vocabulary. - p.vocabulary["targets"] = p.vocabulary["inputs"] - # Duplicate input space ids. - p.target_space_id = p.input_space_id - # Mark that p was reversed. - p.was_copy = True - return p - - -def test_problem_hparams(model_hparams, input_vocab_size, target_vocab_size): +from tensor2tensor.layers import modalities +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +# TODO(rsepassi): Merge these problems with their data generators. Currently +# they only implement the hparams. 
+ + +class AudioTimitProblem(problem.Problem): + """Base class for TIMIT problems.""" + + def example_reading_spec(self): + data_fields = { + "inputs": tf.VarLenFeature(tf.int64), + "audio/sample_count": tf.FixedLenFeature((), tf.int64), + "audio/sample_width": tf.FixedLenFeature((), tf.int64), + "targets": tf.VarLenFeature(tf.int64), + } + return data_fields, None + + def preprocess_example(self, example, mode, hparams): + example = super(AudioTimitProblem, self).preprocess_example( + example, mode, hparams) + # Reshape audio to proper shape + sample_count = tf.to_int32(example.pop("audio/sample_count")) + sample_width = tf.to_int32(example.pop("audio/sample_width")) + channel_count = 1 + example["inputs"] = tf.reshape(example["inputs"], + [sample_count, sample_width, channel_count]) + return example + + +@registry.register_problem +class AudioTimitCharactersTune(AudioTimitProblem): + """TIMIT to characters.""" + + def feature_encoders(self, _): + return { + "inputs": text_encoder.TextEncoder(), + "targets": text_encoder.ByteTextEncoder(), + } + + def hparams(self, defaults, model_hparams): + hp = defaults + hp.modality = {"inputs": modalities.ModalityType.SPEECH_RECOGNITION, + "targets": modalities.ModalityType.SYMBOL} + hp.vocab_size = {"inputs": None, + "targets": 256} + + +@registry.register_problem +class AudioTimitTokens8kTune(AudioTimitProblem): + """TIMIT to tokens.""" + + @property + def target_vocab_size(self): + return 2**13 # 8192 + + def feature_encoders(self, data_dir): + vocab_filename = os.path.join(data_dir, + "vocab.endefr.%d" % self.target_vocab_size) + subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename) + return { + "inputs": text_encoder.TextEncoder(), + "targets": subtokenizer, + } + + def hparams(self, defaults, model_hparams): + hp = defaults + hp.modality = {"inputs": modalities.ModalityType.SPEECH_RECOGNITION, + "targets": modalities.ModalityType.SYMBOL} + hp.vocab_size = { + "inputs": None, + "targets": self.get_feature_encoders()["targets"].vocab_size, + } + hp.batch_size_multiplier = 256 + hp.loss_multiplier = 2.0 + hp.input_space_id = 13 + hp.target_space_id = 3 + + +@registry.register_problem +class AudioTimitTokens8kTest(AudioTimitTokens8kTune): + """TIMIT to tokens.""" + pass + + +@registry.register_problem +class ParsingEnglishPtb8k(problem.Problem): + """Parsing.""" + + @property + def target_vocab_size(self): + return 2**13 # 8192 + + def feature_encoders(self, data_dir): + vocab_filename = os.path.join(data_dir, + "vocab.endefr.%d" % self.target_vocab_size) + subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename) + return { + "inputs": subtokenizer, + "targets": subtokenizer, + } + + def hparams(self, defaults, model_hparams): + hp = defaults + hp.modality = {"inputs": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.SYMBOL} + hp.vocab_size = { + "inputs": self.get_feature_encoders()["inputs"].vocab_size, + "targets": self.get_feature_encoders()["targets"].vocab_size, + } + hp.batch_size_multiplier = 256 + hp.loss_multiplier = 2.0 + hp.input_space_id = 3 + hp.target_space_id = 15 + + +@registry.register_problem +class ParsingEnglishPtb16k(problem.Problem): + """Parsing.""" + + @property + def vocab_prefix(self): + return "wsj" + + @property + def inputs_target_vocab_size(self): + return 2**9 # 512 + + @property + def targets_target_vocab_size(self): + return 2**14 # 16384 + + def feature_encoders(self, data_dir): + source_vocab_filename = os.path.join( + data_dir, + self.vocab_prefix + "_source.vocab.%d" % 
self.inputs_target_vocab_size) + target_vocab_filename = os.path.join( + data_dir, + self.vocab_prefix + "_target.vocab.%d" % self.targets_target_vocab_size) + source_subtokenizer = text_encoder.SubwordTextEncoder(source_vocab_filename) + target_subtokenizer = text_encoder.SubwordTextEncoder(target_vocab_filename) + return { + "inputs": source_subtokenizer, + "targets": target_subtokenizer, + } + + def hparams(self, defaults, model_hparams): + hp = defaults + hp.modality = {"inputs": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.SYMBOL} + hp.vocab_size = { + "inputs": self.get_feature_encoders()["inputs"].vocab_size, + "targets": self.get_feature_encoders()["targets"].vocab_size, + } + hp.input_space_id = 3 + hp.target_space_id = 15 + + +class TestProblem(problem.Problem): + """Test problem.""" + + def __init__(self, input_vocab_size, target_vocab_size): + super(TestProblem, self).__init__(False, False) + self.input_vocab_size = input_vocab_size + self.target_vocab_size = target_vocab_size + + def hparams(self, defaults, model_hparams): + hp = defaults + hp.modality = {"inputs": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.SYMBOL} + hp.vocab_size = {"inputs": self.input_vocab_size, + "targets": self.target_vocab_size} + + +def test_problem_hparams(input_vocab_size=None, + target_vocab_size=None, + model_hparams=None): """Problem hparams for testing model bodies.""" - p = default_problem_hparams() - p.input_modality = { - "inputs": modality.SymbolModality(model_hparams, input_vocab_size) - } - p.target_modality = modality.SymbolModality(model_hparams, target_vocab_size) - p.vocabulary = { - "inputs": text_encoder.TextEncoder(), - "targets": text_encoder.TextEncoder() - } - return p - - -def algorithmic(vocab_size, model_hparams): - """Default parameters for algorithmic tasks.""" - p = default_problem_hparams() - p.input_modality = { - "inputs": modality.SymbolModality(model_hparams, vocab_size) - } - p.target_modality = modality.SymbolModality(model_hparams, vocab_size) - p.vocabulary = { - "inputs": text_encoder.TextEncoder(num_reserved_ids=1), - "targets": text_encoder.TextEncoder(num_reserved_ids=1), - } - p.input_space_id = 10 - p.target_space_id = 11 - return p - - -def audio_timit_characters(model_hparams): - """English audio transcription benchmark.""" - p = default_problem_hparams() - p.input_modality = { - "inputs": modality.AudioModality(model_hparams), - } - p.target_modality = modality.SymbolModality(model_hparams, 256) - p.vocabulary = { - "inputs": text_encoder.TextEncoder(), - "targets": text_encoder.ByteTextEncoder(), - } - p.batch_size_multiplier = 256 - p.loss_multiplier = 2.0 - p.input_space_id = 12 - p.target_space_id = 2 - return p - - -def audio_timit_tokens(model_hparams, wrong_vocab_size): - """English audio transcription benchmark. - - Args: - model_hparams: a tf.contrib.training.HParams - wrong_vocab_size: a number used in the filename indicating the approximate - vocabulary size. This is not to be confused with the actual vocabulary - size. - Returns: - a tf.contrib.training.HParams - """ - p = default_problem_hparams() - # This vocab file must be present within the data directory. 
- vocab_filename = os.path.join(model_hparams.data_dir, - "tokens.vocab.%d" % wrong_vocab_size) - subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename) - p.input_modality = { - "inputs": modality.AudioModality(model_hparams), - } - p.target_modality = modality.SymbolModality(model_hparams, - subtokenizer.vocab_size) - p.vocabulary = { - "inputs": text_encoder.TextEncoder(), - "targets": subtokenizer, - } - p.batch_size_multiplier = 256 - p.loss_multiplier = 2.0 - p.input_space_id = 13 - p.target_space_id = 3 - return p - - -def audio_wsj_characters(model_hparams): - """English audio transcription benchmark.""" - p = default_problem_hparams() - p.input_modality = { - "inputs": modality.AudioSpectralModality(model_hparams), - } - p.target_modality = modality.SymbolModality(model_hparams, 256) - p.vocabulary = { - "inputs": text_encoder.TextEncoder(), - "targets": text_encoder.ByteTextEncoder(), - } - p.batch_size_multiplier = 512 - p.loss_multiplier = 2.0 - p.input_space_id = 13 - p.target_space_id = 2 - return p - - -def audio_wsj_tokens(model_hparams, wrong_vocab_size): - """English audio transcription benchmark. - - Args: - model_hparams: a tf.contrib.training.HParams - wrong_vocab_size: a number used in the filename indicating the approximate - vocabulary size. This is not to be confused with the actual vocabulary - size. - Returns: - a tf.contrib.training.HParams - """ - p = default_problem_hparams() - # This vocab file must be present within the data directory. - vocab_filename = os.path.join(model_hparams.data_dir, - "tokens.vocab.%d" % wrong_vocab_size) - subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename) - p.input_modality = { - "inputs": modality.AudioModality(model_hparams), - } - p.target_modality = modality.SymbolModality(model_hparams, - subtokenizer.vocab_size) - p.vocabulary = { - "inputs": text_encoder.TextEncoder(), - "targets": subtokenizer, - } - p.batch_size_multiplier = 512 - p.loss_multiplier = 2.0 - p.input_space_id = 12 - p.target_space_id = 3 - return p - - -def lm1b_16k(model_hparams): - """Billion-word language-modeling benchmark, 16k subtoken vocabulary.""" - p = default_problem_hparams() - p.perplexity_exponent = 1.184206 - p.input_modality = {} - p.target_modality = modality.SymbolModality(model_hparams, 16384) - p.vocabulary = { - "targets": - text_encoder.SubwordTextEncoder( - os.path.join(model_hparams.data_dir, - "lm1b_16k.subword_text_encoder")) - } - p.target_space_id = 3 - return p - - -def lm1b_64k(model_hparams): - """Billion-word language-modeling benchmark, 64k subtoken vocabulary.""" - p = default_problem_hparams() - p.perplexity_exponent = 1.067068 - p.input_modality = {} - p.target_modality = modality.SymbolModality(model_hparams, 65536) - p.vocabulary = { - "targets": - text_encoder.SubwordTextEncoder( - os.path.join(model_hparams.data_dir, - "lm1b_64k.subword_text_encoder")) - } - p.target_space_id = 3 - return p - - -def wmt_enfr_characters(model_hparams): - """English to French translation benchmark.""" - p = default_problem_hparams() - p.input_modality = {"inputs": modality.SymbolModality(model_hparams, 256)} - p.target_modality = modality.SymbolModality(model_hparams, 256) - p.vocabulary = { - "inputs": text_encoder.ByteTextEncoder(), - "targets": text_encoder.ByteTextEncoder(), - } - p.loss_multiplier = 2.0 - p.input_space_id = 2 - p.target_space_id = 5 - return p - - -def wmt_enfr_tokens(model_hparams, wrong_vocab_size): - """English to French translation benchmark. 
- - Args: - model_hparams: a tf.contrib.training.HParams - wrong_vocab_size: a number used in the filename indicating the approximate - vocabulary size. This is not to be confused with the actual vocabulary - size. - Returns: - a tf.contrib.training.HParams - """ - p = default_problem_hparams() - # This vocab file must be present within the data directory. - vocab_filename = os.path.join(model_hparams.data_dir, - "tokens.vocab.%d" % wrong_vocab_size) - subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename) - p.input_modality = { - "inputs": modality.SymbolModality(model_hparams, subtokenizer.vocab_size) - } - p.target_modality = modality.SymbolModality(model_hparams, - subtokenizer.vocab_size) - p.vocabulary = { - "inputs": subtokenizer, - "targets": subtokenizer, - } - p.input_space_id = 3 - p.target_space_id = 6 - return p - - -def wmt_ende_bpe32k(model_hparams): - """English to German translation benchmark.""" - p = default_problem_hparams() - # single modality object enables embedding sharing between inputs and target - # when model_hparams.shared_source_target_embedding is True. - vocab_size = 40960 - m = modality.SymbolModality(model_hparams, vocab_size) - p.input_modality = {"inputs": m} - p.target_modality = m - # This vocab file must be present within the data directory. - vocab_filename = os.path.join(model_hparams.data_dir, "vocab.bpe.32000") - p.vocabulary = { - "inputs": text_encoder.TokenTextEncoder(vocab_filename=vocab_filename), - "targets": text_encoder.TokenTextEncoder(vocab_filename=vocab_filename), - } - p.loss_multiplier = 1.4 - p.input_space_id = 4 - p.target_space_id = 9 - return p - - -def wmt_ende_characters(model_hparams): - """English to German translation benchmark.""" - p = default_problem_hparams() - p.input_modality = {"inputs": modality.SymbolModality(model_hparams, 256)} - p.target_modality = modality.SymbolModality(model_hparams, 256) - p.vocabulary = { - "inputs": text_encoder.ByteTextEncoder(), - "targets": text_encoder.ByteTextEncoder(), - } - p.loss_multiplier = 2.0 - p.input_space_id = 2 - p.target_space_id = 7 - return p - - -def wmt_ende_tokens(model_hparams, wrong_vocab_size): - """English to German translation benchmark.""" - p = default_problem_hparams() - # This vocab file must be present within the data directory. - vocab_filename = os.path.join(model_hparams.data_dir, - "tokens.vocab.%d" % wrong_vocab_size) - subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename) - p.input_modality = { - "inputs": modality.SymbolModality(model_hparams, subtokenizer.vocab_size) - } - p.target_modality = modality.SymbolModality(model_hparams, - subtokenizer.vocab_size) - p.vocabulary = { - "inputs": subtokenizer, - "targets": subtokenizer, - } - p.input_space_id = 3 - p.target_space_id = 8 - return p - - -def wmt_ende_v2(model_hparams, vocab_size): - """English to German translation benchmark with separate vocabularies.""" - p = default_problem_hparams() - # These vocab files must be present within the data directory. 
- source_vocab_filename = os.path.join(model_hparams.data_dir, - "wmt_ende_v2.en.vocab.%d" % vocab_size) - target_vocab_filename = os.path.join(model_hparams.data_dir, - "wmt_ende_v2.de.vocab.%d" % vocab_size) - p.input_modality = { - "inputs": modality.SymbolModality(model_hparams, vocab_size) - } - p.target_modality = modality.SymbolModality(model_hparams, vocab_size) - p.vocabulary = { - "inputs": text_encoder.SubwordTextEncoder(source_vocab_filename), - "targets": text_encoder.SubwordTextEncoder(target_vocab_filename), - } - p.input_space_id = 3 - p.target_space_id = 8 - return p - - -def wmt_concat(model_hparams, wrong_vocab_size): - """English to German translation benchmark.""" - p = default_problem_hparams() - # This vocab file must be present within the data directory. - vocab_filename = os.path.join(model_hparams.data_dir, - "tokens.vocab.%d" % wrong_vocab_size) - subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename) - vocab_size = subtokenizer.vocab_size - p.input_modality = {} - p.target_modality = modality.SymbolModality(model_hparams, vocab_size) - p.vocabulary = {"targets": subtokenizer} - return p - - -def wmt_parsing_characters(model_hparams): - """English to parse tree translation benchmark.""" - p = default_problem_hparams() - p.input_modality = {"inputs": modality.SymbolModality(model_hparams, 256)} - p.target_modality = modality.SymbolModality(model_hparams, 256) - p.vocabulary = { - "inputs": text_encoder.ByteTextEncoder(), - "targets": text_encoder.ByteTextEncoder(), - } - p.loss_multiplier = 2.0 - p.input_space_id = 2 - p.target_space_id = 14 - return p - - -def wmt_parsing_tokens(model_hparams, wrong_vocab_size): - """English to parse tree translation benchmark. - - Args: - model_hparams: a tf.contrib.training.HParams - wrong_vocab_size: a number used in the filename indicating the approximate - vocabulary size. This is not to be confused with the actual vocabulary - size. - Returns: - a tf.contrib.training.HParams - """ - p = default_problem_hparams() - # This vocab file must be present within the data directory. - vocab_filename = os.path.join(model_hparams.data_dir, - "tokens.vocab.%d" % wrong_vocab_size) - subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename) - p.input_modality = { - "inputs": modality.SymbolModality(model_hparams, subtokenizer.vocab_size) - } - p.target_modality = modality.SymbolModality(model_hparams, - subtokenizer.vocab_size) - p.vocabulary = { - "inputs": subtokenizer, - "targets": subtokenizer, - } - p.input_space_id = 3 - p.target_space_id = 15 - return p - - -def wsj_parsing_tokens(model_hparams, wrong_source_vocab_size, - wrong_target_vocab_size): - """English to parse tree translation benchmark. - - Args: - model_hparams: a tf.contrib.training.HParams - wrong_source_vocab_size: a number used in the filename indicating the - approximate vocabulary size. This is not to be confused with the actual - vocabulary size. - wrong_target_vocab_size: a number used in the filename indicating the - approximate target vocabulary size. This is not to be confused with the - actual target vocabulary size. - Returns: - a tf.contrib.training.HParams - """ - p = default_problem_hparams() - # This vocab file must be present within the data directory. 
- source_vocab_filename = os.path.join( - model_hparams.data_dir, - "wsj_source.tokens.vocab.%d" % wrong_source_vocab_size) - target_vocab_filename = os.path.join( - model_hparams.data_dir, - "wsj_target.tokens.vocab.%d" % wrong_target_vocab_size) - source_subtokenizer = text_encoder.SubwordTextEncoder( - source_vocab_filename) - target_subtokenizer = text_encoder.SubwordTextEncoder( - target_vocab_filename) - p.input_modality = { - "inputs": modality.SymbolModality(model_hparams, - source_subtokenizer.vocab_size) - } - p.target_modality = modality.SymbolModality(model_hparams, - target_subtokenizer.vocab_size) - p.vocabulary = { - "inputs": source_subtokenizer, - "targets": target_subtokenizer, - } - p.input_space_id = 3 - p.target_space_id = 15 - return p - - -def image_cifar10(model_hparams): - """CIFAR-10.""" - p = default_problem_hparams() - p.input_modality = {"inputs": modality.SmallImageModality(model_hparams)} - p.target_modality = modality.ClassLabelModality(model_hparams, 10) - p.batch_size_multiplier = 4 - p.max_expected_batch_size_per_shard = 8 - p.loss_multiplier = 3.0 - p.input_space_id = 1 - p.target_space_id = 1 - return p - - -def image_mnist(model_hparams): - """MNIST.""" - p = default_problem_hparams() - p.input_modality = {"inputs": modality.SymbolModality(model_hparams, 256)} - p.target_modality = modality.ClassLabelModality(model_hparams, 10) - p.batch_size_multiplier = 4 - p.max_expected_batch_size_per_shard = 8 - p.loss_multiplier = 3.0 - p.input_space_id = 1 - p.target_space_id = 1 - return p - - -def image_imagenet(model_hparams): - """ImageNet.""" - p = default_problem_hparams() - p.input_modality = { - "inputs": modality.ImageModality(model_hparams), - } - p.target_modality = modality.ClassLabelModality( - model_hparams, 1000, is2d=model_hparams.imagenet_use_2d) - p.batch_size_multiplier = 256 - p.max_expected_batch_size_per_shard = 2 - p.loss_multiplier = 0.7 - p.input_space_id = 1 - p.target_space_id = 1 - return p - - -def image_mscoco_characters(model_hparams): - """COCO image captioning with captions as characters.""" - p = default_problem_hparams() - p.input_modality = {"inputs": modality.ImageModality(model_hparams)} - p.target_modality = modality.SymbolModality(model_hparams, 256) - p.vocabulary = { - "inputs": text_encoder.TextEncoder(), - "targets": text_encoder.ByteTextEncoder(), - } - p.batch_size_multiplier = 128 - p.max_expected_batch_size_per_shard = 2 - p.loss_multiplier = 2.0 - p.input_space_id = 1 - p.target_space_id = 2 - return p - - -def image_mscoco_tokens(model_hparams, vocab_count): - """COCO image captioning with captions as tokens.""" - p = default_problem_hparams() - p.input_modality = {"inputs": modality.ImageModality(model_hparams)} - # This vocab file must be present within the data directory. - vocab_filename = os.path.join(model_hparams.data_dir, - "tokens.vocab.%d" % vocab_count) - subtokenizer = text_encoder.SubwordTextEncoder(vocab_filename) - p.target_modality = modality.SymbolModality(model_hparams, - subtokenizer.vocab_size) - p.vocabulary = { - "inputs": text_encoder.TextEncoder(), - "targets": subtokenizer, - } - p.batch_size_multiplier = 256 - p.max_expected_batch_size_per_shard = 2 - p.input_space_id = 1 - p.target_space_id = 3 - return p - - -# Dictionary of named hyperparameter settings for various problems. -# This is only accessed through the problem_hparams function below. 
-_problem_hparams_map = { - "algorithmic_addition_binary40": lambda p: algorithmic(3, p), - "algorithmic_addition_decimal40": lambda p: algorithmic(11, p), - "algorithmic_identity_binary40": lambda p: algorithmic(3, p), - "algorithmic_identity_decimal40": lambda p: algorithmic(11, p), - "algorithmic_multiplication_binary40": lambda p: algorithmic(3, p), - "algorithmic_multiplication_decimal40": lambda p: algorithmic(11, p), - "algorithmic_reverse_binary40": lambda p: algorithmic(3, p), - "algorithmic_reverse_decimal40": lambda p: algorithmic(11, p), - "algorithmic_shift_decimal40": lambda p: algorithmic(21, p), - "audio_timit_characters_tune": audio_timit_characters, - "audio_timit_characters_test": audio_timit_characters, - "audio_timit_tokens_8k_tune": lambda p: audio_timit_tokens(p, 2**13), - "audio_timit_tokens_8k_test": lambda p: audio_timit_tokens(p, 2**13), - "audio_wsj_characters_tune": audio_wsj_characters, - "audio_wsj_characters_test": audio_wsj_characters, - "audio_wsj_tokens_8k_tune": lambda p: audio_wsj_tokens(p, 2**13), - "audio_wsj_tokens_8k_test": lambda p: audio_wsj_tokens(p, 2**13), - "lm1b_16k": lm1b_16k, - "lm1b_64k": lm1b_64k, - "wmt_parsing_characters": wmt_parsing_characters, - "wmt_parsing_tokens_8k": lambda p: wmt_parsing_tokens(p, 2**13), - "wsj_parsing_tokens_16k": lambda p: wsj_parsing_tokens(p, 2**14, 2**9), - "wsj_parsing_tokens_32k": lambda p: wsj_parsing_tokens(p, 2**15, 2**9), - "wmt_enfr_characters": wmt_enfr_characters, - "wmt_enfr_tokens_8k": lambda p: wmt_enfr_tokens(p, 2**13), - "wmt_enfr_tokens_32k": lambda p: wmt_enfr_tokens(p, 2**15), - "wmt_enfr_tokens_32k_shuffled": lambda p: wmt_enfr_tokens(p, 2**15), - "wmt_enfr_tokens_32k_combined": lambda p: wmt_enfr_tokens(p, 2**15), - "wmt_enfr_tokens_128k": lambda p: wmt_enfr_tokens(p, 2**17), - # bytes per subtoken: 3.267350 - "wmt_ende_concat_8k": lambda p: wmt_concat(p, 2**13), - # bytes per subtoken: 4.236272 - "wmt_ende_concat_32k": lambda p: wmt_concat(p, 2**15), - "wmt_ende_characters": wmt_ende_characters, - "wmt_ende_tokens_8k": lambda p: wmt_ende_tokens(p, 2**13), - "wmt_ende_tokens_32k": lambda p: wmt_ende_tokens(p, 2**15), - "wmt_ende_tokens_128k": lambda p: wmt_ende_tokens(p, 2**17), - # bytes per subtoken: 4.59291664162 - "wmt_ende_bpe32k": wmt_ende_bpe32k, - "wmt_ende_bpe32k_shuffled": wmt_ende_bpe32k, - "wmt_ende_bpe32k_combined": wmt_ende_bpe32k, - "wmt_ende_bpe32k_160": wmt_ende_bpe32k, - "wmt_ende_v2_32k_combined": lambda p: wmt_ende_v2(p, 2**15), - "wmt_ende_v2_16k_combined": lambda p: wmt_ende_v2(p, 2**14), - "image_cifar10_tune": image_cifar10, - "image_cifar10_test": image_cifar10, - "image_mnist_tune": image_mnist, - "image_mnist_test": image_mnist, - "image_mscoco_characters_tune": image_mscoco_characters, - "image_mscoco_characters_test": image_mscoco_characters, - "image_mscoco_tokens_8k_tune": lambda p: image_mscoco_tokens(p, 2**13), - "image_mscoco_tokens_8k_test": lambda p: image_mscoco_tokens(p, 2**13), - "image_mscoco_tokens_32k_tune": lambda p: image_mscoco_tokens(p, 2**15), - "image_mscoco_tokens_32k_test": lambda p: image_mscoco_tokens(p, 2**15), - "image_mscoco_tokens_128k_tune": lambda p: image_mscoco_tokens(p, 2**17), - "image_mscoco_tokens_128k_test": lambda p: image_mscoco_tokens(p, 2**17), - "image_imagenet": image_imagenet, -} + p = TestProblem(input_vocab_size, target_vocab_size) + return p.get_hparams(model_hparams) diff --git a/tensor2tensor/data_generators/problem_hparams_test.py b/tensor2tensor/data_generators/problem_hparams_test.py deleted file mode 100644 
index 5c8bc5516..000000000 --- a/tensor2tensor/data_generators/problem_hparams_test.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tests for tensor2tensor.problem_hparams.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -from tensor2tensor.data_generators import problem_hparams - -import tensorflow as tf - - -class ProblemHparamsTest(tf.test.TestCase): - - def testParseProblemName(self): - problem_name = "base" - self.assertEqual(problem_hparams.parse_problem_name(problem_name), - ("base", False, False)) - problem_name = "base_rev" - self.assertEqual( - problem_hparams.parse_problem_name(problem_name), ("base", True, False)) - problem_name = "base_copy" - self.assertEqual( - problem_hparams.parse_problem_name(problem_name), ("base", False, True)) - problem_name = "base_copy_rev" - self.assertEqual( - problem_hparams.parse_problem_name(problem_name), ("base", True, True)) - problem_name = "base_rev_copy" - self.assertEqual( - problem_hparams.parse_problem_name(problem_name), ("base", True, True)) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensor2tensor/data_generators/problem_test.py b/tensor2tensor/data_generators/problem_test.py new file mode 100644 index 000000000..de1c26bea --- /dev/null +++ b/tensor2tensor/data_generators/problem_test.py @@ -0,0 +1,235 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Test for common problem functionalities.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized # for assertLen +import numpy as np + +from tensor2tensor.data_generators import algorithmic +from tensor2tensor.data_generators import problem as problem_module +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.layers import modalities +from tensor2tensor.utils import hparam +from tensor2tensor.utils import test_utils + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +tf.enable_eager_execution() + + +def assert_tensors_equal(sess, t1, t2, n): + """Compute tensors `n` times and ensure that they are equal.""" + + for _ in range(n): + + v1, v2 = sess.run([t1, t2]) + + if v1.shape != v2.shape: + return False + + if not np.all(v1 == v2): + return False + + return True + + +class ProblemTest(parameterized.TestCase, tf.test.TestCase): + + @classmethod + def setUpClass(cls): + algorithmic.TinyAlgo.setup_for_test() + + @test_utils.run_in_graph_mode_only() + def testNoShuffleDeterministic(self): + problem = algorithmic.TinyAlgo() + dataset = problem.dataset(mode=tf_estimator.ModeKeys.TRAIN, + data_dir=algorithmic.TinyAlgo.data_dir, + shuffle_files=False) + + tensor1 = dataset.make_one_shot_iterator().get_next()["targets"] + tensor2 = dataset.make_one_shot_iterator().get_next()["targets"] + + with tf.Session() as sess: + self.assertTrue(assert_tensors_equal(sess, tensor1, tensor2, 20)) + + @test_utils.run_in_graph_mode_only() + def testNoShufflePreprocess(self): + + problem = algorithmic.TinyAlgo() + dataset1 = problem.dataset(mode=tf_estimator.ModeKeys.TRAIN, + data_dir=algorithmic.TinyAlgo.data_dir, + shuffle_files=False, preprocess=False) + dataset2 = problem.dataset(mode=tf_estimator.ModeKeys.TRAIN, + data_dir=algorithmic.TinyAlgo.data_dir, + shuffle_files=False, preprocess=True) + + tensor1 = dataset1.make_one_shot_iterator().get_next()["targets"] + tensor2 = dataset2.make_one_shot_iterator().get_next()["targets"] + + with tf.Session() as sess: + self.assertTrue(assert_tensors_equal(sess, tensor1, tensor2, 20)) + + @test_utils.run_in_graph_and_eager_modes() + def testProblemHparamsModality(self): + problem = problem_hparams.TestProblem(input_vocab_size=2, + target_vocab_size=3) + p_hparams = problem.get_hparams() + self.assertEqual(p_hparams.modality["inputs"], + modalities.ModalityType.SYMBOL) + self.assertEqual(p_hparams.modality["targets"], + modalities.ModalityType.SYMBOL) + + @test_utils.run_in_graph_and_eager_modes() + def testProblemHparamsInputOnlyModality(self): + class InputOnlyProblem(problem_module.Problem): + + def hparams(self, defaults, model_hparams): + hp = defaults + hp.modality = {"inputs": modalities.ModalityType.SYMBOL} + hp.vocab_size = {"inputs": 2} + + problem = InputOnlyProblem(False, False) + p_hparams = problem.get_hparams() + self.assertEqual(p_hparams.modality["inputs"], + modalities.ModalityType.SYMBOL) + self.assertLen(p_hparams.modality, 1) + + @test_utils.run_in_graph_and_eager_modes() + def testProblemHparamsTargetOnlyModality(self): + class TargetOnlyProblem(problem_module.Problem): + + def hparams(self, defaults, model_hparams): + hp = defaults + hp.modality = {"targets": modalities.ModalityType.SYMBOL} + hp.vocab_size = {"targets": 3} + + problem = TargetOnlyProblem(False, False) + p_hparams = problem.get_hparams() + self.assertEqual(p_hparams.modality["targets"], + 
modalities.ModalityType.SYMBOL) + self.assertLen(p_hparams.modality, 1) + + @test_utils.run_in_graph_and_eager_modes() + def testDataFilenames(self): + problem = algorithmic.TinyAlgo() + + num_shards = 10 + shuffled = False + data_dir = "/tmp" + + # Test training_filepaths and data_filepaths give the same list on + # appropriate arguments. + self.assertAllEqual( + problem.training_filepaths(data_dir, num_shards, shuffled), + problem.data_filepaths(problem_module.DatasetSplit.TRAIN, data_dir, + num_shards, shuffled)) + + self.assertAllEqual( + problem.dev_filepaths(data_dir, num_shards, shuffled), + problem.data_filepaths(problem_module.DatasetSplit.EVAL, data_dir, + num_shards, shuffled)) + + self.assertAllEqual( + problem.test_filepaths(data_dir, num_shards, shuffled), + problem.data_filepaths(problem_module.DatasetSplit.TEST, data_dir, + num_shards, shuffled)) + + @test_utils.run_in_graph_mode_only() + def testServingInputFnUseTpu(self): + problem = problem_module.Problem() + max_length = 128 + batch_size = 10 + hparams = hparam.HParams( + max_length=max_length, + max_input_seq_length=max_length, + max_target_seq_length=max_length, + prepend_mode="none", + split_to_length=0) + decode_hparams = hparam.HParams(batch_size=batch_size) + serving_input_receiver = problem.serving_input_fn( + hparams=hparams, decode_hparams=decode_hparams, use_tpu=True) + serving_input_fn_input = getattr(serving_input_receiver, + "receiver_tensors")["input"] + serving_input_fn_output = getattr(serving_input_receiver, + "features")["inputs"] + example_1 = tf.train.Example( + features=tf.train.Features(feature={ + "inputs": tf.train.Feature( + int64_list=tf.train.Int64List(value=[0])) + })) + example_2 = tf.train.Example( + features=tf.train.Features(feature={ + "inputs": tf.train.Feature( + int64_list=tf.train.Int64List(value=[1])) + })) + serialized_examples = [ + example_1.SerializeToString(), + example_2.SerializeToString() + ] + with self.test_session() as sess: + output_shape = sess.run( + tf.shape(serving_input_fn_output), + feed_dict={serving_input_fn_input: serialized_examples}) + self.assertEqual(output_shape[0], batch_size) + self.assertEqual(output_shape[1], max_length) + + @test_utils.run_in_graph_and_eager_modes() + def testInputAndTargetVocabSizesAreReversed(self): + + class WasReversedTestProblem(problem_module.Problem): + + def __init__(self, input_vocab_size, target_vocab_size, was_reversed): + super(WasReversedTestProblem, self).__init__(was_reversed, False) + self.input_vocab_size = input_vocab_size + self.target_vocab_size = target_vocab_size + + def hparams(self, defaults, model_hparams): + hp = defaults + hp.vocab_size = {"targets": self.target_vocab_size, + "inputs": self.input_vocab_size} + + problem = WasReversedTestProblem(input_vocab_size=1, + target_vocab_size=3, + was_reversed=True) + p_hparams = problem.get_hparams() + self.assertEqual(p_hparams.vocab_size["inputs"], 3) + self.assertEqual(p_hparams.vocab_size["targets"], 1) + + @test_utils.run_in_graph_and_eager_modes() + def testInputAndTargetModalitiesAreReversed(self): + + class WasReversedTestProblem(problem_module.Problem): + + def __init__(self, was_reversed): + super(WasReversedTestProblem, self).__init__(was_reversed, False) + + def hparams(self, defaults, model_hparams): + hp = defaults + hp.modality["inputs"] = "inputs_modality" + hp.modality["targets"] = "targets_modality" + + problem = WasReversedTestProblem(was_reversed=True) + p_hparams = problem.get_hparams() + self.assertEqual(p_hparams.modality["inputs"], 
"targets_modality") + self.assertEqual(p_hparams.modality["targets"], "inputs_modality") + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/program_search.py b/tensor2tensor/data_generators/program_search.py new file mode 100644 index 000000000..694434f96 --- /dev/null +++ b/tensor2tensor/data_generators/program_search.py @@ -0,0 +1,125 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Program Search Problems.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import gzip +import json +import os + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_problem +class ProgramSearchAlgolisp(text_problems.Text2TextProblem): + """Problem class for Program Search Algolisp task. + + Synthesizing programs from description and examples. + + Please see: https://arxiv.org/pdf/1802.04335.pdf for the full description. + """ + + # The locations of the train, dev, and test set. + DROPBOX = "https://www.dropbox.com/" + DATA_URLS = { + problem.DatasetSplit.TRAIN: ( + DROPBOX + "/s/qhun6kml9yb2ui9/metaset3.train.jsonl.gz?dl=1"), + problem.DatasetSplit.EVAL: ( + DROPBOX + "/s/aajkw83j2ps8bzx/metaset3.dev.jsonl.gz?dl=1"), + problem.DatasetSplit.TEST: ( + DROPBOX + "/s/f1x9ybkjpf371cp/metaset3.test.jsonl.gz?dl=1"), + } + + @staticmethod + def _extract_filename_from_http://github.com/url(url): + # Ex: TRAIN_URL --> metaset3.train.jsonl.gz + + # Get everything from the last / onwards. + filename = os.path.basename(url) + + # Get rid of everything after the first ? + return filename.split("?")[0] + + @staticmethod + def _flatten_target_programs(iterable): + # The target programs are read as nested lists, we should flatten them. + yield "[" + it = iter(iterable) + for e in it: + if isinstance(e, (list, tuple)): + for f in ProgramSearchAlgolisp._flatten_target_programs(e): + yield f + else: + yield e + yield "]" + + @staticmethod + def _parse_json_to_dict(json_line): + # First parse it through json. + line_json_dict = json.loads(json_line) + + # The features of interest "text" and "short_tree" are stored as lists in + # this dictionary -- "short_tree" is a nested list. We flatten and join the + # lists on space, to return a string in both these cases. + + # Make another dictionary, to return only the features we want. + return { + "inputs": + " ".join(line_json_dict["text"]), + "targets": + " ".join([ + i for i in ProgramSearchAlgolisp._flatten_target_programs( + line_json_dict["short_tree"]) + ]) + } + + @property + def is_generate_per_split(self): + # Return True since we already have the train and the dev set separated out. 
+ return True + + def maybe_download_dataset(self, tmp_dir, dataset_split): + """Downloads the appropriate dataset file and returns its path.""" + # Get the dataset url for the split requested. + url = self.DATA_URLS.get(dataset_split, None) + + # Sanity check. + if url is None: + tf.logging.fatal("Unknown dataset_split passed: {}".format(dataset_split)) + + # Download the data, if it doesn't already exist. + return generator_utils.maybe_download(tmp_dir, + self._extract_filename_from_url(url), + url) + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + del data_dir + + # Download the data, if it doesn't already exist. + downloaded_filepath = self.maybe_download_dataset(tmp_dir, dataset_split) + + # Decompress the file and iterate through it. + with gzip.open(downloaded_filepath, "rb") as data_fp: + for line in data_fp: + yield self._parse_json_to_dict(line.strip()) diff --git a/tensor2tensor/data_generators/program_search_test.py b/tensor2tensor/data_generators/program_search_test.py new file mode 100644 index 000000000..85f87ac98 --- /dev/null +++ b/tensor2tensor/data_generators/program_search_test.py @@ -0,0 +1,112 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.data_generators.program_search.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import gzip +import os +import shutil +import tempfile + +from builtins import bytes # pylint: disable=redefined-builtin +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import program_search + +import tensorflow.compat.v1 as tf + + +class ProgramSearchAlgolispStub(program_search.ProgramSearchAlgolisp): + """Stub of ProgramSearchAlgolisp that stubs out maybe_download_dataset. + + The maybe_download_dataset writes one predetermined example in a gzip file + self.N number of times and returns the file path.
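+ + The gzip file holds one JSON-encoded example per line, which is the format that generate_samples parses.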
+ """ + + EXAMPLE = ('{"funcs": [], "tests": [{"output": 0, "input": {"a": 5}}, ' + '{"output": 1, "input": {"a": 20}}, {"output": 2, "input": ' + '{"a": 28}}, {"output": 1, "input": {"a": 13}}, {"output": 1, ' + '"input": {"a": 27}}, {"output": 1, "input": {"a": 13}}, ' + '{"output": 1, "input": {"a": 20}}, {"output": 0, ' + '"input": {"a": 8}}, {"output": 0, "input": {"a": 8}}, ' + '{"output": 0, "input": {"a": 4}}], "short_tree": ["invoke1", ' + '["lambda1", ["if", ["==", ["len", ["digits", "arg1"]], "1"], "0",' + ' ["+", "1", ["self", ["reduce", ["digits", "arg1"], "0", ' + '"+"]]]]], "a"], "tags": [], "text": ["given", "a", "number", "a",' + ' ",", "find", "how", "many", "times", "you", "can", "replace", ' + '"a", "with", "sum", "of", "its", "digits", "before", "it", ' + '"becomes", "a", "single", "digit", "number"], "return_type": ' + '"int", "args": {"a": "int"}, "nodes": ["l1_recursive_digits"]}') + + EXAMPLE_INPUT = ('given a number a , find how many times you can replace a ' + 'with sum of its digits before it becomes a single digit ' + 'number') + + EXAMPLE_TARGET = ('[ invoke1 [ lambda1 [ if [ == [ len [ digits arg1 ] ] 1 ]' + ' 0 [ + 1 [ self [ reduce [ digits arg1 ] 0 + ] ] ] ] ] a ' + ']') + + N = 10 + + def maybe_download_dataset(self, tmp_dir, dataset_split): + (_, data_file) = tempfile.mkstemp( + suffix='.gz', prefix=str(dataset_split) + '-', dir=tmp_dir) + + with gzip.open(data_file, 'wb') as gz_file: + content = '\n'.join([self.EXAMPLE] * self.N) + gz_file.write(bytes(content, 'utf-8')) + return data_file + + +class ProgramSearchAlgolispTest(tf.test.TestCase): + + @classmethod + def setUpClass(cls): + # Setup the temp directory tree. + cls.tmp_dir = tf.test.get_temp_dir() + shutil.rmtree(cls.tmp_dir) + os.mkdir(cls.tmp_dir) + + @classmethod + def tearDownClass(cls): + # Cleanup the temp directory tree. + shutil.rmtree(cls.tmp_dir) + + def testEndToEnd(self): + # End-to-end test, the stub problem class creates a .gz file with nps_stub.N + # example and we check if we're able to process it correctly. + nps_stub = ProgramSearchAlgolispStub() + num = 0 + for example in nps_stub.generate_samples(None, self.tmp_dir, + problem.DatasetSplit.TRAIN): + + # Only one example in 'file', so this is OK. + self.assertEqual(example['inputs'], + ProgramSearchAlgolispStub.EXAMPLE_INPUT) + + self.assertEqual(example['targets'], + ProgramSearchAlgolispStub.EXAMPLE_TARGET) + + num += 1 + + # assert that we have as many examples as there are in the file. + self.assertEqual(num, nps_stub.N) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/data_generators/ptb.py b/tensor2tensor/data_generators/ptb.py new file mode 100644 index 000000000..2f436c577 --- /dev/null +++ b/tensor2tensor/data_generators/ptb.py @@ -0,0 +1,169 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Data generators for PTB data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import os +import sys +import tarfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +EOS = text_encoder.EOS +PTB_URL = "http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz" + + +def _read_words(filename): + """Reads words from a file.""" + with tf.gfile.GFile(filename, "r") as f: + if sys.version_info[0] >= 3: + return f.read().replace("\n", " %s " % EOS).split() + else: + return f.read().decode("utf-8").replace("\n", " %s " % EOS).split() + + +def _build_vocab(filename, vocab_path, vocab_size): + """Reads a file to build a vocabulary of `vocab_size` most common words. + + The vocabulary is sorted by occurrence count and has one word per line. + + Args: + filename: file to read list of words from. + vocab_path: path where to save the vocabulary. + vocab_size: size of the vocabulary to generate. + """ + data = _read_words(filename) + counter = collections.Counter(data) + count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) + words, _ = list(zip(*count_pairs)) + words = words[:vocab_size] + with open(vocab_path, "w") as f: + f.write("\n".join(words)) + + +def _get_token_encoder(vocab_dir, vocab_name, filename): + """Reads from file and returns a `TokenTextEncoder` for the vocabulary.""" + vocab_path = os.path.join(vocab_dir, vocab_name) + if not tf.gfile.Exists(vocab_path): + _build_vocab(filename, vocab_path, 10000) + return text_encoder.TokenTextEncoder(vocab_path) + + +def _maybe_download_corpus(tmp_dir, vocab_type): + """Download and unpack the corpus. + + Args: + tmp_dir: directory containing dataset. + vocab_type: which vocabulary are we using. + + Returns: + The list of names of files. + """ + filename = os.path.basename(PTB_URL) + compressed_filepath = generator_utils.maybe_download( + tmp_dir, filename, PTB_URL) + ptb_files = [] + ptb_char_files = [] + + with tarfile.open(compressed_filepath, "r:gz") as tgz: + files = [] + # Selecting only relevant files. 
+ for m in tgz.getmembers(): + if "ptb" in m.name and ".txt" in m.name: + if "char" in m.name: + ptb_char_files += [m.name] + else: + ptb_files += [m.name] + files += [m] + + tgz.extractall(tmp_dir, members=files) + + if vocab_type == text_problems.VocabType.CHARACTER: + return ptb_char_files + else: + return ptb_files + + +@registry.register_problem +class LanguagemodelPtb10k(text_problems.Text2SelfProblem): + """PTB, 10k vocab.""" + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def is_generate_per_split(self): + return True + + @property + def vocab_filename(self): + return "vocab.lmptb.10000" + + @property + def vocab_type(self): + return text_problems.VocabType.TOKEN + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + files = _maybe_download_corpus(tmp_dir, self.vocab_type) + + train_file, valid_file = None, None + for filename in files: + if "train" in filename: + train_file = os.path.join(tmp_dir, filename) + elif "valid" in filename: + valid_file = os.path.join(tmp_dir, filename) + + assert train_file, "Training file not found" + assert valid_file, "Validation file not found" + + _get_token_encoder(data_dir, self.vocab_filename, train_file) + + train = dataset_split == problem.DatasetSplit.TRAIN + filepath = train_file if train else valid_file + + def _generate_samples(): + with tf.gfile.GFile(filepath, "r") as f: + for line in f: + line = " ".join(line.replace("\n", " %s " % EOS).split()) + yield {"targets": line} + + return _generate_samples() + + +@registry.register_problem +class LanguagemodelPtbCharacters(LanguagemodelPtb10k): + """PTB, character-level.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER diff --git a/tensor2tensor/data_generators/qnli.py b/tensor2tensor/data_generators/qnli.py new file mode 100644 index 000000000..38be61ee5 --- /dev/null +++ b/tensor2tensor/data_generators/qnli.py @@ -0,0 +1,117 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Data generators for the Question-Answering NLI dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + +EOS = text_encoder.EOS + + +@registry.register_problem +class QuestionNLI(text_problems.TextConcat2ClassProblem): + """Question Answering NLI classification problems.""" + + # Link to data from GLUE: https://gluebenchmark.com/tasks + _QNLI_URL = ("https://firebasestorage.googleapis.com/v0/b/" + "mtl-sentence-representations.appspot.com/o/" + "data%2FQNLI.zip?alt=media&token=c24cad61-f2df-" + "4f04-9ab6-aa576fa829d0") + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 100, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2**15 + + @property + def num_classes(self): + return 2 + + def class_labels(self, data_dir): + del data_dir + # Note this binary classification is different from usual MNLI. + return ["not_entailment", "entailment"] + + def _maybe_download_corpora(self, tmp_dir): + qnli_filename = "QNLI.zip" + qnli_finalpath = os.path.join(tmp_dir, "QNLI") + if not tf.gfile.Exists(qnli_finalpath): + zip_filepath = generator_utils.maybe_download( + tmp_dir, qnli_filename, self._QNLI_URL) + zip_ref = zipfile.ZipFile(zip_filepath, "r") + zip_ref.extractall(tmp_dir) + zip_ref.close() + + return qnli_finalpath + + def example_generator(self, filename): + label_list = self.class_labels(data_dir=None) + for idx, line in enumerate(tf.gfile.Open(filename, "rb")): + if idx == 0: continue # skip header + line = text_encoder.to_unicode_utf8(line.strip()) + _, s1, s2, l = line.split("\t") + inputs = [s1, s2] + l = label_list.index(l) + yield { + "inputs": inputs, + "label": l + } + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + qnli_dir = self._maybe_download_corpora(tmp_dir) + if dataset_split == problem.DatasetSplit.TRAIN: + filesplit = "train.tsv" + else: + filesplit = "dev.tsv" + + filename = os.path.join(qnli_dir, filesplit) + for example in self.example_generator(filename): + yield example + + +@registry.register_problem +class QuestionNLICharacters(QuestionNLI): + """Question-Answering NLI classification problems, character level""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.EN_NLI diff --git a/tensor2tensor/data_generators/quora_qpairs.py b/tensor2tensor/data_generators/quora_qpairs.py new file mode 100644 index 000000000..47d8541e8 --- /dev/null +++ b/tensor2tensor/data_generators/quora_qpairs.py @@ -0,0 +1,123 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for the Quora Question Pairs dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + +EOS = text_encoder.EOS + + +@registry.register_problem +class QuoraQuestionPairs(text_problems.TextConcat2ClassProblem): + """Quora duplicate question pairs binary classification problems.""" + + # Link to data from GLUE: https://gluebenchmark.com/tasks + _QQP_URL = ("https://firebasestorage.googleapis.com/v0/b/" + "mtl-sentence-representations.appspot.com/o/" + "data%2FQQP.zip?alt=media&token=700c6acf-160d-" + "4d89-81d1-de4191d02cb5") + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 100, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2**15 + + @property + def num_classes(self): + return 2 + + def class_labels(self, data_dir): + del data_dir + return ["not_duplicate", "duplicate"] + + def _maybe_download_corpora(self, tmp_dir): + qqp_filename = "QQP.zip" + qqp_finalpath = os.path.join(tmp_dir, "QQP") + if not tf.gfile.Exists(qqp_finalpath): + zip_filepath = generator_utils.maybe_download( + tmp_dir, qqp_filename, self._QQP_URL) + zip_ref = zipfile.ZipFile(zip_filepath, "r") + zip_ref.extractall(tmp_dir) + zip_ref.close() + + return qqp_finalpath + + def example_generator(self, filename): + skipped = 0 + for idx, line in enumerate(tf.gfile.Open(filename, "rb")): + if idx == 0: continue # skip header + line = text_encoder.to_unicode_utf8(line.strip()) + split_line = line.split("\t") + if len(split_line) < 6: + skipped += 1 + tf.logging.info("Skipping %d" % skipped) + continue + s1, s2, l = split_line[3:] + # A neat data augmentation trick from Radford et al.
(2018) + # https://blog.openai.com/language-unsupervised/ + inputs = [[s1, s2], [s2, s1]] + for inp in inputs: + yield { + "inputs": inp, + "label": int(l) + } + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + qqp_dir = self._maybe_download_corpora(tmp_dir) + if dataset_split == problem.DatasetSplit.TRAIN: + filesplit = "train.tsv" + else: + filesplit = "dev.tsv" + + filename = os.path.join(qqp_dir, filesplit) + for example in self.example_generator(filename): + yield example + + +@registry.register_problem +class QuoraQuestionPairsCharacters(QuoraQuestionPairs): + """Quora duplicate question pairs classification problems, character level""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.EN_SIM diff --git a/tensor2tensor/data_generators/replace_oov.py b/tensor2tensor/data_generators/replace_oov.py deleted file mode 100644 index 7e2c8dc50..000000000 --- a/tensor2tensor/data_generators/replace_oov.py +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -r"""Data preprocessor for lm1b benchmark. - -Process the raw text file to replace out-of-vocab words with "UNK". - -The input consists of a tokenized text file, where tokens are separated with -whitespace. - -Outputs a similar text file where the OOV words have been replaced with UNK. -The whitespace in the output may be different. - -This maintains compatibility with the benchmark, which does the same thing. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -from six.moves import xrange # pylint: disable=redefined-builtin - -import tensorflow as tf - -tf.app.flags.DEFINE_string("vocab_file", "", - "text file containing one word per line") - -tf.app.flags.DEFINE_string("in_filepattern", "", "input filename") - -tf.app.flags.DEFINE_string( - "out_prefix", "", "The output filename is equal to out_prefix plus " - "the last 15 characters of in_file. (e.g.
-00001-of-00100)") - -FLAGS = tf.app.flags.FLAGS - - -def replace_oov(vocab, in_file): - """Replace out-of-vocab words with UNK.""" - out_file = FLAGS.out_prefix + in_file[-15:] - print ("in_file", in_file, "out_file", out_file) - with tf.gfile.Open(out_file, "w") as out: - for line in tf.gfile.Open(in_file): - words = line.split() - for i in xrange(len(words)): - if not vocab.get(words[i]): - words[i] = "UNK" - out_line = " ".join(words) + "\n" - out.write(out_line) - - -def main(_): - vocab = {} - with tf.gfile.Open(FLAGS.vocab_file) as vocab_file: - for line in vocab_file: - vocab[line.strip()] = True - - in_files = tf.gfile.Glob(FLAGS.in_filepattern) - assert in_files, "No matching input files" - for in_file in in_files: - replace_oov(vocab, in_file) - -if __name__ == "__main__": - tf.app.run() diff --git a/tensor2tensor/data_generators/rte.py b/tensor2tensor/data_generators/rte.py new file mode 100644 index 000000000..0fc5e56af --- /dev/null +++ b/tensor2tensor/data_generators/rte.py @@ -0,0 +1,117 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for the Recognizing Textual Entailment dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + +EOS = text_encoder.EOS + + +@registry.register_problem
class RTE(text_problems.TextConcat2ClassProblem): + """Recognizing Textual Entailment classification problems.""" + + # Link to data from GLUE: https://gluebenchmark.com/tasks + _RTE_URL = ("https://firebasestorage.googleapis.com/v0/b/" + "mtl-sentence-representations.appspot.com/o/" + "data%2FRTE.zip?alt=media&token=5efa7e85-a0bb-" + "4f19-8ea2-9e1840f077fb") + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 1, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2**13 # 8k vocab suffices for this small dataset. + + @property + def num_classes(self): + return 2 + + def class_labels(self, data_dir): + del data_dir + # Note this binary classification is different from usual MNLI.
+ return ["not_entailment", "entailment"] + + def _maybe_download_corpora(self, tmp_dir): + rte_filename = "RTE.zip" + rte_finalpath = os.path.join(tmp_dir, "RTE") + if not tf.gfile.Exists(rte_finalpath): + zip_filepath = generator_utils.maybe_download( + tmp_dir, rte_filename, self._RTE_URL) + zip_ref = zipfile.ZipFile(zip_filepath, "r") + zip_ref.extractall(tmp_dir) + zip_ref.close() + + return rte_finalpath + + def example_generator(self, filename): + label_list = self.class_labels(data_dir=None) + for idx, line in enumerate(tf.gfile.Open(filename, "rb")): + if idx == 0: continue # skip header + line = text_encoder.to_unicode_utf8(line.strip()) + _, s1, s2, l = line.split("\t") + inputs = [s1, s2] + l = label_list.index(l) + yield { + "inputs": inputs, + "label": l + } + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + rte_dir = self._maybe_download_corpora(tmp_dir) + if dataset_split == problem.DatasetSplit.TRAIN: + filesplit = "train.tsv" + else: + filesplit = "dev.tsv" + + filename = os.path.join(rte_dir, filesplit) + for example in self.example_generator(filename): + yield example + + +@registry.register_problem +class RTECharacters(RTE): + """Recognizing Textual Entailment classification problems, character level""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.EN_NLI diff --git a/tensor2tensor/data_generators/scitail.py b/tensor2tensor/data_generators/scitail.py new file mode 100644 index 000000000..f56a2f84c --- /dev/null +++ b/tensor2tensor/data_generators/scitail.py @@ -0,0 +1,125 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for SciTail.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import lm1b +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + +EOS = text_encoder.EOS + + +@registry.register_problem +class SciTail(text_problems.TextConcat2ClassProblem): + """SciTail classification problems.""" + + # Data from allen institute for AI. + _SCITAIL_URL = ("http://data.allenai.org.s3.amazonaws.com/" + "downloads/SciTailV1.1.zip") + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2**13 + + @property + def num_classes(self): + return 2 + + def class_labels(self, data_dir): + del data_dir + # Note this binary classification is different from usual SNLI. 
+ return ["neutral", "entails"] + + def _maybe_download_corpora(self, tmp_dir): + scitail_filename = "SciTailV1.1.zip" + scitail_finalpath = os.path.join(tmp_dir, "SciTailV1.1") + if not tf.gfile.Exists(scitail_finalpath): + zip_filepath = generator_utils.maybe_download( + tmp_dir, scitail_filename, self._SCITAIL_URL) + zip_ref = zipfile.ZipFile(zip_filepath, "r") + zip_ref.extractall(tmp_dir) + zip_ref.close() + + return scitail_finalpath + + def example_generator(self, filename): + label_list = self.class_labels(data_dir=None) + for line in tf.gfile.Open(filename, "rb"): + line = text_encoder.to_unicode_utf8(line.strip()) + split_line = line.split("\t") + s1, s2 = split_line[:2] + l = label_list.index(split_line[2]) + inputs = [s1, s2] + yield { + "inputs": inputs, + "label": l + } + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + scitail_dir = self._maybe_download_corpora(tmp_dir) + if dataset_split == problem.DatasetSplit.TRAIN: + filesplit = "tsv_format/scitail_1.0_train.tsv" + else: + filesplit = "tsv_format/scitail_1.0_dev.tsv" + + filename = os.path.join(scitail_dir, filesplit) + for example in self.example_generator(filename): + yield example + + +@registry.register_problem +class SciTailCharacters(SciTail): + """SciTail classification problems, character level""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.EN_NLI + + +@registry.register_problem +class SciTailSharedVocab(SciTail): + """SciTail classification problems with the LM1b vocabulary""" + + @property + def vocab_filename(self): + return lm1b.LanguagemodelLm1b32k().vocab_filename diff --git a/tensor2tensor/data_generators/seq2edits.py b/tensor2tensor/data_generators/seq2edits.py new file mode 100644 index 000000000..40495454a --- /dev/null +++ b/tensor2tensor/data_generators/seq2edits.py @@ -0,0 +1,266 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Problems for Seq2Edits (see models/research/transformer_seq2edits.py).""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.layers import modalities +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@modalities.is_pointwise +def pointer_top(body_output, targets, model_hparams, vocab_size): + """Like identity_top() with is_pointwise annotation.""" + del targets, model_hparams, vocab_size # unused arg + return body_output + + +def pointer_bottom(x, model_hparams, vocab_size): + """Like identity_bottom() without converting to float.""" + del model_hparams, vocab_size # unused arg + return x + + +@registry.register_problem +class Seq2editsGec(text_problems.Text2TextProblem): + """Seq2Edits for grammatical error correction.""" + + def dataset_filename(self): + return "edit_ops_gec" + + @property + def vocab_file(self): + return "vocab.subwords" + + @property + def vocab_filename(self): + return "vocab.subwords" + + @property + def error_tag_vocab_file(self): + return "vocab.error_tags" + + def feature_encoders(self, data_dir): + subword_encoder = text_encoder.SubwordTextEncoder( + os.path.join(data_dir, self.vocab_file)) + error_tag_encoder = text_encoder.TokenTextEncoder( + os.path.join(data_dir, self.error_tag_vocab_file)) + return { + "inputs": subword_encoder, + "targets": subword_encoder, + "targets_error_tag": error_tag_encoder + } + + def hparams(self, defaults, model_hparams): + super(Seq2editsGec, self).hparams(defaults, model_hparams) + + for pointer_feat in ["targets_start_token", "targets_end_token"]: + defaults.modality[pointer_feat] = modalities.ModalityType.IDENTITY + defaults.vocab_size[pointer_feat] = None + model_hparams.bottom[pointer_feat] = pointer_bottom + model_hparams.top[pointer_feat] = pointer_top + # Whether to use tags. + if "use_error_tags" not in model_hparams: + model_hparams.add_hparam("use_error_tags", True) + # If true, span and tag prediction is in the middle of the decoder layer + # stack. Otherwise, they are at the end of the decoder layer stack. + if "middle_prediction" not in model_hparams: + model_hparams.add_hparam("middle_prediction", True) + # If middle_prediction=True, divide num_decoder_layers by this to get the + # number of layers before and after the middle prediction. + if "middle_prediction_layer_factor" not in model_hparams: + model_hparams.add_hparam("middle_prediction_layer_factor", 2) + # Whether to predict the targets_start_token feature. If this is false, use + # the previous end token as implicit start token. + if "use_start_token" not in model_hparams: + model_hparams.add_hparam("use_start_token", False) + # Whether to feed back targets_end_token to the next time step. If false, + # only feed back targets_start_token. + if "feedback_end_token" not in model_hparams: + model_hparams.add_hparam("feedback_end_token", False) + # Number of feedforward layers between prediction layers in the cascade. + if "ffn_in_prediction_cascade" not in model_hparams: + model_hparams.add_hparam("ffn_in_prediction_cascade", 1) + # Embedding size for error tags. 
+ if "error_tag_embed_size" not in model_hparams: + model_hparams.add_hparam("error_tag_embed_size", 6) + if model_hparams.use_error_tags: + defaults.modality["targets_error_tag"] = modalities.ModalityType.SYMBOL + error_tag_vocab_size = self._encoders["targets_error_tag"].vocab_size + defaults.vocab_size["targets_error_tag"] = error_tag_vocab_size + model_hparams.top["targets_error_tag"] = pointer_top + + def example_reading_spec(self): + data_fields, _ = super(Seq2editsGec, self).example_reading_spec() + data_fields["targets_start_token"] = tf.VarLenFeature(tf.int64) + data_fields["targets_end_token"] = tf.VarLenFeature(tf.int64) + data_fields["targets_error_tag"] = tf.VarLenFeature(tf.int64) + return data_fields, None + + +@registry.register_problem +class Seq2editsGecPacked256(Seq2editsGec): + """Packed version for TPU.""" + + def dataset_filename(self): + return "edit_ops_gec_packed256" + + @property + def packed_length(self): + return 256 + + @property + def max_segment_length(self): + return 256 + + +@registry.register_problem +class Seq2editsGecNoTags(Seq2editsGec): + """Seq2Edits for grammatical error correction without tags.""" + + def dataset_filename(self): + return "edit_ops_gec" + + def hparams(self, defaults, model_hparams): + super(Seq2editsGecNoTags, self).hparams(defaults, model_hparams) + model_hparams.use_error_tags = False + + +@registry.register_problem +class Seq2editsGecNoTagsPacked256(Seq2editsGecPacked256): + """Packed version for TPU.""" + + def dataset_filename(self): + return "edit_ops_gec_packed256" + + def hparams(self, defaults, model_hparams): + super(Seq2editsGecNoTagsPacked256, self).hparams(defaults, model_hparams) + model_hparams.use_error_tags = False + + +@registry.register_problem +class Seq2editsGecDeep(Seq2editsGec): + """Seq2Edits for grammatical error correction with deeper decoder.""" + + def hparams(self, defaults, model_hparams): + super(Seq2editsGecDeep, self).hparams(defaults, model_hparams) + model_hparams.middle_prediction_layer_factor = 1.5 + + +@registry.register_problem +class Seq2editsGecDeepPacked256(Seq2editsGecPacked256): + """Packed version for TPU.""" + + def hparams(self, defaults, model_hparams): + super(Seq2editsGecDeepPacked256, self).hparams(defaults, model_hparams) + model_hparams.middle_prediction_layer_factor = 1.5 + + +@registry.register_problem +class Seq2editsGecDeepNoTags(Seq2editsGec): + """Deep Seq2Edits model for grammatical error correction without tags.""" + + def hparams(self, defaults, model_hparams): + super(Seq2editsGecDeepNoTags, self).hparams(defaults, model_hparams) + model_hparams.middle_prediction_layer_factor = 1.5 + model_hparams.use_error_tags = False + + +@registry.register_problem +class Seq2editsGecDeepNoTagsPacked256(Seq2editsGecPacked256): + """Packed version for TPU.""" + + def hparams(self, defaults, model_hparams): + super(Seq2editsGecDeepNoTagsPacked256, self).hparams( + defaults, model_hparams) + model_hparams.middle_prediction_layer_factor = 1.5 + model_hparams.use_error_tags = False + + +@registry.register_problem +class Seq2editsTextnorm(Seq2editsGec): + """Seq2Edits for text normalization.""" + + def dataset_filename(self): + return "edit_ops_textnorm" + + @property + def source_vocab_file(self): + return "vocab.source" + + @property + def target_vocab_file(self): + return "vocab.target" + + @property + def error_tag_vocab_file(self): + return "vocab.error_tags" + + def feature_encoders(self, data_dir): + source_encoder = text_encoder.TokenTextEncoder( + os.path.join(data_dir, 
self.source_vocab_file)) + target_encoder = text_encoder.TokenTextEncoder( + os.path.join(data_dir, self.target_vocab_file)) + error_tag_encoder = text_encoder.TokenTextEncoder( + os.path.join(data_dir, self.error_tag_vocab_file)) + return { + "inputs": source_encoder, + "targets": target_encoder, + "targets_error_tag": error_tag_encoder + } + + +@registry.register_problem +class Seq2editsTextnormPacked256(Seq2editsTextnorm): + """Packed version for TPU.""" + + def dataset_filename(self): + return "edit_ops_textnorm_packed256" + + @property + def packed_length(self): + return 256 + + @property + def max_segment_length(self): + return 256 + + +@registry.register_problem +class Seq2editsTextnormNoTags(Seq2editsTextnorm): + """Seq2Edits for text normalization without tags.""" + + def hparams(self, defaults, model_hparams): + super(Seq2editsTextnormNoTags, self).hparams(defaults, model_hparams) + model_hparams.use_error_tags = False + + +@registry.register_problem +class Seq2editsTextnormNoTagsPacked256(Seq2editsTextnormPacked256): + """Packed version for TPU.""" + + def hparams(self, defaults, model_hparams): + super(Seq2editsTextnormNoTagsPacked256, self).hparams( + defaults, model_hparams) + model_hparams.use_error_tags = False diff --git a/tensor2tensor/data_generators/snli.py b/tensor2tensor/data_generators/snli.py index 5613ece4d..cdfceb8ad 100644 --- a/tensor2tensor/data_generators/snli.py +++ b/tensor2tensor/data_generators/snli.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,13 +21,11 @@ import os import zipfile - -# Dependency imports - from tensor2tensor.data_generators import generator_utils from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import tokenizer -import tensorflow as tf +import tensorflow.compat.v1 as tf _EOS = 1 _SEP = 2 @@ -130,23 +129,25 @@ def _parse_dataset(file_path, tmp_dir, train): def _get_or_generate_vocab(tmp_dir, vocab_filename, vocab_size): + """Read or create vocabulary.""" vocab_filepath = os.path.join(tmp_dir, vocab_filename) print('Vocab file written to: ' + vocab_filepath) if tf.gfile.Exists(vocab_filepath): gs = text_encoder.SubwordTextEncoder(vocab_filepath) return gs - else: - example_file = os.path.join(tmp_dir, _EXAMPLES_FILE) - gs = text_encoder.SubwordTextEncoder() - token_counts = text_encoder.SubwordTextEncoder.get_token_counts( - example_file, corpus_max_lines=1000000) - gs = gs.build_to_target_size( - vocab_size, token_counts, vocab_filepath, min_val=1, max_val=1e3) - return gs + example_file = os.path.join(tmp_dir, _EXAMPLES_FILE) + gs = text_encoder.SubwordTextEncoder() + token_counts = tokenizer.corpus_token_counts( + example_file, corpus_max_lines=1000000) + gs = gs.build_to_target_size( + vocab_size, token_counts, min_val=1, max_val=1e3) + gs.store_to_file(vocab_filepath) + return gs def snli_token_generator(tmp_dir, train, vocab_size): + """Generate example dicts.""" _download_and_parse_dataset(tmp_dir, train) symbolizer_vocab = _get_or_generate_vocab( diff --git a/tensor2tensor/data_generators/speech_recognition.py b/tensor2tensor/data_generators/speech_recognition.py new file mode 100644 index 000000000..6b253f0d0 --- /dev/null +++ b/tensor2tensor/data_generators/speech_recognition.py @@ -0,0 +1,149 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Common classes for automatic speech recognition (ASR) datasets. + +The audio import uses sox to generate normalized waveforms, please install +it as appropriate (e.g. using apt-get or yum). +""" + +import numpy as np + +from tensor2tensor.data_generators import audio_encoder +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.layers import common_audio +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import modalities +from tensor2tensor.utils import metrics + +import tensorflow.compat.v1 as tf + + +class ByteTextEncoderWithEos(text_encoder.ByteTextEncoder): + """Encodes each byte to an id and appends the EOS token.""" + + def encode(self, s): + return super(ByteTextEncoderWithEos, self).encode(s) + [text_encoder.EOS_ID] + + +class SpeechRecognitionProblem(problem.Problem): + """Base class for speech recognition problems.""" + + def hparams(self, defaults, model_hparams): + def add_if_absent(p, attr, value): + if not hasattr(p, attr): + p.add_hparam(attr, value) + + p = model_hparams + # Filterbank extraction in bottom instead of preprocess_example is faster. + add_if_absent(p, "audio_preproc_in_bottom", False) + # The trainer seems to reserve memory for all members of the input dict + add_if_absent(p, "audio_keep_example_waveforms", False) + add_if_absent(p, "audio_sample_rate", 16000) + add_if_absent(p, "audio_preemphasis", 0.97) + add_if_absent(p, "audio_dither", 1.0 / np.iinfo(np.int16).max) + add_if_absent(p, "audio_frame_length", 25.0) + add_if_absent(p, "audio_frame_step", 10.0) + add_if_absent(p, "audio_lower_edge_hertz", 20.0) + add_if_absent(p, "audio_upper_edge_hertz", 8000.0) + add_if_absent(p, "audio_num_mel_bins", 80) + add_if_absent(p, "audio_add_delta_deltas", True) + add_if_absent(p, "num_zeropad_frames", 250) + + p = defaults + p.modality = {"inputs": modalities.ModalityType.SPEECH_RECOGNITION, + "targets": modalities.ModalityType.SYMBOL} + p.vocab_size = {"inputs": None, + "targets": 256} + + @property + def is_character_level(self): + return True + + @property + def input_space_id(self): + return problem.SpaceID.AUDIO_SPECTRAL + + @property + def target_space_id(self): + return problem.SpaceID.EN_CHR + + def feature_encoders(self, _): + return { + "inputs": None, # Put None to make sure that the logic in + # decoding.py doesn't try to convert the floats + # into text... 
+ "waveforms": audio_encoder.AudioEncoder(), + "targets": ByteTextEncoderWithEos(), + } + + def example_reading_spec(self): + data_fields = { + "waveforms": tf.VarLenFeature(tf.float32), + "targets": tf.VarLenFeature(tf.int64), + } + + data_items_to_decoders = None + + return data_fields, data_items_to_decoders + + def preprocess_example(self, example, mode, hparams): + p = hparams + if p.audio_preproc_in_bottom: + example["inputs"] = tf.expand_dims( + tf.expand_dims(example["waveforms"], -1), -1) + else: + waveforms = tf.expand_dims(example["waveforms"], 0) + mel_fbanks = common_audio.compute_mel_filterbank_features( + waveforms, + sample_rate=p.audio_sample_rate, + dither=p.audio_dither, + preemphasis=p.audio_preemphasis, + frame_length=p.audio_frame_length, + frame_step=p.audio_frame_step, + lower_edge_hertz=p.audio_lower_edge_hertz, + upper_edge_hertz=p.audio_upper_edge_hertz, + num_mel_bins=p.audio_num_mel_bins, + apply_mask=False) + if p.audio_add_delta_deltas: + mel_fbanks = common_audio.add_delta_deltas(mel_fbanks) + fbank_size = common_layers.shape_list(mel_fbanks) + assert fbank_size[0] == 1 + + # This replaces CMVN estimation on data + var_epsilon = 1e-09 + mean = tf.reduce_mean(mel_fbanks, keepdims=True, axis=1) + variance = tf.reduce_mean(tf.squared_difference(mel_fbanks, mean), + keepdims=True, axis=1) + mel_fbanks = (mel_fbanks - mean) * tf.rsqrt(variance + var_epsilon) + + # Later models like to flatten the two spatial dims. Instead, we add a + # unit spatial dim and flatten the frequencies and channels. + example["inputs"] = tf.concat([ + tf.reshape(mel_fbanks, [fbank_size[1], fbank_size[2], fbank_size[3]]), + tf.zeros((p.num_zeropad_frames, fbank_size[2], fbank_size[3]))], 0) + + if not p.audio_keep_example_waveforms: + del example["waveforms"] + return super(SpeechRecognitionProblem, self + ).preprocess_example(example, mode, hparams) + + def eval_metrics(self): + defaults = super(SpeechRecognitionProblem, self).eval_metrics() + return defaults + [ + metrics.Metrics.EDIT_DISTANCE, + metrics.Metrics.WORD_ERROR_RATE + ] diff --git a/tensor2tensor/data_generators/squad.py b/tensor2tensor/data_generators/squad.py new file mode 100644 index 000000000..2e1141c3a --- /dev/null +++ b/tensor2tensor/data_generators/squad.py @@ -0,0 +1,238 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for SquaAD (https://rajpurkar.github.io/SQuAD-explorer/). 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import os +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import wiki_lm +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +_DEV_SET = "dev-v1.1.json" +_URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset" +_TRAINING_SET = "train-v1.1.json" + + +def _generate_examples(tmp_dir, dataset_split): + """Generate squad examples. + + Args: + tmp_dir: a string + dataset_split: problem.DatasetSplit.TRAIN or problem.DatasetSplit.EVAL + Yields: + dictionaries representing examples + """ + if dataset_split == problem.DatasetSplit.TRAIN: + file_name = _TRAINING_SET + else: + file_name = _DEV_SET + squad_file = generator_utils.maybe_download(tmp_dir, + file_name, + os.path.join(_URL, file_name)) + with tf.gfile.GFile(squad_file, mode="r") as fp: + squad = json.load(fp) + + version = squad["version"] + for article in squad["data"]: + if "title" in article: + title = article["title"].strip() + else: + title = "no title" + for paragraph in article["paragraphs"]: + context = paragraph["context"].strip() + for qa in paragraph["qas"]: + question = qa["question"].strip() + id_ = qa["id"] + answer_starts = [answer["answer_start"] for answer in qa["answers"]] + answers = [answer["text"].strip() for answer in qa["answers"]] + + # Features currently used are "context", "question", and "answers". + # Others are extracted here for the ease of future expansions. + example = { + "version": version, + "title": title, + "context": context, + "question": question, + "id": id_, + "answer_starts": answer_starts, + "answers": answers, + "num_answers": len(answers), + "is_supervised": True, + } + yield example + + +@registry.register_problem +class SquadText2text(text_problems.Text2TextProblem): + """Squad as a Text2TextProblem.""" + + @property + def is_generate_per_split(self): + return True + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + for example in _generate_examples(tmp_dir, dataset_split): + yield { + "inputs": "squad context: %s question: %s" % ( + example["context"], example["question"]), + # TODO(ddohan, wgaj): Figure out a way of extracting all answers. + "targets": example["answers"][0], + } + + +@registry.register_problem +class SquadText2textMulti64kPacked1k(SquadText2text): + """Squad with multi-lingual vocabulary.""" + + @property + def packed_length(self): + return 1024 + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelDeEnFrRoWiki64k() + + @property + def num_training_examples(self): + return 16300 + + +@registry.register_problem +class Squad(text_problems.QuestionAndContext2TextProblem): + """Base class for SquAD question answering problem.""" + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def is_generate_per_split(self): + return True + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + for example in _generate_examples(tmp_dir, dataset_split): + yield { + "inputs": example["question"], + # TODO(ddohan, wgaj): Figure out a way of extracting all answers. 
+ "targets": example["answers"][0], + "context": example["context"] + } + + +@registry.register_problem +class SquadConcat(Squad): + """Squad with question and context concatenated together in inputs.""" + + def dataset_filename(self): + return "squad" + + def preprocess_example(self, example, unused_mode, unused_model_hparams): + sep = tf.convert_to_tensor([self.QUESTION_SEPARATOR_ID], + dtype=example["inputs"].dtype) + example["inputs"] = tf.concat( + [example["inputs"], sep, example["context"]], 0) + return example + + def hparams(self, defaults, unused_model_hparams): + (super(SquadConcat, self) + .hparams(defaults, unused_model_hparams)) + p = defaults + del p.modality["context"] + del p.vocab_size["context"] + + +@registry.register_problem +class SquadConcatMulti64k(SquadConcat): + """Squad with question and context concatenated, multi-lingual vocabulary.""" + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 100, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + def preprocess_example(self, example, unused_mode, unused_model_hparams): + sep = tf.convert_to_tensor([self.QUESTION_SEPARATOR_ID], + dtype=example["inputs"].dtype) + example["inputs"] = tf.concat( + [example["inputs"], sep, example["context"]], 0) + example.pop("context") + return example + + def dataset_filename(self): + return "squad_multi64k" + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelDeEnFrRoWiki64k() + + +@registry.register_problem +class SquadConcatSharedVocab(SquadConcatMulti64k): + """Squad with question and context concatenated, multi-lingual vocabulary.""" + + def dataset_filename(self): + return "squad" + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelEnWiki32k() + + +@registry.register_problem +class SquadConcatPositioned(SquadConcat): + """SquadConcat with targets in format of answer position + answer length.""" + + def generate_targets(self, targets, context): + targets = targets[:-1] # skip last terminal symbol. + targets_new = [] + i = 0 + while i < len(context) - len(targets): + if context[i: i + len(targets)] == targets: + # emit answer's position and length. + targets_new.append(i) + targets_new.append(len(targets)) + i += 1 + return targets_new + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + samples = (super(SquadConcatPositioned, self) + .generate_encoded_samples(data_dir, tmp_dir, dataset_split)) + for sample in samples: + sample["targets"] = self.generate_targets(sample["targets"], + sample["context"]) + if sample["targets"]: + yield sample diff --git a/tensor2tensor/data_generators/sst_binary.py b/tensor2tensor/data_generators/sst_binary.py new file mode 100644 index 000000000..c45e2d046 --- /dev/null +++ b/tensor2tensor/data_generators/sst_binary.py @@ -0,0 +1,114 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Stanford Sentiment Treebank Binary Classification Problem.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + +EOS = text_encoder.EOS + + +@registry.register_problem +class SentimentSSTBinary(text_problems.Text2ClassProblem): + """Stanford Sentiment Treebank binary classification problems.""" + + # Link to data from GLUE: https://gluebenchmark.com/tasks + _SST2_URL = ("https://firebasestorage.googleapis.com/v0/b/" + "mtl-sentence-representations.appspot.com/o/" + "data%2FSST-2.zip?alt=media&token=aabc5f6b-e466-" + "44a2-b9b4-cf6337f84ac8") + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2**14 + + @property + def num_classes(self): + return 2 + + def class_labels(self, data_dir): + del data_dir + # Note this binary classification is different from usual MNLI. + return ["neg", "pos"] + + def _maybe_download_corpora(self, tmp_dir): + sst_binary_filename = "SST-2.zip" + sst_binary_finalpath = os.path.join(tmp_dir, "SST-2") + if not tf.gfile.Exists(sst_binary_finalpath): + zip_filepath = generator_utils.maybe_download( + tmp_dir, sst_binary_filename, self._SST2_URL) + zip_ref = zipfile.ZipFile(zip_filepath, "r") + zip_ref.extractall(tmp_dir) + zip_ref.close() + + return sst_binary_finalpath + + def example_generator(self, filename): + for idx, line in enumerate(tf.gfile.Open(filename, "rb")): + if idx == 0: continue # skip header + line = text_encoder.to_unicode_utf8(line.strip()) + sent, label = line.split("\t") + yield { + "inputs": sent, + "label": int(label) + } + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + sst_binary_dir = self._maybe_download_corpora(tmp_dir) + if dataset_split == problem.DatasetSplit.TRAIN: + filesplit = "train.tsv" + else: + filesplit = "dev.tsv" + + filename = os.path.join(sst_binary_dir, filesplit) + for example in self.example_generator(filename): + yield example + + +@registry.register_problem +class SentimentSSTBinaryCharacters(SentimentSSTBinary): + """Binary Stanford Sentiment Treebank problems, character level""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.EN_CHR_SENT diff --git a/tensor2tensor/data_generators/stanford_nli.py b/tensor2tensor/data_generators/stanford_nli.py new file mode 100644 index 000000000..f6be6b6e6 --- /dev/null +++ b/tensor2tensor/data_generators/stanford_nli.py @@ -0,0 +1,147 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for StanfordNLI.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import lm1b +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import wiki_lm +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + +EOS = text_encoder.EOS + + +@registry.register_problem +class StanfordNLI(text_problems.TextConcat2ClassProblem): + """StanfordNLI classification problems.""" + + # Link to data from GLUE: https://gluebenchmark.com/tasks + _SNLI_URL = ("https://nlp.stanford.edu/projects/snli/snli_1.0.zip") + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 100, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2**15 + + @property + def num_classes(self): + return 3 + + def class_labels(self, data_dir): + del data_dir + # Note this binary classification is different from usual SNLI. + return ["contradiction", "entailment", "neutral"] + + def _maybe_download_corpora(self, tmp_dir): + snli_filename = "SNLI.zip" + snli_finalpath = os.path.join(tmp_dir, "snli_1.0") + if not tf.gfile.Exists(snli_finalpath): + zip_filepath = generator_utils.maybe_download( + tmp_dir, snli_filename, self._SNLI_URL) + zip_ref = zipfile.ZipFile(zip_filepath, "r") + zip_ref.extractall(tmp_dir) + zip_ref.close() + + return snli_finalpath + + def example_generator(self, filename): + label_list = self.class_labels(data_dir=None) + for idx, line in enumerate(tf.gfile.Open(filename, "rb")): + if idx == 0: continue # skip header + line = text_encoder.to_unicode_utf8(line.strip()) + split_line = line.split("\t") + # Works for both splits even though dev has some extra human labels. 
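+      # In the SNLI TSV files, column 0 is the gold label ("contradiction",
+      # "entailment", "neutral", or "-" when the annotators did not agree)
+      # and columns 5 and 6 are the raw premise and hypothesis sentences,
+      # hence the slices used below.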
+      s1, s2 = split_line[5:7]
+      if split_line[0] == "-":
+        continue
+      l = label_list.index(split_line[0])
+      inputs = [s1, s2]
+      yield {
+          "inputs": inputs,
+          "label": l
+      }
+
+  def generate_samples(self, data_dir, tmp_dir, dataset_split):
+    snli_dir = self._maybe_download_corpora(tmp_dir)
+    if dataset_split == problem.DatasetSplit.TRAIN:
+      filesplit = "snli_1.0_train.txt"
+    else:
+      filesplit = "snli_1.0_dev.txt"
+
+    filename = os.path.join(snli_dir, filesplit)
+    for example in self.example_generator(filename):
+      yield example
+
+
+@registry.register_problem
+class StanfordNLICharacters(StanfordNLI):
+  """StanfordNLI classification problems, character level."""
+
+  @property
+  def vocab_type(self):
+    return text_problems.VocabType.CHARACTER
+
+  def global_task_id(self):
+    return problem.TaskID.THREE_CL_NLI
+
+
+@registry.register_problem
+class StanfordNLISharedVocab(StanfordNLI):
+  """StanfordNLI classification problems with the LM1b vocabulary."""
+
+  @property
+  def vocab_filename(self):
+    return lm1b.LanguagemodelLm1b32k().vocab_filename
+
+
+@registry.register_problem
+class StanfordNLIWikiLMSharedVocab(StanfordNLI):
+  """StanfordNLI classification problems with the 32k Wiki vocabulary."""
+
+  @property
+  def vocab_filename(self):
+    return wiki_lm.LanguagemodelEnWiki32k().vocab_filename
+
+
+@registry.register_problem
+class StanfordNLIWikiLMSharedVocab64k(StanfordNLIWikiLMSharedVocab):
+  """StanfordNLI classification problems with the 64k Wiki vocabulary."""
+
+  @property
+  def vocab_filename(self):
+    return wiki_lm.LanguagemodelEnWiki64k().vocab_filename
diff --git a/tensor2tensor/data_generators/style_transfer.py b/tensor2tensor/data_generators/style_transfer.py
new file mode 100644
index 000000000..5067d8dfa
--- /dev/null
+++ b/tensor2tensor/data_generators/style_transfer.py
@@ -0,0 +1,166 @@
+# coding=utf-8
+# Copyright 2023 The Tensor2Tensor Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Base classes for text-based language style transfer problems.
+
+* StyleTransferProblemShakespeare: abstract base class for these problems.
+* StyleTransferShakespeareToModern / StyleTransferModernToShakespeare:
+  concrete problems mapping between Shakespeare and modern English.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import tarfile
+
+from tensor2tensor.data_generators import generator_utils
+from tensor2tensor.data_generators import problem
+from tensor2tensor.data_generators import text_problems
+from tensor2tensor.utils import registry
+
+
+# The Modern-Shakespeare corpus consists of:
+# - 18,395 parallel sentences for training (train set),
+# - 1,218 parallel sentences for evaluation (dev set),
+# - 1,462 parallel sentences for testing (test set).
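The problem classes that follow pair line-aligned source and target files extracted from these tarballs. As a rough, hedged sketch of how those pairs become training samples (the directory path is a placeholder and the loop simply mirrors the generate_samples implementation further down):

```python
# Illustrative sketch: pair the extracted corpus files and iterate over the
# parallel sentences, mirroring source_target_paths()/generate_samples()
# defined below. The directory is a placeholder.
import os

from tensor2tensor.data_generators import text_problems

tmp_dir = "/tmp/t2t_datagen"
# For StyleTransferShakespeareToModern: source=".original", target=".modern".
source_file = os.path.join(tmp_dir, "train" + ".original")
target_file = os.path.join(tmp_dir, "train" + ".modern")

# Yields {"inputs": <source line>, "targets": <target line>} per aligned pair.
for sample in text_problems.text2text_txt_iterator(source_file, target_file):
    print(sample["inputs"], "->", sample["targets"])
```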
+ + +_SHAKESPEARE_MODERN_TRAIN_DATASET = [[ + "https://github.com/tlatkowski/st/raw/master/shakespeare.train.tgz", + ("train.original", "train.modern") +]] + +_SHAKESPEARE_MODERN_DEV_DATASET = [[ + "https://github.com/tlatkowski/st/raw/master/shakespeare.dev.tgz", + ("dev.original", "dev.modern") +]] + +_TRAIN_SHARDS = 1 +_DEV_SHARDS = 1 +_SUBWORD_VOCAB_SIZE = 8000 + + +class StyleTransferProblemShakespeare(text_problems.Text2TextProblem): + """Base class for transferring styles problems""" + + @property + def target(self): + raise NotImplementedError() + + @property + def source(self): + raise NotImplementedError() + + def dataset_url(http://github.com/self,%20dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + if train: + return _SHAKESPEARE_MODERN_TRAIN_DATASET + return _SHAKESPEARE_MODERN_DEV_DATASET + + def vocab_data_files(self): + """Files to be passed to get_or_generate_vocab.""" + return self.dataset_url(http://github.com/problem.DatasetSplit.TRAIN) + + @property + def approx_vocab_size(self): + return _SUBWORD_VOCAB_SIZE + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": _TRAIN_SHARDS, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": _DEV_SHARDS, + }] + + @property + def is_generate_per_split(self): + return True + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + dataset = self.dataset_url(http://github.com/dataset_split) + + url = dataset[0][0] + compressed_filename = os.path.basename(url) + compressed_filepath = os.path.join(tmp_dir, compressed_filename) + generator_utils.maybe_download(tmp_dir, compressed_filename, url) + + mode = "r:gz" if compressed_filepath.endswith("gz") else "r" + with tarfile.open(compressed_filepath, mode) as corpus_tar: + corpus_tar.extractall(tmp_dir) + + if self.vocab_type == text_problems.VocabType.SUBWORD: + generator_utils.get_or_generate_vocab( + data_dir, tmp_dir, self.vocab_filename, self.approx_vocab_size, + self.vocab_data_files()) + + source_file, target_file = self.source_target_paths(dataset_split, tmp_dir) + return text_problems.text2text_txt_iterator(source_file, + target_file) + + def source_target_paths(self, dataset_split, tmp_dir): + tag = "train" if dataset_split == problem.DatasetSplit.TRAIN else "dev" + source_path = os.path.join(tmp_dir, tag + self.source) + target_path = os.path.join(tmp_dir, tag + self.target) + return source_path, target_path + + +@registry.register_problem +class StyleTransferShakespeareToModern(StyleTransferProblemShakespeare): + """Transferring style from Shakespeare original English to modern one""" + + @property + def target(self): + return ".modern" + + @property + def source(self): + return ".original" + + +@registry.register_problem +class StyleTransferModernToShakespeare(StyleTransferProblemShakespeare): + """Transferring style from modern English to Shakespeare original English""" + + @property + def target(self): + return ".original" + + @property + def source(self): + return ".modern" + + +@registry.register_problem +class StyleTransferShakespeareToModernCharacters( + StyleTransferShakespeareToModern): + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + +@registry.register_problem +class StyleTransferModernToShakespeareCharacters( + StyleTransferModernToShakespeare): + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER diff 
--git a/tensor2tensor/data_generators/style_transfer_test.py b/tensor2tensor/data_generators/style_transfer_test.py new file mode 100644 index 000000000..0397fd942 --- /dev/null +++ b/tensor2tensor/data_generators/style_transfer_test.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.data_generators.style_transfer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import style_transfer +import tensorflow.compat.v1 as tf + + +class StyleTransferProblemShakespeareTest(tf.test.TestCase): + + def testSourceAndTargetPathsTrainModern2Shakespeare(self): + tmp_dir = "tmp_dir" + modern_to_shakespeare_data_gen = ( + style_transfer.StyleTransferModernToShakespeare()) + actual_source, actual_target = ( + modern_to_shakespeare_data_gen.source_target_paths( + problem.DatasetSplit.TRAIN, tmp_dir)) + + expected_source = "{}/train.modern".format(tmp_dir) + expected_target = "{}/train.original".format(tmp_dir) + + self.assertEqual(actual_source, expected_source) + self.assertEqual(actual_target, expected_target) + + def testSourceAndTargetPathsTrainShakespeare2Modern(self): + tmp_dir = "tmp_dir" + shakespeare_to_modern_data_gen = ( + style_transfer.StyleTransferShakespeareToModern()) + actual_source, actual_target = ( + shakespeare_to_modern_data_gen.source_target_paths( + problem.DatasetSplit.TRAIN, tmp_dir)) + + expected_source = "{}/train.original".format(tmp_dir) + expected_target = "{}/train.modern".format(tmp_dir) + + self.assertEqual(actual_source, expected_source) + self.assertEqual(actual_target, expected_target) + + def testSourceAndTargetPathsDevModern2Shakespeare(self): + tmp_dir = "tmp_dir" + modern_to_shakespeare_data_gen = ( + style_transfer.StyleTransferModernToShakespeare()) + actual_source, actual_target = ( + modern_to_shakespeare_data_gen.source_target_paths( + problem.DatasetSplit.EVAL, tmp_dir)) + + expected_source = "{}/dev.modern".format(tmp_dir) + expected_target = "{}/dev.original".format(tmp_dir) + + self.assertEqual(actual_source, expected_source) + self.assertEqual(actual_target, expected_target) + + def testSourceAndTargetPathsDevShakespeare2Modern(self): + tmp_dir = "tmp_dir" + shakespeare_to_modern_data_gen = ( + style_transfer.StyleTransferShakespeareToModern()) + actual_source, actual_target = ( + shakespeare_to_modern_data_gen.source_target_paths( + problem.DatasetSplit.EVAL, tmp_dir)) + + expected_source = "{}/dev.original".format(tmp_dir) + expected_target = "{}/dev.modern".format(tmp_dir) + + self.assertEqual(actual_source, expected_source) + self.assertEqual(actual_target, expected_target) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/subject_verb_agreement.py b/tensor2tensor/data_generators/subject_verb_agreement.py new file mode 100644 index 000000000..aa9bf012d --- 
/dev/null
+++ b/tensor2tensor/data_generators/subject_verb_agreement.py
@@ -0,0 +1,296 @@
+# coding=utf-8
+# Copyright 2023 The Tensor2Tensor Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Data generators for the subject-verb agreement dataset.
+
+https://arxiv.org/pdf/1611.01368.pdf
+
+Based on the main paper, predicting the verb's number can be done in two setups:
+- Language Modeling
+- Binary Classification
+
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import collections
+import csv
+import gzip
+import os
+import random
+from tensor2tensor.data_generators import generator_utils
+from tensor2tensor.data_generators import problem
+from tensor2tensor.data_generators import text_encoder
+from tensor2tensor.data_generators import text_problems
+from tensor2tensor.utils import metrics
+from tensor2tensor.utils import registry
+
+import tensorflow.compat.v1 as tf
+
+_FILE_NAME = 'agr_50_mostcommon_10K'
+_TAR = _FILE_NAME + '.tsv.gz'
+_URL = 'http://tallinzen.net/media/rnn_agreement/' + _TAR
+_LABEL_DICT = {'VBZ': 0, 'VBP': 1}
+
+
+def _build_vocab(examples, example_field, vocab_dir, vocab_name):
+  """Build a vocabulary from examples.
+
+  Args:
+    examples: a list of example dicts.
+    example_field: field of the examples from which the vocabulary is built.
+    vocab_dir: directory where to save the vocabulary.
+    vocab_name: vocab file name.
+
+  Returns:
+    text encoder.
+  """
+  vocab_path = os.path.join(vocab_dir, vocab_name)
+  if not tf.gfile.Exists(vocab_path):
+    data = []
+    for e in examples:
+      data.extend(e[example_field].split())
+    counter = collections.Counter(data)
+    count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
+    words, _ = list(zip(*count_pairs))
+    encoder = text_encoder.TokenTextEncoder(None, vocab_list=words)
+    encoder.store_to_file(vocab_path)
+  else:
+    encoder = text_encoder.TokenTextEncoder(vocab_path)
+  return encoder
+
+
+def load_examples(tmp_dir, prop_train=0.09, prop_val=0.01):
+  """Loads examples from the TSV file.
+
+  Args:
+    tmp_dir: temp directory.
+    prop_train: proportion of the data used for training.
+    prop_val: proportion of the data used for validation.
+
+  Returns:
+    All examples in the dataset plus the train, validation, and test splits.
+
+  """
+
+  infile = generator_utils.maybe_download(tmp_dir, _TAR, _URL)
+  tf.logging.info('Loading examples')
+
+  all_examples = []
+  for i, d in enumerate(csv.DictReader(gzip.open(infile), delimiter='\t')):
+    if i % 100000 == 0:
+      tf.logging.info('%d examples have been loaded....'
% i) + ex = {x: int(y) if y.isdigit() else y for x, y in d.items()} + all_examples.append(ex) + + random.seed(1) + random.shuffle(all_examples) + n_train = int(len(all_examples) * prop_train) + n_val = n_train + int(len(all_examples) * prop_val) + train = all_examples[:n_train] + val = all_examples[n_train:n_val] + test = [] + for e in all_examples[n_val:]: + if e['n_intervening'] == e['n_diff_intervening']: + test.append(e) + + return all_examples, train, val, test + + +@registry.register_problem +class SvaNumberPrediction(text_problems.Text2ClassProblem): + """Subject verb agreement as verb number predicion (binary classification).""" + + @property + def is_generate_per_split(self): + # generate_data will shard the data into TRAIN and EVAL for us. + return True + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each. + + This is the setup of the main paper. 10% train/ 90% eval + + Returns: + A dict containing splits information. + + """ + return [{ + 'split': problem.DatasetSplit.TRAIN, + 'shards': 1, + }, { + 'split': problem.DatasetSplit.EVAL, + 'shards': 1, + }, { + 'split': problem.DatasetSplit.TEST, + 'shards': 10, + }] + + @property + def train_proportion(self): + # generate_data will shard the data into TRAIN and EVAL for us. + return 0.09 + + @property + def validation_proportion(self): + # generate_data will shard the data into TRAIN and EVAL for us. + return 0.01 + + @property + def vocab_type(self): + return text_problems.VocabType.TOKEN + + @property + def num_classes(self): + return 2 + + def class_labels(self, data_dir): + """Class labels.""" + del data_dir + return ['VBZ', 'VBP'] + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generate samples of text and label pairs. + + Each yielded dict will be a single example. The inputs should be raw text. + The label should be an int in [0, self.num_classes). + + Args: + data_dir: final data directory. Typically only used in this method to copy + over user-supplied vocab files (for example, if vocab_type == + VocabType.TOKEN). + tmp_dir: temporary directory that you can use for downloading and scratch. + dataset_split: problem.DatasetSplit, which data split to generate samples + for (for example, training and evaluation). + + Returns: + sample generator. + """ + example_filed = 'sentence' + examples_for_vocab, train, val, test = load_examples( + tmp_dir, self.train_proportion, self.validation_proportion) + _build_vocab( + examples_for_vocab, example_filed, data_dir, self.vocab_filename) + if dataset_split == problem.DatasetSplit.TRAIN: + examples = train + + elif dataset_split == problem.DatasetSplit.EVAL: + examples = val + + elif dataset_split == problem.DatasetSplit.TEST: + examples = test + + def _generate_samples(): + for example in examples: + index = int(example['verb_index']) - 1 + inputs = example[example_filed].split()[:index] + yield { + 'inputs': ' '.join(inputs), + 'label': _LABEL_DICT[example['verb_pos']] + } + + return _generate_samples() + + def eval_metrics(self): + """Specify the set of evaluation metrics for this problem. + + Returns: + List of evaluation metrics of interest. + """ + # TODO(dehghani): Implement accuracy of the target word as a t2t metric. 
+ return [metrics.Metrics.ACC] + + +@registry.register_problem +class SvaLanguageModeling(text_problems.Text2SelfProblem): + """Subject verb agreement as language modeling task.""" + + @property + def is_generate_per_split(self): + # generate_data will shard the data into TRAIN and EVAL for us. + return True + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each. + + This is the setup of the main paper. 10% train/ 90% eval + + Returns: + A dict containing splits information. + + """ + return [{ + 'split': problem.DatasetSplit.TRAIN, + 'shards': 1, + }, { + 'split': problem.DatasetSplit.EVAL, + 'shards': 1, + }, { + 'split': problem.DatasetSplit.TEST, + 'shards': 10, + }] + + @property + def train_proportion(self): + # generate_data will shard the data into TRAIN and EVAL for us. + return 0.09 + + @property + def validation_proportion(self): + # generate_data will shard the data into TRAIN and EVAL for us. + return 0.01 + + @property + def vocab_type(self): + return text_problems.VocabType.TOKEN + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generates samples. + + Args: + data_dir: data directory + tmp_dir: temp directory + dataset_split: dataset split + + Returns: + sample generator. + + """ + + example_filed = 'sentence' + examples_for_vocab, train, val, test = load_examples( + tmp_dir, self.train_proportion, self.validation_proportion) + _build_vocab( + examples_for_vocab, example_filed, data_dir, self.vocab_filename) + if dataset_split == problem.DatasetSplit.TRAIN: + examples = train + + elif dataset_split == problem.DatasetSplit.EVAL: + examples = val + + elif dataset_split == problem.DatasetSplit.TEST: + examples = test + + def _generate_samples(): + for example in examples: + index = int(example['verb_index']) - 1 + targets = example[example_filed].split()[:index + 1] + yield {'targets': ' '.join(targets)} + + return _generate_samples() diff --git a/tensor2tensor/data_generators/test_data/1.csv b/tensor2tensor/data_generators/test_data/1.csv new file mode 100644 index 000000000..fcb33cc3c --- /dev/null +++ b/tensor2tensor/data_generators/test_data/1.csv @@ -0,0 +1,2 @@ +media_name,label +my_media,my_label diff --git a/tensor2tensor/data_generators/test_data/corpus-1.txt b/tensor2tensor/data_generators/test_data/corpus-1.txt new file mode 100644 index 000000000..c05e47f90 --- /dev/null +++ b/tensor2tensor/data_generators/test_data/corpus-1.txt @@ -0,0 +1,4 @@ +One morning I shot an elephant in my pajamas. How he got in my pajamas, I don't +know. + +Groucho Marx diff --git a/tensor2tensor/data_generators/test_data/corpus-2.txt b/tensor2tensor/data_generators/test_data/corpus-2.txt new file mode 100644 index 000000000..f45577c4b --- /dev/null +++ b/tensor2tensor/data_generators/test_data/corpus-2.txt @@ -0,0 +1,3 @@ +I haven't slept for 10 days... because that would be too long. 
+ +Mitch Hedberg diff --git a/tensor2tensor/data_generators/test_data/vocab-1.txt b/tensor2tensor/data_generators/test_data/vocab-1.txt new file mode 100644 index 000000000..d34d3d957 --- /dev/null +++ b/tensor2tensor/data_generators/test_data/vocab-1.txt @@ -0,0 +1,2 @@ +lollipop,8 +reverberated,12 diff --git a/tensor2tensor/data_generators/test_data/vocab-2.txt b/tensor2tensor/data_generators/test_data/vocab-2.txt new file mode 100644 index 000000000..1ad6d20b9 --- /dev/null +++ b/tensor2tensor/data_generators/test_data/vocab-2.txt @@ -0,0 +1,4 @@ +kattywampus,11 +kaput +balderdash,10 +jiggery-pokery,14 diff --git a/tensor2tensor/data_generators/text_encoder.py b/tensor2tensor/data_generators/text_encoder.py index 78bc05661..636f5f1f2 100644 --- a/tensor2tensor/data_generators/text_encoder.py +++ b/tensor2tensor/data_generators/text_encoder.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -23,26 +24,96 @@ from __future__ import division from __future__ import print_function -# Dependency imports - +import collections +from itertools import chain +import math +import re +import tempfile +import time +import numpy as np import six -from six.moves import xrange # pylint: disable=redefined-builtin +from six.moves import range # pylint: disable=redefined-builtin from tensor2tensor.data_generators import tokenizer -import tensorflow as tf +import tensorflow.compat.v1 as tf # Reserved tokens for things like padding and EOS symbols. -PAD = '' -EOS = '' +PAD = "" +EOS = "" RESERVED_TOKENS = [PAD, EOS] +NUM_RESERVED_TOKENS = len(RESERVED_TOKENS) +PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0 +EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1 + +if six.PY2: + RESERVED_TOKENS_BYTES = RESERVED_TOKENS +else: + RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")] + +# Regular expression for unescaping token strings. 
+# '\u' is converted to '_' +# '\\' is converted to '\' +# '\213;' is converted to unichr(213) +_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);") +_ESCAPE_CHARS = set(u"\\_u;0123456789") + + +# Unicode utility functions that work with Python 2 and 3 +def native_to_unicode(s): + if is_unicode(s): + return s + try: + return to_unicode(s) + except UnicodeDecodeError: + res = to_unicode(s, ignore_errors=True) + tf.logging.info("Ignoring Unicode error, outputting: %s" % res) + return res + + +def unicode_to_native(s): + if six.PY2: + return s.encode("utf-8") if is_unicode(s) else s + else: + return s + + +def is_unicode(s): + return isinstance(s, six.text_type) + + +def to_unicode(s, ignore_errors=False): + if is_unicode(s): + return s + error_mode = "ignore" if ignore_errors else "strict" + return s.decode("utf-8", errors=error_mode) + + +def to_unicode_ignore_errors(s): + return to_unicode(s, ignore_errors=True) + + +def to_unicode_utf8(s): + return unicode(s, "utf-8") if six.PY2 else s.decode("utf-8") + + +def strip_ids(ids, ids_to_strip): + """Strip ids_to_strip from the end ids.""" + ids = list(ids) + while ids and ids[-1] in ids_to_strip: + ids.pop() + return ids class TextEncoder(object): """Base class for converting from ints to/from human readable strings.""" - def __init__(self, num_reserved_ids=2): + def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS): self._num_reserved_ids = num_reserved_ids + @property + def num_reserved_ids(self): + return self._num_reserved_ids + def encode(self, s): """Transform a human-readable string into a sequence of int ids. @@ -59,24 +130,43 @@ def encode(self, s): """ return [int(w) + self._num_reserved_ids for w in s.split()] - def decode(self, ids): + def decode(self, ids, strip_extraneous=False): """Transform a sequence of int ids into a human-readable string. EOS is not expected in ids. Args: ids: list of integers to be converted. + strip_extraneous: bool, whether to strip off extraneous tokens + (EOS and PAD). Returns: s: human-readable string. """ + if strip_extraneous: + ids = strip_ids(ids, list(range(self._num_reserved_ids or 0))) + return " ".join(self.decode_list(ids)) + + def decode_list(self, ids): + """Transform a sequence of int ids into a their string versions. + + This method supports transforming individual input/output ids to their + string versions so that sequence to/from text conversions can be visualized + in a human readable format. + + Args: + ids: list of integers to be converted. + + Returns: + strs: list of human-readable string. + """ decoded_ids = [] for id_ in ids: if 0 <= id_ < self._num_reserved_ids: decoded_ids.append(RESERVED_TOKENS[int(id_)]) else: - decoded_ids.append(id_) - return '%s' % decoded_ids + decoded_ids.append(id_ - self._num_reserved_ids) + return [str(d) for d in decoded_ids] @property def vocab_size(self): @@ -87,117 +177,373 @@ class ByteTextEncoder(TextEncoder): """Encodes each byte to an id. 
For 8-bit strings only.""" def encode(self, s): - return [ord(c) + self._num_reserved_ids for c in s] - - def decode(self, ids): + numres = self._num_reserved_ids + if six.PY2: + if isinstance(s, unicode): + s = s.encode("utf-8") + return [ord(c) + numres for c in s] + # Python3: explicitly convert to UTF-8 + return [c + numres for c in s.encode("utf-8")] + + def decode(self, ids, strip_extraneous=False): + if strip_extraneous: + ids = strip_ids(ids, list(range(self._num_reserved_ids or 0))) + numres = self._num_reserved_ids decoded_ids = [] + int2byte = six.int2byte for id_ in ids: - if 0 <= id_ < self._num_reserved_ids: - decoded_ids.append(RESERVED_TOKENS[int(id_)]) + if 0 <= id_ < numres: + decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)]) else: - decoded_ids.append(chr(id_)) - - return ''.join(decoded_ids) + decoded_ids.append(int2byte(id_ - numres)) + if six.PY2: + return "".join(decoded_ids) + # Python3: join byte arrays and then decode string + return b"".join(decoded_ids).decode("utf-8", "replace") + + def decode_list(self, ids): + numres = self._num_reserved_ids + decoded_ids = [] + int2byte = six.int2byte + for id_ in ids: + if 0 <= id_ < numres: + decoded_ids.append(RESERVED_TOKENS_BYTES[int(id_)]) + else: + decoded_ids.append(int2byte(id_ - numres)) + # Python3: join byte arrays and then decode string + return decoded_ids @property def vocab_size(self): return 2**8 + self._num_reserved_ids +class ClassLabelEncoder(TextEncoder): + """Encoder for class labels.""" + + def __init__(self, class_labels=None, class_labels_fname=None): + super(ClassLabelEncoder, self).__init__(num_reserved_ids=0) + + if class_labels_fname: + with tf.gfile.Open(class_labels_fname) as f: + class_labels = [label.strip() for label in f.readlines()] + + assert class_labels + self._class_labels = class_labels + + def encode(self, s): + label_str = s + return self._class_labels.index(label_str) + + def decode(self, ids, strip_extraneous=False): + del strip_extraneous + label_id = ids + if isinstance(label_id, list): + assert len(label_id) == 1 + label_id, = label_id + if isinstance(label_id, np.ndarray): + label_id = np.squeeze(label_id) + return self._class_labels[label_id] + + def decode_list(self, ids): + return [self._class_labels[i] for i in ids] + + @property + def vocab_size(self): + return len(self._class_labels) + + +class OneHotClassLabelEncoder(ClassLabelEncoder): + """One-hot encoder for class labels.""" + + def encode(self, label_str, on_value=1, off_value=0): # pylint: disable=arguments-differ + e = np.full(self.vocab_size, off_value, dtype=np.int32) + e[self._class_labels.index(label_str)] = on_value + return e.tolist() + + def decode(self, ids, strip_extraneous=False): + del strip_extraneous + label_id = ids + if isinstance(label_id, np.ndarray): + label_id = np.squeeze(label_id).astype(np.int8).tolist() + assert isinstance(label_id, list) + assert len(label_id) == self.vocab_size + return self._class_labels[label_id.index(1)] + + @property + def vocab_size(self): + return len(self._class_labels) + + class TokenTextEncoder(TextEncoder): - """Encoder based on a user-supplied vocabulary.""" + """Encoder based on a user-supplied vocabulary (file or list).""" - def __init__(self, vocab_filename, reverse=False, num_reserved_ids=2): - """Initialize from a file, one token per line.""" - super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids) + def __init__(self, + vocab_filename, + reverse=False, + vocab_list=None, + replace_oov=None, + num_reserved_ids=NUM_RESERVED_TOKENS): + 
"""Initialize from a file or list, one token per line. + + Handling of reserved tokens works as follows: + - When initializing from a list, we add reserved tokens to the vocab. + - When initializing from a file, we do not add reserved tokens to the vocab. + - When saving vocab files, we save reserved tokens to the file. + Args: + vocab_filename: If not None, the full filename to read vocab from. If this + is not None, then vocab_list should be None. + reverse: Boolean indicating if tokens should be reversed during encoding + and decoding. + vocab_list: If not None, a list of elements of the vocabulary. If this is + not None, then vocab_filename should be None. + replace_oov: If not None, every out-of-vocabulary token seen when + encoding will be replaced by this string (which must be in vocab). + num_reserved_ids: Number of IDs to save for reserved tokens like . + """ + super(TokenTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids) self._reverse = reverse - if vocab_filename is not None: - self._load_vocab_from_file(vocab_filename) + self._replace_oov = replace_oov + if vocab_filename: + self._init_vocab_from_file(vocab_filename) + else: + assert vocab_list is not None + self._init_vocab_from_list(vocab_list) - def encode(self, sentence): + def encode(self, s): """Converts a space-separated string of tokens to a list of ids.""" - ret = [self._token_to_id[tok] for tok in sentence.strip().split()] - if self._reverse: - ret = ret[::-1] - return ret + sentence = s + tokens = sentence.strip().split() + if self._replace_oov is not None: + tokens = [t if t in self._token_to_id else self._replace_oov + for t in tokens] + ret = [self._token_to_id[tok] for tok in tokens] + return ret[::-1] if self._reverse else ret + + def decode(self, ids, strip_extraneous=False): + return " ".join(self.decode_list(ids)) - def decode(self, ids): - if self._reverse: - ids = ids[::-1] - return ' '.join([self._safe_id_to_token(i) for i in ids]) + def decode_list(self, ids): + seq = reversed(ids) if self._reverse else ids + return [self._safe_id_to_token(i) for i in seq] @property def vocab_size(self): return len(self._id_to_token) def _safe_id_to_token(self, idx): - return self._id_to_token.get(idx, 'ID_%d' % idx) + return self._id_to_token.get(idx, "ID_%d" % idx) + + def _init_vocab_from_file(self, filename): + """Load vocab from a file. + + Args: + filename: The file to load vocabulary from. + """ + with tf.gfile.Open(filename) as f: + tokens = [token.strip() for token in f.readlines()] + + def token_gen(): + for token in tokens: + yield token + + self._init_vocab(token_gen(), add_reserved_tokens=False) + + def _init_vocab_from_list(self, vocab_list): + """Initialize tokens from a list of tokens. + + It is ok if reserved tokens appear in the vocab list. They will be + removed. The set of tokens in vocab_list should be unique. + + Args: + vocab_list: A list of tokens. 
+ """ + def token_gen(): + for token in vocab_list: + if token not in RESERVED_TOKENS: + yield token + + self._init_vocab(token_gen()) + + def _init_vocab(self, token_generator, add_reserved_tokens=True): + """Initialize vocabulary with tokens from token_generator.""" - def _load_vocab_from_file(self, filename): - """Load vocab from a file.""" - self._token_to_id = {} self._id_to_token = {} + non_reserved_start_index = 0 - for idx, tok in enumerate(RESERVED_TOKENS): - self._token_to_id[tok] = idx - self._id_to_token[idx] = tok + if add_reserved_tokens: + self._id_to_token.update(enumerate(RESERVED_TOKENS)) + non_reserved_start_index = len(RESERVED_TOKENS) - token_start_idx = self._num_reserved_ids - with tf.gfile.Open(filename) as f: - for i, line in enumerate(f): - idx = token_start_idx + i - tok = line.strip() - self._token_to_id[tok] = idx - self._id_to_token[idx] = tok + self._id_to_token.update( + enumerate(token_generator, start=non_reserved_start_index)) + + # _token_to_id is the reverse of _id_to_token + self._token_to_id = dict((v, k) + for k, v in six.iteritems(self._id_to_token)) + + def store_to_file(self, filename): + """Write vocab file to disk. + + Vocab files have one token per line. The file ends in a newline. Reserved + tokens are written to the vocab file as well. + + Args: + filename: Full path of the file to store the vocab to. + """ + with tf.gfile.Open(filename, "w") as f: + for i in range(len(self._id_to_token)): + f.write(self._id_to_token[i] + "\n") + + +def _escape_token(token, alphabet): + """Escape away underscores and OOV characters and append '_'. + + This allows the token to be expressed as the concatenation of a list + of subtokens from the vocabulary. The underscore acts as a sentinel + which allows us to invertibly concatenate multiple such lists. + + Args: + token: A unicode string to be escaped. + alphabet: A set of all characters in the vocabulary's alphabet. + + Returns: + escaped_token: An escaped unicode string. + + Raises: + ValueError: If the provided token is not unicode. + """ + if not isinstance(token, six.text_type): + raise ValueError("Expected string type for token, got %s" % type(token)) + + token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u") + ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token] + return u"".join(ret) + "_" + + +def _unescape_token(escaped_token): + """Inverse of _escape_token(). + + Args: + escaped_token: a unicode string + + Returns: + token: a unicode string + """ + + def match(m): + if m.group(1) is None: + return u"_" if m.group(0) == u"\\u" else u"\\" + + try: + return six.unichr(int(m.group(1))) + except (ValueError, OverflowError) as _: + return u"\u3013" # Unicode for undefined character. + + trimmed = escaped_token[:-1] if escaped_token.endswith("_") else escaped_token + return _UNESCAPE_REGEX.sub(match, trimmed) class SubwordTextEncoder(TextEncoder): - """Class for breaking tokens into subtokens. + """Class for invertibly encoding text using a limited vocabulary. - Invertibly encodes a string as a sequence of subtokens from a limited + Invertibly encodes a native string as a sequence of subtokens from a limited vocabulary. A SubwordTextEncoder is built from a corpus (so it is tailored to the text in the corpus), and stored to a file. See text_encoder_build_subword.py. It can then be loaded and used to encode/decode any text. + + Encoding has four phases: + + 1. Tokenize into a list of tokens. 
Each token is a unicode string of either + all alphanumeric characters or all non-alphanumeric characters. We drop + tokens consisting of a single space that are between two alphanumeric + tokens. + + 2. Escape each token. This escapes away special and out-of-vocabulary + characters, and makes sure that each token ends with an underscore, and + has no other underscores. + + 3. Represent each escaped token as a the concatenation of a list of subtokens + from the limited vocabulary. Subtoken selection is done greedily from + beginning to end. That is, we construct the list in order, always picking + the longest subtoken in our vocabulary that matches a prefix of the + remaining portion of the encoded token. + + 4. Concatenate these lists. This concatenation is invertible due to the + fact that the trailing underscores indicate when one list is finished. + """ - def __init__(self, filename=None, num_reserved_ids=2): - """Read from a file.""" - self._tokenizer = tokenizer.Tokenizer() + def __init__(self, filename=None): + """Initialize and read from a file, if provided. + + Args: + filename: filename from which to read vocab. If None, do not load a + vocab + """ + self._alphabet = set() + self.filename = filename if filename is not None: self._load_from_file(filename) + super(SubwordTextEncoder, self).__init__() - super(SubwordTextEncoder, self).__init__(num_reserved_ids=num_reserved_ids) - - def encode(self, raw_text): - """Converts a string to a list of subtoken ids. + def encode(self, s): + """Converts a native string to a list of subtoken ids. Args: - raw_text: a string. + s: a native string. Returns: a list of integers in the range [0, vocab_size) """ - return self._tokens_to_subtokens(self._tokenizer.encode(raw_text)) + return self._tokens_to_subtoken_ids( + tokenizer.encode(native_to_unicode(s))) - def decode(self, subtokens): - """Converts a sequence of subtoken ids to a string. + def encode_without_tokenizing(self, token_text): + """Converts string to list of subtoken ids without calling tokenizer. + + This treats `token_text` as a single token and directly converts it + to subtoken ids. This may be useful when the default tokenizer doesn't + do what we want (e.g., when encoding text with tokens composed of lots of + nonalphanumeric characters). It is then up to the caller to make sure that + raw text is consistently converted into tokens. Only use this if you are + sure that `encode` doesn't suit your needs. Args: - subtokens: a list of integers in the range [0, vocab_size) + token_text: A native string representation of a single token. Returns: - a string + A list of subword token ids; i.e., integers in the range [0, vocab_size). """ - return self._tokenizer.decode(self._subtokens_to_tokens(subtokens)) + return self._tokens_to_subtoken_ids([native_to_unicode(token_text)]) + + def decode(self, ids, strip_extraneous=False): + """Converts a sequence of subtoken ids to a native string. + + Args: + ids: a list of integers in the range [0, vocab_size) + strip_extraneous: bool, whether to strip off extraneous tokens + (EOS and PAD). 
+ + Returns: + a native string + """ + if strip_extraneous: + ids = strip_ids(ids, list(range(self._num_reserved_ids or 0))) + return unicode_to_native( + tokenizer.decode(self._subtoken_ids_to_tokens(ids))) + + def decode_list(self, ids): + return [self._subtoken_id_to_subtoken_string(s) for s in ids] @property def vocab_size(self): """The subtoken vocabulary size.""" return len(self._all_subtoken_strings) - def _tokens_to_subtokens(self, tokens): + def _tokens_to_subtoken_ids(self, tokens): """Converts a list of tokens to a list of subtoken ids. Args: @@ -207,10 +553,27 @@ def _tokens_to_subtokens(self, tokens): """ ret = [] for token in tokens: - ret.extend(self._escaped_token_to_subtokens(self._escape_token(token))) + ret.extend(self._token_to_subtoken_ids(token)) + return ret + + def _token_to_subtoken_ids(self, token): + """Converts token to a list of subtoken ids. + + Args: + token: a string. + Returns: + a list of integers in the range [0, vocab_size) + """ + cache_location = hash(token) % self._cache_size + cache_key, cache_value = self._cache[cache_location] + if cache_key == token: + return cache_value + ret = self._escaped_token_to_subtoken_ids( + _escape_token(token, self._alphabet)) + self._cache[cache_location] = (token, ret) return ret - def _subtokens_to_tokens(self, subtokens): + def _subtoken_ids_to_tokens(self, subtokens): """Converts a list of subtoken ids to a list of tokens. Args: @@ -218,237 +581,486 @@ def _subtokens_to_tokens(self, subtokens): Returns: a list of strings. """ - concatenated = ''.join( - [self.subtoken_to_subtoken_string(s) for s in subtokens]) - split = concatenated.split('_') - return [self._unescape_token(t + '_') for t in split if t] - - def subtoken_to_subtoken_string(self, subtoken): - """Subtoken_String (string) corresponding to the given subtoken (id).""" - if (subtoken >= 0 and subtoken < self.vocab_size and - self._all_subtoken_strings[subtoken]): + concatenated = "".join( + [self._subtoken_id_to_subtoken_string(s) for s in subtokens]) + split = concatenated.split("_") + ret = [] + for t in split: + if t: + unescaped = _unescape_token(t + "_") + if unescaped: + ret.append(unescaped) + return ret + + def _subtoken_id_to_subtoken_string(self, subtoken): + """Converts a subtoken integer ID to a subtoken string.""" + if 0 <= subtoken < self.vocab_size: return self._all_subtoken_strings[subtoken] - else: - if 0 <= subtoken < self._num_reserved_ids: - return '%s_' % RESERVED_TOKENS[subtoken] - else: - return 'ID%d_' % subtoken + return u"" - def _escaped_token_to_subtokens(self, escaped_token): - """Converts an escaped token string to a list of subtokens. + def _escaped_token_to_subtoken_strings(self, escaped_token): + """Converts an escaped token string to a list of subtoken strings. Args: - escaped_token: an escaped token + escaped_token: An escaped token as a unicode string. Returns: - a list of one or more integers. + A list of subtokens as unicode strings. """ + # NOTE: This algorithm is greedy; it won't necessarily produce the "best" + # list of subtokens. 
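+    # For illustration (hypothetical vocabulary): if the subtoken vocabulary
+    # contains "encode", "enc", "d_" and the single characters, then for the
+    # escaped token "encoded_" the scan below takes the longest match at each
+    # position, yielding ["encode", "d_"] rather than, say, ["enc", "o", ...].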
ret = [] - pos = 0 - while pos < len(escaped_token): - end = len(escaped_token) - while True: - subtoken = self._subtoken_string_to_id.get(escaped_token[pos:end], -1) - if subtoken != -1: + start = 0 + token_len = len(escaped_token) + while start < token_len: + for end in range( + min(token_len, start + self._max_subtoken_len), start, -1): + subtoken = escaped_token[start:end] + if subtoken in self._subtoken_string_to_id: + ret.append(subtoken) + start = end break - end -= 1 - ret.append(subtoken) - pos = end + + else: # Did not break + # If there is no possible encoding of the escaped token then one of the + # characters in the token is not in the alphabet. This should be + # impossible and would be indicative of a bug. + raise ValueError( + "Token substring '%s' not found in subtoken vocabulary." % + escaped_token) + return ret + def _escaped_token_to_subtoken_ids(self, escaped_token): + """Converts an escaped token string to a list of subtoken IDs. + + Args: + escaped_token: An escaped token as a unicode string. + Returns: + A list of subtoken IDs as integers. + """ + return [ + self._subtoken_string_to_id[subtoken] + for subtoken in self._escaped_token_to_subtoken_strings(escaped_token) + ] + + @classmethod + def build_from_generator(cls, + generator, + target_size, + max_subtoken_length=None, + reserved_tokens=None): + """Builds a SubwordTextEncoder from the generated text. + + Args: + generator: yields text. + target_size: int, approximate vocabulary size to create. + max_subtoken_length: Maximum length of a subtoken. If this is not set, + then the runtime and memory use of creating the vocab is quadratic in + the length of the longest token. If this is set, then it is instead + O(max_subtoken_length * length of longest token). + reserved_tokens: List of reserved tokens. The global variable + `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this + argument is `None`, it will use `RESERVED_TOKENS`. + + Returns: + SubwordTextEncoder with `vocab_size` approximately `target_size`. + """ + token_counts = collections.defaultdict(int) + for item in generator: + for tok in tokenizer.encode(native_to_unicode(item)): + token_counts[tok] += 1 + encoder = cls.build_to_target_size( + target_size, token_counts, 1, 1e3, + max_subtoken_length=max_subtoken_length, + reserved_tokens=reserved_tokens) + return encoder + @classmethod def build_to_target_size(cls, target_size, token_counts, - store_filename, min_val, max_val, + max_subtoken_length=None, + reserved_tokens=None, num_iterations=4): """Builds a SubwordTextEncoder that has `vocab_size` near `target_size`. - Uses simple recursive binary search to find a `min_count` value that most + Uses simple recursive binary search to find a minimum token count that most closely matches the `target_size`. Args: - target_size: desired vocab_size to approximate. - token_counts: a dictionary of string to int. - store_filename: a string - where to write the vocabulary. - min_val: an integer - lower bound for `min_count`. - max_val: an integer - upper bound for `min_count`. - num_iterations: an integer. how many iterations of refinement. + target_size: Desired vocab_size to approximate. + token_counts: A dictionary of token counts, mapping string to int. + min_val: An integer; lower bound for the minimum token count. + max_val: An integer; upper bound for the minimum token count. + max_subtoken_length: Maximum length of a subtoken. 
If this is not set, + then the runtime and memory use of creating the vocab is quadratic in + the length of the longest token. If this is set, then it is instead + O(max_subtoken_length * length of longest token). + reserved_tokens: List of reserved tokens. The global variable + `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this + argument is `None`, it will use `RESERVED_TOKENS`. + num_iterations: An integer; how many iterations of refinement. Returns: - a SubwordTextEncoder instance. + A SubwordTextEncoder instance. + + Raises: + ValueError: If `min_val` is greater than `max_val`. """ - present_count = (max_val + min_val) // 2 - tf.logging.info('Trying min_count %d' % present_count) - subtokenizer = cls() - subtokenizer.build_from_token_counts(token_counts, store_filename, - present_count, num_iterations) + if min_val > max_val: + raise ValueError("Lower bound for the minimum token count " + "is greater than the upper bound.") + if target_size < 1: + raise ValueError("Target size must be positive.") + + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + + def bisect(min_val, max_val): + """Bisection to find the right size.""" + present_count = (max_val + min_val) // 2 + tf.logging.info("Trying min_count %d" % present_count) + subtokenizer = cls() + subtokenizer.build_from_token_counts( + token_counts, present_count, num_iterations, + max_subtoken_length=max_subtoken_length, + reserved_tokens=reserved_tokens) + + # Being within 1% of the target size is ok. + is_ok = abs(subtokenizer.vocab_size - target_size) * 100 < target_size + # If min_val == max_val, we can't do any better than this. + if is_ok or min_val >= max_val or present_count < 2: + return subtokenizer - if min_val >= max_val or subtokenizer.vocab_size == target_size: - return subtokenizer - elif subtokenizer.vocab_size > target_size: - other_subtokenizer = cls.build_to_target_size( - target_size, token_counts, store_filename, present_count + 1, max_val, - num_iterations) - if (abs(other_subtokenizer.vocab_size - target_size) < - abs(subtokenizer.vocab_size - target_size)): - return other_subtokenizer + if subtokenizer.vocab_size > target_size: + other_subtokenizer = bisect(present_count + 1, max_val) else: + other_subtokenizer = bisect(min_val, present_count - 1) + + if other_subtokenizer is None: return subtokenizer - else: - other_subtokenizer = cls.build_to_target_size( - target_size, token_counts, store_filename, min_val, present_count - 1, - num_iterations) + if (abs(other_subtokenizer.vocab_size - target_size) < abs(subtokenizer.vocab_size - target_size)): return other_subtokenizer - else: - return subtokenizer + return subtokenizer + + return bisect(min_val, max_val) def build_from_token_counts(self, token_counts, - store_filename, min_count, - num_iterations=4): + num_iterations=4, + reserved_tokens=None, + max_subtoken_length=None): """Train a SubwordTextEncoder based on a dictionary of word counts. Args: - token_counts: a dictionary of string to int. - store_filename: a string - where to write the vocabulary. + token_counts: a dictionary of Unicode strings to int. min_count: an integer - discard subtokens with lower counts. num_iterations: an integer. how many iterations of refinement. + reserved_tokens: List of reserved tokens. The global variable + `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this + argument is `None`, it will use `RESERVED_TOKENS`. + max_subtoken_length: Maximum length of a subtoken. 
If this is not set, + then the runtime and memory use of creating the vocab is quadratic in + the length of the longest token. If this is set, then it is instead + O(max_subtoken_length * length of longest token). + + Raises: + ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it + is not clear what the space is being reserved for, or when it will be + filled in. """ + if reserved_tokens is None: + reserved_tokens = RESERVED_TOKENS + else: + # There is not complete freedom in replacing RESERVED_TOKENS. + for default, proposed in zip(RESERVED_TOKENS, reserved_tokens): + if default != proposed: + raise ValueError("RESERVED_TOKENS must be a prefix of " + "reserved_tokens.") + + # Initialize the alphabet. Note, this must include reserved tokens or it can + # result in encoding failures. + alphabet_tokens = chain(six.iterkeys(token_counts), + [native_to_unicode(t) for t in reserved_tokens]) + + self._init_alphabet_from_tokens(alphabet_tokens) + + # Bootstrap the initial list of subtokens with the characters from the + # alphabet plus the escaping characters. + self._init_subtokens_from_list(list(self._alphabet), + reserved_tokens=reserved_tokens) + # We build iteratively. On each iteration, we segment all the words, # then count the resulting potential subtokens, keeping the ones # with high enough counts for our new vocabulary. - for i in xrange(num_iterations): - counts = {} + if min_count < 1: + min_count = 1 + for i in range(num_iterations): + tf.logging.info("Iteration {0}".format(i)) + + # Collect all substrings of the encoded token that break along current + # subtoken boundaries. + subtoken_counts = collections.defaultdict(int) for token, count in six.iteritems(token_counts): - escaped_token = self._escape_token(token) - # we will count all tails of the escaped_token, starting from boundaries - # determined by our current segmentation. - if i == 0: - starts = list(range(len(escaped_token))) - else: - subtokens = self._escaped_token_to_subtokens(escaped_token) - pos = 0 - starts = [] - for subtoken in subtokens: - starts.append(pos) - pos += len(self.subtoken_to_subtoken_string(subtoken)) - for start in starts: - for end in xrange(start + 1, len(escaped_token) + 1): - subtoken_string = escaped_token[start:end] - counts[subtoken_string] = counts.get(subtoken_string, 0) + count - # array of lists of candidate subtoken strings, by length + iter_start_time = time.time() + escaped_token = _escape_token(token, self._alphabet) + subtokens = self._escaped_token_to_subtoken_strings(escaped_token) + start = 0 + for subtoken in subtokens: + last_position = len(escaped_token) + 1 + if max_subtoken_length is not None: + last_position = min(last_position, start + max_subtoken_length) + + for end in range(start + 1, last_position): + new_subtoken = escaped_token[start:end] + subtoken_counts[new_subtoken] += count + start += len(subtoken) + iter_time_secs = time.time() - iter_start_time + if iter_time_secs > 0.1: + tf.logging.info(u"Processing token [{0}] took {1} seconds, consider " + "setting Text2TextProblem.max_subtoken_length to a " + "smaller value.".format(token, iter_time_secs)) + + # Array of sets of candidate subtoken strings, by length. 
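+      # For example, if both "er" and "er_" meet min_count, they land in the
+      # length-2 and length-3 buckets respectively; the longest-to-shortest
+      # pass below then accepts "er_" first and debits its count from its
+      # prefixes "er" and "e".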
len_to_subtoken_strings = [] - for subtoken_string, count in six.iteritems(counts): - if count < min_count or len(subtoken_string) <= 1: - continue - while len(len_to_subtoken_strings) <= len(subtoken_string): - len_to_subtoken_strings.append([]) - len_to_subtoken_strings[len(subtoken_string)].append(subtoken_string) - new_subtoken_strings = [] - # consider the candidates longest to shortest, so that if we accept + for subtoken_string, count in six.iteritems(subtoken_counts): + lsub = len(subtoken_string) + if count >= min_count: + while len(len_to_subtoken_strings) <= lsub: + len_to_subtoken_strings.append(set()) + len_to_subtoken_strings[lsub].add(subtoken_string) + + # Consider the candidates longest to shortest, so that if we accept # a longer subtoken string, we can decrement the counts of its prefixes. - for subtoken_strings in len_to_subtoken_strings[::-1]: + new_subtoken_strings = [] + for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1): + subtoken_strings = len_to_subtoken_strings[lsub] for subtoken_string in subtoken_strings: - count = counts[subtoken_string] - if count < min_count: - continue - new_subtoken_strings.append((-count, subtoken_string)) - for l in xrange(1, len(subtoken_string)): - counts[subtoken_string[:l]] -= count - # make sure we have all single characters. - new_subtoken_strings.extend([(-counts.get(chr(i), 0), chr(i)) - for i in xrange(2**8)]) - new_subtoken_strings.sort() - self._init_from_list([''] * self._num_reserved_ids + - [p[1] for p in new_subtoken_strings]) - print('vocab_size = %d' % self.vocab_size) - - original = 'This sentence was encoded by the SubwordTextEncoder.' - encoded = self.encode(original) - print(encoded) - print([self.subtoken_to_subtoken_string(s) for s in encoded]) - decoded = self.decode(encoded) - print(decoded) - assert decoded == original - self._store_to_file(store_filename) - - def _init_from_list(self, subtoken_strings): - """Initialize from a list of subtoken strings.""" - self._all_subtoken_strings = subtoken_strings - self._subtoken_string_to_id = {} - for i in xrange(len(subtoken_strings)): - subtoken_string = subtoken_strings[i] - if subtoken_string: - self._subtoken_string_to_id[subtoken_string] = i + count = subtoken_counts[subtoken_string] + if count >= min_count: + # Exclude alphabet tokens here, as they must be included later, + # explicitly, regardless of count. + if subtoken_string not in self._alphabet: + new_subtoken_strings.append((count, subtoken_string)) + for l in range(1, lsub): + subtoken_counts[subtoken_string[:l]] -= count + + # Include the alphabet explicitly to guarantee all strings are encodable. + new_subtoken_strings.extend((subtoken_counts.get(a, 0), a) + for a in self._alphabet) + new_subtoken_strings.sort(reverse=True) + + # Reinitialize to the candidate vocabulary. 
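+      # Escaped reserved tokens are prepended below so that PAD, EOS and any
+      # user-supplied reserved tokens keep the lowest ids on every iteration.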
+ new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings] + if reserved_tokens: + escaped_reserved_tokens = [ + _escape_token(native_to_unicode(t), self._alphabet) + for t in reserved_tokens + ] + new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings + + self._init_subtokens_from_list(new_subtoken_strings) + tf.logging.info("vocab_size = %d" % self.vocab_size) - def _load_from_file(self, filename): - """Load from a file.""" + @property + def all_subtoken_strings(self): + return tuple(self._all_subtoken_strings) + + def dump(self): + """Debugging dump of the current subtoken vocabulary.""" + subtoken_strings = [(i, s) + for s, i in six.iteritems(self._subtoken_string_to_id)] + print(u", ".join(u"{0} : '{1}'".format(i, s) + for i, s in sorted(subtoken_strings))) + + def _init_subtokens_from_list(self, subtoken_strings, reserved_tokens=None): + """Initialize token information from a list of subtoken strings. + + Args: + subtoken_strings: a list of subtokens + reserved_tokens: List of reserved tokens. We must have `reserved_tokens` + as None or the empty list, or else the global variable `RESERVED_TOKENS` + must be a prefix of `reserved_tokens`. + + Raises: + ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it + is not clear what the space is being reserved for, or when it will be + filled in. + """ + if reserved_tokens is None: + reserved_tokens = [] + + if reserved_tokens: + self._all_subtoken_strings = reserved_tokens + subtoken_strings + else: + self._all_subtoken_strings = subtoken_strings + + # we remember the maximum length of any subtoken to avoid having to + # check arbitrarily long strings. + self._max_subtoken_len = max([len(s) for s in subtoken_strings]) + self._subtoken_string_to_id = { + s: i + len(reserved_tokens) + for i, s in enumerate(subtoken_strings) if s + } + # Initialize the cache to empty. + self._cache_size = 2 ** 20 + self._cache = [(None, None)] * self._cache_size + + def _init_alphabet_from_tokens(self, tokens): + """Initialize alphabet from an iterable of token or subtoken strings.""" + # Include all characters from all tokens in the alphabet to guarantee that + # any token can be encoded. Additionally, include all escaping characters. + self._alphabet = {c for token in tokens for c in token} + self._alphabet |= _ESCAPE_CHARS + + def _load_from_file_object(self, f): + """Load from a file object. 
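+    Vocab files contain one subtoken per line; each line may optionally be
+    wrapped in single or double quotes, which are stripped when loading.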
+ + Args: + f: File object to load vocabulary from + """ subtoken_strings = [] + for line in f: + s = line.rstrip() + # Some vocab files wrap words in single quotes, but others don't + if ((s.startswith("'") and s.endswith("'")) or + (s.startswith("\"") and s.endswith("\""))): + s = s[1:-1] + subtoken_strings.append(native_to_unicode(s)) + self._init_subtokens_from_list(subtoken_strings) + self._init_alphabet_from_tokens(subtoken_strings) + + def _load_from_file(self, filename): + """Load from a vocab file.""" + if not tf.gfile.Exists(filename): + raise ValueError("File %s not found" % filename) with tf.gfile.Open(filename) as f: - for line in f: - subtoken_strings.append(line.strip()[1:-1].decode('string-escape')) - self._init_from_list(subtoken_strings) + self._load_from_file_object(f) - def _store_to_file(self, filename): - with tf.gfile.Open(filename, 'w') as f: + def store_to_file(self, filename, add_single_quotes=True): + with tf.gfile.Open(filename, "w") as f: for subtoken_string in self._all_subtoken_strings: - f.write('\'' + subtoken_string.encode('string-escape') + '\'\n') + if add_single_quotes: + f.write("'" + unicode_to_native(subtoken_string) + "'\n") + else: + f.write(unicode_to_native(subtoken_string) + "\n") + + +class ImageEncoder(object): + """Encoder class for saving and loading images.""" + + def __init__(self, num_reserved_ids=0, height=None, width=None, channels=3): + assert num_reserved_ids == 0 + self._height = height + self._width = width + self._channels = channels - def _escape_token(self, token): - r"""Translate '\'->'\\' and '_'->'\u', then append '_'. + @property + def num_reserved_ids(self): + return 0 + + def encode(self, s): + """Transform a string with a filename into a list of RGB integers. Args: - token: a string + s: path to the file with an image. + Returns: - escaped_token: a string + ids: list of integers """ - return token.replace('\\', '\\\\').replace('_', '\\u') + '_' + try: + import matplotlib.image as im # pylint: disable=g-import-not-at-top + except ImportError as e: + tf.logging.warning( + "Reading an image requires matplotlib to be installed: %s", e) + raise NotImplementedError("Image reading not implemented.") + return im.imread(s) - def _unescape_token(self, escaped_token): - r"""Remove '_' from end, then translate '\\'->'\' and '\u'->'_'. + def decode(self, ids, strip_extraneous=False): + """Transform a sequence of int ids into an image file. - TODO(noam): There must be some better way to do this with regexps. + Args: + ids: list of integers to be converted. + strip_extraneous: unused + + Returns: + Path to the temporary file where the image was saved. + + Raises: + ValueError: if the ids are not of the appropriate size. 
+ """ + del strip_extraneous + _, tmp_file_path = tempfile.mkstemp("_decode.png") + if self._height is None or self._width is None: + size = int(math.sqrt(len(ids) / self._channels)) + length = size * size * self._channels + else: + size = None + length = self._height * self._width * self._channels + if len(ids) != length: + raise ValueError("Length of ids (%d) must be height (%d) x width (%d) x " + "channels (%d); %d != %d.\n Ids: %s" + % (len(ids), self._height, self._width, self._channels, + len(ids), length, " ".join([str(i) for i in ids]))) + with tf.Graph().as_default(): + raw = tf.constant(ids, dtype=tf.uint8) + if size is None: + img = tf.reshape(raw, [self._height, self._width, self._channels]) + else: + img = tf.reshape(raw, [size, size, self._channels]) + png = tf.image.encode_png(img) + op = tf.write_file(tmp_file_path, png) + with tf.Session() as sess: + sess.run(op) + return tmp_file_path + + def decode_list(self, ids): + """Transform a sequence of int ids into an image file. Args: - escaped_token: a string + ids: list of integers to be converted. + Returns: - token: a string + Singleton list: path to the temporary file where the image was saved. """ - assert escaped_token[-1] == '_' - escaped_token = escaped_token[:-1] - if '\\' not in escaped_token: - return escaped_token - ret = '' - pos = 0 - while pos < len(escaped_token): - if escaped_token[pos] == '\\' and pos + 1 < len(escaped_token): - if escaped_token[pos + 1] == 'u': - ret += '_' - else: - ret += escaped_token[pos + 1] - pos += 1 - pos += 1 - return ret + return [self.decode(ids)] - @classmethod - def get_token_counts(cls, text_filepattern, corpus_max_lines): - """Read the corpus and compute a dictionary of word counts.""" - tok = tokenizer.Tokenizer() - token_counts = {} - lines_read = 0 - filenames = tf.gfile.Glob(text_filepattern) - for text_filename in filenames: - with tf.gfile.Open(text_filename) as f: - for line in f: - tokens = tok.encode(line.strip()) - for t in tokens: - token_counts[t] = token_counts.get(t, 0) + 1 - lines_read += 1 - if corpus_max_lines > 0 and lines_read > corpus_max_lines: - return token_counts - return token_counts + @property + def vocab_size(self): + return 256 + + +class RealEncoder(object): + """Encoder class for saving and loading float values.""" + + def encode(self, s): + """Transform a string (space separated float values) into a float array. + + Args: + s: space separated float values. + + Returns: + Array of float values. + """ + return [float(w) for w in s.split()] + + def decode(self, ids, strip_extraneous=False): + """Transform sequence of float values into string (float values). + + Args: + ids: array of floats to be converted. + strip_extraneous: unused + + Returns: + String having space separated float values. + + Raises: + ValueError: if the ids are not of the appropriate size. + """ + del strip_extraneous + return " ".join([str(i) for i in ids]) diff --git a/tensor2tensor/data_generators/text_encoder_build_subword.py b/tensor2tensor/data_generators/text_encoder_build_subword.py index ee71af9f6..2f5bca643 100644 --- a/tensor2tensor/data_generators/text_encoder_build_subword.py +++ b/tensor2tensor/data_generators/text_encoder_build_subword.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
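As a quick orientation before the script changes below, here is a minimal sketch (not part of the patch) of the SubwordTextEncoder APIs this diff reworks; the toy corpus, target size, and output path are made-up examples.

```python
# Minimal sketch (not part of the patch): exercising the SubwordTextEncoder
# APIs reworked above. The corpus, target size and path are made-up examples.
import collections

from tensor2tensor.data_generators import text_encoder

corpus = ["the quick brown fox", "the lazy dog", "quick brown dogs"]
token_counts = collections.Counter()
for line in corpus:
  token_counts.update(line.split())

# Bisect min_count in [1, 10] to land near ~100 subtokens.
encoder = text_encoder.SubwordTextEncoder.build_to_target_size(
    100, token_counts, 1, 10)

ids = encoder.encode("the quick dog")
assert encoder.decode(ids) == "the quick dog"  # subword encoding round-trips
encoder.store_to_file("/tmp/example.subword_text_encoder")
```

The command-line script updated in the hunk below does the same thing from files on disk via `--corpus_filepattern` (or `--vocab_filepattern`) and `--output_filename`.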
@@ -21,46 +22,57 @@ Example usage: python data_generators/text_encoder_build_subword.py \ - --corpus_filepattern=$LM1B_DIR/train-unk-* \ - --corpus_max_lines=17500 \ - --output_fn=$DATA_DIR/lm1b16k.subword_text_encoder \ + --corpus_filepattern=$DATA_DIR/my_problem-train-* \ + --corpus_max_lines=12345 \ + --output_filename=$DATA_DIR/my_problem.subword_text_encoder \ --logtostderr -python data_generators/text_encoder_build_subword.py \ - --corpus_filepattern=$LM1B_DIR/train-unk-* \ - --corpus_max_lines=270000 \ - --output_fn=$DATA_DIR/lm1b64k.subword_text_encoder \ - --logtostderr """ from __future__ import absolute_import from __future__ import division from __future__ import print_function - -# Dependency imports - from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import tokenizer -import tensorflow as tf +import tensorflow.compat.v1 as tf -tf.app.flags.DEFINE_string('output_fn', '/tmp/my.subword_text_encoder', - 'where to store the SubwordTextEncoder') -tf.app.flags.DEFINE_string('corpus_filepattern', '', - 'Corpus of one or more text files') -tf.app.flags.DEFINE_integer('min_count', 5, 'Minimum subtoken count in corpus') -tf.app.flags.DEFINE_integer('corpus_max_lines', 10000, - 'How many lines of corpus to read') -tf.app.flags.DEFINE_integer('num_iterations', 4, 'Number of iterations') -FLAGS = tf.app.flags.FLAGS +tf.flags.DEFINE_string('output_filename', '/tmp/my.subword_text_encoder', + 'where to store the SubwordTextEncoder') +tf.flags.DEFINE_string('corpus_filepattern', '', + 'Corpus of one or more text files') +tf.flags.DEFINE_string('vocab_filepattern', '', 'One or more vocabulary files ' + '(one word per line as "word,count")') +tf.flags.DEFINE_integer('min_count', 5, 'Minimum subtoken count in corpus') +tf.flags.DEFINE_integer('corpus_max_lines', 10000, + 'How many lines of corpus to read') +tf.flags.DEFINE_integer('num_iterations', 4, 'Number of iterations') +tf.flags.DEFINE_bool('split_on_newlines', True, 'Break corpus into lines.') +FLAGS = tf.flags.FLAGS def main(unused_argv): - gs = text_encoder.SubwordTextEncoder() - if not FLAGS.corpus_filepattern: - raise ValueError('Must provide --corpus_filepattern') - token_counts = text_encoder.SubwordTextEncoder.get_token_counts( - FLAGS.corpus_filepattern, FLAGS.corpus_max_lines) - gs.build_from_token_counts(token_counts, FLAGS.output_fn, FLAGS.min_count, - FLAGS.num_iterations) + if FLAGS.corpus_filepattern and FLAGS.vocab_filepattern: + raise ValueError( + 'Must only provide one of --corpus_filepattern or --vocab_filepattern') + + elif FLAGS.corpus_filepattern: + token_counts = tokenizer.corpus_token_counts( + FLAGS.corpus_filepattern, + FLAGS.corpus_max_lines, + split_on_newlines=FLAGS.split_on_newlines) + + elif FLAGS.vocab_filepattern: + token_counts = tokenizer.vocab_token_counts(FLAGS.vocab_filepattern, + FLAGS.corpus_max_lines) + + else: + raise ValueError( + 'Must provide one of --corpus_filepattern or --vocab_filepattern') + + encoder = text_encoder.SubwordTextEncoder() + encoder.build_from_token_counts(token_counts, FLAGS.min_count, + FLAGS.num_iterations) + encoder.store_to_file(FLAGS.output_filename) if __name__ == '__main__': diff --git a/tensor2tensor/data_generators/text_encoder_inspect_subword.py b/tensor2tensor/data_generators/text_encoder_inspect_subword.py deleted file mode 100644 index 0ad9a2701..000000000 --- a/tensor2tensor/data_generators/text_encoder_inspect_subword.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright 2017 Google Inc. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -r"""Inspect a TFRecord file of tensorflow.Example and show tokenizations. - -python data_generators/text_encoder_inspect_subword.py \ - --logtostderr \ - --vocab_file=$DATA_DIR/tokens.vocab.8192 \ - --in_file=$DATA_DIR/wmt_ende_tokens_8k-train-00000-of-00100 -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -from tensor2tensor.data_generators import text_encoder - -import tensorflow as tf - -tf.app.flags.DEFINE_string("vocab_file", "", - "SubwordTextEncoder vocabulary file") - -tf.app.flags.DEFINE_string("in_file", "", "input filename") - -FLAGS = tf.app.flags.FLAGS - - -def ShowSequence(subtokenizer, subtokens, label): - print("%s decoded = %s" % (label, subtokenizer.decode(subtokens))) - print("%s subtoken ids = %s" % (label, subtokens)) - print("%s subtoken strings = %s" % - (label, - [subtokenizer.subtoken_to_subtoken_string(s) for s in subtokens])) - print("") - - -def main(_): - """Convert a file to examples.""" - subtokenizer = text_encoder.SubwordTextEncoder(FLAGS.vocab_file) - reader = tf.python_io.tf_record_iterator(FLAGS.in_file) - for record in reader: - x = tf.train.Example() - x.ParseFromString(record) - inputs = [int(i) for i in x.features.feature["inputs"].int64_list.value] - targets = [int(i) for i in x.features.feature["targets"].int64_list.value] - ShowSequence(subtokenizer, inputs, "inputs") - ShowSequence(subtokenizer, targets, "targets") - - -if __name__ == "__main__": - tf.app.run() diff --git a/tensor2tensor/data_generators/text_encoder_test.py b/tensor2tensor/data_generators/text_encoder_test.py new file mode 100644 index 000000000..e2ed4e985 --- /dev/null +++ b/tensor2tensor/data_generators/text_encoder_test.py @@ -0,0 +1,385 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for tensor2tensor.data_generators.text_encoder.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import collections +import io +import os +import random +import shutil +import string + +import mock +import six +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.data_generators import text_encoder +import tensorflow.compat.v1 as tf + + +class NativeToUnicodeTest(tf.test.TestCase): + + def test_native_to_unicode(self): + s = r"foo bar" + s_unicode = text_encoder.native_to_unicode(s) + if six.PY2: + self.assertIsInstance(s_unicode, unicode) + self.assertEqual(s_unicode, u"foo bar") + + +class EscapeUnescapeTokenTest(tf.test.TestCase): + + def test_escape_token(self): + escaped = text_encoder._escape_token( + "Foo! Bar.\nunder_score back\\slash", + set("abcdefghijklmnopqrstuvwxyz .\n") | text_encoder._ESCAPE_CHARS) + + self.assertEqual( + "\\70;oo\\33; \\66;ar.\\10;under\\uscore back\\\\slash_", escaped) + + def test_unescape_token(self): + unescaped = text_encoder._unescape_token( + "\\70;oo\\33; \\66;ar.\\10;under\\uscore back\\\\slash_") + + self.assertEqual( + "Foo! Bar.\nunder_score back\\slash", unescaped) + + +class TokenTextEncoderTest(tf.test.TestCase): + + @classmethod + def setUpClass(cls): + """Make sure the test dir exists and is empty.""" + cls.test_temp_dir = os.path.join(tf.test.get_temp_dir(), "encoder_test") + shutil.rmtree(cls.test_temp_dir, ignore_errors=True) + tf.gfile.MakeDirs(cls.test_temp_dir) + + def test_save_and_reload(self): + """Test that saving and reloading doesn't change the vocab. + + Note that this test reads and writes to the filesystem, which necessitates + that this test size be "large". + """ + + corpus = "A B C D E F G H I J K L M N O P Q R S T U V W X Y Z" + vocab_filename = os.path.join(self.test_temp_dir, "abc.vocab") + + # Make text encoder from a list and store vocab to fake filesystem. + encoder = text_encoder.TokenTextEncoder(None, vocab_list=corpus.split()) + encoder.store_to_file(vocab_filename) + + # Load back the saved vocab file from the fake_filesystem. + new_encoder = text_encoder.TokenTextEncoder(vocab_filename) + + self.assertEqual(encoder._id_to_token, new_encoder._id_to_token) + self.assertEqual(encoder._token_to_id, new_encoder._token_to_id) + + def test_reserved_tokens_in_corpus(self): + """Test that we handle reserved tokens appearing in the corpus.""" + corpus = "A B {} D E F {} G {}".format(text_encoder.EOS, + text_encoder.EOS, + text_encoder.PAD) + + encoder = text_encoder.TokenTextEncoder(None, vocab_list=corpus.split()) + + all_tokens = encoder._id_to_token.values() + + # If reserved tokens are removed correctly, then the set of tokens will + # be unique. + self.assertEqual(len(all_tokens), len(set(all_tokens))) + + +class SubwordTextEncoderTest(tf.test.TestCase): + + @classmethod + def setUpClass(cls): + """Make sure the test dir exists and is empty.""" + cls.test_temp_dir = os.path.join(tf.test.get_temp_dir(), "encoder_test") + shutil.rmtree(cls.test_temp_dir, ignore_errors=True) + tf.gfile.MakeDirs(cls.test_temp_dir) + + def test_encode_decode(self): + corpus = ( + "This is a corpus of text that provides a bunch of tokens from which " + "to build a vocabulary. It will be used when strings are encoded " + "with a TextEncoder subclass. 
The encoder was coded by a coder.") + token_counts = collections.Counter(corpus.split(" ")) + alphabet = set(corpus) - {" "} + + original = "This is a coded sentence encoded by the SubwordTextEncoder." + token_counts.update(original.split(" ")) + + encoder = text_encoder.SubwordTextEncoder.build_to_target_size( + 100, token_counts, 2, 10) + + # Encoding should be reversible. + encoded = encoder.encode(original) + decoded = encoder.decode(encoded) + self.assertEqual(original, decoded) + + # The substrings coded and coder are frequent enough in the corpus that + # they should appear in the vocabulary even though they are substrings + # of other included strings. + subtoken_strings = {encoder.all_subtoken_strings[i] for i in encoded} + self.assertIn("encoded_", subtoken_strings) + self.assertIn("coded_", subtoken_strings) + self.assertIn("TextEncoder", encoder.all_subtoken_strings) + self.assertIn("coder", encoder.all_subtoken_strings) + + # Every character in the corpus should be in the encoders alphabet and + # its subtoken vocabulary. + self.assertTrue(alphabet.issubset(encoder._alphabet)) + for a in alphabet: + self.assertIn(a, encoder.all_subtoken_strings) + + def test_unicode(self): + corpus = "Cat emoticons. \U0001F638 \U0001F639 \U0001F63A \U0001F63B" + token_counts = collections.Counter(corpus.split(" ")) + + encoder = text_encoder.SubwordTextEncoder.build_to_target_size( + 100, token_counts, 2, 10) + + self.assertIn("\U0001F638", encoder._alphabet) + self.assertIn("\U0001F63B", encoder.all_subtoken_strings) + + def test_small_vocab(self): + corpus = "The quick brown fox jumps over the lazy dog" + token_counts = collections.Counter(corpus.split(" ")) + alphabet = set(corpus) - {" "} + + encoder = text_encoder.SubwordTextEncoder.build_to_target_size( + 10, token_counts, 2, 10) + + # All vocabulary elements are in the alphabet and subtoken strings even + # if we requested a smaller vocabulary to assure all expected strings + # are encodable. + self.assertTrue(alphabet.issubset(encoder._alphabet)) + for a in alphabet: + self.assertIn(a, encoder.all_subtoken_strings) + + def test_long_tokens(self): + """Subword tokenization should still run efficiently with long tokens. + + To make it run efficiently, we need to use the `max_subtoken_length` + argument when calling SubwordTextEncoder.build_to_target_size. + """ + token_length = 4000 + num_tokens = 50 + target_vocab_size = 600 + max_subtoken_length = 10 # Set this to `None` to get problems. + max_count = 500 + + # Generate some long random strings. + random.seed(0) + long_tokens = [] + for _ in range(num_tokens): + long_token = "".join([random.choice(string.ascii_uppercase) + for _ in range(token_length)]) + long_tokens.append(long_token) + + corpus = " ".join(long_tokens) + token_counts = collections.Counter(corpus.split(" ")) + alphabet = set(corpus) - {" "} + + encoder = text_encoder.SubwordTextEncoder.build_to_target_size( + target_vocab_size, token_counts, 1, max_count, num_iterations=1, + max_subtoken_length=max_subtoken_length) + + # All vocabulary elements are in the alphabet and subtoken strings even + # if we requested a smaller vocabulary to assure all expected strings + # are encodable. 
+ self.assertTrue(alphabet.issubset(encoder._alphabet)) + for a in alphabet: + self.assertIn(a, encoder.all_subtoken_strings) + + def test_custom_reserved_tokens(self): + """Test that we can pass custom reserved tokens to SubwordTextEncoder.""" + corpus = "The quick brown fox jumps over the lazy dog" + token_counts = collections.Counter(corpus.split(" ")) + + start_symbol = "" + end_symbol = "" + reserved_tokens = text_encoder.RESERVED_TOKENS + [start_symbol, + end_symbol] + encoder = text_encoder.SubwordTextEncoder.build_to_target_size( + 10, token_counts, 2, 10, reserved_tokens=reserved_tokens) + + # Make sure that reserved tokens appear in the right places. + self.assertEqual(encoder.decode([2]), start_symbol) + self.assertEqual(encoder.decode([3]), end_symbol) + + # Make sure that we haven't messed up the ability to reconstruct. + reconstructed_corpus = encoder.decode(encoder.encode(corpus)) + self.assertEqual(corpus, reconstructed_corpus) + + def test_encodable_when_not_in_alphabet(self): + corpus = "the quick brown fox jumps over the lazy dog" + token_counts = collections.Counter(corpus.split(" ")) + + encoder = text_encoder.SubwordTextEncoder.build_to_target_size( + 100, token_counts, 2, 10) + original = "This has UPPER CASE letters that are out of alphabet" + + # Early versions could have an infinite loop when breaking into subtokens + # if there was any out-of-alphabet characters in the encoded string. + encoded = encoder.encode(original) + decoded = encoder.decode(encoded) + + self.assertEqual(original, decoded) + encoded_str = "".join(encoder.all_subtoken_strings[i] for i in encoded) + self.assertIn("\\84;", encoded_str) + + @mock.patch.object(text_encoder, "_ESCAPE_CHARS", new=set("\\_;13579")) + def test_raises_exception_when_not_encodable(self): + corpus = "the quick brown fox jumps over the lazy dog" + token_counts = collections.Counter(corpus.split(" ")) + + # Deliberately exclude some required encoding chars from the alphabet + # and token list, making some strings unencodable. + encoder = text_encoder.SubwordTextEncoder.build_to_target_size( + 100, token_counts, 2, 10) + original = "This has UPPER CASE letters that are out of alphabet" + + # Previously there was a bug which produced an infinite loop in this case. + with self.assertRaises(ValueError): + encoder.encode(original) + + def test_load_from_file(self): + # Test a vocab file with words not wrapped with single quotes + encoder = text_encoder.SubwordTextEncoder() + correct_vocab = ["the", "and", "of"] + vocab = io.StringIO("the\n" + "and\n" + "of\n") + encoder._load_from_file_object(vocab) + self.assertAllEqual(encoder.all_subtoken_strings, correct_vocab) + + # Test a vocab file with words wrapped in single quotes + encoder = text_encoder.SubwordTextEncoder() + vocab = io.StringIO("\"the\"\n" + "\"and\"\n" + "\"of\"\n") + encoder._load_from_file_object(vocab) + self.assertAllEqual(encoder.all_subtoken_strings, correct_vocab) + + def test_reserved_token_chars_not_in_alphabet(self): + corpus = "dog" + token_counts = collections.Counter(corpus.split(" ")) + encoder1 = text_encoder.SubwordTextEncoder.build_to_target_size( + 100, token_counts, 2, 100) + filename = os.path.join(self.test_temp_dir, "out.voc") + encoder1.store_to_file(filename) + encoder2 = text_encoder.SubwordTextEncoder(filename=filename) + + self.assertEqual(encoder1._alphabet, encoder2._alphabet) + + for t in text_encoder.RESERVED_TOKENS: + for c in t: + # Verify that encoders can encode all reserved token chars. 
+ encoder1.encode(c) + encoder2.encode(c) + + def test_save_and_reload(self): + corpus = "the quick brown fox jumps over the lazy dog" + token_counts = collections.Counter(corpus.split(" ")) + + # Deliberately exclude some required encoding chars from the alphabet + # and token list, making some strings unencodable. + encoder = text_encoder.SubwordTextEncoder.build_to_target_size( + 100, token_counts, 2, 10) + + filename = os.path.join(self.test_temp_dir, "out.voc") + encoder.store_to_file(filename) + new_encoder = text_encoder.SubwordTextEncoder(filename) + + self.assertEqual(encoder._alphabet, new_encoder._alphabet) + self.assertEqual(encoder.all_subtoken_strings, + new_encoder.all_subtoken_strings) + self.assertEqual(encoder._subtoken_string_to_id, + new_encoder._subtoken_string_to_id) + self.assertEqual(encoder._max_subtoken_len, new_encoder._max_subtoken_len) + + def test_save_and_reload_no_single_quotes(self): + corpus = "the quick brown fox jumps over the lazy dog" + token_counts = collections.Counter(corpus.split(" ")) + + # Deliberately exclude some required encoding chars from the alphabet + # and token list, making some strings unencodable. + encoder = text_encoder.SubwordTextEncoder.build_to_target_size( + 100, token_counts, 2, 10) + + filename = os.path.join(self.test_temp_dir, "out.voc") + encoder.store_to_file(filename, add_single_quotes=False) + new_encoder = text_encoder.SubwordTextEncoder(filename) + + self.assertEqual(encoder._alphabet, new_encoder._alphabet) + self.assertEqual(encoder.all_subtoken_strings, + new_encoder.all_subtoken_strings) + self.assertEqual(encoder._subtoken_string_to_id, + new_encoder._subtoken_string_to_id) + self.assertEqual(encoder._max_subtoken_len, new_encoder._max_subtoken_len) + + def test_build_from_generator(self): + + corpus = "The quick brown fox jumps over the lazy dog" + + def gen(): + for _ in range(3): + yield corpus + + start_symbol = "" + end_symbol = "" + reserved_tokens = text_encoder.RESERVED_TOKENS + [start_symbol, + end_symbol] + encoder = text_encoder.SubwordTextEncoder.build_from_generator( + gen(), 10, reserved_tokens=reserved_tokens) + + # Make sure that reserved tokens appear in the right places. + self.assertEqual(encoder.decode([2]), start_symbol) + self.assertEqual(encoder.decode([3]), end_symbol) + + self.assertEqual("hi%s" % start_symbol, + encoder.decode(encoder.encode("hi") + [2])) + + # Make sure that we haven't messed up the ability to reconstruct. 
+ reconstructed_corpus = encoder.decode(encoder.encode(corpus)) + self.assertEqual(corpus, reconstructed_corpus) + + +class OneHotClassLabelEncoderTest(tf.test.TestCase): + + def test_one_hot_encode(self): + encoder = text_encoder.OneHotClassLabelEncoder( + class_labels=["zero", "one", "two"]) + self.assertEqual(encoder.encode("zero"), [1, 0, 0]) + self.assertEqual(encoder.encode("one"), [0, 1, 0]) + self.assertEqual(encoder.encode("two"), [0, 0, 1]) + + def test_one_hot_decode(self): + encoder = text_encoder.OneHotClassLabelEncoder( + class_labels=["zero", "one", "two"]) + self.assertEqual(encoder.decode([1, 0, 0]), "zero") + self.assertEqual(encoder.decode([0, 1, 0]), "one") + self.assertEqual(encoder.decode([0, 0, 1]), "two") + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/text_problems.py b/tensor2tensor/data_generators/text_problems.py new file mode 100644 index 000000000..8e4693f22 --- /dev/null +++ b/tensor2tensor/data_generators/text_problems.py @@ -0,0 +1,1408 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base classes for text-based Problems. + +* Text2TextProblem: input=text, target=text. +* Text2ClassProblem: input=text, target=class. +* Text2RealProblem: input=text, target=float. +* Text2SelfProblem (for language modeling): target=text +* QuestionAndContext2TextProblem: input=text, context=text, target=text. + +The Text2TextTmpDir problem allows you to train without defining a problem. It +expects you to format your data in a particular way and put it in tmp_dir. See +its docstring. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import re + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.layers import modalities +from tensor2tensor.utils import metrics +from tensor2tensor.utils import mlperf_log +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +class VocabType(object): + """Available text vocabularies.""" + CHARACTER = "character" + SUBWORD = "subwords" + TOKEN = "tokens" + + +class Text2TextProblem(problem.Problem): + """Base class for text-to-text problems. + + Subclasses only must override `generate_samples` and `is_generate_per_split`. + See the "Subclass interface" code block below to see what else subclasses can + override. + """ + + # START: Subclass interface + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 100, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def is_generate_per_split(self): + """A single call to `generate_samples` generates for all `dataset_splits`. 
+ + Set to True if you already have distinct subsets of data for each dataset + split specified in `self.dataset_splits`. `self.generate_samples` will be + called once for each split. + + Set to False if you have a unified dataset that you'd like to have split out + into training and evaluation data automatically. `self.generate_samples` + will be called only once and the data will be sharded across the dataset + splits specified in `self.dataset_splits`. + + Returns: + bool + """ + raise NotImplementedError() + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generate samples of input text and target text pairs. + + Each yielded dict will be made into a single example. The values should be + raw text. The Problem will generate a vocabulary and encode the raw text as + integers as part of the data generation process. + + This method is typically called once per split in `self.dataset_splits` + unless `self.is_generate_per_split=False`. + + Args: + data_dir: final data directory. Typically only used in this method to copy + over user-supplied vocab files (for example, if vocab_type == + VocabType.TOKEN). + tmp_dir: temporary directory that you can use for downloading and scratch. + dataset_split: problem.DatasetSplit, which data split to generate samples + for (for example, training and evaluation). + + Yields: + {"inputs": text, "targets": text} + """ + raise NotImplementedError() + + @property + def vocab_type(self): + """What kind of vocabulary to use. + + `VocabType`s: + * `SUBWORD`: `SubwordTextEncoder`, an invertible wordpiece vocabulary. + Must provide `self.approx_vocab_size`. Generates the vocabulary based on + the training data. To limit the number of samples the vocab generation + looks at, override `self.max_samples_for_vocab`. Recommended and + default. + * `CHARACTER`: `ByteTextEncoder`, encode raw bytes. + * `TOKEN`: `TokenTextEncoder`, vocabulary based on a file. Must provide a + vocabulary file yourself (`TokenTextEncoder.store_to_file`) because one + will not be generated for you. The vocab file should be stored in + `data_dir/` with the name specified by `self.vocab_filename`. + + Returns: + VocabType constant + """ + return VocabType.SUBWORD + + @property + def approx_vocab_size(self): + """Approximate vocab size to generate. Only for VocabType.SUBWORD.""" + return 2**15 # ~32k + + @property + def additional_reserved_tokens(self): + """Additional reserved tokens. Only for VocabType.SUBWORD. + + Returns: + List of str tokens that will get vocab ids 2+ (0 and 1 are reserved for + padding and end-of-string). + """ + return [] + + @property + def oov_token(self): + """Out of vocabulary token. Only for VocabType.TOKEN.""" + return None + + @property + def max_samples_for_vocab(self): + """How many samples from `generate_samples` to look at for vocab generation. + + Only applies if self.vocab_type == VocabType.SUBWORD. + + If None, look at all training samples. + + Returns: + None or int. + """ + return None + + @property + def packed_length(self): + """Pack multiple examples into a single example of constant length. + + This is useful for TPU training to reduce the fraction of padding tokens. + See generator_utils.pack_examples. + + Returns: + None or int + """ + return None + + @property + def packed_spacing(self): + """If this is a packed dataset, how much padding to insert between examples. 
+ + Returns: + int + """ + return 0 + + # END: Subclass interface + + @property + def has_inputs(self): + return True + + def max_length(self, model_hparams): + return (self.packed_length or + super(Text2TextProblem, self).max_length(model_hparams)) + + def feature_encoders(self, data_dir): + encoder = self.get_or_create_vocab(data_dir, None, force_get=True) + encoders = {"targets": encoder} + if self.has_inputs: + encoders["inputs"] = encoder + return encoders + + def generate_text_for_vocab(self, data_dir, tmp_dir): + for i, sample in enumerate( + self.generate_samples(data_dir, tmp_dir, problem.DatasetSplit.TRAIN)): + if self.has_inputs: + yield sample["inputs"] + yield sample["targets"] + if self.max_samples_for_vocab and (i + 1) >= self.max_samples_for_vocab: + break + + @property + def vocab_filename(self): + other_problem = self.use_vocab_from_other_problem + if other_problem: + return other_problem.vocab_filename + if self.vocab_type == VocabType.SUBWORD: + return "vocab.%s.%d.%s" % (self.dataset_filename(), + self.approx_vocab_size, + VocabType.SUBWORD) + else: + return "vocab.%s.%s" % (self.dataset_filename(), VocabType.TOKEN) + + @property + def use_vocab_from_other_problem(self): + """Optional - use the vocabulary from a different problem. + + TODO(noam): problems should override this method instead of overriding + vocab_filename(), so as to generate the correct vocabulary. Fix everywhere. + + Returns: + a Text2TextProblem instance or None + """ + return None + + def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False): + if self.vocab_type == VocabType.CHARACTER: + encoder = text_encoder.ByteTextEncoder() + elif self.vocab_type == VocabType.SUBWORD: + if force_get: + vocab_filepath = os.path.join(data_dir, self.vocab_filename) + encoder = text_encoder.SubwordTextEncoder(vocab_filepath) + else: + other_problem = self.use_vocab_from_other_problem + if other_problem: + return other_problem.get_or_create_vocab(data_dir, tmp_dir, force_get) + encoder = generator_utils.get_or_generate_vocab_inner( + data_dir, self.vocab_filename, self.approx_vocab_size, + self.generate_text_for_vocab(data_dir, tmp_dir), + max_subtoken_length=self.max_subtoken_length, + reserved_tokens=( + text_encoder.RESERVED_TOKENS + self.additional_reserved_tokens)) + elif self.vocab_type == VocabType.TOKEN: + vocab_filename = os.path.join(data_dir, self.vocab_filename) + encoder = text_encoder.TokenTextEncoder(vocab_filename, + replace_oov=self.oov_token) + else: + raise ValueError( + "Unrecognized VocabType: %s" % str(self.vocab_type)) + return encoder + + def _pack_fn(self): + """For packed datasets, returns a function to pack examples. 
+ + Returns: + None or a function from list of TFRecords to list of TFRecords + """ + if not self.packed_length: + return None + def my_fn(records): + """Function from list of TFRecords to list of TFRecords.""" + examples = [] + for record in records: + x = tf.train.Example() + x.ParseFromString(record) + example_dict = {} + if self.has_inputs: + example_dict["inputs"] = [ + int(i) for i in x.features.feature["inputs"].int64_list.value] + example_dict["targets"] = [ + int(i) for i in x.features.feature["targets"].int64_list.value] + examples.append(example_dict) + examples = list(self._maybe_pack_examples(examples)) + return [ + generator_utils.to_example(x).SerializeToString() for x in examples] + return my_fn + + def _maybe_pack_examples(self, generator): + """Wraps generator with packer if self.packed_length.""" + if not self.packed_length: + return generator + return generator_utils.pack_examples( + generator, + self.has_inputs, + self.packed_length, + spacing=self.packed_spacing, + chop_long_sequences=not self.has_inputs) + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + if dataset_split == problem.DatasetSplit.TRAIN: + mlperf_log.transformer_print(key=mlperf_log.PREPROC_TOKENIZE_TRAINING) + elif dataset_split == problem.DatasetSplit.EVAL: + mlperf_log.transformer_print(key=mlperf_log.PREPROC_TOKENIZE_EVAL) + + generator = self.generate_samples(data_dir, tmp_dir, dataset_split) + encoder = self.get_or_create_vocab(data_dir, tmp_dir) + return text2text_generate_encoded(generator, encoder, + has_inputs=self.has_inputs, + inputs_prefix=self.inputs_prefix, + targets_prefix=self.targets_prefix) + + @property + def max_subtoken_length(self): + """Maximum subtoken length when generating vocab. + + SubwordTextEncoder vocabulary building is quadratic-time wrt this variable, + setting it to None uses the length of the longest token in the corpus. 
+ + Returns: + an integer or None + """ + return 200 + + @property + def batch_size_means_tokens(self): + return True + + @property + def already_shuffled(self): + return False + + @property + def inputs_prefix(self): + """String to prepend to inputs before tokenization.""" + return "" + + @property + def targets_prefix(self): + """String to prepend to targets before tokenization.""" + return "" + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + + filepath_fns = { + problem.DatasetSplit.TRAIN: self.training_filepaths, + problem.DatasetSplit.EVAL: self.dev_filepaths, + problem.DatasetSplit.TEST: self.test_filepaths, + } + + split_paths = [(split["split"], filepath_fns[split["split"]]( + data_dir, split["shards"], shuffled=self.already_shuffled)) + for split in self.dataset_splits] + all_paths = [] + for _, paths in split_paths: + all_paths.extend(paths) + + if self.is_generate_per_split: + for split, paths in split_paths: + generator_utils.generate_files( + self.generate_encoded_samples(data_dir, tmp_dir, split), paths) + else: + generator_utils.generate_files( + self.generate_encoded_samples( + data_dir, tmp_dir, problem.DatasetSplit.TRAIN), all_paths) + + generator_utils.shuffle_dataset(all_paths, extra_fn=self._pack_fn()) + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.stop_at_eos = int(True) + + p.modality = {"targets": modalities.ModalityType.SYMBOL} + p.vocab_size = {"targets": self._encoders["targets"].vocab_size} + if self.has_inputs: + p.modality["inputs"] = modalities.ModalityType.SYMBOL + p.vocab_size["inputs"] = self._encoders["inputs"].vocab_size + if self.vocab_type == VocabType.CHARACTER: + p.loss_multiplier = 2.0 + + if self.packed_length: + if self.has_inputs: + p.modality["inputs_segmentation"] = modalities.ModalityType.IDENTITY + p.modality["inputs_position"] = modalities.ModalityType.IDENTITY + p.vocab_size["inputs_segmentation"] = None + p.vocab_size["inputs_position"] = None + p.modality["targets_segmentation"] = modalities.ModalityType.IDENTITY + p.modality["targets_position"] = modalities.ModalityType.IDENTITY + p.vocab_size["targets_segmentation"] = None + p.vocab_size["targets_position"] = None + + def example_reading_spec(self): + data_fields = {"targets": tf.VarLenFeature(tf.int64)} + if self.has_inputs: + data_fields["inputs"] = tf.VarLenFeature(tf.int64) + + if self.packed_length: + if self.has_inputs: + data_fields["inputs_segmentation"] = tf.VarLenFeature(tf.int64) + data_fields["inputs_position"] = tf.VarLenFeature(tf.int64) + data_fields["targets_segmentation"] = tf.VarLenFeature(tf.int64) + data_fields["targets_position"] = tf.VarLenFeature(tf.int64) + + data_items_to_decoders = None + return (data_fields, data_items_to_decoders) + + def eval_metrics(self): + return [ + metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5, + metrics.Metrics.ACC_PER_SEQ, metrics.Metrics.NEG_LOG_PERPLEXITY, + metrics.Metrics.APPROX_BLEU, metrics.Metrics.ROUGE_2_F, + metrics.Metrics.ROUGE_L_F + ] + + +class QuestionAndContext2TextProblem(Text2TextProblem): + """Problems consisting of inputs, context, and a target. + + Variant of Text2TextProblem that includes a "context" feature in addition to + "inputs" and "targets." 
+ """ + QUESTION_SEPARATOR = "" + QUESTION_SEPARATOR_ID = 2 + + @property + def additional_reserved_tokens(self): + return [self.QUESTION_SEPARATOR] + + def feature_encoders(self, data_dir): + encoders = (super(QuestionAndContext2TextProblem, self) + .feature_encoders(data_dir)) + encoders["context"] = encoders["inputs"] + return encoders + + def generate_text_for_vocab(self, data_dir, tmp_dir): + for i, sample in enumerate( + self.generate_samples(data_dir, tmp_dir, problem.DatasetSplit.TRAIN)): + yield sample["inputs"] + yield sample["context"] + yield sample["targets"] + if self.max_samples_for_vocab and (i + 1) >= self.max_samples_for_vocab: + break + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + generator = super( + QuestionAndContext2TextProblem, self).generate_encoded_samples( + data_dir, tmp_dir, dataset_split) + vocab = self.feature_encoders(data_dir)["context"] + for sample in generator: + context = vocab.encode(sample["context"]) + context.append(text_encoder.EOS_ID) + sample["context"] = context + yield sample + + def hparams(self, defaults, unused_model_hparams): + (super(QuestionAndContext2TextProblem, self) + .hparams(defaults, unused_model_hparams)) + p = defaults + p.modality["context"] = modalities.ModalityType.SYMBOL + p.vocab_size["context"] = self._encoders["context"].vocab_size + if self.packed_length: + raise NotImplementedError("QuestionAndContext2Text does not " + "support packed_length") + + def example_reading_spec(self): + data_fields, data_items_to_decoders = (super(QuestionAndContext2TextProblem, + self) + .example_reading_spec()) + data_fields["context"] = tf.VarLenFeature(tf.int64) + return (data_fields, data_items_to_decoders) + + +class Text2SelfProblem(Text2TextProblem): + """Language modeling problems base class. + + See Text2TextProblem for subclass interface. + """ + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generate samples of text. + + Args: + data_dir: final data directory. Typically only used in this method to copy + over user-supplied vocab files (for example, if vocab_type == + VocabType.TOKEN). + tmp_dir: temporary directory that you can use for downloading and scratch. + dataset_split: problem.DatasetSplit, which data split to generate samples + for (for example, training and evaluation). + + Yields: + Sample: dict: for language modeling problems + (i.e. Text2SelfProblems), this generator should yield dicts with only + the "targets" key. + """ + raise NotImplementedError() + + @property + def has_inputs(self): + return False + + +class Text2ClassProblem(Text2TextProblem): + """Base class for text classification problems.""" + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generate samples of text and label pairs. + + Each yielded dict will be a single example. The inputs should be raw text. + The label should be an int in [0, self.num_classes). + + Args: + data_dir: final data directory. Typically only used in this method to copy + over user-supplied vocab files (for example, if vocab_type == + VocabType.TOKEN). + tmp_dir: temporary directory that you can use for downloading and scratch. + dataset_split: problem.DatasetSplit, which data split to generate samples + for (for example, training and evaluation). 
+ + Yields: + {"inputs": text, "label": int} + """ + raise NotImplementedError() + + # START: Additional subclass interface + @property + def num_classes(self): + """The number of classes.""" + raise NotImplementedError() + + def class_labels(self, data_dir): + """String representation of the classes.""" + del data_dir + return ["ID_%d" % i for i in range(self.num_classes)] + + # END: Additional subclass interface + + def generate_text_for_vocab(self, data_dir, tmp_dir): + for i, sample in enumerate( + self.generate_samples(data_dir, tmp_dir, problem.DatasetSplit.TRAIN)): + yield sample["inputs"] + if self.max_samples_for_vocab and (i + 1) >= self.max_samples_for_vocab: + break + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + generator = self.generate_samples(data_dir, tmp_dir, dataset_split) + encoder = self.get_or_create_vocab(data_dir, tmp_dir) + for sample in generator: + inputs = encoder.encode(sample["inputs"]) + inputs.append(text_encoder.EOS_ID) + label = sample["label"] + yield {"inputs": inputs, "targets": [label]} + + def feature_encoders(self, data_dir): + encoder = self.get_or_create_vocab(data_dir, None, force_get=True) + + return { + "inputs": encoder, + "targets": text_encoder.ClassLabelEncoder(self.class_labels(data_dir)) + } + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.CLASS_LABEL} + p.vocab_size = {"inputs": self._encoders["inputs"].vocab_size, + "targets": self.num_classes} + + def example_reading_spec(self): + data_fields = { + "inputs": tf.VarLenFeature(tf.int64), + "targets": tf.FixedLenFeature([1], tf.int64), + } + data_items_to_decoders = None + return (data_fields, data_items_to_decoders) + + +class TextConcat2ClassProblem(Text2ClassProblem): + """Base class for text classification problems with multiple inputs. + + For problems where there are multiple input sentences and we wish to concat + these inputs with a special delimiter. See, for example, NLI tasks. + """ + CONCAT_TOKEN = "$" + + def generate_text_for_vocab(self, data_dir, tmp_dir): + for i, sample in enumerate( + self.generate_samples(data_dir, tmp_dir, problem.DatasetSplit.TRAIN)): + for inp in sample["inputs"]: + yield inp + if self.max_samples_for_vocab and (i + 1) >= self.max_samples_for_vocab: + break + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + generator = self.generate_samples(data_dir, tmp_dir, dataset_split) + encoder = self.get_or_create_vocab(data_dir, tmp_dir) + for sample in generator: + inputs = [] + for idx, inp in enumerate(sample["inputs"]): + inputs += encoder.encode(inp) + if idx < len(sample["inputs"]) - 1: + inputs.append(encoder.encode(self.CONCAT_TOKEN)[0]) + inputs.append(text_encoder.EOS_ID) + label = sample["label"] + yield {"inputs": inputs, "targets": [label]} + + +class Text2RealProblem(Text2TextProblem): + """Base class for text regression problems with one or more tasks. + + Suitable for text-based problems where targets are continuous, real values. + When ntasks = 1, each text example is mapped to a single scalar value. When + ntasks > 1, each text example is mapped to a 1-d vector of length ntasks. + """ + + @property + def ntasks(self): + """Set to n > 1 for multitask regression.""" + return 1 + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generate samples of text and real-valued target pairs. + + Each yielded dict will be a single example. The inputs should be raw text. 
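+    For example (hypothetical values), with ntasks = 2 a yielded dict might
+    look like {"inputs": "some raw text", "targets": [0.2, 3.5]}.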
+ The target should be a list containing ntasks floats. + Args: + data_dir: final data directory. Typically only used in this method to copy + over user-supplied vocab files (for example, if vocab_type == + VocabType.TOKEN). + tmp_dir: temporary directory that you can use for downloading and scratch. + dataset_split: problem.DatasetSplit, which data split to generate samples + for (for example, training and evaluation). + Yields: + {"inputs": text, "targets": [x1, x2, ..., xN]} where N is ntasks + """ + raise NotImplementedError() + + def generate_text_for_vocab(self, data_dir, tmp_dir): + for i, sample in enumerate( + self.generate_samples(data_dir, tmp_dir, problem.DatasetSplit.TRAIN)): + yield sample["inputs"] + if self.max_samples_for_vocab and (i + 1) >= self.max_samples_for_vocab: + break + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + generator = self.generate_samples(data_dir, tmp_dir, dataset_split) + encoder = self.get_or_create_vocab(data_dir, tmp_dir) + for sample in generator: + inputs = encoder.encode(sample["inputs"]) + inputs.append(text_encoder.EOS_ID) + yield {"inputs": inputs, "targets": sample["targets"]} + + def feature_encoders(self, data_dir): + encoder = self.get_or_create_vocab(data_dir, None, force_get=True) + + return { + "inputs": encoder, + "targets": text_encoder.RealEncoder(), + } + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = { + "inputs": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.REAL_L2_LOSS, + } + p.vocab_size = { + "inputs": self._encoders["inputs"].vocab_size, + "targets": self.ntasks + } + p.target_space_id = problem.SpaceID.REAL + p.add_hparam("regression_targets", True) + + def max_length(self, model_hparams): + return model_hparams.batch_size * self.ntasks + + def preprocess_example(self, example, unused_mode, unused_hparams): + example = problem.preprocess_example_common(example, unused_mode, + unused_hparams) + example["targets"] = tf.reshape(example["targets"], [1, 1, self.ntasks]) + return example + + def example_reading_spec(self): + data_fields = { + "inputs": tf.VarLenFeature(tf.int64), + "targets": tf.FixedLenFeature([self.ntasks], tf.float32), + } + data_items_to_decoders = None + return (data_fields, data_items_to_decoders) + + def eval_metrics(self): + metrics_list = [metrics.Metrics.RMSE] + if self.ntasks == 1: + metrics_list.append(metrics.Metrics.PEARSON) + return metrics_list + + +def txt_line_iterator(txt_path): + """Iterate through lines of file.""" + with tf.gfile.Open(txt_path) as f: + for line in f: + yield line.strip() + + +def txt_and_label_iterator(txt_path): + """Iterate through lines of file.""" + problem_pattern_without_vocab_size = re.compile("(.*)\tExtra_Label: (.*)") + with tf.gfile.Open(txt_path) as f: + for line in f: + results = problem_pattern_without_vocab_size.search(line.strip()) + try: + line = results.group(1) + extra_label = int(results.group(2)) + except AttributeError: + raise ValueError( + "Please provide the file in the right format, with each line having" + " the following format:\n\\t" + "Extra_Label:\\s" + ) + yield [line, extra_label] + + +def text2text_txt_iterator(source_txt_path, target_txt_path): + """Yield dicts for Text2TextProblem.generate_samples from lines of files.""" + for inputs, targets in zip( + txt_line_iterator(source_txt_path), txt_line_iterator(target_txt_path)): + yield {"inputs": inputs, "targets": targets} + + +def text2text_txt_iterator_with_label(source_txt_path, target_txt_path): + 
"""Yield dicts for Text2TextProblem.generate_samples from lines of files.""" + for inputs, (targets, extra_label) in zip( + txt_line_iterator(source_txt_path), + txt_and_label_iterator(target_txt_path)): + yield {"inputs": inputs, "targets": targets, "extra_label": [extra_label]} + + +def text2text_txt_iterator_with_index(source_txt_path, target_txt_path): + """Yield dicts for Text2TextProblem.generate_samples from lines of files.""" + for (idx, (inputs, targets)) in enumerate(zip( + txt_line_iterator(source_txt_path), + txt_line_iterator(target_txt_path))): + yield {"inputs": inputs, "targets": targets, "idx": [idx]} + + +def text2text_distill_iterator(source_txt_path, target_txt_path, + distill_txt_path): + """Yield dicts for Text2TextProblem.generate_samples from lines of files.""" + for inputs, targets, dist_targets in zip( + txt_line_iterator(source_txt_path), txt_line_iterator(target_txt_path), + txt_line_iterator(distill_txt_path)): + yield {"inputs": inputs, "targets": targets, "dist_targets": dist_targets} + + +def text2self_txt_iterator(txt_path): + for line in txt_line_iterator(txt_path): + yield {"targets": line} + + +def text2class_txt_iterator(source_txt_path, label_txt_path, class_strs=None): + """Yield dicts for Text2ClassProblem.generate_samples from lines of files. + + Args: + source_txt_path: txt file with record per line. + label_txt_path: txt file with label per line, either as int or str. If + string, must provide class_strs. + class_strs: list of class label names. Must be in correct order (i.e. + ["a", "b", "c"] means that "a" will get class ID 0, "b" ID 1, etc.). + + Yields: + {"inputs": inputs, "label": label} + """ + if class_strs: + class_strs = dict([(s, i) for i, s in enumerate(class_strs)]) + for inputs, label in zip( + txt_line_iterator(source_txt_path), txt_line_iterator(label_txt_path)): + label = label.strip() + if class_strs: + label = class_strs[label] + else: + label = int(label) + yield {"inputs": inputs, "label": label} + + +def text2real_txt_iterator(source_txt_path, target_txt_path): + """Yield dicts for Text2RealProblem.generate_samples from lines of files. + + Args: + source_txt_path: txt file with record per line. + target_txt_path: txt file with float (or space-separated float list for + multitask) per line. + Yields: + {"inputs": inputs, "targets": targets} + """ + for inputs, targets in zip( + txt_line_iterator(source_txt_path), txt_line_iterator(target_txt_path)): + targets = [float(x) for x in targets.split(" ")] + yield {"inputs": inputs, "targets": targets} + + +def txt_line_sharded_iterator(txt_pattern): + """Iterate through lines of sharded file.""" + all_files = tf.gfile.Glob(txt_pattern) + for txt_path in all_files: + with tf.gfile.Open(txt_path) as f: + for line in f: + yield line.strip() + + +def text2text_txt_sharded_iterator(source_txt_pattern, target_txt_pattern): + """Yield dicts for Text2TextProblem.generate_samples from lines of files. + + Args: + source_txt_pattern: path to the sharded source file + target_txt_pattern: path to the sharded target file + + Yields: + {"inputs": inputs, "targets": targets} + + """ + for inputs, targets in zip( + txt_line_sharded_iterator(source_txt_pattern), + txt_line_sharded_iterator(target_txt_pattern)): + yield {"inputs": inputs, "targets": targets} + + +def text2text_txt_tab_iterator(txt_path): + """Yield dicts for Text2TextProblem.generate_samples from lines of txt_path. + + Args: + txt_path: path to txt file with a record per line, source and target + are tab-separated. 
+ + Yields: + {"inputs": inputs, "targets": targets} + """ + if txt_path.endswith(".tsv*"): + data_iterator = txt_line_sharded_iterator(txt_path) + else: + data_iterator = txt_line_iterator(txt_path) + for line in data_iterator: + if line and "\t" in line: + parts = line.split("\t", 1) + inputs, targets = parts[:2] + yield {"inputs": inputs.strip(), "targets": targets.strip()} + + +def text2text_generate_encoded(sample_generator, + vocab, + targets_vocab=None, + has_inputs=True, + inputs_prefix="", + targets_prefix=""): + """Encode Text2Text samples from the generator with the vocab.""" + targets_vocab = targets_vocab or vocab + for sample in sample_generator: + if has_inputs: + sample["inputs"] = vocab.encode(inputs_prefix + sample["inputs"]) + sample["inputs"].append(text_encoder.EOS_ID) + sample["targets"] = targets_vocab.encode(targets_prefix + sample["targets"]) + sample["targets"].append(text_encoder.EOS_ID) + yield sample + + +@registry.register_problem +class Text2textTmpdir(Text2TextProblem): + """Allows training a Text2TextProblem without defining a subclass. + + Put your training and evaluation data into the following files in tmp_dir, + with 1 record per line: + + * inputs.train.txt + * targets.train.txt + * inputs.eval.txt + * targets.eval.txt + """ + TRAIN_FILES = ("inputs.train.txt", "targets.train.txt") + EVAL_FILES = ("inputs.eval.txt", "targets.eval.txt") + + @property + def is_generate_per_split(self): + return True + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + del data_dir + is_training = dataset_split == problem.DatasetSplit.TRAIN + files = self.TRAIN_FILES if is_training else self.EVAL_FILES + files = [os.path.join(self._tmp_dir_override or tmp_dir, f) for f in files] + inputs_file, targets_file = files + return text2text_txt_iterator(inputs_file, targets_file) + + @property + def _tmp_dir_override(self): + return None + + +class Text2TextRemotedir(Text2textTmpdir): + """Text2TextProblem from files in a remote directory. + + SRC_REMOTE_DIR should be a remote directory, e.g. a GCS bucket (gs://...), + that contains the following files, 1 record per line: + + * inputs.train.txt + * targets.train.txt + * inputs.eval.txt + * targets.eval.txt + + """ + # Override in subclass. + SRC_REMOTE_DIR = None + + @property + def _tmp_dir_override(self): + assert self.SRC_REMOTE_DIR + return self.SRC_REMOTE_DIR + + +@registry.register_problem +class Text2textTmpdirTokens(Text2textTmpdir): + """Allows training a token-based variant of Text2textTmpdir. 
+ + Put your training and evaluation data into the following files in tmp_dir, + with 1 record per line along with a vocabulary file with 1 token per line + (you can leave out PAD, EOS, and UNK as those will be automatically added) + + * inputs.train.txt + * targets.train.txt + * inputs.eval.txt + * targets.eval.txt + * vocab.txt + """ + + @property + def vocab_type(self): + return VocabType.TOKEN + + @property + def oov_token(self): + return "" + + def _generate_vocab(self, tmp_dir): + vocab_list = [self.oov_token] + user_vocab_file = os.path.join(tmp_dir, "vocab.txt") + with tf.gfile.GFile(user_vocab_file, "r") as vocab_file: + for line in vocab_file: + token = line.strip() + vocab_list.append(token) + token_encoder = text_encoder.TokenTextEncoder(None, vocab_list=vocab_list) + return token_encoder + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + vocab_filepath = os.path.join(data_dir, self.vocab_filename) + if not tf.gfile.Exists(vocab_filepath): + token_encoder = self._generate_vocab(tmp_dir) + token_encoder.store_to_file(vocab_filepath) + return super(Text2textTmpdirTokens, self).generate_samples(data_dir, + tmp_dir, + dataset_split) + + +class ChoppedTextProblem(Text2SelfProblem): + """Tokenize and chop text files into fixed-length language-modeling examples. + + The input data is a set of text files, as specified by + self.train_text_filepaths() and self.dev_text_filepaths(). + + The text is tokenized using a SubwordTextEncoder, and + then split into examples, each of length self.sequence_length(). + """ + + def train_text_filepaths(self, tmp_dir): + """Local filepaths of text files containing training data. + + This function may want to download the files if they do not exist. + + Args: + tmp_dir: a string + Returns: + a list of strings. + """ + raise NotImplementedError() + + def dev_text_filepaths(self, tmp_dir): + """Local filepaths of text files containing dev data. + + This function may want to download the files if they do not exist. + + Args: + tmp_dir: a string + Returns: + a list of strings. + """ + raise NotImplementedError() + + @property + def sequence_length(self): + """Length of each example (in tokens).""" + raise NotImplementedError() + + def max_length(self, model_hparams): + return model_hparams.split_to_length or self.sequence_length + + def text_filepaths_for_task(self, tmp_dir, task_id): + """List of input filepaths for a particular training or dev shard. + + Args: + tmp_dir: a string + task_id: an integer less than self.num_shards + Returns: + a list of tuples (filepath, start_pos, num_bytes) + """ + assert task_id >= 0 + assert task_id < self.num_train_shards + self.num_dev_shards + if task_id < self.num_train_shards: + return [ + f for i, f in enumerate(self.train_text_filepaths(tmp_dir)) + if i % self.num_train_shards == task_id + ] + else: + return [ + f for i, f in enumerate(self.dev_text_filepaths(tmp_dir)) + if i % self.num_dev_shards == task_id - self.num_train_shards + ] + + def filepath_to_unicode_strings(self, filepath): + """Read text out of an input file. + + The default just reads the text, converts to unicode and yields one + unicode string. + + Subclasses can override this function in order to preprocess, and can + yield any number of strings. + + Args: + filepath: a string + Yields: + unicode strings. 
+ """ + f = tf.gfile.Open(filepath) + b = f.read() + yield text_encoder.to_unicode_ignore_errors(b) + + def file_generator(self, + filepaths, + max_chars_per_file=None, + max_chars_total=None): + """Read complete text of input files and yield unicode strings. + + By default, one unicode string is produced per file, but this is + not guaranteed, since subclasses can override + filepath_to_unicode_strings(). + + max_chars_per_file and max_chars_total can also be specified, in which + case some strings may be truncated or dropped to limit the total + amount of output. + + Args: + filepaths: a list of strings + max_chars_per_file: an optional integer + max_chars_total: an optional integer + Yields: + unicode strings + """ + chars_total = 0 + for fname in filepaths: + chars_this_file = 0 + tf.logging.info("reading file %s" % fname) + for text in self.filepath_to_unicode_strings(fname): + if (max_chars_per_file and + chars_this_file + len(text) > max_chars_per_file): + text = text[:max_chars_per_file - chars_this_file] + if max_chars_total and chars_total + len(text) > max_chars_total: + text = text[:max_chars_total - chars_total] + chars_total += len(text) + chars_this_file += len(text) + if text: + yield text + if max_chars_total and chars_total >= max_chars_total: + return + if max_chars_per_file and chars_this_file >= max_chars_per_file: + break + + def example_generator(self, encoder, tmp_dir, task_id): + """Generator for examples. + + Args: + encoder: a TextEncoder + tmp_dir: a string + task_id: an integer + Yields: + feature dictionaries + """ + filepaths = self.text_filepaths_for_task(tmp_dir, task_id) + if task_id >= self.num_train_shards: + # this is dev data - limit the total length. + max_chars_per_file = self.max_dev_chars // ( + self.num_dev_shards * len(filepaths)) + else: + max_chars_per_file = None + tokens = [] + for ftext in self.file_generator( + filepaths, max_chars_per_file=max_chars_per_file): + tokens.extend(encoder.encode(ftext)) + pos = 0 + while pos + self.sequence_length <= len(tokens): + yield {"targets": tokens[pos:pos + self.sequence_length]} + pos += self.sequence_length + if pos > 0: + tokens = tokens[pos:] + if self.remainder_policy == "pad": + if tokens: + targets = tokens + [0] * (self.sequence_length - len(tokens)) + yield {"targets": targets} + else: + assert self.remainder_policy == "drop" + + @property + def remainder_policy(self): + """What to do with leftover tokens. + + Returns: + a string - either "pad" or "drop". + """ + return "pad" + + def prepare_to_generate(self, data_dir, tmp_dir): + """Make sure that the data is prepared and the vocab is generated.""" + self.get_or_create_vocab(data_dir, tmp_dir) + self.train_text_filepaths(tmp_dir) + self.dev_text_filepaths(tmp_dir) + + def generate_text_for_vocab(self, data_dir, tmp_dir): + return self.file_generator( + self.train_text_filepaths(tmp_dir), + max_chars_total=self.max_chars_for_vocab) + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + """Generates training/dev data. + + Args: + data_dir: a string + tmp_dir: a string + task_id: an optional integer + Returns: + shard or shards for which data was generated. 
+ """ + tf.logging.info("generate_data task_id=%s" % task_id) + encoder = self.get_or_create_vocab(data_dir, tmp_dir) + assert task_id >= 0 and task_id < self.num_generate_tasks + if task_id < self.num_train_shards: + out_file = self.training_filepaths( + data_dir, self.num_train_shards, shuffled=False)[task_id] + else: + out_file = self.dev_filepaths( + data_dir, self.num_dev_shards, + shuffled=False)[task_id - self.num_train_shards] + generator_utils.generate_files( + self.example_generator(encoder, tmp_dir, task_id), [out_file]) + generator_utils.shuffle_dataset([out_file]) + + @property + def max_chars_for_vocab(self): + """Number of characters of training data to use for generating vocab.""" + return 10**7 + + @property + def num_train_shards(self): + return self.dataset_splits[0]["shards"] + + @property + def num_dev_shards(self): + return self.dataset_splits[1]["shards"] + + @property + def max_dev_chars(self): + """Limit dev set to at most this many characters (default 10M).""" + return 10**7 + + @property + def multiprocess_generate(self): + return True + + @property + def num_generate_tasks(self): + return self.num_train_shards + self.num_dev_shards + + def eval_metrics(self): + return [metrics.Metrics.ACC, metrics.Metrics.NEG_LOG_PERPLEXITY] + + +class DistributedText2TextProblem(Text2TextProblem): + """Base class for text-to-text problems for large-datasets. + + Text2TextProblem doesn't support data generation in a distributed manner. + + Use DistributedText2TextProblem if you have a sharded dataset(s) and want to + create tf.Examples from them in a distributed manner. + + Every task will write to one output shard and will read from specific input + shards. + + Subclasses should override `generate_samples`, `input_dataset_files` + and `is_generate_per_split` as described below. + + Users need to generate the vocabulary before generating data. + See tensor2tensor/bin/build_vocab.py. + """ + + # START: Subclass interface + + def generate_samples(self, data_dir, tmp_dir, dataset_split, input_files): + """Generate samples of input text and target text pairs. + + Subclasses should generate the samples using only files from `input_files`. + + Please see Text2TextProblem.generate_samples for a fuller explanation. + + Args: + data_dir: final data directory. + tmp_dir: temporary directory that you can use for downloading and scratch. + dataset_split: problem.DatasetSplit, which data split to generate samples + for (for example, training and evaluation). + input_files: Generate samples using only these input dataset files. + + Yields: + {"inputs": text, "targets": text} + """ + raise NotImplementedError() + + def input_files(self, dataset_split=problem.DatasetSplit.TRAIN): + """The input files of the input dataset. + + If you don't have a separate dev/test split then returning [] + suffices for dataset_split != problem.DatasetSplit.TRAIN + + Args: + dataset_split: The split for which to return the input files for. + + Returns: + list of strings: The files for the supplied datasplit + """ + + raise NotImplementedError() + + # END: Subclass interface + + @property + def num_output_shards(self): + # Returns the total number of output shards. + num_output_shards = 0 + for split in self.dataset_splits: + num_output_shards += split["shards"] + return num_output_shards + + @property + def split_to_input_filenames(self): + # Dictionary of dataset split to input dataset filenames. 
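+    # For example (hypothetical filenames), with is_generate_per_split=True
+    # and two splits this could look like:
+    #   {problem.DatasetSplit.TRAIN: ["train-00000", "train-00001", ...],
+    #    problem.DatasetSplit.EVAL: ["dev-00000", ...]}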
+ split_to_input_filenames = {} + num_input_files = 0 + if not self.is_generate_per_split: + # We just have a single input dataset file. + split_to_input_filenames[problem.DatasetSplit.TRAIN] = ( + self.input_files(problem.DatasetSplit.TRAIN)) + num_input_files += len( + split_to_input_filenames[problem.DatasetSplit.TRAIN]) + else: + # We have separate input dataset files. + for dataset_split in self.dataset_splits: + split = dataset_split["split"] + split_to_input_filenames[split] = self.input_files(split) + num_input_files += len(split_to_input_filenames[split]) + + # Number of input files >= number of output files. So that every task should + # have some work to do! + assert num_input_files >= self.num_output_shards + + return split_to_input_filenames + + def _task_id_to_output_split(self, task_id): + # Takes a task_id and returns a tuple of + # (split of the dataset to operate on, number of shards in that split, + # offset of this task from the first task to operate on that split) + num_output_shards = 0 + for dataset_split in self.dataset_splits: + num_output_shards += dataset_split["shards"] + if task_id < num_output_shards: + return (dataset_split["split"], dataset_split["shards"], + (task_id - num_output_shards + dataset_split["shards"])) + + def _task_id_to_output_file(self, data_dir, task_id): + # Returns the output filename that this task will write. + + dataset_split, shards, offset = self._task_id_to_output_split(task_id) + + filepath_fns = { + problem.DatasetSplit.TRAIN: self.training_filepaths, + problem.DatasetSplit.EVAL: self.dev_filepaths, + problem.DatasetSplit.TEST: self.test_filepaths, + } + + return filepath_fns[dataset_split](data_dir, shards, False)[offset] + + @staticmethod + def _divide_equally(input_files, num_tasks, task_id): + # There are num_tasks total tasks, we need to divide these + # input files among them equally and return the slice that task_id should + # read from. + task_load, remainder = divmod(len(input_files), num_tasks) + + # This is the slice of almost equal sized chunks of files for a task_id to + # handle -- this distributes the excess remainder tasks among the first + # "remainder" task_ids. + + # The extra min(task_id, remainder) in the end comes from assigning the + # remainder of the tasks to task_ids [0, remainder), so we need to advance + # the start by how many ever remainder tasks already assigned. + start_idx = task_id * task_load + min(task_id, remainder) + + # This will handle atleast `task_load` files, plus an extra one if `task_id` + # is still less than remainder. + num_elements = task_load + int(task_id < remainder) + + return input_files[start_idx : start_idx + num_elements] + + def _task_id_to_input_files(self, task_id): + # Returns a list of input files that this task should read and process. + + if not self.is_generate_per_split: + # We just have one unified input dataset to handle, so all tasks will read + # from the TRAIN dataset. + input_files = self.split_to_input_filenames[problem.DatasetSplit.TRAIN] + + return self._divide_equally(input_files, self.num_output_shards, task_id) + + # self.is_generate_per_split is True. + dataset_split, num_shards, offset = self._task_id_to_output_split(task_id) + input_files = self.split_to_input_filenames[dataset_split] + return self._divide_equally(input_files, num_shards, offset) + + def generate_text_for_vocab(self, data_dir, tmp_dir): + # We need to override this because we'll be reading from specific files + # instead + + # What files should we read for creating the vocabulary? 
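+    # For example (hypothetical split config), with dataset_splits of
+    # 2 train, 3 eval and 4 test shards, task_ids 0 and 1 map to TRAIN, so
+    # only the input files assigned to those two tasks feed the vocabulary.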
+ input_files_for_vocab = [] + if self.is_generate_per_split: + input_files_for_vocab = ( + self.split_to_input_filenames[problem.DatasetSplit.TRAIN]) + else: + # We need to compute the 'train' shards from the whole input. + # Go over all task_ids that output training data, collect their input + # files. + for task_id in range(self.num_output_shards): + split, _, _ = self._task_id_to_output_split(task_id) + if split == problem.DatasetSplit.TRAIN: + input_files_for_vocab.extend(self._task_id_to_input_files(task_id)) + + # Generate samples only from the above generated files. + for i, sample in enumerate( + self.generate_samples(data_dir, tmp_dir, problem.DatasetSplit.TRAIN, + input_files_for_vocab)): + if self.has_inputs: + yield sample["inputs"] + yield sample["targets"] + if self.max_samples_for_vocab and (i + 1) >= self.max_samples_for_vocab: + break + + def generate_encoded_samples(self, + data_dir, + tmp_dir, + dataset_split, + input_files): + # Since this is a distributed problem, we don't want every task to create + # its own vocabulary, so we assume that the dictionary is already created + # for example by using build_vocab.py + vocab_filepath = os.path.join(data_dir, self.vocab_filename) + if not tf.gfile.Exists(vocab_filepath): + raise ValueError("Vocab file: %s doesn't exist, please use " + "build_vocab.py to create one." % vocab_filepath) + encoder = self.get_or_create_vocab(data_dir, tmp_dir, force_get=True) + generator = self.generate_samples(data_dir, tmp_dir, dataset_split, + input_files) + return text2text_generate_encoded( + generator, encoder, has_inputs=self.has_inputs, + inputs_prefix=self.inputs_prefix, + targets_prefix=self.targets_prefix) + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + # task_id should be in [0, self.num_output_shards) + assert (0 <= task_id) and (task_id < self.num_output_shards) + + # A task_id is only supposed to write only one output shard, it can operate + # over multiple *input* shards. + input_files = self._task_id_to_input_files(task_id) + output_file = self._task_id_to_output_file(data_dir, task_id) + + # Which output split is this task writing to? + split, _, _ = self._task_id_to_output_split(task_id) + + # Actually generate examples. + generator_utils.generate_files( + self.generate_encoded_samples( + data_dir, tmp_dir, split, input_files), + [output_file]) + + # Shuffle the output. + generator_utils.shuffle_dataset([output_file], extra_fn=self._pack_fn()) diff --git a/tensor2tensor/data_generators/text_problems_test.py b/tensor2tensor/data_generators/text_problems_test.py new file mode 100644 index 000000000..331d65e6f --- /dev/null +++ b/tensor2tensor/data_generators/text_problems_test.py @@ -0,0 +1,411 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
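Before the tests, a rough sketch of how the Text2TextProblem subclass interface defined above is typically used; the problem class name, file names, and vocabulary size here are hypothetical and not part of this change:

```python
import os

from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry


@registry.register_problem
class MyTranslateToy(text_problems.Text2TextProblem):
  """Hypothetical problem: translate src.txt lines into tgt.txt lines."""

  @property
  def approx_vocab_size(self):
    return 2**13  # ~8k subwords; pick to suit the corpus.

  @property
  def is_generate_per_split(self):
    # Only TRAIN samples are generated; generate_data shards them into
    # train/dev according to dataset_splits.
    return False

  def generate_samples(self, data_dir, tmp_dir, dataset_split):
    del data_dir, dataset_split
    # Hypothetical parallel text files, one record per line, placed in tmp_dir.
    return text_problems.text2text_txt_iterator(
        os.path.join(tmp_dir, "src.txt"), os.path.join(tmp_dir, "tgt.txt"))
```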
+ +"""Text problems test.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import shutil +from tensor2tensor.data_generators import problem as problem_lib +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class Test1(text_problems.Text2textTmpdir): + + @property + def name(self): + # name is normally provided by register_problem, but this problem is not + # registered, so we provide one here to avoid inheriting the parent class's + # name. + return "test1" + + @property + def approx_vocab_size(self): + return 3 + + @property + def dataset_splits(self): + return [{ + "split": problem_lib.DatasetSplit.TRAIN, + "shards": 1, + }, { + "split": problem_lib.DatasetSplit.EVAL, + "shards": 1, + }] + + +class TextProblems(tf.test.TestCase): + + @classmethod + def setUpClass(cls): + cls.tmp_dir = tf.test.get_temp_dir() + shutil.rmtree(cls.tmp_dir) + os.mkdir(cls.tmp_dir) + + cls.inputs = [ + "Hello world", + "Goodbye world", + ] + cls.targets = [ + "Hola mundo", + "Adios mundo", + ] + cls.labels = [2, 3] + cls.labels_strs = ["c", "d"] + + cls.inputs_file = os.path.join(cls.tmp_dir, "inputs.train.txt") + cls.targets_file = os.path.join(cls.tmp_dir, "targets.train.txt") + cls.labels_file = os.path.join(cls.tmp_dir, "labels.train.txt") + cls.labels_str_file = os.path.join(cls.tmp_dir, "labels_str.train.txt") + data = [(cls.inputs, cls.inputs_file), (cls.targets, cls.targets_file), + (cls.labels, cls.labels_file), (cls.labels_strs, + cls.labels_str_file)] + + for lines, filename in data: + with tf.gfile.Open(filename, "w") as f: + for line in lines: + f.write(str(line)) + f.write("\n") + + cls.tabbed_file = os.path.join(cls.tmp_dir, "tabbed.train.txt") + with tf.gfile.Open(cls.tabbed_file, "w") as f: + for inputs, targets in zip(cls.inputs, cls.targets): + f.write("%s\t%s\n" % (inputs, targets)) + + tf.gfile.Copy(cls.inputs_file, os.path.join(cls.tmp_dir, "inputs.eval.txt")) + tf.gfile.Copy(cls.targets_file, os.path.join(cls.tmp_dir, + "targets.eval.txt")) + + cls.targets_regr = [[1.23, 2.34], [4.56, 5.67]] + cls.targets_regr_file = os.path.join(cls.tmp_dir, "targets_regr.train.txt") + with tf.gfile.Open(cls.targets_regr_file, "w") as f: + for targets in cls.targets_regr: + f.write(" ".join([str(x) for x in targets]) + "\n") + + def testTxtLineIterator(self): + lines = [line for line in text_problems.txt_line_iterator(self.inputs_file)] + self.assertEqual(lines, self.inputs) + + def testText2TextTxtIterator(self): + inputs = [] + targets = [] + for entry in text_problems.text2text_txt_iterator(self.inputs_file, + self.targets_file): + inputs.append(entry["inputs"]) + targets.append(entry["targets"]) + self.assertEqual(inputs, self.inputs) + self.assertEqual(targets, self.targets) + + def testText2SelfTxtIterator(self): + targets = [ + entry["targets"] + for entry in text_problems.text2self_txt_iterator(self.targets_file) + ] + self.assertEqual(targets, self.targets) + + def testText2ClassTxtIterator(self): + inputs = [] + labels = [] + for entry in text_problems.text2class_txt_iterator(self.inputs_file, + self.labels_file): + inputs.append(entry["inputs"]) + labels.append(entry["label"]) + self.assertEqual(inputs, self.inputs) + self.assertEqual(labels, self.labels) + + def testText2ClassTxtIteratorWithStrs(self): + inputs = [] + labels = [] + for entry in 
text_problems.text2class_txt_iterator( + self.inputs_file, self.labels_str_file, class_strs=["a", "b", "c", + "d"]): + inputs.append(entry["inputs"]) + labels.append(entry["label"]) + self.assertEqual(inputs, self.inputs) + self.assertEqual(labels, self.labels) + + def testText2RealTxtIterator(self): + inputs = [] + targets = [] + for entry in text_problems.text2real_txt_iterator(self.inputs_file, + self.targets_regr_file): + inputs.append(entry["inputs"]) + targets.append(entry["targets"]) + self.assertEqual(inputs, self.inputs) + self.assertEqual(targets, self.targets_regr) + + def testText2TextTxtTabIterator(self): + inputs = [] + targets = [] + for entry in text_problems.text2text_txt_tab_iterator(self.tabbed_file): + inputs.append(entry["inputs"]) + targets.append(entry["targets"]) + self.assertEqual(inputs, self.inputs) + self.assertEqual(targets, self.targets) + + def testText2TextTmpDir(self): + problem = Test1() + problem.generate_data(self.tmp_dir, self.tmp_dir) + vocab_file = os.path.join(self.tmp_dir, "vocab.test1.3.subwords") + train_file = os.path.join(self.tmp_dir, "test1-train-00000-of-00001") + eval_file = os.path.join(self.tmp_dir, "test1-dev-00000-of-00001") + self.assertTrue(tf.gfile.Exists(vocab_file)) + self.assertTrue(tf.gfile.Exists(train_file)) + self.assertTrue(tf.gfile.Exists(eval_file)) + + dataset = problem.dataset(tf_estimator.ModeKeys.TRAIN, self.tmp_dir) + features = dataset.make_one_shot_iterator().get_next() + + examples = [] + exhausted = False + with self.test_session() as sess: + examples.append(sess.run(features)) + examples.append(sess.run(features)) + try: + sess.run(features) + except tf.errors.OutOfRangeError: + exhausted = True + + self.assertTrue(exhausted) + self.assertEqual(2, len(examples)) + + self.assertNotEqual( + list(examples[0]["inputs"]), list(examples[1]["inputs"])) + + example = examples[0] + encoder = text_encoder.SubwordTextEncoder(vocab_file) + inputs_encoded = list(example["inputs"]) + inputs_encoded.pop() # rm EOS + self.assertTrue(encoder.decode(inputs_encoded) in self.inputs) + targets_encoded = list(example["targets"]) + targets_encoded.pop() # rm EOS + self.assertTrue(encoder.decode(targets_encoded) in self.targets) + + +class FakeDistributedProblem(text_problems.DistributedText2TextProblem): + + def __init__(self): + self.name = "fake_distributed_problem" + # Call the base class ctor. + super(FakeDistributedProblem, self).__init__() + + def generate_samples(self, data_dir, tmp_dir, dataset_split, input_files): + # Read all lines from all the input_files and return the same word as input + # and target. + for input_file in input_files: + with tf.gfile.Open(input_file, "r") as f: + for line in f.read().strip().split("\n"): + yield {"inputs": line.strip(), "targets": line.strip()} + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem_lib.DatasetSplit.TRAIN, + "shards": 2, + }, { + "split": problem_lib.DatasetSplit.EVAL, + "shards": 3, + }, { + "split": problem_lib.DatasetSplit.TEST, + "shards": 4, + }] + + def input_files(self, dataset_split=problem_lib.DatasetSplit.TRAIN): + if dataset_split == problem_lib.DatasetSplit.TRAIN: + return self.train_files + elif dataset_split == problem_lib.DatasetSplit.EVAL: + return self.dev_files + return self.test_files + + @classmethod + def setup_for_test(cls): + # First setup the temp train, dev, test files and then call the ctor. 
+ cls.tmp_dir = tf.test.get_temp_dir() + shutil.rmtree(cls.tmp_dir) + os.mkdir(cls.tmp_dir) + + # Write 25 train files, 5 dev files, 11 test files. + train_pattern = os.path.join(cls.tmp_dir, "train-%05d-of-00025") + dev_pattern = os.path.join(cls.tmp_dir, "dev-%05d-of-00005") + test_pattern = os.path.join(cls.tmp_dir, "test-%05d-of-00011") + cls.train_files, cls.dev_files, cls.test_files = [], [], [] + for i in range(25): + cls.train_files.append(train_pattern % i) + with tf.gfile.Open(cls.train_files[-1], "w") as f: + f.write("train_%d\n" % i) + for i in range(5): + cls.dev_files.append(dev_pattern % i) + with tf.gfile.Open(cls.dev_files[-1], "w") as f: + f.write("dev_%d\n" % i) + for i in range(11): + cls.test_files.append(test_pattern % i) + with tf.gfile.Open(cls.test_files[-1], "w") as f: + f.write("test_%d\n" % i) + + +class FakeDistributedProblemNotPerSplit(FakeDistributedProblem): + + @property + def is_generate_per_split(self): + return False + + +class DistributedText2TextProblemsTest(tf.test.TestCase): + + def setUp(self): + FakeDistributedProblem.setup_for_test() + + def testOutputSharding(self): + problem = FakeDistributedProblemNotPerSplit() + + # self.dataset_split is 2, 3, 4 + # So: + # num output shards = 2 + 3 + 4 = 9 + # task_ids will be in range = [0, 9) + + expected_split_shard_and_offset = [ + (problem_lib.DatasetSplit.TRAIN, 2, 0), + (problem_lib.DatasetSplit.TRAIN, 2, 1), + (problem_lib.DatasetSplit.EVAL, 3, 0), + (problem_lib.DatasetSplit.EVAL, 3, 1), + (problem_lib.DatasetSplit.EVAL, 3, 2), + (problem_lib.DatasetSplit.TEST, 4, 0), + (problem_lib.DatasetSplit.TEST, 4, 1), + (problem_lib.DatasetSplit.TEST, 4, 2), + (problem_lib.DatasetSplit.TEST, 4, 3), + ] + + expected_output_filenames = [ + "/tmp/fake_distributed_problem-unshuffled-train-00000-of-00002", + "/tmp/fake_distributed_problem-unshuffled-train-00001-of-00002", + "/tmp/fake_distributed_problem-unshuffled-dev-00000-of-00003", + "/tmp/fake_distributed_problem-unshuffled-dev-00001-of-00003", + "/tmp/fake_distributed_problem-unshuffled-dev-00002-of-00003", + "/tmp/fake_distributed_problem-unshuffled-test-00000-of-00004", + "/tmp/fake_distributed_problem-unshuffled-test-00001-of-00004", + "/tmp/fake_distributed_problem-unshuffled-test-00002-of-00004", + "/tmp/fake_distributed_problem-unshuffled-test-00003-of-00004" + ] + + actual_split_shard_and_offset = [] + actual_output_filenames = [] + for task_id in range(9): + actual_split_shard_and_offset.append( + problem._task_id_to_output_split(task_id)) + actual_output_filenames.append( + problem._task_id_to_output_file("/tmp", task_id)) + + self.assertSequenceEqual(expected_split_shard_and_offset, + actual_split_shard_and_offset) + + self.assertSequenceEqual(expected_output_filenames, actual_output_filenames) + + def testInputShardingNoGeneratePerSplit(self): + # 25 input shards (train only, is_generate_per_split = False). + # 9 output tasks in all (2 + 3 + 4), so + # + # Division should be like: + # task_id 0 -> 0, 1, 2 + # task_id 1 -> 3, 4, 5 + # ... 
+ # task_id 6 -> 18, 19, 20 + # task_id 7 -> 21, 22 + # task_id 8 -> 23, 24 + + # tasks 0 to 6 + expected_input_file_sharding = [[ + "train-%05d-of-00025" % j for j in [i, i + 1, i + 2] + ] for i in range(0, 20, 3)] + # tasks 7 and 8 + expected_input_file_sharding.extend( + [["train-%05d-of-00025" % i for i in [21, 22]], + ["train-%05d-of-00025" % i for i in [23, 24]]]) + + problem = FakeDistributedProblemNotPerSplit() + + list_input_files = [] + for task_id in range(9): + input_files = problem._task_id_to_input_files(task_id) + list_input_files.append( + [os.path.basename(input_file) for input_file in input_files]) + + self.assertSequenceEqual(expected_input_file_sharding, list_input_files) + + def testInputShardingWithGeneratePerSplit(self): + # 25, 5, 11 train, dev, test input shards + # 9 output tasks in all (2 + 3 + 4), so + # + # Division should be like: + # + # Train + # task_id 0 -> 0, .. 12 + # task_id 1 -> 13 .. 24 + # + # Dev + # task_id 2 -> 0, 1 + # task_id 3 -> 2, 3, + # task_id 4 -> 4 + # + # Test + # task_id 5 -> 0, 1, 2 + # task_id 6 -> 3, 4, 5 + # task_id 7 -> 6, 7, 8 + # task_id 8 -> 9, 10 + + expected_input_file_sharding = [ + ["train-%05d-of-00025" % i for i in range(13)], # task_id 0 + ["train-%05d-of-00025" % i for i in range(13, 25)], # task_id 1 + ["dev-%05d-of-00005" % i for i in [0, 1]], # task_id 2 + ["dev-%05d-of-00005" % i for i in [2, 3]], # task_id 3 + ["dev-%05d-of-00005" % i for i in [4]], # task_id 4 + ["test-%05d-of-00011" % i for i in [0, 1, 2]], # task_id 5 + ["test-%05d-of-00011" % i for i in [3, 4, 5]], # task_id 6 + ["test-%05d-of-00011" % i for i in [6, 7, 8]], # task_id 7 + ["test-%05d-of-00011" % i for i in [9, 10]], # task_id 8 + ] + + problem = FakeDistributedProblem() + + list_input_files = [] + for task_id in range(9): + input_files = problem._task_id_to_input_files(task_id) + list_input_files.append( + [os.path.basename(input_file) for input_file in input_files]) + + self.assertSequenceEqual(expected_input_file_sharding, list_input_files) + + def testVocabularyIsAllTrain(self): + problem = FakeDistributedProblem() + + tmp_dir = problem.tmp_dir + + for text in problem.generate_text_for_vocab(tmp_dir, tmp_dir): + # All the vocabulary is coming from training input shards. + self.assertTrue("train_" in text, "train is not in %s" % text) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/timeseries.py b/tensor2tensor/data_generators/timeseries.py new file mode 100644 index 000000000..78048bc8a --- /dev/null +++ b/tensor2tensor/data_generators/timeseries.py @@ -0,0 +1,365 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
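The sharding expectations in the test above follow directly from the divmod arithmetic in DistributedText2TextProblem._divide_equally; a standalone sketch of that calculation, using the hypothetical 25-files-over-9-tasks case from the test:

```python
def slice_for_task(num_files, num_tasks, task_id):
  # Mirrors _divide_equally: the first `remainder` tasks take one extra file.
  task_load, remainder = divmod(num_files, num_tasks)
  start = task_id * task_load + min(task_id, remainder)
  count = task_load + int(task_id < remainder)
  return list(range(start, start + count))

# 25 train files over 9 tasks: tasks 0-6 get 3 files each, tasks 7-8 get 2.
assert slice_for_task(25, 9, 0) == [0, 1, 2]
assert slice_for_task(25, 9, 6) == [18, 19, 20]
assert slice_for_task(25, 9, 7) == [21, 22]
assert slice_for_task(25, 9, 8) == [23, 24]
```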
+ +"""Multi time series forecasting problem.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import timeseries_data_generator +from tensor2tensor.layers import modalities +from tensor2tensor.utils import metrics +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +class TimeseriesProblem(problem.Problem): + """Base Problem for multi timeseries datasets.""" + + def feature_encoders(self, data_dir): + del data_dir + return { + "inputs": text_encoder.RealEncoder(), + "targets": text_encoder.RealEncoder() + } + + @property + def is_generate_per_split(self): + # generate_data will shard the data into TRAIN and EVAL for us. + return False + + @property + def dataset_splits(self): + """Splits of data to produce and number the output shards for each.""" + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": self.num_train_shards, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": self.num_eval_shards, + }, { + "split": problem.DatasetSplit.TEST, + "shards": self.num_test_shards, + }] + + @property + def has_inputs(self): + return True + + @property + def num_train_shards(self): + """Number of training shards.""" + return 9 + + @property + def num_eval_shards(self): + """Number of eval shards.""" + return 1 + + @property + def num_test_shards(self): + """Number of test shards.""" + return 1 + + @property + def num_series(self): + """Number of timeseries.""" + raise NotImplementedError() + + @property + def num_input_timestamps(self): + """Number of timestamps to include in the input.""" + raise NotImplementedError() + + @property + def num_target_timestamps(self): + """Number of timestamps to include in the target.""" + raise NotImplementedError() + + def timeseries_dataset(self): + """Multi-timeseries data [ timestamps , self.num_series ] .""" + raise NotImplementedError() + + def eval_metrics(self): + eval_metrics = [metrics.Metrics.RMSE] + return eval_metrics + + @property + def normalizing_constant(self): + """Constant by which all data will be multiplied to be more normalized.""" + return 1.0 # Adjust so that your loss is around 1 or 10 or 100, not 1e+9. + + def preprocess_example(self, example, unused_mode, unused_hparams): + # Time series are flat on disk, we un-flatten them back here. + if self.has_inputs: + flat_inputs = example["inputs"] + flat_targets = example["targets"] + c = self.normalizing_constant + # Tensor2Tensor models expect [height, width, depth] examples, here we + # use height for time and set width to 1 and num_series is our depth. + if self.has_inputs: + example["inputs"] = tf.reshape( + flat_inputs, [self.num_input_timestamps, 1, self.num_series]) * c + example["targets"] = tf.reshape( + flat_targets, [self.num_target_timestamps, 1, self.num_series]) * c + return example + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + del data_dir + del tmp_dir + del dataset_split + + series = self.timeseries_dataset() + num_timestamps = len(series) + + # Generate samples with num_input_timestamps for "inputs" and + # num_target_timestamps in the "targets". 
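+    # For example (hypothetical numbers): with 10 timestamps,
+    # num_input_timestamps=2 and num_target_timestamps=2, split_index runs
+    # over 2..8; the first window uses rows 0-1 as inputs and rows 2-3 as
+    # targets, the last uses rows 6-7 as inputs and rows 8-9 as targets.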
+ for split_index in range(self.num_input_timestamps, + num_timestamps - self.num_target_timestamps + 1): + inputs = series[split_index - + self.num_input_timestamps:split_index, :].tolist() + targets = series[split_index:split_index + + self.num_target_timestamps, :].tolist() + # We need to flatten the lists on disk for tf,Example to work. + flat_inputs = [item for sublist in inputs for item in sublist] + flat_targets = [item for sublist in targets for item in sublist] + if self.has_inputs: + example_keys = ["inputs", "targets"] + ex_dict = dict(zip(example_keys, [flat_inputs, flat_targets])) + else: + example_keys = ["targets"] + ex_dict = dict(zip(example_keys, [flat_targets])) + + yield ex_dict + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = {"inputs": modalities.ModalityType.REAL_L2_LOSS, + "targets": modalities.ModalityType.REAL_L2_LOSS} + p.vocab_size = {"inputs": self.num_series, + "targets": self.num_series} + p.input_space_id = problem.SpaceID.REAL + p.target_space_id = problem.SpaceID.REAL + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + filepath_fns = { + problem.DatasetSplit.TRAIN: self.training_filepaths, + problem.DatasetSplit.EVAL: self.dev_filepaths, + problem.DatasetSplit.TEST: self.test_filepaths, + } + + split_paths = [(split["split"], filepath_fns[split["split"]]( + data_dir, split["shards"], shuffled=False)) + for split in self.dataset_splits] + + all_paths = [] + for _, paths in split_paths: + all_paths.extend(paths) + + if self.is_generate_per_split: + for split, paths in split_paths: + generator_utils.generate_files( + self.generate_samples(data_dir, tmp_dir, split), paths) + else: + generator_utils.generate_files( + self.generate_samples(data_dir, tmp_dir, problem.DatasetSplit.TRAIN), + all_paths) + + generator_utils.shuffle_dataset(all_paths) + + def example_reading_spec(self): + data_fields = { + "inputs": tf.VarLenFeature(tf.float32), + "targets": tf.VarLenFeature(tf.float32), + } + data_items_to_decoders = None + return (data_fields, data_items_to_decoders) + + +@registry.register_problem +class TimeseriesToyProblem(TimeseriesProblem): + """Timeseries problem with a toy dataset.""" + + @property + def num_train_shards(self): + """Number of training shards.""" + return 1 + + @property + def num_eval_shards(self): + """Number of eval shards.""" + return 1 + + @property + def num_test_shards(self): + """Number of eval shards.""" + return 0 + + @property + def num_series(self): + """Number of timeseries.""" + return 2 + + @property + def num_input_timestamps(self): + """Number of timestamps to include in the input.""" + return 2 + + @property + def num_target_timestamps(self): + """Number of timestamps to include in the target.""" + return 2 + + def timeseries_dataset(self): + series = [[float(i + n) for n in range(self.num_series)] for i in range(10)] + + return np.array(series) + + +@registry.register_problem +class TimeseriesToyProblemNoInputs(TimeseriesToyProblem): + """Timeseries problem with a toy dataset and without inputs.""" + + @property + def has_inputs(self): + return False + + @property + def num_input_timestamps(self): + """Number of timestamps to include in the input.""" + return 0 + + +@registry.register_problem +class TimeseriesSyntheticDataSeries10Samples100k(TimeseriesProblem): + """10 synthetic timeseries with 100K samples/timestamps.""" + + @property + def num_train_shards(self): + """Number of training shards.""" + return 9 + + @property + def num_eval_shards(self): + """Number of eval 
shards.""" + return 1 + + @property + def num_series(self): + """Number of timeseries.""" + return 10 + + @property + def num_input_timestamps(self): + """Number of timestamps to include in the input.""" + return 250 + + @property + def num_target_timestamps(self): + """Number of timestamps to include in the target.""" + return 100 + + @property + def normalizing_constant(self): + return 0.01 + + @property + def timeseries_params(self): + """Parameters for each timeseries.""" + timeseries_params = [{ + "m": 0.006, + "b": 300.0, + "A": 50.0, + "freqcoeff": 1500.0, + "rndA": 15.0, + "fn": np.sin + }, { + "m": 0.000, + "b": 500.0, + "A": 35.0, + "freqcoeff": 3500.0, + "rndA": 25.0, + "fn": np.cos + }, { + "m": -0.003, + "b": 800.0, + "A": 65.0, + "freqcoeff": 2500.0, + "rndA": 5.0, + "fn": np.sin + }, { + "m": 0.009, + "b": 600.0, + "A": 20.0, + "freqcoeff": 1000.0, + "rndA": 1.0, + "fn": np.cos + }, { + "m": 0.002, + "b": 700.0, + "A": 40.0, + "freqcoeff": 2000.0, + "rndA": 35.0, + "fn": np.sin + }, { + "m": -0.008, + "b": 1000.0, + "A": 70.0, + "freqcoeff": 3000.0, + "rndA": 25.0, + "fn": np.cos + }, { + "m": 0.000, + "b": 100.0, + "A": 25.0, + "freqcoeff": 1500.0, + "rndA": 10.0, + "fn": np.sin + }, { + "m": 0.004, + "b": 1500.0, + "A": 54.0, + "freqcoeff": 900.0, + "rndA": 55.0, + "fn": np.cos + }, { + "m": 0.005, + "b": 2000.0, + "A": 32.0, + "freqcoeff": 1100.0, + "rndA": 43.0, + "fn": np.sin + }, { + "m": 0.010, + "b": 2500.0, + "A": 43.0, + "freqcoeff": 1900.0, + "rndA": 53.0, + "fn": np.cos + }] + + return timeseries_params + + def timeseries_dataset(self): + series = np.array( + timeseries_data_generator.generate_data(100000, self.timeseries_params)) + + series = series.transpose() + return series diff --git a/tensor2tensor/data_generators/timeseries_data_generator.py b/tensor2tensor/data_generators/timeseries_data_generator.py new file mode 100644 index 000000000..d0bb165f9 --- /dev/null +++ b/tensor2tensor/data_generators/timeseries_data_generator.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generator for the timeseries problem.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + + +def generate_data(timeseries_length, timeseries_params): + """Generates synthetic timeseries using input parameters. + + Each generated timeseries has timeseries_length data points. + Parameters for each timeseries are specified by timeseries_params. + + Args: + timeseries_length: Number of data points to generate for each timeseries. + timeseries_params: Parameters used to generate the timeseries. The following + parameters need to be specified for each timeseries: + m = Slope of the timeseries used to compute the timeseries trend. + b = y-intercept of the timeseries used to compute the timeseries trend. + A = Timeseries amplitude used to compute timeseries period. 
+ freqcoeff = Frequency coefficient used to compute timeseries period. + rndA = Random amplitude used to inject noise into the timeseries. + fn = Base timeseries function (np.cos or np.sin). + Example params for two timeseries. + [{"m": 0.006, "b": 300.0, "A":50.0, "freqcoeff":1500.0, "rndA":15.0, + "fn": np.sin}, + {"m": 0.000, "b": 500.0, "A":35.0, "freqcoeff":3500.0, "rndA":25.0, + "fn": np.cos}] + + Returns: + Multi-timeseries (list of list). + """ + x = range(timeseries_length) + + multi_timeseries = [] + for p in timeseries_params: + # Trend + y1 = [p["m"] * i + p["b"] for i in x] + # Period + y2 = [p["A"] * p["fn"](i / p["freqcoeff"]) for i in x] + # Noise + y3 = np.random.normal(0, p["rndA"], timeseries_length).tolist() + # Sum of Trend, Period and Noise. Replace negative values with zero. + y = [max(a + b + c, 0) for a, b, c in zip(y1, y2, y3)] + multi_timeseries.append(y) + + return multi_timeseries diff --git a/tensor2tensor/data_generators/timeseries_data_generator_test.py b/tensor2tensor/data_generators/timeseries_data_generator_test.py new file mode 100644 index 000000000..256038b10 --- /dev/null +++ b/tensor2tensor/data_generators/timeseries_data_generator_test.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
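As a quick orientation for the generator defined above, here is a minimal usage sketch (my illustration, not part of this change); the parameter dictionaries are copied from the docstring example, and the import path assumes the module added above:

```python
# Illustrative sketch only: call generate_data() with the two example
# parameter sets from its docstring and check the shape of the result.
import numpy as np

from tensor2tensor.data_generators import timeseries_data_generator

example_params = [
    {"m": 0.006, "b": 300.0, "A": 50.0, "freqcoeff": 1500.0, "rndA": 15.0,
     "fn": np.sin},
    {"m": 0.000, "b": 500.0, "A": 35.0, "freqcoeff": 3500.0, "rndA": 25.0,
     "fn": np.cos},
]

series = timeseries_data_generator.generate_data(1000, example_params)
assert len(series) == 2        # one list per parameter dictionary
assert len(series[0]) == 1000  # timeseries_length points in each series
```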
+ +"""Timeseries data generator tests.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from tensor2tensor.data_generators import timeseries_data_generator + +import tensorflow.compat.v1 as tf + + +class TimeseriesDataGeneratorTest(tf.test.TestCase): + + def testGenerateData(self): + timeseries_params = [{ + "m": 0.006, + "b": 300.0, + "A": 50.0, + "freqcoeff": 1500.0, + "rndA": 15.0, + "fn": np.sin + }, { + "m": 0.000, + "b": 500.0, + "A": 35.0, + "freqcoeff": 3500.0, + "rndA": 25.0, + "fn": np.cos + }, { + "m": -0.003, + "b": 800.0, + "A": 65.0, + "freqcoeff": 2500.0, + "rndA": 5.0, + "fn": np.sin + }, { + "m": 0.009, + "b": 600.0, + "A": 20.0, + "freqcoeff": 1000.0, + "rndA": 1.0, + "fn": np.cos + }, { + "m": 0.002, + "b": 700.0, + "A": 40.0, + "freqcoeff": 2000.0, + "rndA": 35.0, + "fn": np.sin + }, { + "m": -0.008, + "b": 1000.0, + "A": 70.0, + "freqcoeff": 3000.0, + "rndA": 25.0, + "fn": np.cos + }, { + "m": 0.000, + "b": 100.0, + "A": 25.0, + "freqcoeff": 1500.0, + "rndA": 10.0, + "fn": np.sin + }, { + "m": 0.004, + "b": 1500.0, + "A": 54.0, + "freqcoeff": 900.0, + "rndA": 55.0, + "fn": np.cos + }, { + "m": 0.005, + "b": 2000.0, + "A": 32.0, + "freqcoeff": 1100.0, + "rndA": 43.0, + "fn": np.sin + }, { + "m": 0.010, + "b": 2500.0, + "A": 43.0, + "freqcoeff": 1900.0, + "rndA": 53.0, + "fn": np.cos + }] + multi_timeseries = timeseries_data_generator.generate_data( + 20, timeseries_params) + + self.assertEqual(10, len(multi_timeseries)) + self.assertEqual(20, len(multi_timeseries[0])) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/timeseries_test.py b/tensor2tensor/data_generators/timeseries_test.py new file mode 100644 index 000000000..441e3dac7 --- /dev/null +++ b/tensor2tensor/data_generators/timeseries_test.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
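To make the flattening comment in `TimeseriesProblem.generate_samples` above concrete, here is a small worked illustration (mine, not part of the diff) for the toy setting of two series and two input timestamps:

```python
# Each timestamp holds one value per series; two input timestamps over two
# series are therefore flattened row by row into four floats before being
# written to the tf.Example.
series = [[0.0, 1.0],   # timestamp 0
          [1.0, 2.0],   # timestamp 1
          [2.0, 3.0],   # timestamp 2
          [3.0, 4.0]]   # timestamp 3

inputs = series[0:2]  # the two input timestamps
flat_inputs = [value for row in inputs for value in row]
assert flat_inputs == [0.0, 1.0, 1.0, 2.0]
```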
+ +"""Timeseries generators tests.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import shutil + +from tensor2tensor.data_generators import timeseries + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class TimeseriesTest(tf.test.TestCase): + + @classmethod + def setUpClass(cls): + cls.tmp_dir = tf.test.get_temp_dir() + shutil.rmtree(cls.tmp_dir) + os.mkdir(cls.tmp_dir) + + def testTimeseriesToyProblem(self): + problem = timeseries.TimeseriesToyProblem() + problem.generate_data(self.tmp_dir, self.tmp_dir) + + dataset = problem.dataset(tf_estimator.ModeKeys.TRAIN, self.tmp_dir) + features = dataset.make_one_shot_iterator().get_next() + + examples = [] + exhausted = False + with self.test_session() as sess: + examples.append(sess.run(features)) + examples.append(sess.run(features)) + examples.append(sess.run(features)) + examples.append(sess.run(features)) + + try: + sess.run(features) + except tf.errors.OutOfRangeError: + exhausted = True + + self.assertTrue(exhausted) + self.assertEqual(4, len(examples)) + + self.assertNotEqual( + list(examples[0]["inputs"][0, 0]), list(examples[1]["inputs"][0, 0])) + + def testTimeseriesToyProblemNoInputs(self): + problem = timeseries.TimeseriesToyProblemNoInputs() + problem.generate_data(self.tmp_dir, self.tmp_dir) + + dataset = problem.dataset(tf_estimator.ModeKeys.TRAIN, self.tmp_dir) + features = dataset.make_one_shot_iterator().get_next() + + examples = [] + exhausted = False + with self.test_session() as sess: + examples.append(sess.run(features)) + examples.append(sess.run(features)) + examples.append(sess.run(features)) + examples.append(sess.run(features)) + examples.append(sess.run(features)) + + try: + sess.run(features) + except tf.errors.OutOfRangeError: + exhausted = True + + self.assertTrue(exhausted) + self.assertEqual(5, len(examples)) + + def testTimeseriesSyntheticData10Series100kSamples(self): + problem = timeseries.TimeseriesSyntheticDataSeries10Samples100k() + self.assertEqual(10, problem.num_series) + self.assertEqual(250, problem.num_input_timestamps) + self.assertEqual(100, problem.num_target_timestamps) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/tokenizer.py b/tensor2tensor/data_generators/tokenizer.py index 15b199907..9e5eb5108 100644 --- a/tensor2tensor/data_generators/tokenizer.py +++ b/tensor2tensor/data_generators/tokenizer.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,104 +15,183 @@ """A simple invertible tokenizer. -Converts from a raw string to a list of tokens (strings). +Converts from a unicode string to a list of tokens +(represented as Unicode strings). This tokenizer has the following desirable properties: - It is invertible. - - Punctuation is broken away from adjacent letters. + - Alphanumeric characters are broken away from non-alphanumeric characters. - A single space between words does not produce an extra token. + - The full Unicode punctuation and separator set is recognized. The tokenization algorithm is as follows: -0. We classify the 256 characters into "word characters" and - "separator characters". Separator characters are defined as the union of - string.punctuation and string.whitespace. 
All other characters are - "word characters". - -1. Split the text into a list of tokens, splitting at every boundary of a - "word character" and a "separator character". This produces a list which - alternates between "word tokens" (strings of word characters) and - "separator tokens" (strings of of separator characters). +1. Split the text into a list of tokens, splitting at every boundary of an + alphanumeric character and a non-alphanumeric character. This produces + a list which alternates between "alphanumeric tokens" + (strings of alphanumeric characters) and "non-alphanumeric tokens" + (strings of non-alphanumeric characters). 2. Remove every token consisting of a single space, unless it is the very first or very last token in the list. These tokens are now - implied by the fact that there are two adjacent word tokens. + implied by the fact that there are two adjacent alphanumeric tokens. -e.g. "Dude - that's so cool." - -> ["Dude", " - ", "that", "'", "s", "so", "cool", "."] +e.g. u"Dude - that's so cool." + -> [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."] """ from __future__ import absolute_import from __future__ import division from __future__ import print_function -import array -import string +import collections +import sys +import unicodedata +import six +from six.moves import range # pylint: disable=redefined-builtin +from tensor2tensor.utils import mlperf_log +import tensorflow.compat.v1 as tf + +# Conversion between Unicode and UTF-8, if required (on Python2) +_native_to_unicode = (lambda s: s.decode("utf-8")) if six.PY2 else (lambda s: s) + + +# This set contains all letter and number characters. +_ALPHANUMERIC_CHAR_SET = set( + six.unichr(i) for i in range(sys.maxunicode) + if (unicodedata.category(six.unichr(i)).startswith("L") or + unicodedata.category(six.unichr(i)).startswith("N"))) + + +def encode(text): + """Encode a unicode string as a list of tokens. + + Args: + text: a unicode string + Returns: + a list of tokens as Unicode strings + """ + if not text: + return [] + ret = [] + token_start = 0 + # Classify each character in the input string + is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text] + for pos in range(1, len(text)): + if is_alnum[pos] != is_alnum[pos - 1]: + token = text[token_start:pos] + if token != u" " or token_start == 0: + ret.append(token) + token_start = pos + final_token = text[token_start:] + ret.append(final_token) + return ret + + +def decode(tokens): + """Decode a list of tokens to a unicode string. + + Args: + tokens: a list of Unicode strings + Returns: + a unicode string + """ + token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens] + ret = [] + for i, token in enumerate(tokens): + if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]: + ret.append(u" ") + ret.append(token) + return "".join(ret) + + +def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True): + """Reads files matching a wildcard pattern, yielding the contents. + + Args: + filepattern: A wildcard pattern matching one or more files. + max_lines: If set, stop reading after reading this many lines. + split_on_newlines: A boolean. If true, then split files by lines and strip + leading and trailing whitespace from each line. Otherwise, treat each + file as a single string. + + Yields: + The contents of the files as lines, if split_on_newlines is True, or + the entire contents of each file if False. 
+ """ + filenames = sorted(tf.gfile.Glob(filepattern)) + lines_read = 0 + for filename in filenames: + with tf.gfile.Open(filename) as f: + if split_on_newlines: + for line in f: + yield line.strip() + lines_read += 1 + if max_lines and lines_read >= max_lines: + return + + else: + if max_lines: + doc = [] + for line in f: + doc.append(line) + lines_read += 1 + if max_lines and lines_read >= max_lines: + yield "".join(doc) + return + yield "".join(doc) + + else: + yield f.read() + + +def corpus_token_counts( + text_filepattern, corpus_max_lines, split_on_newlines=True): + """Read the corpus and compute a dictionary of token counts. + + Args: + text_filepattern: A pattern matching one or more files. + corpus_max_lines: An integer; maximum total lines to read. + split_on_newlines: A boolean. If true, then split files by lines and strip + leading and trailing whitespace from each line. Otherwise, treat each + file as a single string. + + Returns: + a dictionary mapping token to count. + """ + counts = collections.Counter() + for doc in _read_filepattern( + text_filepattern, + max_lines=corpus_max_lines, + split_on_newlines=split_on_newlines): + counts.update(encode(_native_to_unicode(doc))) -# Dependency imports + mlperf_log.transformer_print( + key=mlperf_log.PREPROC_VOCAB_SIZE, value=len(counts)) + return counts -from six.moves import xrange # pylint: disable=redefined-builtin +def vocab_token_counts(text_filepattern, max_lines): + """Read a vocab file and return a dictionary of token counts. -class Tokenizer(object): - """Vocab for breaking words into wordpieces. + Reads a two-column CSV file of tokens and their frequency in a dataset. The + tokens are presumed to be generated by encode() or the equivalent. + + Args: + text_filepattern: A pattern matching one or more files. + max_lines: An integer; maximum total lines to read. + + Returns: + a dictionary mapping token to count. """ + ret = {} + for i, line in enumerate( + _read_filepattern(text_filepattern, max_lines=max_lines)): + if "," not in line: + tf.logging.warning("Malformed vocab line #%d '%s'", i, line) + continue + + token, count = line.rsplit(",", 1) + ret[_native_to_unicode(token)] = int(count) - def __init__(self): - self._separator_chars = string.punctuation + string.whitespace - self._separator_char_mask = array.array( - "l", [chr(i) in self._separator_chars for i in xrange(256)]) - self.token_counts = dict() - - def _increment_token_count(self, token): - if token in self.token_counts: - self.token_counts[token] += 1 - else: - self.token_counts[token] = 1 - - def encode(self, raw_text): - """Encode a raw string as a list of tokens. - - Args: - raw_text: a string - Returns: - a list of stirngs. - """ - if not raw_text: - return [] - ret = [] - token_start = 0 - for pos in xrange(1, len(raw_text)): - if (self._is_separator_char(raw_text[pos]) != - self._is_separator_char(raw_text[pos - 1])): - token = raw_text[token_start:pos] - if token != " " or token_start == 0: - ret.append(token) - self._increment_token_count(token) - token_start = pos - final_token = raw_text[token_start:] - ret.append(final_token) - self._increment_token_count(final_token) - return ret - - def decode(self, tokens): - """Decode a list of tokens to a string. - - Args: - tokens: a list of stirngs - Returns: - a string. 
- """ - ret = "" - for i, token in enumerate(tokens): - if (i > 0 and self._is_word_char(tokens[i - 1][0]) and - self._is_word_char(token[0])): - ret += " " - ret += token - return ret - - def _is_separator_char(self, c): - return self._separator_char_mask[ord(c)] - - def _is_word_char(self, c): - return not self._is_separator_char(c) + return ret diff --git a/tensor2tensor/data_generators/tokenizer_test.py b/tensor2tensor/data_generators/tokenizer_test.py index 4102051e6..1f4f955e2 100644 --- a/tensor2tensor/data_generators/tokenizer_test.py +++ b/tensor2tensor/data_generators/tokenizer_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,45 +20,121 @@ from __future__ import division from __future__ import print_function +import os import random - -# Dependency imports - import six -from six.moves import xrange # pylint: disable=redefined-builtin +from six.moves import range # pylint: disable=redefined-builtin from tensor2tensor.data_generators import tokenizer +import tensorflow.compat.v1 as tf -import tensorflow as tf + +pkg_dir, _ = os.path.split(__file__) +_TESTDATA = os.path.join(pkg_dir, "test_data") class TokenizerTest(tf.test.TestCase): - def testEncode(self): - t = tokenizer.Tokenizer() - self.assertEqual( - t.encode("Dude - that's so cool."), - ["Dude", " - ", "that", "'", "s", "so", "cool", "."]) - self.assertEqual( - t.encode("Łukasz est né en 1981."), - ["Łukasz", "est", "né", "en", "1981", "."]) - self.assertEqual( - t.encode(" Spaces at the ends "), - [" ", "Spaces", "at", "the", "ends", " "]) - self.assertEqual(t.encode("802.11b"), ["802", ".", "11b"]) - self.assertEqual(t.encode("two. \nlines"), ["two", ". \n", "lines"]) + def test_encode(self): + self.assertListEqual( + [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."], + tokenizer.encode(u"Dude - that's so cool.")) + self.assertListEqual([u"Łukasz", u"est", u"né", u"en", u"1981", u"."], + tokenizer.encode(u"Łukasz est né en 1981.")) + self.assertListEqual([u" ", u"Spaces", u"at", u"the", u"ends", u" "], + tokenizer.encode(u" Spaces at the ends ")) + self.assertListEqual([u"802", u".", u"11b"], tokenizer.encode(u"802.11b")) + self.assertListEqual([u"two", u". \n", u"lines"], + tokenizer.encode(u"two. 
\nlines")) - def testDecode(self): - t = tokenizer.Tokenizer() + def test_decode(self): self.assertEqual( - t.decode(["Dude", " - ", "that", "'", "s", "so", "cool", "."]), - "Dude - that's so cool.") - - def testInvertibilityOnRandomStrings(self): - t = tokenizer.Tokenizer() - random.seed(123) - for _ in xrange(10000): - s = "".join([six.int2byte(random.randint(0, 255)) for _ in xrange(10)]) - self.assertEqual(s, t.decode(t.encode(s))) + u"Dude - that's so cool.", + tokenizer.decode( + [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."])) + + def test_invertibility_on_random_strings(self): + for _ in range(1000): + s = u"".join(six.unichr(random.randint(0, 65535)) for _ in range(10)) + self.assertEqual(s, tokenizer.decode(tokenizer.encode(s))) + + +class TestTokenCounts(tf.test.TestCase): + + def setUp(self): + super(TestTokenCounts, self).setUp() + self.corpus_path = os.path.join(_TESTDATA, "corpus-*.txt") + self.vocab_path = os.path.join(_TESTDATA, "vocab-*.txt") + + def test_corpus_token_counts_split_on_newlines(self): + token_counts = tokenizer.corpus_token_counts( + self.corpus_path, corpus_max_lines=0, split_on_newlines=True) + + expected = { + u"'": 2, + u".": 2, + u". ": 1, + u"... ": 1, + u"Groucho": 1, + u"Marx": 1, + u"Mitch": 1, + u"Hedberg": 1, + u"I": 3, + u"in": 2, + u"my": 2, + u"pajamas": 2, + } + self.assertDictContainsSubset(expected, token_counts) + self.assertNotIn(u".\n\n", token_counts) + self.assertNotIn(u"\n", token_counts) + + def test_corpus_token_counts_no_split_on_newlines(self): + token_counts = tokenizer.corpus_token_counts( + self.corpus_path, corpus_max_lines=0, split_on_newlines=False) + + self.assertDictContainsSubset({u".\n\n": 2, u"\n": 3}, token_counts) + + def test_corpus_token_counts_split_with_max_lines(self): + token_counts = tokenizer.corpus_token_counts( + self.corpus_path, corpus_max_lines=5, split_on_newlines=True) + + self.assertIn(u"slept", token_counts) + self.assertNotIn(u"Mitch", token_counts) + + def test_corpus_token_counts_no_split_with_max_lines(self): + token_counts = tokenizer.corpus_token_counts( + self.corpus_path, corpus_max_lines=5, split_on_newlines=False) + + self.assertIn(u"slept", token_counts) + self.assertNotIn(u"Mitch", token_counts) + self.assertDictContainsSubset({ + u".\n\n": 1, + u"\n": 2, + u".\n": 1 + }, token_counts) + + def test_vocab_token_counts(self): + token_counts = tokenizer.vocab_token_counts(self.vocab_path, 0) + + expected = { + u"lollipop": 8, + u"reverberated": 12, + u"kattywampus": 11, + u"balderdash": 10, + u"jiggery-pokery": 14, + } + self.assertDictEqual(expected, token_counts) + + def test_vocab_token_counts_with_max_lines(self): + # vocab-1 has 2 lines, vocab-2 has 3 + token_counts = tokenizer.vocab_token_counts(self.vocab_path, 5) + + expected = { + u"lollipop": 8, + u"reverberated": 12, + u"kattywampus": 11, + u"balderdash": 10, + } + self.assertDictEqual(expected, token_counts) if __name__ == "__main__": diff --git a/tensor2tensor/data_generators/transduction_problems.py b/tensor2tensor/data_generators/transduction_problems.py new file mode 100644 index 000000000..e0b729548 --- /dev/null +++ b/tensor2tensor/data_generators/transduction_problems.py @@ -0,0 +1,260 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A suite of sequence transduction problems. + +Each problem generates pairs of tokenized input and output sequences which +represent the effect of the transduction algorithm which must be learned. + +These problems are based on the benchmarks outlined in: + +Learning to Transduce with Unbounded Memory +Edward Grefenstette, Karl Moritz Hermann, Mustafa Suleyman, Phil Blunsom +https://arxiv.org/abs/1506.02516, 2015 + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import random + +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +MAX_GENERATOR_ATTEMPTS = 100 + + +class TransductionProblem(text_problems.Text2TextProblem): + """Abstract base clase which all transduction problems inherit from. + """ + + def __init__(self, was_reversed=False, was_copy=False): + super(TransductionProblem, self).__init__(was_reversed=False, + was_copy=False) + self.vocab = self.build_vocab() + + @property + def num_symbols(self): + """The number of symbols that can be used as part of a sequence.""" + return 128 + + def min_sequence_length(self, dataset_split): + """Determine the minimum sequence length given a dataset_split. + + Args: + dataset_split: A problem.DatasetSplit. + + Returns: + The minimum length that a sequence can be for this dataset_split. + """ + return { + problem.DatasetSplit.TRAIN: 8, + problem.DatasetSplit.EVAL: 65, + problem.DatasetSplit.TEST: 65 + }[dataset_split] + + def max_sequence_length(self, dataset_split): + """Determine the maximum sequence length given a dataset_split. + + Args: + dataset_split: A problem.DatasetSplit. + + Returns: + The maximum length that a sequence can be for this dataset_split. + """ + return { + problem.DatasetSplit.TRAIN: 64, + problem.DatasetSplit.EVAL: 128, + problem.DatasetSplit.TEST: 128 + }[dataset_split] + + def num_samples(self, dataset_split): + """Determine the dataset sized given a dataset_split. + + Args: + dataset_split: A problem.DatasetSplit. + + Returns: + The desired number of samples for this dataset_split. 
+ """ + return { + problem.DatasetSplit.TRAIN: 1000000, + problem.DatasetSplit.EVAL: 10000, + problem.DatasetSplit.TEST: 10000 + }[dataset_split] + + @property + def num_shards(self): + """Used to split up datasets into multiple files.""" + return 10 + + @property + def is_generate_per_split(self): + return False + + @property + def vocab_type(self): + return text_problems.VocabType.TOKEN + + def sequence_length(self, dataset_split): + return random.randint(self.min_sequence_length(dataset_split), + self.max_sequence_length(dataset_split)) + + def build_vocab(self): + return ["sym_%d" % i for i in range(1, self.num_symbols + 1)] + + def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False): + vocab_filename = os.path.join(data_dir, self.vocab_filename) + if not tf.gfile.Exists(vocab_filename): + encoder = text_encoder.TokenTextEncoder(None, + vocab_list=sorted(self.vocab)) + encoder.store_to_file(vocab_filename) + else: + encoder = text_encoder.TokenTextEncoder(vocab_filename, + replace_oov=self.oov_token) + return encoder + + def generate_random_sequence(self, dataset_split): + return [random.choice(self.vocab) + for _ in range(self.sequence_length(dataset_split))] + + def transpose_sequence(self, input_sequence): + raise NotImplementedError() + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + for _ in range(self.num_samples(dataset_split)): + source = self.generate_random_sequence(dataset_split) + target = self.transpose_sequence(source) + yield { + "inputs": " ".join(source), + "targets": " ".join(target), + } + + +@registry.register_problem +class CopySequence(TransductionProblem): + """Reproduce a sequence exactly as it was input.""" + + def transpose_sequence(self, input_sequence): + return input_sequence + + +@registry.register_problem +class CopySequenceSmall(CopySequence): + """Same as CopySequence but with smaller sequences. + """ + + @property + def num_symbols(self): + return 64 + + def min_sequence_length(self, dataset_split): + return { + problem.DatasetSplit.TRAIN: 4, + problem.DatasetSplit.EVAL: 17, + problem.DatasetSplit.TEST: 17 + }[dataset_split] + + def max_sequence_length(self, dataset_split): + return { + problem.DatasetSplit.TRAIN: 16, + problem.DatasetSplit.EVAL: 32, + problem.DatasetSplit.TEST: 32 + }[dataset_split] + + def num_samples(self, dataset_split): + return { + problem.DatasetSplit.TRAIN: 100000, + problem.DatasetSplit.EVAL: 10000, + problem.DatasetSplit.TEST: 10000 + }[dataset_split] + + +@registry.register_problem +class ReverseSequence(TransductionProblem): + """Reverses the order of the sequence. + """ + + def transpose_sequence(self, input_sequence): + return input_sequence[::-1] + + +@registry.register_problem +class ReverseSequenceSmall(ReverseSequence): + """Same as ReverseSequence but with smaller sequences. + """ + + @property + def num_symbols(self): + return 64 + + def min_sequence_length(self, dataset_split): + return { + problem.DatasetSplit.TRAIN: 4, + problem.DatasetSplit.EVAL: 17, + problem.DatasetSplit.TEST: 17 + }[dataset_split] + + def max_sequence_length(self, dataset_split): + return { + problem.DatasetSplit.TRAIN: 16, + problem.DatasetSplit.EVAL: 32, + problem.DatasetSplit.TEST: 32 + }[dataset_split] + + def num_samples(self, dataset_split): + return { + problem.DatasetSplit.TRAIN: 100000, + problem.DatasetSplit.EVAL: 10000, + problem.DatasetSplit.TEST: 10000 + }[dataset_split] + + +@registry.register_problem +class FlipBiGramSequence(TransductionProblem): + """Flip every pair of tokens: 1 2 3 4 -> 2 1 4 3. 
+ """ + + def sequence_length(self, dataset_split): + """Only generate sequences with even lengths. + + Args: + dataset_split: A problem.DatasetSplit specifying which dataset the + sequence is a part of. + + Returns: + An even number >= min_sequence_length(dataset_split) + and <= max_sequence_length(dataset_split) + """ + min_length = self.min_sequence_length(dataset_split) + min_length += min_length % 2 + max_length = self.max_sequence_length(dataset_split) + max_length -= max_length % 2 + length = random.randint(min_length, max_length) + return length - (length % 2) + + def transpose_sequence(self, input_sequence): + return [input_sequence[i+1] if i%2 == 0 else input_sequence[i-1] + for i in range(len(input_sequence))] diff --git a/tensor2tensor/data_generators/transduction_problems_test.py b/tensor2tensor/data_generators/transduction_problems_test.py new file mode 100644 index 000000000..20fa474f1 --- /dev/null +++ b/tensor2tensor/data_generators/transduction_problems_test.py @@ -0,0 +1,83 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.data_generators.transduction_problems.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import shutil +import tempfile + +from absl.testing import parameterized + +import numpy as np + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import transduction_problems + +import tensorflow.compat.v1 as tf + + +class TransductionProblem(parameterized.TestCase): + + def setUp(self): + super(TransductionProblem, self).setUp() + # Create a temporary directory + self.test_dir = tempfile.mkdtemp() + + def tearDown(self): + super(TransductionProblem, self).tearDown() + # Remove the directory after the test + shutil.rmtree(self.test_dir) + + @parameterized.named_parameters( + ('CopySequence', + transduction_problems.CopySequence(), + lambda x: x), + ('CopySequenceSmall', + transduction_problems.CopySequenceSmall(), + lambda x: x), + ('FlipBiGramSequence', + transduction_problems.FlipBiGramSequence(), + lambda x: [x[i+1] if i%2 == 0 else x[i-1] for i in range(len(x))]), + ('ReverseSequence', + transduction_problems.ReverseSequence(), + lambda x: x[::-1]), + ('ReverseSequenceSmall', + transduction_problems.ReverseSequenceSmall(), + lambda x: x[::-1]), + ) + def testTransduction(self, p, transformation): + data_dir = '' + dataset_split = problem.DatasetSplit.TEST + for sample in p.generate_samples(data_dir, self.test_dir, dataset_split): + input_tokens = sample['inputs'].split(' ') + target_tokens = sample['targets'].split(' ') + self.assertBetween(len(input_tokens), + p.min_sequence_length(dataset_split), + p.max_sequence_length(dataset_split)) + self.assertBetween(len(target_tokens), + p.min_sequence_length(dataset_split), + p.max_sequence_length(dataset_split)) + + transformed_inputs = np.array(transformation(input_tokens)) + + np.testing.assert_equal(transformed_inputs, 
target_tokens) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/data_generators/translate.py b/tensor2tensor/data_generators/translate.py new file mode 100644 index 000000000..81baa4315 --- /dev/null +++ b/tensor2tensor/data_generators/translate.py @@ -0,0 +1,356 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for translation data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import gzip +import os +import tarfile +import zipfile +from tensor2tensor.data_generators import cleaner_en_xx +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import bleu_hook +from tensor2tensor.utils import contrib +from tensor2tensor.utils import mlperf_log + +import tensorflow.compat.v1 as tf + + +class TranslateProblem(text_problems.Text2TextProblem): + """Base class for translation problems.""" + + @property + def is_generate_per_split(self): + return True + + @property + def approx_vocab_size(self): + return 2**15 + + @property + def datatypes_to_clean(self): + return None + + def source_data_files(self, dataset_split): + """Files to be passed to compile_data.""" + raise NotImplementedError() + + def vocab_data_files(self): + """Files to be passed to get_or_generate_vocab.""" + return self.source_data_files(problem.DatasetSplit.TRAIN) + + def generate_samples( + self, + data_dir, + tmp_dir, + dataset_split, + custom_iterator=text_problems.text2text_txt_iterator): + datasets = self.source_data_files(dataset_split) + tag = "dev" + datatypes_to_clean = None + if dataset_split == problem.DatasetSplit.TRAIN: + tag = "train" + datatypes_to_clean = self.datatypes_to_clean + data_path = compile_data( + tmp_dir, datasets, "%s-compiled-%s" % (self.name, tag), + datatypes_to_clean=datatypes_to_clean) + + return custom_iterator(data_path + ".lang1", data_path + ".lang2") + + def generate_text_for_vocab(self, data_dir, tmp_dir): + return generator_utils.generate_lines_for_vocab(tmp_dir, + self.vocab_data_files()) + + @property + def decode_hooks(self): + return [compute_bleu_summaries] + + +def compute_bleu_summaries(hook_args): + """Compute BLEU core summaries using the decoder output. + + Args: + hook_args: DecodeHookArgs namedtuple + Returns: + A list of tf.Summary values if hook_args.hparams contains the + reference file and the translated file. 
+ """ + decode_hparams = hook_args.decode_hparams + + if not (decode_hparams.decode_reference and decode_hparams.decode_to_file): + return None + + values = [] + bleu = 100 * bleu_hook.bleu_wrapper( + decode_hparams.decode_reference, decode_hparams.decode_to_file) + values.append(tf.Summary.Value(tag="BLEU", simple_value=bleu)) + tf.logging.info("%s: BLEU = %6.2f" % (decode_hparams.decode_to_file, bleu)) + if hook_args.hparams.mlperf_mode: + current_step = decode_hparams.mlperf_decode_step + mlperf_log.transformer_print( + key=mlperf_log.EVAL_TARGET, value=decode_hparams.mlperf_threshold) + mlperf_log.transformer_print( + key=mlperf_log.EVAL_ACCURACY, + value={ + "epoch": max(current_step // decode_hparams.iterations_per_loop - 1, + 0), + "value": bleu + }) + mlperf_log.transformer_print(key=mlperf_log.EVAL_STOP) + + if bleu >= decode_hparams.mlperf_threshold: + decode_hparams.set_hparam("mlperf_success", True) + + return values + + +def _preprocess_sgm(line, is_sgm): + """Preprocessing to strip tags in SGM files.""" + if not is_sgm: + return line + # In SGM files, remove ,

+  # <srcset>, <p> and <doc> lines.
+  if line.startswith("<srcset") or line.startswith("</srcset"):
+    return ""
+  if line.startswith("<doc") or line.startswith("</doc"):
+    return ""
+  if line.startswith("<p>") or line.startswith("</p>"):
+    return ""
+  # Strip <seg> tags.
+  line = line.strip()
+  if line.startswith("<seg") and line.endswith("</seg>"):
+    i = line.index(">")
+    return line[i + 1:-6]  # Strip first <seg ...> and last </seg>.
+
+
+def _clean_sentences(sentence_pairs):
+  res_pairs = []
+  for cleaned in cleaner_en_xx.clean_en_xx_pairs(sentence_pairs):
+    res_pairs.append(cleaned)
+  return res_pairs
+
+
+def _tmx_to_source_target(tmx_file, source_resfile, target_resfile,
+                          do_cleaning=False):
+  source_target_pairs = cleaner_en_xx.paracrawl_v3_pairs(tmx_file)
+  if do_cleaning:
+    source_target_pairs = cleaner_en_xx.clean_en_xx_pairs(source_target_pairs)
+  for source, target in source_target_pairs:
+    source_resfile.write(source)
+    source_resfile.write("\n")
+    target_resfile.write(target)
+    target_resfile.write("\n")
+
+
+def compile_data(tmp_dir, datasets, filename, datatypes_to_clean=None):
+  """Concatenates all `datasets` and saves to `filename`."""
+  datatypes_to_clean = datatypes_to_clean or []
+  filename = os.path.join(tmp_dir, filename)
+  lang1_fname = filename + ".lang1"
+  lang2_fname = filename + ".lang2"
+  if tf.gfile.Exists(lang1_fname) and tf.gfile.Exists(lang2_fname):
+    tf.logging.info("Skipping compile data, found files:\n%s\n%s", lang1_fname,
+                    lang2_fname)
+    return filename
+  with tf.gfile.GFile(lang1_fname, mode="w") as lang1_resfile:
+    with tf.gfile.GFile(lang2_fname, mode="w") as lang2_resfile:
+      for dataset in datasets:
+        url = dataset[0]
+        compressed_filename = os.path.basename(url)
+        compressed_filepath = os.path.join(tmp_dir, compressed_filename)
+        if url.startswith("http"):
+          generator_utils.maybe_download(tmp_dir, compressed_filename, url)
+        if compressed_filename.endswith(".zip"):
+          zipfile.ZipFile(os.path.join(compressed_filepath),
+                          "r").extractall(tmp_dir)
+
+        if dataset[1][0] == "tmx":
+          cleaning_requested = "tmx" in datatypes_to_clean
+          tmx_filename = os.path.join(tmp_dir, dataset[1][1])
+          if tmx_filename.endswith(".gz"):
+            with gzip.open(tmx_filename, "rb") as tmx_file:
+              _tmx_to_source_target(tmx_file, lang1_resfile, lang2_resfile,
+                                    do_cleaning=cleaning_requested)
+          else:
+            with tf.gfile.Open(tmx_filename) as tmx_file:
+              _tmx_to_source_target(tmx_file, lang1_resfile, lang2_resfile,
+                                    do_cleaning=cleaning_requested)
+
+        elif dataset[1][0] == "tsv":
+          _, src_column, trg_column, glob_pattern = dataset[1]
+          filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern))
+          if not filenames:
+            # Capture *.tgz and *.tar.gz too.
+ mode = "r:gz" if compressed_filepath.endswith("gz") else "r" + with tarfile.open(compressed_filepath, mode) as corpus_tar: + corpus_tar.extractall(tmp_dir) + filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern)) + for tsv_filename in filenames: + if tsv_filename.endswith(".gz"): + new_filename = tsv_filename.strip(".gz") + generator_utils.gunzip_file(tsv_filename, new_filename) + tsv_filename = new_filename + with tf.gfile.Open(tsv_filename) as tsv_file: + for line in tsv_file: + if line and "\t" in line: + parts = line.split("\t") + source, target = parts[src_column], parts[trg_column] + source, target = source.strip(), target.strip() + clean_pairs = [(source, target)] + if "tsv" in datatypes_to_clean: + clean_pairs = cleaner_en_xx.clean_en_xx_pairs(clean_pairs) + for source, target in clean_pairs: + if source and target: + lang1_resfile.write(source) + lang1_resfile.write("\n") + lang2_resfile.write(target) + lang2_resfile.write("\n") + + else: + lang1_filename, lang2_filename = dataset[1] + lang1_filepath = os.path.join(tmp_dir, lang1_filename) + lang2_filepath = os.path.join(tmp_dir, lang2_filename) + is_sgm = ( + lang1_filename.endswith("sgm") and lang2_filename.endswith("sgm")) + + if not (tf.gfile.Exists(lang1_filepath) and + tf.gfile.Exists(lang2_filepath)): + # For .tar.gz and .tgz files, we read compressed. + mode = "r:gz" if compressed_filepath.endswith("gz") else "r" + with tarfile.open(compressed_filepath, mode) as corpus_tar: + corpus_tar.extractall(tmp_dir) + if lang1_filepath.endswith(".gz"): + new_filepath = lang1_filepath.strip(".gz") + generator_utils.gunzip_file(lang1_filepath, new_filepath) + lang1_filepath = new_filepath + if lang2_filepath.endswith(".gz"): + new_filepath = lang2_filepath.strip(".gz") + generator_utils.gunzip_file(lang2_filepath, new_filepath) + lang2_filepath = new_filepath + + for example in text_problems.text2text_txt_iterator( + lang1_filepath, lang2_filepath): + line1res = _preprocess_sgm(example["inputs"], is_sgm) + line2res = _preprocess_sgm(example["targets"], is_sgm) + clean_pairs = [(line1res, line2res)] + if "txt" in datatypes_to_clean: + clean_pairs = cleaner_en_xx.clean_en_xx_pairs(clean_pairs) + for line1res, line2res in clean_pairs: + if line1res and line2res: + lang1_resfile.write(line1res) + lang1_resfile.write("\n") + lang2_resfile.write(line2res) + lang2_resfile.write("\n") + + return filename + + +class TranslateDistillProblem(TranslateProblem): + """Base class for translation problems.""" + + @property + def is_generate_per_split(self): + return True + + def example_reading_spec(self): + data_fields = {"dist_targets": tf.VarLenFeature(tf.int64)} + + if self.has_inputs: + data_fields["inputs"] = tf.VarLenFeature(tf.int64) + + # hack: ignoring true targets and putting dist_targets in targets + data_items_to_decoders = { + "inputs": contrib.slim().tfexample_decoder.Tensor("inputs"), + "targets": contrib.slim().tfexample_decoder.Tensor("dist_targets"), + } + + return (data_fields, data_items_to_decoders) + + def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False): + """Get vocab for distill problems.""" + # We assume that vocab file is present in data_dir directory where the + # data generated will be stored. 
+ vocab_filepath = os.path.join(data_dir, self.vocab_filename) + encoder = text_encoder.SubwordTextEncoder(vocab_filepath) + return encoder + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + generator = self.generate_samples(data_dir, tmp_dir, dataset_split) + vocab = self.get_or_create_vocab(data_dir, tmp_dir) + # For each example, encode the text and append EOS ID. + for sample in generator: + if self.has_inputs: + sample["inputs"] = vocab.encode(sample["inputs"]) + sample["inputs"].append(text_encoder.EOS_ID) + sample["targets"] = vocab.encode(sample["targets"]) + sample["targets"].append(text_encoder.EOS_ID) + sample["dist_targets"] = vocab.encode(sample["dist_targets"]) + sample["dist_targets"].append(text_encoder.EOS_ID) + yield sample + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + data_path = self.source_data_files(dataset_split) + assert tf.gfile.Exists(data_path) + return text_problems.text2text_distill_iterator(data_path + "inputs", + data_path + "gold", + data_path + "prediction") + + +class TranslateWmt20Problem(TranslateProblem): + """Base class for WMT20 Datasets.""" + + @property + def is_generate_per_split(self): + return True + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + generator = self.generate_samples(data_dir, tmp_dir, dataset_split) + vocab = self.get_or_create_vocab(data_dir, tmp_dir) + # For each example, encode the text and append EOS ID. + for sample in generator: + if self.has_inputs: + sample["inputs"] = vocab.encode(sample["inputs"]) + sample["inputs"].append(text_encoder.EOS_ID) + sample["targets"] = vocab.encode(sample["targets"]) + sample["targets"].append(text_encoder.EOS_ID) + yield sample + + def generate_text_for_vocab(self, data_dir, tmp_dir): + for i, sample in enumerate( + self.generate_samples(data_dir, tmp_dir, problem.DatasetSplit.TRAIN)): + if self.has_inputs: + yield sample["inputs"] + yield sample["targets"] + if self.max_samples_for_vocab and (i + 1) >= self.max_samples_for_vocab: + break + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + data_path = self.source_data_files(dataset_split)[0] + return text_problems.text2text_txt_tab_iterator(data_path) + + +class TranslateSamanantarProblem(TranslateWmt20Problem): + """Base class for Samanantar Datasets.""" + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + src_data_path = self.source_data_files(dataset_split)[0] + tgt_data_path = self.source_data_files(dataset_split)[1] + return text_problems.text2text_txt_iterator(src_data_path, tgt_data_path) diff --git a/tensor2tensor/data_generators/translate_encs.py b/tensor2tensor/data_generators/translate_encs.py new file mode 100644 index 000000000..bce8a243e --- /dev/null +++ b/tensor2tensor/data_generators/translate_encs.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
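Before the per-language modules that follow, it may help to see the three dataset-entry shapes that `compile_data()` above accepts; the URLs and file names below are placeholders of my own, shown only to illustrate the structure:

```python
# Sketch of the three entry formats handled by compile_data() (placeholders).
_EXAMPLE_DATASETS = [
    # Plain parallel text: (url, (lang1_file, lang2_file) inside the archive).
    ["http://example.com/parallel-corpus.tgz",
     ("corpus/train.en", "corpus/train.xx")],
    # TSV: (url, ("tsv", source_column, target_column, glob_pattern)).
    ["http://example.com/corpus-tsv.tar",
     ("tsv", 3, 2, "corpus/*train.gz")],
    # TMX: (url, ("tmx", tmx_file_name_inside_the_archive)).
    ["https://example.com/en-xx.tmx.gz",
     ("tmx", "en-xx.tmx.gz")],
]
```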
+ +"""Data generators for translation data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import translate +from tensor2tensor.utils import registry + + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + +_ENCS_TRAIN_DATASETS = [ + [("https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/" + "11234/1-1458/data-plaintext-format.tar"), + ("tsv", 3, 2, "data.plaintext-format/*train.gz")], + [ + "http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz", # pylint: disable=line-too-long + ("training-parallel-nc-v13/news-commentary-v13.cs-en.en", + "training-parallel-nc-v13/news-commentary-v13.cs-en.cs") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz", + ("commoncrawl.cs-en.en", "commoncrawl.cs-en.cs") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz", + ("training/europarl-v7.cs-en.en", "training/europarl-v7.cs-en.cs") + ], +] +_ENCS_TEST_DATASETS = [ + [ + "http://data.statmt.org/wmt17/translation-task/dev.tgz", + ("dev/newstest2013.en", "dev/newstest2013.cs") + ], +] + + +@registry.register_problem +class TranslateEncsWmt32k(translate.TranslateProblem): + """Problem spec for WMT English-Czech translation.""" + + @property + def approx_vocab_size(self): + return 2**15 # 32768 + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + return _ENCS_TRAIN_DATASETS if train else _ENCS_TEST_DATASETS + + def vocab_data_files(self): + datasets = self.source_data_files(problem.DatasetSplit.TRAIN) + vocab_datasets = [] + if datasets[0][0].endswith("data-plaintext-format.tar"): + vocab_datasets.append([ + datasets[0][0], [ + "%s-compiled-train.lang1" % self.name, + "%s-compiled-train.lang2" % self.name + ] + ]) + datasets = datasets[1:] + vocab_datasets += [[item[0], [item[1][0], item[1][1]]] for item in datasets] + return vocab_datasets + + +@registry.register_problem +class TranslateEncsWmtCharacters(translate.TranslateProblem): + """Problem spec for WMT En-Cs character-based translation.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + datasets = _ENCS_TRAIN_DATASETS if train else _ENCS_TEST_DATASETS + tag = "train" if train else "dev" + data_path = translate.compile_data(tmp_dir, datasets, + "wmt_encs_chr_%s" % tag) + return text_problems.text2text_txt_iterator(data_path + ".lang1", + data_path + ".lang2") diff --git a/tensor2tensor/data_generators/translate_encs_cubbitt.py b/tensor2tensor/data_generators/translate_encs_cubbitt.py new file mode 100644 index 000000000..f2a813b5b --- /dev/null +++ b/tensor2tensor/data_generators/translate_encs_cubbitt.py @@ -0,0 +1,98 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for English-Czech backtranslation NMT data-sets. + +To use this problem you need to provide backtranslated (synthetic) data to +tmp_dir (cs_mono_{en,cs}.txt{0,1,2} - each file of a similar size to the +authentic training data). +You can either translate the monolingual data yourself or you can download +"csmono" data from CzEng2.0 (http://ufal.mff.cuni.cz/czeng, registration needed) +which comes with synthetic translations into English using a +backtranslation-trained model, thus the final model will be using +"iterated" backtranslation. + +To get the best results out of the Block-Backtranslation +(where blocks of synthetic and authentic training data are concatenated +without shuffling), you should use checkpoint averaging (see t2t-avg-all). +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import translate +from tensor2tensor.data_generators import translate_encs +from tensor2tensor.utils import registry + + +@registry.register_problem +class TranslateEncsCubbitt(translate_encs.TranslateEncsWmt32k): + """Problem spec for English-Czech CUBBITT (CUni Block-Backtranslation-Improved Transformer Translation).""" + + @property + def use_vocab_from_other_problem(self): + return translate_encs.TranslateEncsWmt32k() + + @property + def already_shuffled(self): + return True + + @property + def skip_random_fraction_when_training(self): + return False + + @property + def backtranslate_data_filenames(self): + """List of pairs of files with matched back-translated data.""" + # Files must be placed in tmp_dir, each similar size to authentic data. + return [("cs_mono_en.txt%d" % i, "cs_mono_cs.txt%d" % i) for i in [0, 1, 2]] + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 1, # Use just 1 shard so as to not mix data. + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + datasets = self.source_data_files(dataset_split) + tag = "train" if dataset_split == problem.DatasetSplit.TRAIN else "dev" + data_path = translate.compile_data( + tmp_dir, datasets, "%s-compiled-%s" % (self.name, tag)) + # For eval, use authentic data. + if dataset_split != problem.DatasetSplit.TRAIN: + for example in text_problems.text2text_txt_iterator( + data_path + ".lang1", data_path + ".lang2"): + yield example + else: # For training, mix synthetic and authentic data as follows. + for (file1, file2) in self.backtranslate_data_filenames: + path1 = os.path.join(tmp_dir, file1) + path2 = os.path.join(tmp_dir, file2) + # Synthetic data first. + for example in text_problems.text2text_txt_iterator(path1, path2): + yield example + # Now authentic data. 
+ for example in text_problems.text2text_txt_iterator( + data_path + ".lang1", data_path + ".lang2"): + yield example diff --git a/tensor2tensor/data_generators/translate_ende.py b/tensor2tensor/data_generators/translate_ende.py new file mode 100644 index 000000000..98182aec8 --- /dev/null +++ b/tensor2tensor/data_generators/translate_ende.py @@ -0,0 +1,249 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for translation data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import translate +from tensor2tensor.data_generators import wiki_lm +from tensor2tensor.utils import registry + + +_ENDE_TRAIN_DATASETS = [ + [ + "http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz", # pylint: disable=line-too-long + ("training-parallel-nc-v13/news-commentary-v13.de-en.en", + "training-parallel-nc-v13/news-commentary-v13.de-en.de") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz", + ("commoncrawl.de-en.en", "commoncrawl.de-en.de") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz", + ("training/europarl-v7.de-en.en", "training/europarl-v7.de-en.de") + ], +] + +_ENDE_EVAL_DATASETS = [ + [ + "http://data.statmt.org/wmt17/translation-task/dev.tgz", + ("dev/newstest2013.en", "dev/newstest2013.de") + ], +] + +_ENDE_RAPID_TRAIN_DATASET = [ + # additional training data available for WMT 18 news task training data + # as defined by http://www.statmt.org/wmt18/translation-task.html + [ + "http://data.statmt.org/wmt18/translation-task/rapid2016.tgz", + ("rapid2016.de-en.en", "rapid2016.de-en.de"), + ], +] + +_ENDE_PARACRAWL_DATASETS = [ + [ + "https://s3.amazonaws.com/web-language-models/paracrawl/release4/en-de.bicleaner07.tmx.gz", # pylint: disable=line-too-long + ("tmx", "en-de.bicleaner07.tmx.gz") + ] +] + + +@registry.register_problem +class TranslateEndeWmt32k(translate.TranslateProblem): + """En-de translation trained on WMT corpus.""" + + @property + def additional_training_datasets(self): + """Allow subclasses to add training datasets.""" + return [] + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + train_datasets = _ENDE_TRAIN_DATASETS + self.additional_training_datasets + return train_datasets if train else _ENDE_EVAL_DATASETS + + +@registry.register_problem +class TranslateEnde2018Wmt32k(translate.TranslateProblem): + """En-de translation trained on WMT18 corpus.""" + + @property + def use_vocab_from_other_problem(self): + return TranslateEndeWmt32k() + + @property + def additional_training_datasets(self): + """WMT18 adds rapid data.""" + return _ENDE_RAPID_TRAIN_DATASET + + +@registry.register_problem +class 
TranslateEndeWmtClean32k(TranslateEndeWmt32k): + """En-de translation trained on WMT with further cleaning.""" + + @property + def use_vocab_from_other_problem(self): + return TranslateEndeWmt32k() + + @property + def datatypes_to_clean(self): + return ["txt"] + + +@registry.register_problem +class TranslateEndePc32k(translate.TranslateProblem): + """En-de translation trained on Paracrawl (bicleaner corpus).""" + + @property + def use_vocab_from_other_problem(self): + return TranslateEndeWmt32k() + + @property + def additional_training_datasets(self): + """Allow subclasses to add training datasets.""" + return [] + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + train_datasets = ( + _ENDE_PARACRAWL_DATASETS + self.additional_training_datasets) + return train_datasets if train else _ENDE_EVAL_DATASETS + + +@registry.register_problem +class TranslateEndePcClean32k(TranslateEndePc32k): + """En-de translation trained on Paracrawl with further cleaning.""" + + @property + def datatypes_to_clean(self): + return ["tmx"] + + +@registry.register_problem +class TranslateEndeWmtPc32k(TranslateEndeWmt32k): + """En-de translation trained on WMT plus Paracrawl.""" + + @property + def use_vocab_from_other_problem(self): + return TranslateEndeWmt32k() + + @property + def additional_training_datasets(self): + return _ENDE_PARACRAWL_DATASETS + + +@registry.register_problem +class TranslateEndeWmtCleanPc32k(TranslateEndeWmtPc32k): + """En-de translation trained on cleaned WMT plus Paracrawl.""" + + @property + def datatypes_to_clean(self): + return ["txt"] + + +@registry.register_problem +class TranslateEndeWmtPcClean32k(TranslateEndeWmtPc32k): + """En-de translation trained on WMT plus cleaned Paracrawl.""" + + @property + def datatypes_to_clean(self): + return ["tmx"] + + +@registry.register_problem +class TranslateEndeWmtCleanPcClean32k(TranslateEndeWmtPcClean32k): + """En-de translation trained on cleaned WMT plus cleaned Paracrawl.""" + + @property + def datatypes_to_clean(self): + return ["txt", "tmx"] + + +@registry.register_problem +class TranslateEndeWmt32kPacked(TranslateEndeWmt32k): + + @property + def packed_length(self): + return 256 + + @property + def use_vocab_from_other_problem(self): + return TranslateEndeWmt32k() + + +@registry.register_problem +class TranslateEndeWmt8k(TranslateEndeWmt32k): + """Problem spec for WMT En-De translation.""" + + @property + def approx_vocab_size(self): + return 2**13 # 8192 + + +@registry.register_problem +class TranslateEndeWmt8kPacked(TranslateEndeWmt8k): + + @property + def packed_length(self): + return 256 + + @property + def use_vocab_from_other_problem(self): + return TranslateEndeWmt8k() + + +@registry.register_problem +class TranslateEndeWmtCharacters(TranslateEndeWmt8k): + """Problem spec for WMT En-De translation.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + +@registry.register_problem +class TranslateEndeWmtMulti64k(TranslateEndeWmt8k): + """Translation with muli-lingual vocabulary.""" + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelDeEnFrRoWiki64k() + + +@registry.register_problem +class TranslateEndeWmtMulti64kPacked1k(TranslateEndeWmtMulti64k): + """Translation with muli-lingual vocabulary.""" + + @property + def packed_length(self): + return 1024 + + @property + def num_training_examples(self): + return 173800 + + @property + def inputs_prefix(self): + return "translate English German " + + @property + def 
targets_prefix(self): + return "translate German English " diff --git a/tensor2tensor/data_generators/translate_ende_test.py b/tensor2tensor/data_generators/translate_ende_test.py new file mode 100644 index 000000000..37443b620 --- /dev/null +++ b/tensor2tensor/data_generators/translate_ende_test.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.data_generators.translate_ende.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import translate_ende + +import tensorflow.compat.v1 as tf + + +class TranslateEndeTest(tf.test.TestCase): + """Tests that some TranslateEnde subclasses inherit information correctly.""" + + def test_vocab_size(self): + wmt_8k = translate_ende.TranslateEndeWmt8k() + wmt_32k = translate_ende.TranslateEndeWmt32k() + self.assertEqual(wmt_8k.approx_vocab_size, 8192) + self.assertEqual(wmt_32k.approx_vocab_size, 32768) + + def test_additional_datasets(self): + wmt_8k = translate_ende.TranslateEndeWmt8k() + wmt_32k = translate_ende.TranslateEndeWmt32k() + self.assertListEqual(wmt_8k.additional_training_datasets, []) + self.assertListEqual(wmt_32k.additional_training_datasets, []) + + def test_source_data_files(self): + wmt_8k = translate_ende.TranslateEndeWmt8k() + wmt_32k = translate_ende.TranslateEndeWmt32k() + eval_split = problem.DatasetSplit.EVAL + train_split = problem.DatasetSplit.TRAIN + + wmt_8k_eval_files = wmt_8k.source_data_files(eval_split) + wmt_32k_eval_files = wmt_32k.source_data_files(eval_split) + self.assertListEqual(wmt_8k_eval_files, wmt_32k_eval_files) + self.assertGreater(len(wmt_8k_eval_files), 0) + + wmt_8k_train_files = wmt_8k.source_data_files(train_split) + wmt_32k_train_files = wmt_32k.source_data_files(train_split) + self.assertListEqual(wmt_8k_train_files, wmt_32k_train_files) + self.assertGreater(len(wmt_8k_train_files), 0) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/data_generators/translate_enes.py b/tensor2tensor/data_generators/translate_enes.py new file mode 100644 index 000000000..4a971e43f --- /dev/null +++ b/tensor2tensor/data_generators/translate_enes.py @@ -0,0 +1,127 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
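The En-De problem classes and the accompanying test above follow the usual T2T pattern: each class is registered via `@registry.register_problem` and selects its source corpora per dataset split. A minimal, illustrative sketch of inspecting one of them directly (the registry naming convention mentioned in the comment is an assumption about typical T2T usage, not part of this change):

```python
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import translate_ende

# Problems are normally looked up by registered name (snake_case of the
# class name, e.g. "translate_ende_wmt8k"), but can be instantiated directly.
ende = translate_ende.TranslateEndeWmt8k()
print(ende.approx_vocab_size)  # 8192, as checked in translate_ende_test.py

# TRAIN and EVAL resolve to different source corpora.
train_files = ende.source_data_files(problem.DatasetSplit.TRAIN)
eval_files = ende.source_data_files(problem.DatasetSplit.EVAL)
assert train_files != eval_files
```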
+ +"""Data generators for translation data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import translate +from tensor2tensor.utils import registry + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + +_ENES_TRAIN_DATASETS = [ + [ + "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz", + ("commoncrawl.es-en.en", "commoncrawl.es-en.es") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz", + ("training/europarl-v7.es-en.en", "training/europarl-v7.es-en.es") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-un.tgz", + ("un/undoc.2000.es-en.en", "un/undoc.2000.es-en.es") + ], + [ + "https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-es.zipporah0-dedup-clean.tgz", + ("paracrawl-release1.en-es.zipporah0-dedup-clean.en", + "paracrawl-release1.en-es.zipporah0-dedup-clean.es") + ] +] +_ENES_TEST_DATASETS = [ + [ + "http://data.statmt.org/wmt17/translation-task/dev.tgz", + ("dev/newstest2013.en", "dev/newstest2013.es") + ], +] + + +@registry.register_problem +class TranslateEnesWmt32k(translate.TranslateProblem): + """En-es translation trained on WMT corpus.""" + + @property + def additional_training_datasets(self): + """Allow subclasses to add training datasets.""" + return [] + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + train_datasets = _ENES_TRAIN_DATASETS + self.additional_training_datasets + return train_datasets if train else _ENES_TEST_DATASETS + + def vocab_data_files(self): + return _ENES_TRAIN_DATASETS + + +@registry.register_problem +class TranslateEnesWmtClean32k(TranslateEnesWmt32k): + """En-es translation trained on WMT with further cleaning.""" + + @property + def use_vocab_from_other_problem(self): + return TranslateEnesWmt32k() + + @property + def datatypes_to_clean(self): + return ["txt"] + + +@registry.register_problem +class TranslateEnesWmt32kPacked(TranslateEnesWmt32k): + + @property + def packed_length(self): + return 256 + + @property + def use_vocab_from_other_problem(self): + return TranslateEnesWmt32k() + + +@registry.register_problem +class TranslateEnesWmt8k(TranslateEnesWmt32k): + """Problem spec for WMT En-Es translation.""" + + @property + def approx_vocab_size(self): + return 2**13 # 8192 + + +@registry.register_problem +class TranslateEnesWmt8kPacked(TranslateEnesWmt8k): + + @property + def packed_length(self): + return 256 + + @property + def use_vocab_from_other_problem(self): + return TranslateEnesWmt8k() + + +@registry.register_problem +class TranslateEnesWmtCharacters(TranslateEnesWmt8k): + """Problem spec for WMT En-Es translation.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER diff --git a/tensor2tensor/data_generators/translate_enet.py b/tensor2tensor/data_generators/translate_enet.py new file mode 100644 index 000000000..be91cb3c3 --- /dev/null +++ b/tensor2tensor/data_generators/translate_enet.py @@ -0,0 +1,78 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for En-Et translation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import translate +from tensor2tensor.utils import registry + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + +# For English-Estonian the WMT18 data is used +# The complete corpus has ~ 2,18M sentences +_ENET_TRAIN_DATASETS = [ + [ + "http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz", # pylint: disable=line-too-long + ("training/europarl-v8.et-en.en", "training/europarl-v8.et-en.et") + ], + [ + "https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-et.zipporah0-dedup-clean.tgz", # pylint: disable=line-too-long + ("paracrawl-release1.en-et.zipporah0-dedup-clean.en", + "paracrawl-release1.en-et.zipporah0-dedup-clean.et") + ], + [ + "http://data.statmt.org/wmt18/translation-task/rapid2016.tgz", + ("rapid2016.en-et.en", "rapid2016.en-et.et") + ], +] + +# For development 2,000 parallel sentences are used +_ENET_TEST_DATASETS = [[ + "https://github.com/stefan-it/nmt-en-et/raw/master/data/newsdev2018-enet.tar.gz", # pylint: disable=line-too-long + ("newsdev2018-enet-src.en", "newsdev2018-enet-ref.et") +]] + + +@registry.register_problem +class TranslateEnetWmt32k(translate.TranslateProblem): + """Problem spec for WMT18 En-Et translation.""" + + @property + def approx_vocab_size(self): + return 2**15 # 32768 + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + return _ENET_TRAIN_DATASETS if train else _ENET_TEST_DATASETS + + +@registry.register_problem +class TranslateEnetWmtCharacters(translate.TranslateProblem): + """Problem spec for WMT18 En-Et translation.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + return _ENET_TRAIN_DATASETS if train else _ENET_TEST_DATASETS diff --git a/tensor2tensor/data_generators/translate_enfr.py b/tensor2tensor/data_generators/translate_enfr.py new file mode 100644 index 000000000..ed05e3e1d --- /dev/null +++ b/tensor2tensor/data_generators/translate_enfr.py @@ -0,0 +1,270 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
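The En-Et problems above differ only in vocabulary handling; both resolve their corpora through `source_data_files`, which simply returns URL/file-name pairs (no download is triggered). A small sketch, assuming only the code in that module:

```python
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import translate_enet

enet = translate_enet.TranslateEnetWmt32k()

# TRAIN resolves to the three WMT18 corpora, EVAL to the newsdev2018 set.
train_sources = enet.source_data_files(problem.DatasetSplit.TRAIN)
eval_sources = enet.source_data_files(problem.DatasetSplit.EVAL)
assert len(train_sources) == 3 and len(eval_sources) == 1

# Each entry pairs a download URL with the (source, target) file names
# expected inside the downloaded archive.
url, (en_file, et_file) = train_sources[0]
```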
+ +"""Data generators for translation data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import translate +from tensor2tensor.data_generators import wiki_lm +from tensor2tensor.utils import registry + + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + +_ENFR_TRAIN_SMALL_DATA = [ + [ + "https://s3.amazonaws.com/opennmt-trainingdata/baseline-1M-enfr.tgz", + ("baseline-1M-enfr/baseline-1M_train.en", + "baseline-1M-enfr/baseline-1M_train.fr") + ], +] +_ENFR_TEST_SMALL_DATA = [ + [ + "https://s3.amazonaws.com/opennmt-trainingdata/baseline-1M-enfr.tgz", + ("baseline-1M-enfr/baseline-1M_valid.en", + "baseline-1M-enfr/baseline-1M_valid.fr") + ], +] +_ENFR_TRAIN_LARGE_DATA = [ + [ + "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz", + ("commoncrawl.fr-en.en", "commoncrawl.fr-en.fr") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz", + ("training/europarl-v7.fr-en.en", "training/europarl-v7.fr-en.fr") + ], + [ + "http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz", + ("training/news-commentary-v9.fr-en.en", + "training/news-commentary-v9.fr-en.fr") + ], + [ + "http://www.statmt.org/wmt10/training-giga-fren.tar", + ("giga-fren.release2.fixed.en.gz", + "giga-fren.release2.fixed.fr.gz") + ], + [ + "http://www.statmt.org/wmt13/training-parallel-un.tgz", + ("un/undoc.2000.fr-en.en", "un/undoc.2000.fr-en.fr") + ], +] +_ENFR_TEST_LARGE_DATA = [ + [ + "http://data.statmt.org/wmt17/translation-task/dev.tgz", + ("dev/newstest2013.en", "dev/newstest2013.fr") + ], +] + + +@registry.register_problem +class TranslateEnfrWmtSmall8k(translate.TranslateProblem): + """Problem spec for WMT En-Fr translation.""" + + @property + def approx_vocab_size(self): + return 2**13 # 8192 + + @property + def use_small_dataset(self): + return True + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + if self.use_small_dataset: + datasets = _ENFR_TRAIN_SMALL_DATA if train else _ENFR_TEST_SMALL_DATA + else: + datasets = _ENFR_TRAIN_LARGE_DATA if train else _ENFR_TEST_LARGE_DATA + return datasets + + def vocab_data_files(self): + return (_ENFR_TRAIN_SMALL_DATA if self.use_small_dataset + else _ENFR_TRAIN_LARGE_DATA) + + +@registry.register_problem +class TranslateEnfrWmtSmall32k(TranslateEnfrWmtSmall8k): + + @property + def approx_vocab_size(self): + return 2**15 # 32768 + + +@registry.register_problem +class TranslateEnfrWmt8k(TranslateEnfrWmtSmall8k): + + @property + def use_small_dataset(self): + return False + + +@registry.register_problem +class TranslateEnfrWmt32k(TranslateEnfrWmtSmall32k): + + @property + def use_small_dataset(self): + return False + + +@registry.register_problem +class TranslateEnfrWmt32kPacked(TranslateEnfrWmt32k): + + @property + def packed_length(self): + return 256 + + @property + def use_vocab_from_other_problem(self): + return TranslateEnfrWmt32k() + + +@registry.register_problem +class TranslateEnfrWmt32kWithBacktranslateFr(TranslateEnfrWmt32k): + """En-Fr translation with added French data, back-translated.""" + + @property + def use_vocab_from_other_problem(self): + return TranslateEnfrWmt32k() + + @property + def already_shuffled(self): + 
return True + + @property + def skip_random_fraction_when_training(self): + return False + + @property + def backtranslate_data_filenames(self): + """List of pairs of files with matched back-translated data.""" + # Files must be placed in tmp_dir, each similar size to authentic data. + return [("fr_mono_en.txt", "fr_mono_fr.txt")] + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 1, # Use just 1 shard so as to not mix data. + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + datasets = self.source_data_files(dataset_split) + tag = "train" if dataset_split == problem.DatasetSplit.TRAIN else "dev" + data_path = translate.compile_data( + tmp_dir, datasets, "%s-compiled-%s" % (self.name, tag)) + # For eval, use authentic data. + if dataset_split != problem.DatasetSplit.TRAIN: + for example in text_problems.text2text_txt_iterator( + data_path + ".lang1", data_path + ".lang2"): + yield example + else: # For training, mix synthetic and authentic data as follows. + for (file1, file2) in self.backtranslate_data_filenames: + path1 = os.path.join(tmp_dir, file1) + path2 = os.path.join(tmp_dir, file2) + # Synthetic data first. + for example in text_problems.text2text_txt_iterator(path1, path2): + yield example + # Now authentic data. + for example in text_problems.text2text_txt_iterator( + data_path + ".lang1", data_path + ".lang2"): + yield example + + +@registry.register_problem +class TranslateEnfrWmt32kWithBacktranslateEn( + TranslateEnfrWmt32kWithBacktranslateFr): + """En-Fr translation with added English data, back-translated.""" + + @property + def backtranslate_data_filenames(self): + """List of pairs of files with matched back-translated data.""" + # Files must be placed in tmp_dir, each similar size to authentic data. 
+ return [("en_mono_en.txt%d" % i, "en_mono_fr.txt%d" % i) for i in [0, 1, 2]] + + +@registry.register_problem +class TranslateEnfrWmtSmallCharacters(translate.TranslateProblem): + """Problem spec for WMT En-Fr translation.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + @property + def use_small_dataset(self): + return True + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + if self.use_small_dataset: + datasets = _ENFR_TRAIN_SMALL_DATA if train else _ENFR_TEST_SMALL_DATA + else: + datasets = _ENFR_TRAIN_LARGE_DATA if train else _ENFR_TEST_LARGE_DATA + return datasets + + +@registry.register_problem +class TranslateEnfrWmtCharacters(TranslateEnfrWmtSmallCharacters): + + @property + def use_small_dataset(self): + return False + + +@registry.register_problem +class TranslateEnfrWmtMulti64k(TranslateEnfrWmtSmall32k): + """Translation with muli-lingual vocabulary.""" + + @property + def use_small_dataset(self): + return False + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelDeEnFrRoWiki64k() + + +@registry.register_problem +class TranslateEnfrWmtMulti64kPacked1k(TranslateEnfrWmtMulti64k): + """Translation with muli-lingual vocabulary.""" + + @property + def packed_length(self): + return 1024 + + @property + def num_training_examples(self): + return 1760600 + + @property + def inputs_prefix(self): + return "translate English French " + + @property + def targets_prefix(self): + return "translate French English " diff --git a/tensor2tensor/data_generators/translate_enid.py b/tensor2tensor/data_generators/translate_enid.py new file mode 100644 index 000000000..936827f5c --- /dev/null +++ b/tensor2tensor/data_generators/translate_enid.py @@ -0,0 +1,85 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for En-Id translation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# Dependency imports + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import translate +from tensor2tensor.utils import registry + +# End-of-sentence marker. 
+EOS = text_encoder.EOS_ID + +_REPO = "https://github.com/prasastoadi/parallel-corpora-en-id/raw/master/" + +# IWSLT17 : +# 109335 sentences +# https://wit3.fbk.eu/mt.php?release=2017-01-more +# PANL-BPPT : +# 24024 sentences +# http://www.panl10n.net/english/outputs/Indonesia/BPPT/0902/BPPTIndToEngCorpusHalfM.zip # pylint: disable=line-too-long +_ENID_TRAIN_DATASETS = [ + [ + _REPO + "IWSLT17.train.en-id.tgz", + ("IWSLT17.train.en-id.en", "IWSLT17.train.en-id.id") + ], + [ + _REPO + "PANL-BPPT-ECO-EN-ID-150Kw.tgz", + ("PANL-BPPT-ECO-EN-150Kw.txt", "PANL-BPPT-ECO-ID-150Kw.txt") + ], + [ + _REPO + "PANL-BPPT-INT-EN-ID-150Kw.tgz", + ("PANL-BPPT-INT-EN-150Kw.txt", "PANL-BPPT-INT-ID-150Kw.txt") + ], + [ + _REPO + "PANL-BPPT-SCI-EN-ID-100Kw.tgz", + ("PANL-BPPT-SCI-EN-100Kw.txt", "PANL-BPPT-SCI-ID-100Kw.txt") + ], + [ + _REPO + "PANL-BPPT-SPO-EN-ID-100Kw.tgz", + ("PANL-BPPT-SPO-EN-100Kw.txt", "PANL-BPPT-SPO-ID-100Kw.txt") + ], +] + +# IWSLT17 : +# 1478 sentences +# https://wit3.fbk.eu/mt.php?release=2017-01-more +_ENID_TEST_DATASETS = [ + [ + _REPO + "IWSLT17.TED.tst2017plus.en-id.tgz", + ("IWSLT17.TED.tst2017plus.en-id.en", + "IWSLT17.TED.tst2017plus.en-id.id") + ] +] + + +@registry.register_problem +class TranslateEnidIwslt32k(translate.TranslateProblem): + """Problem spec for IWSLT'15 En-Vi translation.""" + + @property + def approx_vocab_size(self): + return 2**15 # 32768 + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + return _ENID_TRAIN_DATASETS if train else _ENID_TEST_DATASETS diff --git a/tensor2tensor/data_generators/translate_enmk.py b/tensor2tensor/data_generators/translate_enmk.py new file mode 100644 index 000000000..60b23b179 --- /dev/null +++ b/tensor2tensor/data_generators/translate_enmk.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for translation data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import translate +from tensor2tensor.utils import registry + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + +# For English-Macedonian the SETimes corpus +# from http://nlp.ffzg.hr/resources/corpora/setimes/ is used. +_ENMK_TRAIN_DATASETS = [[ + "http://nlp.ffzg.hr/data/corpora/setimes/setimes.en-mk.txt.tgz", + ("setimes.en-mk.en.txt", "setimes.en-mk.mk.txt") +]] + +# For development the MULTEXT-East "1984" corpus from +# https://www.clarin.si/repository/xmlui/handle/11356/1043 is used. +# 4,986 parallel sentences are used for evaluation. 
+_ENMK_DEV_DATASETS = [[ + "https://github.com/stefan-it/nmt-en-mk/raw/master/data/MTE-1984-dev.enmk.tgz", # pylint: disable=line-too-long + ("MTE1984-dev.en", "MTE1984-dev.mk") +]] + + +# See this PR on github for some results with Transformer on these Problems. +# https://github.com/tensorflow/tensor2tensor/pull/738 + + +@registry.register_problem +class TranslateEnmkSetimes32k(translate.TranslateProblem): + """Problem spec for SETimes En-Mk translation.""" + + @property + def approx_vocab_size(self): + return 2**15 # 32768 + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + return _ENMK_TRAIN_DATASETS if train else _ENMK_DEV_DATASETS + + +@registry.register_problem +class TranslateEnmkSetimesCharacters(translate.TranslateProblem): + """Problem spec for SETimes En-Mk translation.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + return _ENMK_TRAIN_DATASETS if train else _ENMK_DEV_DATASETS diff --git a/tensor2tensor/data_generators/translate_enro.py b/tensor2tensor/data_generators/translate_enro.py new file mode 100644 index 000000000..1c17b77c7 --- /dev/null +++ b/tensor2tensor/data_generators/translate_enro.py @@ -0,0 +1,173 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
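A pattern repeated across these modules: the subword problem sets `approx_vocab_size`, while the character-level variant only overrides `vocab_type` and reuses the same data sources. A sketch using the SETimes En-Mk problems above:

```python
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_problems
from tensor2tensor.data_generators import translate_enmk

subword = translate_enmk.TranslateEnmkSetimes32k()
chars = translate_enmk.TranslateEnmkSetimesCharacters()

print(subword.approx_vocab_size)  # 32768 subword pieces
print(chars.vocab_type == text_problems.VocabType.CHARACTER)  # True

# Both variants read the same SETimes training files.
train = problem.DatasetSplit.TRAIN
assert subword.source_data_files(train) == chars.source_data_files(train)
```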
+ +"""Data generators for translation data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import random + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import translate +from tensor2tensor.data_generators import wiki_lm +from tensor2tensor.utils import registry + + +_ENRO_TRAIN_DATASETS = [ + [ + "http://www.statmt.org/europarl/v7/ro-en.tgz", + ("europarl-v7.ro-en.en", "europarl-v7.ro-en.ro") + ], + [ + "http://opus.nlpl.eu/download.php?f=SETIMES/v2/moses/en-ro.txt.zip", + ("SETIMES.en-ro.en", "SETIMES.en-ro.ro") + ] +] +_ENRO_TEST_DATASETS = [ + [ + ("http://data.statmt.org/wmt16/translation-task/" + "dev-romanian-updated.tgz"), + ("dev/newsdev2016-roen-ref.en.sgm", "dev/newsdev2016-roen-src.ro.sgm") + ], +] + + +@registry.register_problem +class TranslateEnroWmt8k(translate.TranslateProblem): + """Problem spec for WMT En-Ro translation.""" + + @property + def approx_vocab_size(self): + return 2**13 # 8192 + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + return _ENRO_TRAIN_DATASETS if train else _ENRO_TEST_DATASETS + + +@registry.register_problem +class TranslateEnroWmt32k(TranslateEnroWmt8k): + + @property + def approx_vocab_size(self): + return 2**15 # 32768 + + +@registry.register_problem +class TranslateEnroWmtCharacters(TranslateEnroWmt8k): + """Problem spec for WMT En-Ro translation.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + +@registry.register_problem +class TranslateEnroWmtMulti64k(TranslateEnroWmt8k): + """Translation with muli-lingual vocabulary.""" + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelDeEnFrRoWiki64k() + + +@registry.register_problem +class TranslateEnroWmtMultiSmall64k(TranslateEnroWmt8k): + """Translation with muli-lingual vocabulary, small (6K) training data.""" + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 16, # It's a small dataset, TPUs like at least a few shards. + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelDeEnFrRoWiki64k() + + @property + def how_many_examples_to_sample(self): + return 6000 + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generate just the first 6k samples for training.""" + # If not training, do the same as before. + if dataset_split != problem.DatasetSplit.TRAIN: + for x in super(TranslateEnroWmtMultiSmall64k, self).generate_samples( + data_dir, tmp_dir, dataset_split): + yield x + raise StopIteration + # Now we assume we're training. + counter = 0 + # The size of this data-set in total is around 614K, we want to sample so + # that in expectation we take the requested number of samples in 1 go. + sample_prob = self.how_many_examples_to_sample / float(614000) + # Let's sample. + for x in super(TranslateEnroWmtMultiSmall64k, self).generate_samples( + data_dir, tmp_dir, dataset_split): + if random.random() > sample_prob: + continue + counter += 1 + if counter > self.how_many_examples_to_sample: + raise StopIteration + yield x + # We do it again if we don't have enough samples. 
+ if counter < self.how_many_examples_to_sample: + for x in super(TranslateEnroWmtMultiSmall64k, self).generate_samples( + data_dir, tmp_dir, dataset_split): + if random.random() > sample_prob: + continue + counter += 1 + if counter > self.how_many_examples_to_sample: + raise StopIteration + yield x + + +@registry.register_problem +class TranslateEnroWmtMultiTiny64k(TranslateEnroWmtMultiSmall64k): + """Translation with muli-lingual vocabulary, tiny (600) training data.""" + + @property + def how_many_examples_to_sample(self): + return 600 + + +@registry.register_problem +class TranslateEnroWmtMultiTiny64kPacked1k(TranslateEnroWmtMultiTiny64k): + """Translation with muli-lingual vocabulary.""" + + @property + def packed_length(self): + return 1024 + + @property + def num_training_examples(self): + return 32 + + @property + def inputs_prefix(self): + return "translate English Romanian " + + @property + def targets_prefix(self): + return "translate Romanian English " diff --git a/tensor2tensor/data_generators/translate_entn.py b/tensor2tensor/data_generators/translate_entn.py new file mode 100644 index 000000000..fa11081e3 --- /dev/null +++ b/tensor2tensor/data_generators/translate_entn.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for translation data-sets.""" + + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import translate +from tensor2tensor.utils import registry + + +EOS = text_encoder.EOS_ID + +_URL = "https://github.com/LauraMartinus/ukuxhumana/blob/master/data/en_tn" + +_ENTN_TRAIN_DATASETS = [[ + _URL + "/eng_tswane.train.tar.gz?raw=true", + ("entn_parallel.train.en", "entn_parallel.train.tn") +]] + +_ENTN_TEST_DATASETS = [[ + _URL + "/eng_tswane.dev.tar.gz?raw=true", + ("entn_parallel.dev.en", "entn_parallel.dev.tn") +]] + + +@registry.register_problem +class TranslateEntnRma(translate.TranslateProblem): + """Problem spec for English-Setswana translation. + + Uses the RMA Autshumato dataset. + """ + + @property + def approx_vocab_size(self): + return 2**15 # 32768 + + @property + def vocab_filename(self): + return "vocab.entn.%d" % self.approx_vocab_size + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + return _ENTN_TRAIN_DATASETS if train else _ENTN_TEST_DATASETS diff --git a/tensor2tensor/data_generators/translate_envi.py b/tensor2tensor/data_generators/translate_envi.py new file mode 100644 index 000000000..479883e36 --- /dev/null +++ b/tensor2tensor/data_generators/translate_envi.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for En-Vi translation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import translate +from tensor2tensor.utils import registry + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + +# For English-Vietnamese the IWSLT'15 corpus +# from https://nlp.stanford.edu/projects/nmt/ is used. +# The original dataset has 133K parallel sentences. +_ENVI_TRAIN_DATASETS = [[ + "https://github.com/stefan-it/nmt-en-vi/raw/master/data/train-en-vi.tgz", # pylint: disable=line-too-long + ("train.en", "train.vi") +]] + +# For development 1,553 parallel sentences are used. +_ENVI_TEST_DATASETS = [[ + "https://github.com/stefan-it/nmt-en-vi/raw/master/data/dev-2012-en-vi.tgz", # pylint: disable=line-too-long + ("tst2012.en", "tst2012.vi") +]] + + +# See this PR on github for some results with Transformer on this Problem. +# https://github.com/tensorflow/tensor2tensor/pull/611 + + +@registry.register_problem +class TranslateEnviIwslt32k(translate.TranslateProblem): + """Problem spec for IWSLT'15 En-Vi translation.""" + + @property + def approx_vocab_size(self): + return 2**15 # 32768 + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + return _ENVI_TRAIN_DATASETS if train else _ENVI_TEST_DATASETS diff --git a/tensor2tensor/data_generators/translate_enzh.py b/tensor2tensor/data_generators/translate_enzh.py new file mode 100644 index 000000000..14e0d8a63 --- /dev/null +++ b/tensor2tensor/data_generators/translate_enzh.py @@ -0,0 +1,280 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for translation data-sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import translate +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +# End-of-sentence marker. +EOS = text_encoder.EOS_ID + +# This is far from being the real WMT18 task - only toyset here +# you need to register to get UN data and CWT data. 
Also, by convention, +# this is EN to ZH - use translate_enzh_wmt8k_rev for ZH to EN task +# +# News Commentary, around 252k lines +# This dataset is only a small fraction of full WMT18 task +_STAT_MT_URL = "http://data.statmt.org/wmt18/translation-task/" +_NC_TRAIN_DATASETS = [[ + _STAT_MT_URL + "training-parallel-nc-v13.tgz", [ + "training-parallel-nc-v13/news-commentary-v13.zh-en.en", + "training-parallel-nc-v13/news-commentary-v13.zh-en.zh" + ] +]] + +# Test set from News Commentary. 2000 lines +_NC_TEST_DATASETS = [[ + _STAT_MT_URL + "dev.tgz", + ("dev/newsdev2017-enzh-src.en.sgm", "dev/newsdev2017-enzh-ref.zh.sgm") +]] + +# UN parallel corpus. 15,886,041 lines +# Visit source website to download manually: +# https://conferences.unite.un.org/UNCorpus +# +# NOTE: You need to register to download dataset from official source +# place into tmp directory e.g. /tmp/t2t_datagen/dataset.tgz +_UN_TRAIN_DATASETS = [[ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/UNv1.0.en-zh.tar" + ".gz", ["en-zh/UNv1.0.en-zh.en", "en-zh/UNv1.0.en-zh.zh"] +]] + +# CWMT corpus +# Visit source website to download manually: +# http://nlp.nju.edu.cn/cwmt-wmt/ +# +# casia2015: 1,050,000 lines +# casict2015: 2,036,833 lines +# datum2015: 1,000,003 lines +# datum2017: 1,999,968 lines +# NEU2017: 2,000,000 lines +# +# NOTE: You need to register to download dataset from official source +# place into tmp directory e.g. /tmp/t2t_datagen/dataset.tgz + +_CWMT_TRAIN_DATASETS = [[ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/casia2015/casia2015_en.txt", "cwmt/casia2015/casia2015_ch.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/casict2015/casict2015_en.txt", "cwmt/casict2015/casict2015_ch.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/neu2017/NEU_en.txt", "cwmt/neu2017/NEU_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2015/datum_en.txt", "cwmt/datum2015/datum_ch.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book1_en.txt", "cwmt/datum2017/Book1_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book2_en.txt", "cwmt/datum2017/Book2_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book3_en.txt", "cwmt/datum2017/Book3_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book4_en.txt", "cwmt/datum2017/Book4_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book5_en.txt", "cwmt/datum2017/Book5_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book6_en.txt", "cwmt/datum2017/Book6_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book7_en.txt", "cwmt/datum2017/Book7_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book8_en.txt", "cwmt/datum2017/Book8_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book9_en.txt", "cwmt/datum2017/Book9_cn.txt"] +], [ + 
"https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book10_en.txt", "cwmt/datum2017/Book10_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book11_en.txt", "cwmt/datum2017/Book11_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book12_en.txt", "cwmt/datum2017/Book12_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book13_en.txt", "cwmt/datum2017/Book13_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book14_en.txt", "cwmt/datum2017/Book14_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book15_en.txt", "cwmt/datum2017/Book15_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book16_en.txt", "cwmt/datum2017/Book16_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book17_en.txt", "cwmt/datum2017/Book17_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book18_en.txt", "cwmt/datum2017/Book18_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book19_en.txt", "cwmt/datum2017/Book19_cn.txt"] +], [ + "https://s3-us-west-2.amazonaws.com/twairball.wmt17.zh-en/cwmt.tgz", + ["cwmt/datum2017/Book20_en.txt", "cwmt/datum2017/Book20_cn.txt"] +]] + + +def get_filename(dataset): + return dataset[0][0].split("/")[-1] + + +@registry.register_problem +class TranslateEnzhWmt32k(translate.TranslateProblem): + """Problem spec for WMT En-Zh translation. + + Attempts to use full training dataset, which needs website + registration and downloaded manually from official sources: + + CWMT: + - http://nlp.nju.edu.cn/cwmt-wmt/ + - Website contains instructions for FTP server access. + - You'll need to download CASIA, CASICT, DATUM2015, DATUM2017, + NEU datasets + + UN Parallel Corpus: + - https://conferences.unite.un.org/UNCorpus + - You'll need to register your to download the dataset. + + NOTE: place into tmp directory e.g. /tmp/t2t_datagen/dataset.tgz + """ + + @property + def approx_vocab_size(self): + return 2**15 # 32k + + @property + def source_vocab_name(self): + return "%s.en" % self.vocab_filename + + @property + def target_vocab_name(self): + return "%s.zh" % self.vocab_filename + + def get_training_dataset(self, tmp_dir): + """UN Parallel Corpus and CWMT Corpus need to be downloaded manually. + + Append to training dataset if available + + Args: + tmp_dir: path to temporary dir with the data in it. 
+ + Returns: + paths + """ + full_dataset = _NC_TRAIN_DATASETS + for dataset in [_CWMT_TRAIN_DATASETS, _UN_TRAIN_DATASETS]: + filename = get_filename(dataset) + tmp_filepath = os.path.join(tmp_dir, filename) + if tf.gfile.Exists(tmp_filepath): + full_dataset += dataset + else: + tf.logging.info("[TranslateEzhWmt] dataset incomplete, you need to " + "manually download %s" % filename) + return full_dataset + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + train_dataset = self.get_training_dataset(tmp_dir) + datasets = train_dataset if train else _NC_TEST_DATASETS + source_datasets = [[item[0], [item[1][0]]] for item in train_dataset] + target_datasets = [[item[0], [item[1][1]]] for item in train_dataset] + source_vocab = generator_utils.get_or_generate_vocab( + data_dir, + tmp_dir, + self.source_vocab_name, + self.approx_vocab_size, + source_datasets, + file_byte_budget=1e8, + max_subtoken_length=self.max_subtoken_length) + target_vocab = generator_utils.get_or_generate_vocab( + data_dir, + tmp_dir, + self.target_vocab_name, + self.approx_vocab_size, + target_datasets, + file_byte_budget=1e8, + max_subtoken_length=self.max_subtoken_length) + tag = "train" if train else "dev" + filename_base = "wmt_enzh_%sk_tok_%s" % (self.approx_vocab_size, tag) + data_path = translate.compile_data(tmp_dir, datasets, filename_base) + return text_problems.text2text_generate_encoded( + text_problems.text2text_txt_iterator(data_path + ".lang1", + data_path + ".lang2"), + source_vocab, target_vocab) + + def feature_encoders(self, data_dir): + source_vocab_filename = os.path.join(data_dir, self.source_vocab_name) + target_vocab_filename = os.path.join(data_dir, self.target_vocab_name) + source_token = text_encoder.SubwordTextEncoder(source_vocab_filename) + target_token = text_encoder.SubwordTextEncoder(target_vocab_filename) + return { + "inputs": source_token, + "targets": target_token, + } + + +@registry.register_problem +class TranslateEnzhWmt8k(TranslateEnzhWmt32k): + """Problem spec for WMT En-Zh translation. + + This is far from being the real WMT17 task - only toyset here + """ + + @property + def approx_vocab_size(self): + return 2**13 # 8192 + + @property + def dataset_splits(self): + return [ + { + "split": problem.DatasetSplit.TRAIN, + "shards": 10, # this is a small dataset + }, + { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + } + ] + + def get_training_dataset(self, tmp_dir): + """Uses only News Commentary Dataset for training.""" + return _NC_TRAIN_DATASETS diff --git a/tensor2tensor/data_generators/translate_test.py b/tensor2tensor/data_generators/translate_test.py new file mode 100644 index 000000000..2148ab3ed --- /dev/null +++ b/tensor2tensor/data_generators/translate_test.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
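The test module that follows exercises `translate.compile_data`, the same helper the generators above rely on: it concatenates each dataset's source/target files into a single `.lang1`/`.lang2` pair, which `text2text_txt_iterator` then walks in parallel. A rough sketch of that iteration, assuming compiled files already exist (the path below is a placeholder):

```python
import os
from tensor2tensor.data_generators import text_problems

# Placeholder: compile_data(tmp_dir, datasets, base) writes base + ".lang1/.lang2".
base = os.path.join("/tmp/t2t_datagen", "my-compiled-train")

for example in text_problems.text2text_txt_iterator(base + ".lang1",
                                                     base + ".lang2"):
  # Each example is a dict of raw text: {"inputs": ..., "targets": ...}.
  print(example["inputs"], "=>", example["targets"])
  break
```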
+ +"""Translate generators test.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import shutil +import tarfile +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import translate + +import tensorflow.compat.v1 as tf + + +class TranslateTest(tf.test.TestCase): + DATASETS = [ + ["data1.tgz", ("train1.en", "train1.de")], + ["data2.tgz", ("train2.en", "train2.de")], + ["data3.tgz", ("train3.en", "train3.de")], + ] + + @classmethod + def setUpClass(cls): + tmp_dir = tf.test.get_temp_dir() + compressed_dir = os.path.join(tmp_dir, "compressed") + shutil.rmtree(tmp_dir) + tf.gfile.MakeDirs(compressed_dir) + + en_data = [str(i) for i in range(10, 40)] + de_data = [str(i) for i in range(100, 130)] + data = list(zip(en_data, de_data)) + + for i, dataset in enumerate(cls.DATASETS): + tar_file = dataset[0] + en_file, de_file = [ + os.path.join(compressed_dir, name) for name in dataset[1] + ] + with tf.gfile.Open(en_file, "w") as en_f: + with tf.gfile.Open(de_file, "w") as de_f: + start = i * 10 + end = start + 10 + for en_line, de_line in data[start:end]: + en_f.write(en_line) + en_f.write("\n") + de_f.write(de_line) + de_f.write("\n") + + with tarfile.open(os.path.join(tmp_dir, tar_file), "w:gz") as tar_f: + tar_f.add(en_file, os.path.basename(en_file)) + tar_f.add(de_file, os.path.basename(de_file)) + + cls.tmp_dir = tmp_dir + cls.data = data + + def testCompileData(self): + filename = "out" + filepath = os.path.join(self.tmp_dir, filename) + translate.compile_data(self.tmp_dir, self.DATASETS, filename) + + count = 0 + for i, example in enumerate( + text_problems.text2text_txt_iterator(filepath + ".lang1", + filepath + ".lang2")): + expected = self.data[i] + self.assertEqual(list(expected), [example["inputs"], example["targets"]]) + count += 1 + self.assertEqual(count, len(self.data)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/video_generated.py b/tensor2tensor/data_generators/video_generated.py new file mode 100644 index 000000000..e57eafd1f --- /dev/null +++ b/tensor2tensor/data_generators/video_generated.py @@ -0,0 +1,202 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
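The fixture in the test above pairs thirty English lines ("10" through "39") with thirty German lines ("100" through "129") and spreads them over three tarballs; `testCompileData` then asserts that compiling all three archives preserves that pairing and order. The expected contents can be written down directly:

```python
# What the fixture encodes, independent of any T2T code.
en_data = [str(i) for i in range(10, 40)]    # "10" ... "39"
de_data = [str(i) for i in range(100, 130)]  # "100" ... "129"
expected = list(zip(en_data, de_data))

# Dataset i contributes pairs expected[i*10:(i+1)*10]; compile_data is
# expected to concatenate them back in the same order.
assert expected[0] == ("10", "100")
assert expected[29] == ("39", "129")
assert len(expected) == 30
```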
+ +"""Data generators for video problems with artificially generated frames.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +import numpy as np + +from tensor2tensor.data_generators import video_utils +from tensor2tensor.layers import modalities +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +try: + import matplotlib # pylint: disable=g-import-not-at-top + matplotlib.use("agg") + import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top +except ImportError: + pass + + +@registry.register_problem +class VideoStochasticShapes10k(video_utils.VideoProblem): + """Shapes moving in a stochastic way.""" + + @property + def is_generate_per_split(self): + """Whether we have a train/test split or just hold out data.""" + return False # Just hold out some generated data for evals. + + @property + def frame_height(self): + return 64 + + @property + def frame_width(self): + return 64 + + @property + def total_number_of_frames(self): + # 10k videos + return 10000 * self.video_length + + @property + def video_length(self): + return 5 + + @property + def random_skip(self): + return False + + @property + def only_keep_videos_from_0th_frame(self): + return True + + @property + def use_not_breaking_batching(self): + return True + + def eval_metrics(self): + return [] + + @property + def extra_reading_spec(self): + """Additional data fields to store on disk and their decoders.""" + data_fields = { + "frame_number": tf.FixedLenFeature([1], tf.int64), + } + decoders = { + "frame_number": + contrib.slim().tfexample_decoder.Tensor(tensor_key="frame_number"), + } + return data_fields, decoders + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = { + "inputs": modalities.ModalityType.VIDEO, + "targets": modalities.ModalityType.VIDEO, + } + p.vocab_size = { + "inputs": 256, + "targets": 256, + } + + @staticmethod + def get_circle(x, y, z, c, s): + """Draws a circle with center(x, y), color c, size s and z-order of z.""" + cir = plt.Circle((x, y), s, fc=c, zorder=z) + return cir + + @staticmethod + def get_rectangle(x, y, z, c, s): + """Draws a rectangle with center(x, y), color c, size s and z-order of z.""" + rec = plt.Rectangle((x-s, y-s), s*2.0, s*2.0, fc=c, zorder=z) + return rec + + @staticmethod + def get_triangle(x, y, z, c, s): + """Draws a triangle with center (x, y), color c, size s and z-order of z.""" + points = np.array([[0, 0], [s, s*math.sqrt(3.0)], [s*2.0, 0]]) + tri = plt.Polygon(points + [x-s, y-s], fc=c, zorder=z) + return tri + + def generate_stochastic_shape_instance(self): + """Yields one video of a shape moving to a random direction. + + The size and color of the shapes are random but + consistent in a single video. The speed is fixed. + + Raises: + ValueError: The frame size is not square. 
+ """ + if self.frame_height != self.frame_width or self.frame_height % 2 != 0: + raise ValueError("Generator only supports square frames with even size.") + + lim = 10.0 + direction = np.array([[+1.0, +1.0], + [+1.0, +0.0], + [+1.0, -1.0], + [+0.0, +1.0], + [+0.0, -1.0], + [-1.0, +1.0], + [-1.0, +0.0], + [-1.0, -1.0] + ]) + + sp = np.array([lim/2.0, lim/2.0]) + rnd = np.random.randint(len(direction)) + di = direction[rnd] + + colors = ["b", "g", "r", "c", "m", "y"] + color = np.random.choice(colors) + + shape = np.random.choice([ + VideoStochasticShapes10k.get_circle, + VideoStochasticShapes10k.get_rectangle, + VideoStochasticShapes10k.get_triangle]) + speed = 1.0 + + size = np.random.uniform(0.5, 1.5) + + back_color = str(0.0) + plt.ioff() + + xy = np.array(sp) + + for _ in range(self.video_length): + fig = plt.figure() + fig.set_dpi(self.frame_height//2) + fig.set_size_inches(2, 2) + ax = plt.axes(xlim=(0, lim), ylim=(0, lim)) + + # Background + ax.add_patch(VideoStochasticShapes10k.get_rectangle( + 0.0, 0.0, -1.0, back_color, 25.0)) + # Foreground + ax.add_patch(shape(xy[0], xy[1], 0.0, color, size)) + + plt.axis("off") + plt.tight_layout(pad=-2.0) + fig.canvas.draw() + image = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") + image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + image = np.copy(np.uint8(image)) + + plt.close() + xy += speed * di + + yield image + + def generate_samples(self, data_dir, tmp_dir, unused_dataset_split): + counter = 0 + done = False + while not done: + for frame_number, frame in enumerate( + self.generate_stochastic_shape_instance()): + if counter >= self.total_number_of_frames: + done = True + break + + yield {"frame": frame, "frame_number": [frame_number]} + counter += 1 diff --git a/tensor2tensor/data_generators/video_utils.py b/tensor2tensor/data_generators/video_utils.py new file mode 100644 index 000000000..8bdfbdf0c --- /dev/null +++ b/tensor2tensor/data_generators/video_utils.py @@ -0,0 +1,788 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
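`VideoStochasticShapes10k` above emits data frame by frame: `generate_samples` yields dicts carrying a raw frame and its index within the clip, and sharding is sized via `total_number_of_frames` (10,000 clips of 5 frames). A rough sketch of pulling one clip's worth of frames, assuming matplotlib is installed (the frames are rendered with it):

```python
import itertools
from tensor2tensor.data_generators import video_generated

shapes = video_generated.VideoStochasticShapes10k()
print(shapes.total_number_of_frames)  # 50000 == 10000 clips * 5 frames

# Each sample is {"frame": uint8 array (64x64x3 here), "frame_number": [i]}.
for sample in itertools.islice(
    shapes.generate_samples(data_dir=None, tmp_dir=None,
                            unused_dataset_split=None), 5):
  print(sample["frame"].shape, sample["frame_number"])
```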
+ +"""Base classes and utilities for video datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import os + +from absl import flags +import numpy as np +import six +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.layers import modalities +from tensor2tensor.utils import contrib +from tensor2tensor.utils import metrics +from tensor2tensor.utils import video_metrics +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +FLAGS = flags.FLAGS + +flags.DEFINE_bool( + "disable_ffmpeg", False, "Disable FFMPEG when generating debug videos." +) + + +def resize_video_frames(images, size): + return [tf.to_int64(tf.image.resize_images( + image, [size, size], tf.image.ResizeMethod.BILINEAR)) for image in images] + + +def video_augmentation(features, hue=False, saturate=False, contrast=False): + """Augments video with optional hue, saturation and constrast. + + Args: + features: dict, with keys "inputs", "targets". + features["inputs"], 4-D Tensor, shape=(THWC) + features["targets"], 4-D Tensor, shape=(THWC) + hue: bool, apply hue_transform. + saturate: bool, apply saturation transform. + contrast: bool, apply constrast transform. + Returns: + augment_features: dict with transformed "inputs" and "targets". + """ + inputs, targets = features["inputs"], features["targets"] + in_steps = common_layers.shape_list(inputs)[0] + + # makes sure that the same augmentation is applied to both input and targets. + # if input is 4-D, then tf.image applies the same transform across the batch. + video = tf.concat((inputs, targets), axis=0) + if hue: + video = tf.image.random_hue(video, max_delta=0.2) + if saturate: + video = tf.image.random_saturation(video, lower=0.5, upper=1.5) + if contrast: + video = tf.image.random_contrast(video, lower=0.5, upper=1.5) + features["inputs"], features["targets"] = video[:in_steps], video[in_steps:] + return features + + +def create_border(video, color="blue", border_percent=2): + """Creates a border around each frame to differentiate input and target. + + Args: + video: 5-D NumPy array. + color: string, "blue", "red" or "green". + border_percent: Percentarge of the frame covered by the border. + Returns: + video: 5-D NumPy array. + """ + # Do not create border if the video is not in RGB format + if video.shape[-1] != 3: + return video + color_to_axis = {"blue": 2, "red": 0, "green": 1} + axis = color_to_axis[color] + _, _, height, width, _ = video.shape + border_height = np.ceil(border_percent * height / 100.0).astype(int) + border_width = np.ceil(border_percent * width / 100.0).astype(int) + video[:, :, :border_height, :, axis] = 255 + video[:, :, -border_height:, :, axis] = 255 + video[:, :, :, :border_width, axis] = 255 + video[:, :, :, -border_width:, axis] = 255 + return video + + +def convert_videos_to_summaries(input_videos, output_videos, target_videos, + tag, decode_hparams, + display_ground_truth=False): + """Converts input, output and target videos into video summaries. + + Args: + input_videos: 5-D NumPy array, (NTHWC) conditioning frames. + output_videos: 5-D NumPy array, (NTHWC) model predictions. 
+ target_videos: 5-D NumPy array, (NTHWC) target frames. + tag: tf summary tag. + decode_hparams: HParams. + display_ground_truth: Whether or not to display ground truth videos. + Returns: + summaries: a list of tf frame-by-frame and video summaries. + """ + fps = decode_hparams.frames_per_second + border_percent = decode_hparams.border_percent + max_outputs = decode_hparams.max_display_outputs + target_steps = target_videos.shape[1] + all_summaries = [] + input_videos = create_border( + input_videos, color="blue", border_percent=border_percent) + target_videos = create_border( + target_videos, color="red", border_percent=border_percent) + output_videos = create_border( + output_videos, color="red", border_percent=border_percent) + + all_input = np.concatenate((input_videos, target_videos), axis=1) + all_output = np.concatenate((input_videos, output_videos), axis=1) + output_summ_vals, _ = common_video.py_gif_summary( + "%s/output" % tag, all_output, max_outputs=max_outputs, fps=fps, + return_summary_value=True) + all_summaries.extend(output_summ_vals) + + # Optionally display ground truth. + if display_ground_truth: + input_summ_vals, _ = common_video.py_gif_summary( + "%s/input" % tag, all_input, max_outputs=max_outputs, fps=fps, + return_summary_value=True) + all_summaries.extend(input_summ_vals) + + # Frame-by-frame summaries + iterable = zip(output_videos[:max_outputs, :target_steps], + target_videos[:max_outputs]) + for ind, (input_video, output_video) in enumerate(iterable): + t, h, w, c = input_video.shape + # Tile vertically + input_frames = np.reshape(input_video, (t*h, w, c)) + output_frames = np.reshape(output_video, (t*h, w, c)) + + # Concat across width. + all_frames = np.concatenate((input_frames, output_frames), axis=1) + tag = "input/output/%s_sample_%d" % (tag, ind) + frame_by_frame_summ = image_utils.image_to_tf_summary_value( + all_frames, tag=tag) + all_summaries.append(frame_by_frame_summ) + return all_summaries + + +def display_video_hooks(hook_args): + """Hooks to display videos at decode time.""" + predictions = hook_args.predictions + max_outputs = hook_args.decode_hparams.max_display_outputs + max_decodes = hook_args.decode_hparams.max_display_decodes + + with tf.Graph().as_default(): + _, best_decodes = video_metrics.compute_video_metrics_from_predictions( + predictions, decode_hparams=hook_args.decode_hparams) + + all_summaries = [] + # Displays decodes corresponding to the best/worst metric, + for metric, metric_decode_inds in best_decodes.items(): + curr_metric_inds = metric_decode_inds[:max_outputs] + best_inputs, best_outputs, best_targets = [], [], [] + for sample_ind, decode_ind in enumerate(curr_metric_inds): + curr_decode = predictions[decode_ind][sample_ind] + best_inputs.append(curr_decode["inputs"]) + best_outputs.append(curr_decode["outputs"]) + best_targets.append(curr_decode["targets"]) + best_inputs = np.array(best_inputs, dtype=np.uint8) + best_outputs = np.array(best_outputs, dtype=np.uint8) + best_targets = np.array(best_targets, dtype=np.uint8) + summaries = convert_videos_to_summaries( + best_inputs, best_outputs, best_targets, + tag=metric, decode_hparams=hook_args.decode_hparams) + all_summaries.extend(summaries) + + # Display random decodes for ten conditioning frames. 
+ for decode_ind, decode in enumerate(predictions[: max_decodes]): + target_videos = video_metrics.stack_data_given_key(decode, "targets") + output_videos = video_metrics.stack_data_given_key(decode, "outputs") + input_videos = video_metrics.stack_data_given_key(decode, "inputs") + target_videos = np.asarray(target_videos, dtype=np.uint8) + output_videos = np.asarray(output_videos, dtype=np.uint8) + input_videos = np.asarray(input_videos, dtype=np.uint8) + summaries = convert_videos_to_summaries( + input_videos, output_videos, target_videos, + tag="decode_%d" % decode_ind, decode_hparams=hook_args.decode_hparams, + display_ground_truth=decode_ind == 0) + all_summaries.extend(summaries) + return all_summaries + + +def summarize_video_metrics(hook_args): + """Computes video metrics summaries using the decoder output.""" + problem_name = hook_args.problem.name + current_problem = hook_args.problem + hparams = hook_args.hparams + output_dirs = hook_args.output_dirs + predictions = hook_args.predictions + frame_shape = [ + current_problem.frame_height, current_problem.frame_width, + current_problem.num_channels + ] + metrics_graph = tf.Graph() + with metrics_graph.as_default(): + if predictions: + metrics_results, _ = video_metrics.compute_video_metrics_from_predictions( + predictions, decode_hparams=hook_args.decode_hparams) + else: + metrics_results, _ = video_metrics.compute_video_metrics_from_png_files( + output_dirs, problem_name, hparams.video_num_target_frames, + frame_shape) + + summary_values = [] + for name, array in six.iteritems(metrics_results): + for ind, val in enumerate(array): + tag = "metric_{}/{}".format(name, ind) + summary_values.append(tf.Summary.Value(tag=tag, simple_value=val)) + return summary_values + + +def debug_video_writer_factory(output_dir): + """Creates a VideoWriter for debug videos.""" + if FLAGS.disable_ffmpeg: + return common_video.IndividualFrameWriter(output_dir) + else: + output_path = os.path.join(output_dir, "video.avi") + return common_video.WholeVideoWriter( + fps=10, output_path=output_path, file_format="avi" + ) + + +class VideoProblem(problem.Problem): + """Base class for problems with videos.""" + + def __init__(self, *args, **kwargs): + super(VideoProblem, self).__init__(*args, **kwargs) + # Path to a directory to dump generated frames as png for debugging. + # If empty, no debug frames will be generated. + self.debug_dump_frames_path = "" + # Whether to skip random inputs at the beginning or not. + self.settable_random_skip = True + self.settable_use_not_breaking_batching = True + self.shuffle = True + + def max_frames_per_video(self, hparams): + """Maximum number of frames per video as determined by the dataset. + + This is used only in PREDICT mode and handles the corner case where + video_num_input_frames + video_num_target_frames is greater than the + maximum number of frames per video in the dataset. For eg, 30 in BAIR. + + For this special case, setting this to return "x" limits the input pipeline + to handle "x" (input + target) frames. The corresponding video model can + then decode arbitrary number of target frames via + hparams.video_num_target_frames. + + Args: + hparams: HParams. + Returns: + num_frames: int. 
+ """ + return hparams.video_num_input_frames + hparams.video_num_target_frames + + @property + def num_channels(self): + """Number of color channels in each frame.""" + return 3 + + @property + def frame_height(self): + """Height of each frame.""" + raise NotImplementedError + + @property + def frame_width(self): + """Width of each frame.""" + raise NotImplementedError + + @property + def frame_shape(self): + """Shape of a frame: a list [height , width , channels].""" + return [self.frame_height, self.frame_width, self.num_channels] + + @property + def total_number_of_frames(self): + """The total number of frames, needed for sharding.""" + # It can also be a lower number -- we will switch shards every + # total_number_of_frames // num_shards time, so for example if + # you know that every video is 30 frames long and you have 100 shards + # then it's sufficient to set this to 30 * 100 so no shard-switching + # occurs during the generation of a video. For videos of variable length, + # just make this large so switching shards mid-video is very rare. + raise NotImplementedError + + @property + def random_skip(self): + """Whether to skip random inputs at the beginning or not.""" + return True + + @property + def extra_reading_spec(self): + """Additional data fields to store on disk and their decoders.""" + return {}, {} + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def only_keep_videos_from_0th_frame(self): + return True + + @property + def avoid_overlapping_frames(self): + """When True, each video has non overlapping frames with every other.""" + return False + + @property + def use_not_breaking_batching(self): + return True + + def preprocess_example(self, example, mode, hparams): + """Runtime preprocessing, e.g., resize example["frame"].""" + if getattr(hparams, "preprocess_resize_frames", None) is not None: + example["frame"] = tf.image.resize_images( + example["frame"], hparams.preprocess_resize_frames, + tf.image.ResizeMethod.BILINEAR) + return example + + @property + def decode_hooks(self): + return [summarize_video_metrics, display_video_hooks] + + @property + def is_generate_per_split(self): + """A single call to `generate_samples` generates for all `dataset_splits`. + + Set to True if you already have distinct subsets of data for each dataset + split specified in `self.dataset_splits`. `self.generate_samples` will be + called once for each split. + + Set to False if you have a unified dataset that you'd like to have split out + into training and evaluation data automatically. `self.generate_samples` + will be called only once and the data will be sharded across the dataset + splits specified in `self.dataset_splits`. 
+ + Returns: + bool + """ + raise NotImplementedError() + + def example_reading_spec(self): + extra_data_fields, extra_data_items_to_decoders = self.extra_reading_spec + + data_fields = { + "image/encoded": tf.FixedLenFeature((), tf.string), + "image/format": tf.FixedLenFeature((), tf.string), + } + data_fields.update(extra_data_fields) + + data_items_to_decoders = { + "frame": + contrib.slim().tfexample_decoder.Image( + image_key="image/encoded", + format_key="image/format", + shape=[self.frame_height, self.frame_width, self.num_channels], + channels=self.num_channels), + } + data_items_to_decoders.update(extra_data_items_to_decoders) + + return data_fields, data_items_to_decoders + + def serving_input_fn(self, hparams): + """For serving/predict, assume that only video frames are provided.""" + video_input_frames = tf.placeholder( + dtype=tf.float32, + shape=[ + None, hparams.video_num_input_frames, self.frame_width, + self.frame_height, self.num_channels + ]) + + # TODO(michalski): add support for passing input_action and input_reward. + return tf_estimator.export.ServingInputReceiver( + features={"inputs": video_input_frames}, + receiver_tensors=video_input_frames) + + def preprocess(self, dataset, mode, hparams, interleave=True): + + def split_on_batch(x): + """Split x on batch dimension into x[:size, ...] and x[size:, ...].""" + length = len(x.get_shape()) + size = hparams.video_num_input_frames + if length < 1: + raise ValueError("Batched tensor of length < 1.") + if length == 1: + return x[:size], x[size:] + if length == 2: + return x[:size, :], x[size:, :] + if length == 3: + return x[:size, :, :], x[size:, :, :] + if length == 4: + return x[:size, :, :, :], x[size:, :, :, :] + # TODO(lukaszkaiser): use tf.split for the general case. + raise ValueError("Batch splitting on general dimensions not done yet.") + + def features_from_batch(batched_prefeatures): + """Construct final features from the batched inputs. + + This function gets prefeatures. + + Args: + batched_prefeatures: single-frame features (from disk) as batch tensors. + + Returns: + Features dictionary with joint features per-frame. + """ + features = {} + for k, v in six.iteritems(batched_prefeatures): + if k == "frame": # We rename past frames to inputs and targets. + s1, s2 = split_on_batch(v) + features["inputs"] = s1 + features["targets"] = s2 + else: + s1, s2 = split_on_batch(v) + features["input_%s" % k] = s1 + features["target_%s" % k] = s2 + return features + + # Batch and construct features. + def _preprocess(example): + return self.preprocess_example(example, mode, hparams) + + def avoid_break_batching(dataset): + """Smart preprocessing to avoid break between videos! + + Simple batching of images into videos may result into broken videos + with two parts from two different videos. This preprocessing avoids + this using the frame number. + + Args: + dataset: raw not-batched dataset. + + Returns: + batched not-broken videos. + + """ + + def check_integrity_and_batch(*datasets): + """Checks whether a sequence of frames are from the same video. + + Args: + *datasets: datasets each skipping 1 frame from the previous one. + + Returns: + batched data and the integrity flag. 
+ """ + not_broken = tf.constant(True) + if "frame_number" in datasets[0]: + frame_numbers = [dataset["frame_number"][0] for dataset in datasets] + + not_broken = tf.equal(frame_numbers[-1] - frame_numbers[0], + num_frames - 1) + if self.only_keep_videos_from_0th_frame: + not_broken = tf.logical_and(not_broken, tf.equal( + frame_numbers[0], 0)) + if self.avoid_overlapping_frames: + non_overlap = tf.equal(tf.mod(frame_numbers[0], num_frames), 0) + not_broken = tf.logical_and(not_broken, non_overlap) + else: + tf.logging.warning("use_not_breaking_batching is True but " + "no frame_number is in the dataset.") + + features = {} + for key in datasets[0].keys(): + values = [dataset[key] for dataset in datasets] + batch = tf.stack(values) + features[key] = batch + return features, not_broken + + ds = [dataset.skip(i) for i in range(num_frames)] + dataset = tf.data.Dataset.zip(tuple(ds)) + dataset = dataset.map(check_integrity_and_batch) + dataset = dataset.filter(lambda _, not_broken: not_broken) + dataset = dataset.map(lambda features, _: features) + + return dataset + + preprocessed_dataset = dataset.map(_preprocess) + + num_frames = ( + hparams.video_num_input_frames + hparams.video_num_target_frames) + if mode == tf_estimator.ModeKeys.PREDICT: + num_frames = min(self.max_frames_per_video(hparams), num_frames) + + # We jump by a random position at the beginning to add variety. + if (self.random_skip and self.settable_random_skip and interleave and + mode == tf_estimator.ModeKeys.TRAIN): + random_skip = tf.random_uniform([], maxval=num_frames, dtype=tf.int64) + preprocessed_dataset = preprocessed_dataset.skip(random_skip) + if (self.use_not_breaking_batching and + self.settable_use_not_breaking_batching): + batch_dataset = avoid_break_batching(preprocessed_dataset) + else: + batch_dataset = preprocessed_dataset.batch(num_frames, + drop_remainder=True) + dataset = batch_dataset.map(features_from_batch) + if self.shuffle and interleave and mode == tf_estimator.ModeKeys.TRAIN: + dataset = dataset.shuffle(hparams.get("shuffle_buffer_size", 128)) + return dataset + + def eval_metrics(self): + eval_metrics = [ + metrics.Metrics.ACC, metrics.Metrics.ACC_PER_SEQ, + metrics.Metrics.NEG_LOG_PERPLEXITY, metrics.Metrics.IMAGE_SUMMARY + ] + return eval_metrics + + def validate_frame(self, frame): + height, width, channels = frame.shape + if channels != self.num_channels: + raise ValueError("Generated frame has %d channels while the class " + "assumes %d channels." % (channels, self.num_channels)) + if height != self.frame_height: + raise ValueError("Generated frame has height %d while the class " + "assumes height %d." % (height, self.frame_height)) + if width != self.frame_width: + raise ValueError("Generated frame has width %d while the class " + "assumes width %d." % (width, self.frame_width)) + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generate samples of the frames with possible extra data. + + Args: + data_dir: final data directory. Typically only used in this method to copy + over user-supplied vocab files if there are extra fields needing them. + tmp_dir: temporary directory that you can use for downloading and scratch. + dataset_split: problem.DatasetSplit, which data split to generate samples + for (for example, training and evaluation). You can assume it's TRAIN if + self. 
+ + Yields: + Sample: dict; we assume that there is + a "frame" feature with unencoded frame which is a numpy arrays of shape + [frame_height, frame_width, num_channels] and which will be transcoded + into an image format by generate_encodeded_samples. + """ + raise NotImplementedError() + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + """Generate samples of the encoded frames with possible extra data. + + By default this function just encodes the numpy array returned as "frame" + from `self.generate_samples` into a PNG image. Override this function to + get other encodings on disk. + + Args: + data_dir: final data directory. Typically only used in this method to copy + over user-supplied vocab files if there are extra fields needing them. + tmp_dir: temporary directory that you can use for downloading and scratch. + dataset_split: problem.DatasetSplit, which data split to generate samples + for (for example, training and evaluation). + + Yields: + Sample: dict which is in disk encoding. + + Raises: + ValueError: if the frame has a different number of channels than required. + """ + writer = None + + with tf.Graph().as_default(): + image_t = tf.placeholder(dtype=tf.uint8, shape=(None, None, None)) + encoded_image_t = tf.image.encode_png(image_t) + with tf.Session() as sess: + for features in self.generate_samples(data_dir, tmp_dir, dataset_split): + unencoded_frame = features.pop("frame") + self.validate_frame(unencoded_frame) + height, width, _ = unencoded_frame.shape + encoded_frame = sess.run( + encoded_image_t, feed_dict={image_t: unencoded_frame}) + features["image/encoded"] = [encoded_frame] + features["image/format"] = ["png"] + features["image/height"] = [height] + features["image/width"] = [width] + + has_debug_image = "image/debug" in features + if has_debug_image: + unencoded_debug = features.pop("image/debug") + encoded_debug = sess.run( + encoded_image_t, feed_dict={image_t: unencoded_debug}) + features["image/encoded_debug"] = [encoded_debug] + + if self.debug_dump_frames_path: + # Defer creating debug writer until we know debug_dump_frames_path. + if writer is None: + if not tf.gfile.Exists(self.debug_dump_frames_path): + tf.gfile.MkDir(self.debug_dump_frames_path) + writer = debug_video_writer_factory(self.debug_dump_frames_path) + img = unencoded_debug if has_debug_image else unencoded_frame + encoded_img = encoded_debug if has_debug_image else encoded_frame + writer.write(img, encoded_img) + + yield features + + if self.debug_dump_frames_path: + writer.finish_to_disk() + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + """The function generating the data.""" + filepath_fns = { + problem.DatasetSplit.TRAIN: self.training_filepaths, + problem.DatasetSplit.EVAL: self.dev_filepaths, + problem.DatasetSplit.TEST: self.test_filepaths, + } + + # We set shuffled=True as we don't want to shuffle on disk later. 
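One detail of `preprocess` above that is easy to miss: `avoid_break_batching` does not simply batch `num_frames` consecutive records, because that could glue the tail of one video to the head of the next. Instead it zips `num_frames` shifted copies of the frame stream and keeps only windows whose stored `frame_number`s are contiguous (and, when `only_keep_videos_from_0th_frame` is set, start at frame 0). A plain-Python sketch of the same windowing idea, on toy data rather than the actual `tf.data` pipeline:

```python
# Two hypothetical videos ("a" and "b") of 3 frames each, stored frame by frame.
frames = [
    {"frame": "a0", "frame_number": 0},
    {"frame": "a1", "frame_number": 1},
    {"frame": "a2", "frame_number": 2},
    {"frame": "b0", "frame_number": 0},
    {"frame": "b1", "frame_number": 1},
    {"frame": "b2", "frame_number": 2},
]
num_frames = 3

# Analogue of dataset.skip(i) for i in range(num_frames), then Dataset.zip.
shifted = [frames[i:] for i in range(num_frames)]
for window in zip(*shifted):
    numbers = [f["frame_number"] for f in window]
    # Keep only contiguous windows that start at frame 0.
    not_broken = (numbers[-1] - numbers[0] == num_frames - 1) and numbers[0] == 0
    if not_broken:
        print([f["frame"] for f in window])
# Prints ['a0', 'a1', 'a2'] and ['b0', 'b1', 'b2']; windows that mix videos are dropped.
```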
+ split_paths = [(split["split"], filepath_fns[split["split"]]( + data_dir, split["shards"], shuffled=True)) + for split in self.dataset_splits] + all_paths = [] + for _, paths in split_paths: + all_paths.extend(paths) + + if self.is_generate_per_split: + for split, paths in split_paths: + generator_utils.generate_files( + self.generate_encoded_samples(data_dir, tmp_dir, split), + paths, + cycle_every_n=self.total_number_of_frames // len(paths)) + else: + generator_utils.generate_files( + self.generate_encoded_samples(data_dir, tmp_dir, + problem.DatasetSplit.TRAIN), + all_paths, + cycle_every_n=self.total_number_of_frames // len(all_paths)) + + +# TODO(lukaszkaiser): remove this version after everything is ported. +class VideoProblemOld(problem.Problem): + """Base class for problems with videos: previous version.""" + + @property + def num_channels(self): + """Number of color channels.""" + return 3 + + def example_reading_spec(self): + data_fields = { + "image/encoded": tf.FixedLenFeature((), tf.string), + "image/format": tf.FixedLenFeature((), tf.string), + } + + data_items_to_decoders = { + "inputs": + contrib.slim().tfexample_decoder.Image( + image_key="image/encoded", + format_key="image/format", + channels=self.num_channels), + } + + return data_fields, data_items_to_decoders + + def eval_metrics(self): + eval_metrics = [ + metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5, + metrics.Metrics.NEG_LOG_PERPLEXITY + ] + return eval_metrics + + +class VideoAugmentationProblem(VideoProblem): + """Base class for video data-augmentation. + + By default applies a random hue, contrast and saturation transformation + to every video. To disable any of these transformations, inherit + this class and set the corresponding property to False. + """ + + @property + def hue(self): + return True + + @property + def contrast(self): + return True + + @property + def saturate(self): + return True + + def preprocess(self, dataset, mode, hparams, interleave=True): + dataset = super(VideoAugmentationProblem, self).preprocess( + dataset=dataset, mode=mode, hparams=hparams, interleave=interleave) + video_augment_func = functools.partial( + video_augmentation, hue=self.hue, contrast=self.contrast, + saturate=self.saturate) + if mode == tf_estimator.ModeKeys.TRAIN: + dataset = dataset.map(video_augment_func) + return dataset + + +class Video2ClassProblem(VideoProblemOld): + """Base class for image classification problems.""" + + @property + def is_small(self): + raise NotImplementedError() + + @property + def num_classes(self): + raise NotImplementedError() + + @property + def train_shards(self): + raise NotImplementedError() + + @property + def dev_shards(self): + return 1 + + @property + def class_labels(self): + return ["ID_%d" % i for i in range(self.num_classes)] + + @property + def image_size(self): + raise NotImplementedError() + + def feature_encoders(self, data_dir): + del data_dir + return { + "inputs": text_encoder.ImageEncoder(), + "targets": text_encoder.ClassLabelEncoder(self.class_labels) + } + + def generator(self, data_dir, tmp_dir, is_training): + raise NotImplementedError() + + def example_reading_spec(self): + label_key = "image/class/label" + data_fields, data_items_to_decoders = ( + super(Video2ClassProblem, self).example_reading_spec()) + data_fields[label_key] = tf.FixedLenFeature((1,), tf.int64) + data_items_to_decoders["targets"] = contrib.slim().tfexample_decoder.Tensor( + label_key) + return data_fields, data_items_to_decoders + + def hparams(self, defaults, unused_model_hparams): + p = 
defaults + p.modality = {"inputs": modalities.ModalityType.IMAGE, + "targets": modalities.ModalityType.CLASS_LABEL} + p.vocab_size = {"inputs": 256, + "targets": self.num_classes} + p.input_space_id = problem.SpaceID.IMAGE + p.target_space_id = problem.SpaceID.IMAGE_LABEL + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + generator_utils.generate_dataset_and_shuffle( + self.generator(data_dir, tmp_dir, True), + self.training_filepaths(data_dir, self.train_shards, shuffled=False), + self.generator(data_dir, tmp_dir, False), + self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)) diff --git a/tensor2tensor/data_generators/video_utils_test.py b/tensor2tensor/data_generators/video_utils_test.py new file mode 100644 index 000000000..7091a65ae --- /dev/null +++ b/tensor2tensor/data_generators/video_utils_test.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""video_utils test.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from absl.testing import parameterized +import numpy as np +from tensor2tensor.data_generators import video_generated # pylint: disable=unused-import +from tensor2tensor.data_generators import video_utils +from tensor2tensor.utils import decoding +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +class VideoUtilsTest(parameterized.TestCase, tf.test.TestCase): + + def get_predictions(self, num_decodes=2): + rng = np.random.RandomState(0) + # num_samples=4 + inputs = rng.randint(0, 255, (4, 2, 64, 64, 3)) + outputs = rng.randint(0, 255, (4, 5, 64, 64, 3)) + targets = rng.randint(0, 255, (4, 5, 64, 64, 3)) + predictions = [] + for input_, output, target in zip(inputs, outputs, targets): + curr_pred = {"inputs": input_, "outputs": output, "targets": target} + predictions.append(curr_pred) + + # num_decodes=2 + predictions = [predictions] * num_decodes + problem = registry.problem("video_stochastic_shapes10k") + return predictions, problem + + def testVideoAugmentation(self): + # smoke-test, test for shapes. 
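Relatedly, the `VideoAugmentationProblem` base defined above applies hue, saturation and contrast jitter only during training, and its docstring notes that a dataset can opt out of any single transform by overriding the matching property. A hypothetical sketch (the class name is invented; a real subclass would still need the usual `VideoProblem` pieces such as `frame_height` and `generate_samples`):

```python
from tensor2tensor.data_generators import video_utils
from tensor2tensor.utils import registry


@registry.register_problem
class VideoMyDatasetNoHue(video_utils.VideoAugmentationProblem):
  """Keeps saturation/contrast jitter but disables the hue transform."""

  @property
  def hue(self):
    return False
```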
+ with tf.Graph().as_default(): + inputs = tf.random_uniform(shape=(3, 64, 64, 3)) + targets = tf.random_uniform(shape=(10, 64, 64, 3)) + features = {"inputs": inputs, "targets": targets} + augment = video_utils.video_augmentation( + features, hue=True, saturate=True, contrast=True) + with tf.Session() as sess: + augment_dict = sess.run(augment) + self.assertEqual(augment_dict["inputs"].shape, (3, 64, 64, 3)) + self.assertEqual(augment_dict["targets"].shape, (10, 64, 64, 3)) + + def testDecodeInMemoryTrue(self): + predictions, problem = self.get_predictions() + decode_hparams = decoding.decode_hparams() + decode_hparams.decode_in_memory = True + decode_hooks = decoding.DecodeHookArgs( + estimator=None, problem=problem, output_dirs=None, + hparams=decode_hparams, decode_hparams=decode_hparams, + predictions=predictions) + metrics = video_utils.summarize_video_metrics(decode_hooks) + + @parameterized.named_parameters( + ("d5_o6", 5, 6)) + # ("d5", 5), ("d10", 10), ("d5_o6", 5, 6)) + def testConvertPredictionsToVideoSummaries(self, num_decodes=5, + max_output_steps=5): + # Initialize predictions. + rng = np.random.RandomState(0) + inputs = rng.randint(0, 255, (2, 32, 32, 3)) + outputs = rng.randint(0, 255, (max_output_steps, 32, 32, 3)) + targets = rng.randint(0, 255, (5, 32, 32, 3)) + + # batch it up. + prediction = [{"outputs": outputs, "inputs": inputs, "targets": targets}]*5 + predictions = [prediction] * num_decodes + decode_hparams = decoding.decode_hparams( + overrides="max_display_decodes=5") + + decode_hooks = decoding.DecodeHookArgs( + estimator=None, problem=None, output_dirs=None, + hparams=decode_hparams, decode_hparams=decode_hparams, + predictions=predictions) + summaries = video_utils.display_video_hooks(decode_hooks) + + for summary in summaries: + self.assertIsInstance(summary, tf.Summary.Value) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/data_generators/vqa.py b/tensor2tensor/data_generators/vqa.py new file mode 100644 index 000000000..8d1eb40dd --- /dev/null +++ b/tensor2tensor/data_generators/vqa.py @@ -0,0 +1,454 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
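Before moving on to the VQA generators below, it may help to see how the `VideoProblem` interface from `video_utils.py` above is meant to be filled in. The following is a hypothetical, minimal subclass, with an invented name and random frames, sketching the properties and the `generate_samples` contract a new video dataset would typically provide:

```python
import numpy as np

from tensor2tensor.data_generators import video_utils
from tensor2tensor.utils import registry


@registry.register_problem
class VideoToyRandom(video_utils.VideoProblem):
  """Ten tiny videos of random noise, eight frames each (illustrative only)."""

  @property
  def frame_height(self):
    return 16

  @property
  def frame_width(self):
    return 16

  @property
  def total_number_of_frames(self):
    # 10 videos * 8 frames; used to decide when generate_data switches shards.
    return 10 * 8

  @property
  def is_generate_per_split(self):
    # Single unified generator; examples are sharded into TRAIN/EVAL for us.
    return False

  def generate_samples(self, data_dir, tmp_dir, dataset_split):
    rng = np.random.RandomState(0)
    for _ in range(10):
      for frame_number in range(8):
        yield {
            "frame": rng.randint(0, 255, (16, 16, 3), dtype=np.uint8),
            "frame_number": [frame_number],
        }
```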
+ +"""Data generators for VQA data sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import base64 +import csv +import json +import os +import random +import sys +import tarfile +import zipfile +import numpy as np + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import image_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import vqa_utils +from tensor2tensor.layers import modalities +from tensor2tensor.utils import contrib +from tensor2tensor.utils import metrics +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +def _get_vqa_v2_annotations(directory, + annotation_url, + annotation_filename="vqa_v2.tar.gz"): + """Extract the VQA V2 annotation files to directory unless it's there.""" + annotation_file = generator_utils.maybe_download_from_drive( + directory, annotation_filename, annotation_url) + with tarfile.open(annotation_file, "r:gz") as annotation_tar: + annotation_tar.extractall(directory) + + +def _get_vqa_v2_image_raw_dataset(directory, image_root_url, image_urls): + """Extract the VQA V2 image data set to directory unless it's there.""" + for url in image_urls: + filename = os.path.basename(url) + download_url = os.path.join(image_root_url, url) + path = generator_utils.maybe_download(directory, filename, download_url) + unzip_dir = os.path.join(directory, filename.strip(".zip")) + if not tf.gfile.Exists(unzip_dir): + zipfile.ZipFile(path, "r").extractall(directory) + + +def _get_vqa_v2_image_feature_dataset( + directory, feature_url, feature_filename="mscoco_feat.tar.gz"): + """Extract the VQA V2 feature data set to directory unless it's there.""" + feature_file = generator_utils.maybe_download_from_drive( + directory, feature_filename, feature_url) + with tarfile.open(feature_file, "r:gz") as feature_tar: + feature_tar.extractall(directory) + + +class ImageQuestion2MultilabelProblem(image_utils.ImageProblem): + """Base class for image question answer problem.""" + + @property + def target_space_id(self): + raise NotImplementedError() + + @property + def vocab_size(self): + raise NotImplementedError + + @property + def num_classes(self): + raise NotImplementedError() + + @property + def vocab_filename(self): + raise NotImplementedError() + + @property + def label_filename(self): + raise NotImplementedError() + + @property + def train_shards(self): + raise NotImplementedError() + + @property + def dev_shards(self): + raise NotImplementedError() + + def source_data_files(self, dataset_split): + raise NotImplementedError() + + def generator(self, data_dir, tmp_dir, dataset_split): + raise NotImplementedError() + + def eval_metrics(self): + return [ + metrics.Metrics.ACC_MULTILABEL_MATCH3, + ] + + def feature_encoders(self, data_dir): + input_encoder = text_encoder.ImageEncoder(channels=self.num_channels) + vocab_file = os.path.join(data_dir, self.vocab_filename) + question_encoder = text_encoder.TokenTextEncoder( + vocab_file, replace_oov="UNK") + label_file = os.path.join(data_dir, self.label_filename) + target_encoder = text_encoder.ClassLabelEncoder( + class_labels_fname=label_file) + return {"inputs": input_encoder, + "question": question_encoder, + "targets": target_encoder} + + def hparams(self, defaults, unused_model_hparams): + p = defaults + question_encoder = self._encoders["question"] + targets_encoder = 
self._encoders["targets"] + + p.modality = { + "inputs": modalities.ModalityType.IDENTITY, + "question": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.MULTI_LABEL, + } + p.vocab_size = { + "inputs": None, + "question": question_encoder.vocab_size, + "targets": targets_encoder.vocab_size, + } + p.input_space_id = problem.SpaceID.IMAGE # multiple input features? + p.target_space_id = self.target_space_id + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + generator_utils.generate_dataset_and_shuffle( + self.generator(data_dir, tmp_dir, problem.DatasetSplit.TRAIN), + self.training_filepaths(data_dir, self.train_shards, shuffled=False), + self.generator(data_dir, tmp_dir, problem.DatasetSplit.EVAL), + self.dev_filepaths(data_dir, self.dev_shards, shuffled=False)) + + +@registry.register_problem +class ImageVqav2Tokens10kLabels3k(ImageQuestion2MultilabelProblem): + """VQA V2, raw images, 10k question vocab, 3k answer label.""" + _MSCOCO_ROOT_URL = "http://msvocds.blob.core.windows.net/" + _MSCOCO_IMAGE_URLS = [ + "coco2014/train2014.zip", "coco2014/val2014.zip", "coco2014/test2014.zip", + ] + _VQA_V2_ANNOTATION_URL = ("https://drive.google.com/uc?export=download&id=" + "1xfMU54ObCLvMRAekT3cfcIg-AgY39fWB") + + _VQA_V2_TRAIN_DATASETS = [ + ("trainval_resnet101_faster_rcnn_genome_36.tsv", + "v2_train2014_annotations.json"), + ] + _VQA_V2_DEV_DATASETS = [ + ("trainval_resnet101_faster_rcnn_genome_36.tsv", + "v2_val2014_annotations.json"), + ] + _VQA_V2_TEST_DATASETS = [ + ("test2015_resnet101_faster_rcnn_genome_36.tsv", + "v2_test2015_annotations.json"), + ] + + def source_data_files(self, dataset_split): + train = dataset_split == problem.DatasetSplit.TRAIN + return self._VQA_V2_TRAIN_DATASETS if train else self._VQA_V2_DEV_DATASETS + + @property + def target_space_id(self): + return problem.SpaceID.GENERIC + + @property + def vocab_size(self): + return 10000 + + @property + def num_classes(self): + return 3000 + + @property + def vocab_filename(self): + return "question.vocab.%d" % self.vocab_size + + @property + def label_filename(self): + return "answer.label.%d" % self.num_classes + + @property + def train_shards(self): + return 128 + + @property + def dev_shards(self): + return 64 + + def example_reading_spec(self): + data_fields, data_items_to_decoders = ( + super(ImageVqav2Tokens10kLabels3k, self).example_reading_spec()) + data_fields["image/image_id"] = tf.FixedLenFeature((), tf.int64) + data_fields["image/question_id"] = tf.FixedLenFeature((), tf.int64) + data_fields["image/question"] = tf.FixedLenSequenceFeature( + (), tf.int64, allow_missing=True) + data_fields["image/answer"] = tf.FixedLenSequenceFeature( + (), tf.int64, allow_missing=True) + + slim = contrib.slim() + data_items_to_decoders["question"] = slim.tfexample_decoder.Tensor( + "image/question") + data_items_to_decoders["targets"] = slim.tfexample_decoder.Tensor( + "image/answer") + return data_fields, data_items_to_decoders + + def preprocess_example(self, example, mode, hparams): + # hparams is model_hparams + image = example["inputs"] + example["inputs"] = vqa_utils.vqa_v2_preprocess_image( + image, hparams.height, hparams.width, mode, + resize_side=hparams.resize_side, distort=hparams.distort, + image_model_fn=hparams.image_model_fn) + return example + + def generator(self, data_dir, tmp_dir, dataset_split): + datasets = self.source_data_files(dataset_split) + return self.vqa_v2_generator(data_dir, tmp_dir, datasets) + + def vqa_v2_generator(self, data_dir, tmp_dir, 
datasets): + """VQA v2 generator using raw images.""" + _get_vqa_v2_annotations(tmp_dir, self._VQA_V2_ANNOTATION_URL) + _get_vqa_v2_image_raw_dataset(tmp_dir, self._MSCOCO_ROOT_URL, + self._MSCOCO_IMAGE_URLS) + vocab_path = os.path.join(data_dir, self.vocab_filename) + if not tf.gfile.Exists(vocab_path): + vocab_tmp_path = os.path.join(tmp_dir, self.vocab_filename) + tf.gfile.Copy(vocab_tmp_path, vocab_path) + with tf.gfile.GFile(vocab_path, mode="r") as f: + vocab_data = "\n\n" + f.read() + "UNK\n" + with tf.gfile.GFile(vocab_path, mode="w") as f: + f.write(vocab_data) + label_path = os.path.join(data_dir, self.label_filename) + if not tf.gfile.Exists(label_path): + label_tmp_path = os.path.join(tmp_dir, self.label_filename) + tf.gfile.Copy(label_tmp_path, label_path) + + vocab_encoder = text_encoder.TokenTextEncoder(vocab_path, replace_oov="UNK") + label_encoder = text_encoder.ClassLabelEncoder( + class_labels_fname=label_path) + + prefix_annotation = [] + for prefix, annotation_file in datasets: + annotation_path = os.path.join(tmp_dir, annotation_file) + with tf.gfile.Open(annotation_path) as f: + annotation_json = json.loads(f.read()) + prefix_annotation += [(prefix, anno) for anno in annotation_json] + random.shuffle(prefix_annotation) + annotation_count = len(prefix_annotation) + tf.logging.info("Processing %d annotations for vqa v2" %(annotation_count)) + + for prefix, anno in prefix_annotation: + image_id = anno["image_id"] + question = vocab_encoder.encode(anno["question"]) + answer = [label_encoder.encode(ans) for ans in anno["answer"]] + answer = answer if answer else [0] # 0 indicates padding + image_filename = "COCO_" + prefix + "_" + str(image_id).zfill(12) + ".jpg" + image_filepath = os.path.join(tmp_dir, prefix, image_filename) + with tf.gfile.Open(image_filepath, "r") as f: + encoded_image_data = f.read() + yield { + "image/encoded": [encoded_image_data], + "image/format": ["jpeg"], + "image/image_id": [image_id], + "image/question_id": [anno["question_id"]], + "image/question": question, + "image/answer": answer, + } + + +@registry.register_problem +class ImageVqav2RcnnFeatureTokens10kLabels3k(ImageVqav2Tokens10kLabels3k): + """VQA V2, image feature, 10k question vocab, 3k answer label.""" + _VQA_V2_FEATURE_URL = ("https://drive.google.com/uc?export=download&id=" + "1yTTFUWqx1SScC-Whs2vRbF3tDsEEjrtt") + + @property + def num_boxes(self): + return 36 + + @property + def feature_dimension(self): + return 2048 + + @property + def spatial_feature_dimension(self): + return 6 + + @property + def feature_file_field_names(self): + return ["image_id", + "image_w", + "image_h", + "num_boxes", + "boxes", + "features"] + + def preprocess_example(self, example, mode, hparams): + # reshape some features + example["inputs"] = tf.reshape( + example["inputs"], [self.num_boxes, 1, self.feature_dimension]) + example["spatial_feature"] = tf.reshape( + example["spatial_feature"], + [self.num_boxes, 1, self.spatial_feature_dimension]) + return example + + def example_reading_spec(self): + slim = contrib.slim() + data_fields, data_items_to_decoders = {}, {} + data_fields["image/feature"] = tf.FixedLenSequenceFeature( + (), tf.float32, allow_missing=True) + data_fields["image/spatial_feature"] = tf.FixedLenSequenceFeature( + (), tf.float32, allow_missing=True) + data_fields["image/image_id"] = tf.FixedLenFeature((), tf.int64) + data_fields["image/question_id"] = tf.FixedLenFeature((), tf.int64) + data_fields["image/question"] = tf.FixedLenSequenceFeature( + (), tf.int64, 
allow_missing=True) + data_fields["image/answer"] = tf.FixedLenSequenceFeature( + (), tf.int64, allow_missing=True) + + data_items_to_decoders["inputs"] = slim.tfexample_decoder.Tensor( + "image/feature") + data_items_to_decoders["question_id"] = slim.tfexample_decoder.Tensor( + "image/question_id") + data_items_to_decoders["image_id"] = slim.tfexample_decoder.Tensor( + "image/image_id") + + data_items_to_decoders["spatial_feature"] = slim.tfexample_decoder.Tensor( + "image/spatial_feature") + data_items_to_decoders["question"] = slim.tfexample_decoder.Tensor( + "image/question") + data_items_to_decoders["targets"] = slim.tfexample_decoder.Tensor( + "image/answer") + + return data_fields, data_items_to_decoders + + def vqa_v2_generator(self, data_dir, tmp_dir, datasets): + """VQA v2 generator using image features.""" + _get_vqa_v2_annotations(tmp_dir, self._VQA_V2_ANNOTATION_URL) + _get_vqa_v2_image_feature_dataset(tmp_dir, self._VQA_V2_FEATURE_URL) + vocab_path = os.path.join(data_dir, self.vocab_filename) + if not tf.gfile.Exists(vocab_path): + vocab_tmp_path = os.path.join(tmp_dir, self.vocab_filename) + tf.gfile.Copy(vocab_tmp_path, vocab_path) + with tf.gfile.GFile(vocab_path, mode="r") as f: + vocab_data = "\n\n" + f.read() + "UNK\n" + with tf.gfile.GFile(vocab_path, mode="w") as f: + f.write(vocab_data) + label_path = os.path.join(data_dir, self.label_filename) + if not tf.gfile.Exists(label_path): + label_tmp_path = os.path.join(tmp_dir, self.label_filename) + tf.gfile.Copy(label_tmp_path, label_path) + + vocab_encoder = text_encoder.TokenTextEncoder(vocab_path, replace_oov="UNK") + label_encoder = text_encoder.ClassLabelEncoder( + class_labels_fname=label_path) + + # merge annotations + annotation_json = [] + for _, annotation_file in datasets: + annotation_path = os.path.join(tmp_dir, annotation_file) + with tf.gfile.Open(annotation_path) as f: + annotation_json += json.loads(f.read()) + annotation_count = len(annotation_json) + tf.logging.info("Processing %d annotations for vqa v2" %(annotation_count)) + + imageid2annotation = {} + for anno in annotation_json: + if anno["image_id"] not in imageid2annotation: + imageid2annotation[anno["image_id"]] = [anno] + else: + imageid2annotation[anno["image_id"]].append(anno) + + csv.field_size_limit(sys.maxsize) + for feature_file, _ in datasets: + feature_file_path = os.path.join(tmp_dir, feature_file) + with open(feature_file_path, "r+b") as tsv_file: + csv_reader = csv.DictReader( + tsv_file, delimiter="\t", fieldnames=self.feature_file_field_names) + for item in csv_reader: + item["num_boxes"] = int(item["num_boxes"]) + image_id = int(item["image_id"]) + image_w = float(item["image_w"]) + image_h = float(item["image_h"]) + bboxes = np.frombuffer(base64.decodestring(item["boxes"]), + dtype=np.float32).reshape( + (item["num_boxes"], -1)) + + box_width = bboxes[:, 2] - bboxes[:, 0] + box_height = bboxes[:, 3] - bboxes[:, 1] + scaled_width = box_width / image_w + scaled_height = box_height / image_h + scaled_x = bboxes[:, 0] / image_w + scaled_y = bboxes[:, 1] / image_h + + box_width = box_width[..., np.newaxis] + box_height = box_height[..., np.newaxis] + scaled_width = scaled_width[..., np.newaxis] + scaled_height = scaled_height[..., np.newaxis] + scaled_x = scaled_x[..., np.newaxis] + scaled_y = scaled_y[..., np.newaxis] + + spatial_features = np.concatenate( + (scaled_x, + scaled_y, + scaled_x + scaled_width, + scaled_y + scaled_height, + scaled_width, + scaled_height), + axis=1) + + if image_id in imageid2annotation: + for anno in 
imageid2annotation[image_id]: + question = vocab_encoder.encode(anno["question"]) + answer = [label_encoder.encode(ans) for ans in anno["answer"]] + answer = answer if answer else [0] # 0 indicates padding + yield { + "image/feature": + np.frombuffer(base64.decodestring(item["features"]), + dtype=np.float32).tolist(), + "image/spatial_feature": spatial_features.flatten().tolist(), + "image/height": [image_h], + "image/width": [image_w], + "image/bboxes": bboxes.flatten().tolist(), + "image/image_id": [image_id], + "image/question_id": [anno["question_id"]], + "image/question": question, + "image/answer": answer, + } + + del imageid2annotation[image_id] + + # assert all annotations are included + assert not imageid2annotation diff --git a/tensor2tensor/data_generators/vqa_utils.py b/tensor2tensor/data_generators/vqa_utils.py new file mode 100644 index 000000000..38042b139 --- /dev/null +++ b/tensor2tensor/data_generators/vqa_utils.py @@ -0,0 +1,237 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for VQA data sets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +from tensorflow.python.ops import control_flow_ops + +# some functions are copied and modified from +# vgg_preprocessing and inception_preprocessing in +# models/research/slim/preprocessing/ + +_R_MEAN = 123.68 +_G_MEAN = 116.78 +_B_MEAN = 103.94 + + +def _smallest_size_at_least(height, width, smallest_side): + """Computes new shape with the smallest side equal to `smallest_side`. + + Computes new shape with the smallest side equal to `smallest_side` while + preserving the original aspect ratio. + + Args: + height: an int32 scalar tensor indicating the current height. + width: an int32 scalar tensor indicating the current width. + smallest_side: A python integer or scalar `Tensor` indicating the size of + the smallest side after resize. + + Returns: + new_height: an int32 scalar tensor indicating the new height. + new_width: and int32 scalar tensor indicating the new width. + """ + smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32) + + height = tf.to_float(height) + width = tf.to_float(width) + smallest_side = tf.to_float(smallest_side) + + scale = tf.cond( + tf.greater(height, width), lambda: smallest_side / width, + lambda: smallest_side / height) + new_height = tf.to_int32(height * scale) + new_width = tf.to_int32(width * scale) + return new_height, new_width + + +def _aspect_preserving_resize(image, smallest_side): + """Resize images preserving the original aspect ratio. + + Args: + image: A 3-D image `Tensor`. + smallest_side: A python integer or scalar `Tensor` indicating the size of + the smallest side after resize. + + Returns: + resized_image: A 3-D tensor containing the resized image. 
+ """ + smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32) + + shape = tf.shape(image) + height = shape[0] + width = shape[1] + new_height, new_width = _smallest_size_at_least(height, width, smallest_side) + image = tf.expand_dims(image, 0) + resized_image = tf.image.resize_images( + image, size=[new_height, new_width], method=tf.image.ResizeMethod.BICUBIC) + + resized_image = tf.squeeze(resized_image) + resized_image.set_shape([None, None, 3]) + return resized_image + + +def _flip(image): + """Random horizontal image flip.""" + image = tf.image.random_flip_left_right(image) + return image + + +def _distort_color(image, color_ordering=0, scope=None): + """Distort the color of a Tensor image. + + Each color distortion is non-commutative and thus ordering of the color ops + matters. Ideally we would randomly permute the ordering of the color ops. + Rather then adding that level of complication, we select a distinct ordering + of color ops for each preprocessing thread. + + Args: + image: 3-D Tensor containing single image in [0, 1]. + color_ordering: Python int, a type of distortion (valid values: 0-3). + scope: Optional scope for name_scope. + Returns: + 3-D Tensor color-distorted image on range [0, 1] + Raises: + ValueError: if color_ordering not in [0, 3] + """ + with tf.name_scope(scope, "distort_color", [image]): + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + elif color_ordering == 1: + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + elif color_ordering == 2: + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + elif color_ordering == 3: + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + else: + raise ValueError("color_ordering must be in [0, 3]") + + # The random_* ops do not necessarily clamp. + return tf.clip_by_value(image, 0.0, 1.0) + + +def _apply_with_random_selector(x, func, num_cases): + """Computes func(x, sel), with sel sampled from [0...num_cases-1]. + + Args: + x: input Tensor. + func: Python function to apply. + num_cases: Python int32, number of cases to sample sel from. + + Returns: + The result of func(x, sel), where func receives the value of the + selector as a python integer, but sel is sampled dynamically. + """ + sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32) + # Pass the real x only to one of the func calls. + return control_flow_ops.merge([ + func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case) + for case in range(num_cases) + ])[0] + + +def _mean_image_subtraction(image, means): + """Subtracts the given means from each image channel. + + For example: + means = [123.68, 116.779, 103.939] + image = _mean_image_subtraction(image, means) + + Note that the rank of `image` must be known. 
+ + Args: + image: a tensor of size [height, width, C]. + means: a C-vector of values to subtract from each channel. + + Returns: + the centered image. + + Raises: + ValueError: If the rank of `image` is unknown, if `image` has a rank other + than three or if the number of channels in `image` doesn't match the + number of values in `means`. + """ + if image.get_shape().ndims != 3: + raise ValueError("Input must be of size [height, width, C>0]") + num_channels = image.get_shape().as_list()[-1] + if len(means) != num_channels: + raise ValueError("len(means) must match the number of channels") + + channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image) + for i in range(num_channels): + channels[i] -= means[i] + return tf.concat(axis=2, values=channels) + + +def vqa_v2_preprocess_image( + image, + height, + width, + mode, + resize_side=512, + distort=True, + image_model_fn="resnet_v1_152", +): + """vqa v2 preprocess image.""" + + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + assert resize_side > 0 + if resize_side: + image = _aspect_preserving_resize(image, resize_side) + if mode == tf_estimator.ModeKeys.TRAIN: + image = tf.random_crop(image, [height, width, 3]) + else: + # Central crop, assuming resize_height > height, resize_width > width. + image = tf.image.resize_image_with_crop_or_pad(image, height, width) + + image = tf.clip_by_value(image, 0.0, 1.0) + + if mode == tf_estimator.ModeKeys.TRAIN and distort: + image = _flip(image) + num_distort_cases = 4 + # pylint: disable=unnecessary-lambda + image = _apply_with_random_selector( + image, lambda x, ordering: _distort_color(x, ordering), + num_cases=num_distort_cases) + + if image_model_fn.startswith("resnet_v1"): + # resnet_v1 uses vgg preprocessing + image = image * 255. + image = _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]) + elif image_model_fn.startswith("resnet_v2"): + # resnet v2 uses inception preprocessing + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + + return image diff --git a/tensor2tensor/data_generators/wiki.py b/tensor2tensor/data_generators/wiki.py new file mode 100644 index 000000000..892f189b5 --- /dev/null +++ b/tensor2tensor/data_generators/wiki.py @@ -0,0 +1,421 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generator for Wikipedia title to article dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import subprocess +import numpy as np + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_problem +class LanguagemodelWikiXmlV8kL1k(text_problems.ChoppedTextProblem): + """A language model on English Wikipedia. 
+ + XML dump is chopped arbitrarily into sequences of length 1024 tokens, + without regard to article boundaries. + """ + + def maybe_prepare_text(self, tmp_dir): + """Download corpus if necessary, decompress, split into multiple text files. + + Args: + tmp_dir: directory containing dataset. + + Returns: + list of filepaths for local text files. + """ + compressed_filename = os.path.basename(self.corpus_url) + compressed_filepath = os.path.join(tmp_dir, compressed_filename) + decompressed_filepath = compressed_filepath[:-4] + split_file_prefix = decompressed_filepath + "-part-" + split_filepattern = split_file_prefix + "?????" + split_files = sorted(tf.gfile.Glob(split_filepattern)) + if not split_files: + if not tf.gfile.Exists(decompressed_filepath): + if not tf.gfile.Exists(compressed_filepath): + generator_utils.maybe_download( + tmp_dir, compressed_filepath, self.corpus_url) + assert not subprocess.call(["bunzip2", compressed_filepath]) + assert tf.gfile.Exists(decompressed_filepath) + assert not subprocess.call([ + "split", "--line-bytes=4M", "--suffix-length=5", + "--numeric-suffixes", decompressed_filepath, split_file_prefix]) + split_files = sorted(tf.gfile.Glob(split_filepattern)) + assert split_files + return split_files + + def train_text_filepaths(self, tmp_dir): + all_files = self.maybe_prepare_text(tmp_dir) + return [f for i, f in enumerate(all_files) if i % self.dev_fraction != 0] + + def dev_text_filepaths(self, tmp_dir): + all_files = self.maybe_prepare_text(tmp_dir) + return [f for i, f in enumerate(all_files) if i % self.dev_fraction == 0] + + @property + def dev_fraction(self): + return 5000 + + @property + def corpus_url(http://github.com/self): + return ("https://archive.org/download/enwiki-20171201/" + "enwiki-20171201-pages-articles.xml.bz2") + + @property + def approx_vocab_size(self): + return 2**13 # 8192 + + @property + def sequence_length(self): + """Length of each example (in tokens).""" + return 1024 + + @property + def max_chars_for_vocab(self): + """Number of characters of training data to use for generating vocab.""" + # magic number for backwards compatibility + return 41800829 + + +@registry.register_problem +class LanguagemodelWikiXmlV8kL4k(LanguagemodelWikiXmlV8kL1k): + """A language model on English Wikipedia. + + XML dump is chopped arbitrarily into sequences of length 4096 tokens, + without regard to article boundaries. + """ + + @property + def sequence_length(self): + """Length of each example (in tokens).""" + return 4096 + + +class LanguagemodelWikiScramble(LanguagemodelWikiXmlV8kL1k): + """Language modeling on English wikipedia. + + "targets" is a sequence of sequence_length tokens - a fragment of an article. + "inputs" is a copy of "targets", but with a random scramble_fraction of the + tokens randomly permuted. + + This dataset is intended to test parallel (non-autoregressive) prediction + of the target sequence given the input sequence. 
+ """ + + def example_generator(self, encoder, tmp_dir, task_id): + for x in super(LanguagemodelWikiScramble, self).example_generator( + encoder, tmp_dir, task_id): + x["inputs"] = self.scramble(x["targets"]) + yield x + + @property + def scramble_fraction(self): + raise NotImplementedError() + + @property + def has_inputs(self): + return True + + @property + def input_space_id(self): + return problem.SpaceID.EN_TOK + + @property + def targeted_vocab_size(self): + return 2**13 # 8192 + + @property + def remainder_policy(self): + """What to do with leftover tokens.""" + return "drop" + + def scramble(self, seq): + seq = np.array(seq) + num_permute = int(self.sequence_length * self.scramble_fraction) + full_permutation = np.random.permutation(self.sequence_length) + inverse_full_permutation = np.argsort(full_permutation) + partial_permutation = np.random.permutation(num_permute) + seq = seq[full_permutation] + seq = np.concatenate( + (seq[:num_permute][partial_permutation], seq[num_permute:])) + seq = seq[inverse_full_permutation] + seq = list(seq) + return seq + + +@registry.register_problem +class LanguagemodelWikiScrambleL128(LanguagemodelWikiScramble): + """Sequence length 128, 50% scrambled.""" + + @property + def sequence_length(self): + return 128 + + @property + def scramble_fraction(self): + return 0.5 + + +@registry.register_problem +class LanguagemodelWikiScrambleL1k(LanguagemodelWikiScramble): + """Sequence length 1024, 50% scrambled.""" + + @property + def sequence_length(self): + return 1024 + + @property + def scramble_fraction(self): + return 0.5 + + +@registry.register_problem +class LanguagemodelWikiNorefV8kL1k(LanguagemodelWikiXmlV8kL1k): + """A language model on English Wikipedia. + + References and internal links are removed from the raw XML. + + Special pages (non-articles) are dropped. + + This more closely resembles plain text, though there are still some xml + elements, like tables. + + Each article is prefixed by a line containing the title and length in + characters - e.g. + title: "Price of Tea in China" length: 12345 + During inference time, you can forward generate starting with such a header + in order to obtain a randomly generated article with a given title and + (approximate) length. + + Result is chopped arbitrarily into sequences of length 1024 tokens, + without regard to article boundaries. + """ + + def filepath_to_unicode_strings(self, filepath): + """Overrides the base class to clean up the xml dump before tokenizing.""" + dump = text_encoder.to_unicode_ignore_errors(tf.gfile.Open(filepath).read()) + pages = _dump_to_pages(dump) + ret = u"" + for p in pages: + title = _page_to_title(p) + text = _page_to_text(p) + text = _remove_triple_quotes( + _remove_double_brackets(_remove_references(text))) + if u":" in title: + # not a regular article + continue + if len(text) <= 140: + # Probably a redirect or something like that. Skip it. + continue + ret += u"title: \"%s\" length: %d\n%s\n" % (title, len(text), text) + yield ret + + @property + def max_chars_for_vocab(self): + """Number of characters of training data to use for generating vocab.""" + # magic number for backwards compatibility + return 21240483 + + +def _dump_to_pages(dump): + """Extract pages from an xml dump. 
+
+  Args:
+    dump: a unicode string
+  Returns:
+    a list of unicode strings
+  """
+  pos = 0
+  ret = []
+  start_tag = u"<page>\n"
+  end_tag = u"</page>\n"
+  while True:
+    start_pos = dump.find(start_tag, pos)
+    if start_pos == -1:
+      break
+    start_pos += len(start_tag)
+    end_pos = dump.find(end_tag, start_pos)
+    if end_pos == -1:
+      break
+    ret.append(dump[start_pos:end_pos])
+    pos = end_pos + len(end_tag)
+  return ret
+
+
+def _page_to_title(page):
+  """Extract the title from a page.
+
+  Args:
+    page: a unicode string
+  Returns:
+    a unicode string
+  """
+  # print("page=%s" % page)
+  start_tag = u"<title>"
+  end_tag = u"</title>"
+  start_pos = page.find(start_tag)
+  end_pos = page.find(end_tag)
+  assert start_pos != -1
+  assert end_pos != -1
+  start_pos += len(start_tag)
+  return page[start_pos:end_pos]
+
+
+def _page_to_text(page):
+  """Extract the text from a page.
+
+  Args:
+    page: a unicode string
+  Returns:
+    a unicode string
+  """
+  # text start tag looks like "<text xml:space=\"preserve\">"
+  start_pos = page.find(u"<text")
+  assert start_pos != -1
+  end_tag_pos = page.find(u">", start_pos)
+  assert end_tag_pos != -1
+  end_tag_pos += len(u">")
+  end_pos = page.find(u"</text>")
+  if end_pos == -1:
+    return u""
+  return page[end_tag_pos:end_pos]
+
+
+def _find_and_replace(text, start_string, end_string, replace_fn):
+  """Remove everything found between instances of start_string and end_string.
+
+  Replace each such instance with replace_fn(removed_text)
+
+  e.g. _find_and_replace(u"the [[fat]] cat [[sat]]", u"[[", u"]]", lambda x: x)
+    = u"the fat cat sat"
+
+  Args:
+    text: a unicode string
+    start_string: a unicode string
+    end_string: a unicode string
+    replace_fn: a unary function from unicode string to unicode string
+
+  Returns:
+    a string
+  """
+  ret = u""
+  current_pos = 0
+  while True:
+    start_pos = text.find(start_string, current_pos)
+    if start_pos == -1:
+      ret += text[current_pos:]
+      break
+    ret += text[current_pos:start_pos]
+    end_pos = text.find(end_string, start_pos + len(start_string))
+    if end_pos == -1:
+      break
+    ret += replace_fn(text[start_pos + len(start_string):end_pos])
+    current_pos = end_pos + len(end_string)
+  return ret
+
+
+def _remove_references(text):
+  """Strip out references from wikipedia xml."""
+  return _find_and_replace(text, u"<ref", u"</ref>", lambda s: "")
+
+
+def _remove_triple_quotes(text):
+  """Strip out triple quotes from wikipedia xml."""
+  return _find_and_replace(text, u"'''", u"'''", lambda s: s)
+
+
+def _remove_double_brackets(text):
+  """Remove double brackets (internal links) but leave the viewable text.
+
+  Args:
+    text: a unicode string
+  Returns:
+    a unicode string
+  """
+  def replacement_fn(s):
+    if u":" in s:
+      # this is probably a category or something like that.
+      return ""
+    # keep the part after the bar.
+    bar_pos = s.find(u"|")
+    if bar_pos == -1:
+      return s
+    return s[bar_pos + 1:]
+  return _find_and_replace(text, u"[[", u"]]", replacement_fn)
+
+
+@registry.register_problem
+class LanguagemodelWikiNorefV8kL16k(LanguagemodelWikiNorefV8kL1k):
+  """A language model on English Wikipedia.
+
+  References removed. Chopped into segments of 16k tokens.
+  """
+
+  @property
+  def sequence_length(self):
+    """Length of each example (in tokens)."""
+    return 2**14
+
+
+@registry.register_problem
+class LanguagemodelWikiNorefV32kL1k(LanguagemodelWikiNorefV8kL1k):
+  """32k vocab."""
+
+  @property
+  def approx_vocab_size(self):
+    return 2**15  # 32768
+
+  @property
+  def max_chars_for_vocab(self):
+    return 100 * (10 ** 6)
+
+
+@registry.register_problem
+class LanguagemodelWikiNorefV32kL16k(LanguagemodelWikiNorefV32kL1k):
+  """A language model on English Wikipedia.
+ + References removed. Chopped into segments of 16k tokens. + """ + + @property + def sequence_length(self): + """Length of each example (in tokens).""" + return 2**14 + + +@registry.register_problem +class LanguagemodelWikiNorefV128kL1k(LanguagemodelWikiNorefV8kL1k): + """128k vocab.""" + + @property + def approx_vocab_size(self): + return 2**17 # 131072 + + @property + def max_chars_for_vocab(self): + return 100 * (10 ** 6) diff --git a/tensor2tensor/data_generators/wiki_lm.py b/tensor2tensor/data_generators/wiki_lm.py new file mode 100644 index 000000000..61713bab5 --- /dev/null +++ b/tensor2tensor/data_generators/wiki_lm.py @@ -0,0 +1,277 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for untokenized wikipedia LM dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import six + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +def concat_generator(filename, up_threshold, low_threshold=10): + """Generate concatenated lines from file upto up_threshold characters.""" + txt = "" + for line in tf.gfile.Open(filename): + line = line.strip() + if len(txt) + len(line) + 1 >= up_threshold: + ret = txt + txt = "" + # We don't yield very short long parts to prevent noisy examples. + if len(ret) > low_threshold and len(ret) < up_threshold: + yield {"targets": ret} + + if not txt: + txt = line + else: + txt = " ".join([txt, line]) + + +def mix_generators(generator_list): + """Given python generators, generate from one, then from another, etc.""" + i = 0 + l = len(generator_list) + stopiters_seen = 0 + while stopiters_seen <= l: + try: + yield six.next(generator_list[i % l]) + i += 1 + stopiters_seen = 0 + except StopIteration: + i += 1 + stopiters_seen += 1 + + +# File names and Google drive ids for the training/eval/test Wikipedia data. 
+_EN_TRAIN_NAME_ID = ("enwiki_train.txt.gz", "1-l02fI15ieMIZk8EnXhzhsvuEYRoznZ8") +_EN_EVAL_NAME_ID = ("enwiki_eval.txt.gz", "1odhDxWKtAPKXwxRw1KCrmlrVewxdXYq7") +_EN_TEST_NAME_ID = ("enwiki_test.txt.gz", "1i1Bg6XqvdRl1LuOiIWbg7ww8Y02Ip5VK") + +_DE_TRAIN_NAME_ID = ("dewiki_train.txt.gz", "1FzEwoPonw9xlwX34vLPFInUF8F4X5yJy") +_DE_EVAL_NAME_ID = ("dewiki_eval.txt.gz", "1EKwRRPHyWny0RJ-aqSGMcNfjAlzFl51B") +_DE_TEST_NAME_ID = ("dewiki_test.txt.gz", "1Kr13Y7y_OD3JtUM9riXpFQP9UiHDkcFY") + +_FR_TRAIN_NAME_ID = ("frwiki_train.txt.gz", "1etUIEZxMQKORwLGkssE5wlfCxxkeo8WV") +_FR_EVAL_NAME_ID = ("frwiki_eval.txt.gz", "13qrR5ZnHRgIMdcURVpixKL9gTO23GcPc") +_FR_TEST_NAME_ID = ("frwiki_test.txt.gz", "1mQpHRkAV9KXt68de69RwR8dkDi8EEusV") + +_RO_TRAIN_NAME_ID = ("rowiki_train.txt.gz", "1wUJTEAlQeDcAwFnBxa8PzE-DCiXSU_W7") +_RO_EVAL_NAME_ID = ("rowiki_eval.txt.gz", "1uIPy2ZgkyArPy_gnsILENjgv4QQmSKtx") +_RO_TEST_NAME_ID = ("rowiki_test.txt.gz", "1kphjN4jXTbw8HyRYKaRE2zY4D7Fr-p7-") + + +@registry.register_problem +class LanguagemodelEnWiki32k(text_problems.Text2SelfProblem): + """A language model on the untokenized wikipedia corpus, English.""" + + train_names_ids = [_EN_TRAIN_NAME_ID] + eval_names_ids = [_EN_EVAL_NAME_ID] + test_names_ids = [_EN_TEST_NAME_ID] + + @property + def approx_vocab_size(self): + return 32000 + + @property + def max_samples_for_vocab(self): + return 128000 + + @property + def combine_characters_threshold(self): + """Threshold for upto how many characters to combine in examples.""" + return 512*8 # So we should have 512 tokens on average, maybe more. + + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 100, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }, { + "split": problem.DatasetSplit.TEST, + "shards": 1, + }] + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generate samples.""" + if dataset_split == problem.DatasetSplit.TRAIN: + file_names_ids = self.train_names_ids + elif dataset_split == problem.DatasetSplit.TEST: + file_names_ids = self.test_names_ids + else: + file_names_ids = self.eval_names_ids + + wiki_generators = [] + for (fname, fid) in file_names_ids: + url = "https://drive.google.com/uc?export=download&id=" + fid + download_path = generator_utils.maybe_download_from_drive( + tmp_dir, fname, url) + wiki_file = os.path.join(tmp_dir, fname[:-3]) + if not tf.gfile.Exists(wiki_file): + generator_utils.gunzip_file(download_path, wiki_file) + wiki_generators.append( + concat_generator(wiki_file, self.combine_characters_threshold)) + + for example in mix_generators(wiki_generators): + yield example + + +@registry.register_problem +class LanguagemodelEnWiki64k(LanguagemodelEnWiki32k): + """As above, with 64k vocabulary.""" + + @property + def approx_vocab_size(self): + return 64000 + + +@registry.register_problem +class LanguagemodelEnWiki64kShorter(LanguagemodelEnWiki64k): + """With 64k vocabulary and shorter truncation lengths.""" + + @property + def combine_characters_threshold(self): + """Threshold for upto how many characters to combine in examples.""" + return 384*8 + + @property + def use_vocab_from_other_problem(self): + return LanguagemodelEnWiki64k() + + +@registry.register_problem +class LanguagemodelDeWiki32k(LanguagemodelEnWiki32k): + """A language model on the untokenized wikipedia corpus, German.""" + + train_names_ids = [_DE_TRAIN_NAME_ID] + 
eval_names_ids = [_DE_EVAL_NAME_ID] + test_names_ids = [_DE_TEST_NAME_ID] + + +@registry.register_problem +class LanguagemodelDeWiki64k(LanguagemodelDeWiki32k): + """As above, with 64k vocabulary.""" + + @property + def approx_vocab_size(self): + return 64000 + + +@registry.register_problem +class LanguagemodelFrWiki32k(LanguagemodelEnWiki32k): + """A language model on the untokenized wikipedia corpus, French.""" + + train_names_ids = [_FR_TRAIN_NAME_ID] + eval_names_ids = [_FR_EVAL_NAME_ID] + test_names_ids = [_FR_TEST_NAME_ID] + + +@registry.register_problem +class LanguagemodelFrWiki64k(LanguagemodelFrWiki32k): + """As above, with 64k vocabulary.""" + + @property + def approx_vocab_size(self): + return 64000 + + +@registry.register_problem +class LanguagemodelRoWiki32k(LanguagemodelEnWiki32k): + """A language model on the untokenized wikipedia corpus, Romanian.""" + + train_names_ids = [_RO_TRAIN_NAME_ID] + eval_names_ids = [_RO_EVAL_NAME_ID] + test_names_ids = [_RO_TEST_NAME_ID] + + +@registry.register_problem +class LanguagemodelRoWiki64k(LanguagemodelRoWiki32k): + """As above, with 64k vocabulary.""" + + @property + def approx_vocab_size(self): + return 64000 + + +@registry.register_problem +class LanguagemodelDeEnFrRoWiki64k(LanguagemodelEnWiki32k): + """A language model on untokenized Wikipedia, 4 languages together.""" + + train_names_ids = [_DE_TRAIN_NAME_ID, _FR_TRAIN_NAME_ID, + _EN_TRAIN_NAME_ID, _RO_TRAIN_NAME_ID] + eval_names_ids = [_DE_EVAL_NAME_ID, _FR_EVAL_NAME_ID, + _EN_EVAL_NAME_ID, _RO_EVAL_NAME_ID] + test_names_ids = [_DE_TEST_NAME_ID, _FR_TEST_NAME_ID, + _EN_TEST_NAME_ID, _RO_TEST_NAME_ID] + + @property + def approx_vocab_size(self): + return 64000 + + @property + def max_samples_for_vocab(self): + return 256000 # Samples are intertwined, take more to cover 4 languages. + + +@registry.register_problem +class LanguagemodelDeEnFrRoWiki64kFitbPacked1k( + LanguagemodelDeEnFrRoWiki64k): + """4 languages fill-in-the-blanks text-to-text problem.""" + + @property + def use_vocab_from_other_problem(self): + return LanguagemodelDeEnFrRoWiki64k() + + @property + def has_inputs(self): + return True + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + for example in super( + LanguagemodelDeEnFrRoWiki64kFitbPacked1k, self).generate_samples( + data_dir, tmp_dir, dataset_split): + a, b = generator_utils.random_deinterleave(example["targets"]) + yield {"inputs": a, "targets": b} + + @property + def num_training_examples(self): + return 3597800 + + @property + def packed_length(self): + return 1024 + + @property + def inputs_prefix(self): + return "wiki fill " + + @property + def targets_prefix(self): + return "wiki fill " diff --git a/tensor2tensor/data_generators/wiki_multi_problems.py b/tensor2tensor/data_generators/wiki_multi_problems.py new file mode 100644 index 000000000..135648d34 --- /dev/null +++ b/tensor2tensor/data_generators/wiki_multi_problems.py @@ -0,0 +1,362 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for Wiki LM and MNLI combined datasets.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators import cnn_dailymail +from tensor2tensor.data_generators import multi_problem +from tensor2tensor.data_generators import multi_problem_v2 +from tensor2tensor.data_generators import multinli +from tensor2tensor.data_generators import squad +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import translate_ende +from tensor2tensor.data_generators import translate_enfr +from tensor2tensor.data_generators import translate_enro +from tensor2tensor.data_generators import wiki_lm +from tensor2tensor.utils import registry + + +@registry.register_problem +class LanguagemodelEnWikiLMMultiNLISubwords(multi_problem.MultiProblem): + """Wiki LM and MNLI mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelEnWikiLMMultiNLISubwords, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelEnWiki32k()) + self.task_list.append(multinli.MultiNLIWikiLMSharedVocab()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelEnWikiLMMultiNLISubwordsV2( + multi_problem_v2.MultiText2TextProblem): + """Wiki LM and MNLI mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + problems = [ + wiki_lm.LanguagemodelEnWiki32k(), + multinli.MultiNLIWikiLMSharedVocab(), + ] + schedule = multi_problem_v2.constant_schedule([0.5, 0.5]) + super(LanguagemodelEnWikiLMMultiNLISubwordsV2, self).__init__( + problems, schedule, was_reversed=was_reversed, was_copy=was_copy) + + @property + def has_inputs(self): + return False + + @property + def use_vocab_from_other_problem(self): + return wiki_lm.LanguagemodelEnWiki32k() + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelMultiWikiTranslatePacked1k( + multi_problem_v2.MultiText2TextProblem): + """Wiki-LM, Translation, MNLI, SQUAD mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + problems = [] + rates = [] + for rate, also_reverse, cls in self.problems_and_rates: + for r in [False, True] if also_reverse else [False]: + problems.append(cls(was_reversed=r)) + rates.append(rate) + pmf = multi_problem_v2.epoch_rates_to_pmf(problems, epoch_rates=rates) + schedule = multi_problem_v2.constant_schedule(pmf) + super(LanguagemodelMultiWikiTranslatePacked1k, self).__init__( + problems, schedule, was_reversed=was_reversed, was_copy=was_copy) + + @property + def problems_and_rates(self): + """Returns a list of (weight, also_reverse, problem_class) triples.""" + return [ + (1.0, True, wiki_lm.LanguagemodelDeEnFrRoWiki64kFitbPacked1k), + (1.0, True, translate_ende.TranslateEndeWmtMulti64kPacked1k), + (1.0, True, translate_enfr.TranslateEnfrWmtMulti64kPacked1k), + (1.0, True, translate_enro.TranslateEnroWmtMultiTiny64kPacked1k), + (1.0, True, cnn_dailymail.SummarizeCnnDailymailMulti64kPacked1k), + (1.0, False, multinli.MultiNLIText2textMulti64kPacked1k), + (1.0, False, squad.SquadText2textMulti64kPacked1k), + ] + + @property + def has_inputs(self): + return True + + @property + def use_vocab_from_other_problem(self): + return 
wiki_lm.LanguagemodelDeEnFrRoWiki64k() + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + @property + def packed_length(self): + return 1024 + + +@registry.register_problem +class LanguagemodelMultiWikiTranslatePacked1kV2( + LanguagemodelMultiWikiTranslatePacked1k): + """Higher rates for rarer problems.""" + + @property + def problems_and_rates(self): + """Returns a list of (weight, also_reverse, problem_class) triples.""" + return [ + (1.0, True, wiki_lm.LanguagemodelDeEnFrRoWiki64kFitbPacked1k), + (3.0, True, translate_ende.TranslateEndeWmtMulti64kPacked1k), + (1.0, True, translate_enfr.TranslateEnfrWmtMulti64kPacked1k), + (100.0, True, translate_enro.TranslateEnroWmtMultiTiny64kPacked1k), + (1.0, True, cnn_dailymail.SummarizeCnnDailymailMulti64kPacked1k), + (10.0, False, multinli.MultiNLIText2textMulti64kPacked1k), + (10.0, False, squad.SquadText2textMulti64kPacked1k), + ] + + +@registry.register_problem +class LanguagemodelEnWikiLMMultiNLISubwords64k(multi_problem.MultiProblem): + """Wiki LM and MNLI mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelEnWikiLMMultiNLISubwords64k, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelEnWiki64k()) + self.task_list.append(multinli.MultiNLIWikiLMSharedVocab64k()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelEnWikiLMShortMultiNLISubwords64k(multi_problem.MultiProblem): + """Wiki LM and MNLI mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelEnWikiLMShortMultiNLISubwords64k, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelEnWiki64kShorter()) + self.task_list.append(multinli.MultiNLIWikiLMSharedVocab64k()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelEnWikiLMSummarizeCnndmSubwords(multi_problem.MultiProblem): + """Wiki LM and CNN/DM summarization mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelEnWikiLMSummarizeCnndmSubwords, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelEnWiki32k()) + self.task_list.append( + cnn_dailymail.SummarizeCnnDailymailWikiLMSharedVocab()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelEnWikiLMSummarizeCnndmSubwords64k( + multi_problem.MultiProblem): + """Wiki LM and CNN/DM summarization mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelEnWikiLMSummarizeCnndmSubwords64k, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelEnWiki64k()) + self.task_list.append( + cnn_dailymail.SummarizeCnnDailymailWikiLMSharedVocab64k()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelMultiWikiTranslateFr(multi_problem.MultiProblem): + """Wiki multi-lingual LM and En-Fr translation.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelMultiWikiTranslateFr, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelDeEnFrRoWiki64k()) + self.task_list.append(translate_enfr.TranslateEnfrWmtMulti64k()) + + @property + def vocab_type(self): + return 
text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelMultiWikiTranslate(multi_problem.MultiProblem): + """Wiki multi-lingual LM and multiple translations.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelMultiWikiTranslate, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelDeEnFrRoWiki64k()) + self.task_list.append(translate_ende.TranslateEndeWmtMulti64k()) + self.task_list.append(translate_enfr.TranslateEnfrWmtMulti64k()) + self.task_list.append(translate_enro.TranslateEnroWmtMultiTiny64k()) + self.task_list.append(translate_ende.TranslateEndeWmtMulti64k( + was_reversed=True)) + self.task_list.append(translate_enfr.TranslateEnfrWmtMulti64k( + was_reversed=True)) + self.task_list.append(translate_enro.TranslateEnroWmtMultiTiny64k( + was_reversed=True)) + self.task_list.append( + cnn_dailymail.SummarizeCnnDailymailWikiLMMultiVocab64k()) + self.task_list.append(multinli.MultiNLIWikiLMMultiVocab64k()) + self.task_list.append(squad.SquadConcatMulti64k()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelEnWikiLMSummarizeFrac1CnndmSubwords64k( + multi_problem.MultiProblem): + """Wiki LM and CNN/DM summarization mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelEnWikiLMSummarizeFrac1CnndmSubwords64k, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelEnWiki64k()) + self.task_list.append( + cnn_dailymail.SummarizeFrac1CnnDailymailWikiLMSharedVocab64k()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelEnWikiLMSummarizeFrac2CnndmSubwords64k( + multi_problem.MultiProblem): + """Wiki LM and CNN/DM summarization mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelEnWikiLMSummarizeFrac2CnndmSubwords64k, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelEnWiki64k()) + self.task_list.append( + cnn_dailymail.SummarizeFrac2CnnDailymailWikiLMSharedVocab64k()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelEnWikiLMSummarizeFrac5CnndmSubwords64k( + multi_problem.MultiProblem): + """Wiki LM and CNN/DM summarization mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelEnWikiLMSummarizeFrac5CnndmSubwords64k, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelEnWiki64k()) + self.task_list.append( + cnn_dailymail.SummarizeFrac5CnnDailymailWikiLMSharedVocab64k()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelEnWikiLMSummarizeFrac10CnndmSubwords64k( + multi_problem.MultiProblem): + """Wiki LM and CNN/DM summarization mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelEnWikiLMSummarizeFrac10CnndmSubwords64k, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelEnWiki64k()) + self.task_list.append( + cnn_dailymail.SummarizeFrac10CnnDailymailWikiLMSharedVocab64k()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelEnWikiLMSummarizeFrac20CnndmSubwords64k( + 
multi_problem.MultiProblem): + """Wiki LM and CNN/DM summarization mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelEnWikiLMSummarizeFrac20CnndmSubwords64k, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelEnWiki64k()) + self.task_list.append( + cnn_dailymail.SummarizeFrac20CnnDailymailWikiLMSharedVocab64k()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelEnWikiLMSummarizeFrac50CnndmSubwords64k( + multi_problem.MultiProblem): + """Wiki LM and CNN/DM summarization mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelEnWikiLMSummarizeFrac50CnndmSubwords64k, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelEnWiki64k()) + self.task_list.append( + cnn_dailymail.SummarizeFrac50CnnDailymailWikiLMSharedVocab64k()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD + + +@registry.register_problem +class LanguagemodelEnWikiLMSquadConcatSubwords(multi_problem.MultiProblem): + """Wiki LM and MNLI mixed problem class.""" + + def __init__(self, was_reversed=False, was_copy=False): + super(LanguagemodelEnWikiLMSquadConcatSubwords, self).__init__( + was_reversed, was_copy) + self.task_list.append(wiki_lm.LanguagemodelEnWiki32k()) + self.task_list.append(multinli.SquadConcatSharedVocab()) + + @property + def vocab_type(self): + return text_problems.VocabType.SUBWORD diff --git a/tensor2tensor/data_generators/wiki_revision.py b/tensor2tensor/data_generators/wiki_revision.py new file mode 100644 index 000000000..ffc737a6a --- /dev/null +++ b/tensor2tensor/data_generators/wiki_revision.py @@ -0,0 +1,500 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Data extraction/preprocessing for processing wiki history dumps for GEC. + +We use a set of heuristics to distill prose from the wikipedia xml. We produce +source-target pairs of text reflecting wikipedia edits. + +WikiRevision problem - fragment of older revision -> fragment of newer revision. + +This implements data extraction from wikipedia as desribed in the paper, +Weakly Supervised Grammatical Error Correction using Iterative Decoding +(https://arxiv.org/pdf/1811.01710.pdf). 
+""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import random + +from absl import flags +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.data_generators import wiki_revision_utils +from tensor2tensor.utils import metrics +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +FLAGS = flags.FLAGS + +flags.DEFINE_integer("wiki_revision_num_train_shards", 50, + "Set the number of training shards to be output.") +flags.DEFINE_integer("wiki_revision_num_dev_shards", 1, + "Set the number of dev shards to be output.") + +flags.DEFINE_string( + "wiki_revision_data_prefix", "", + "Specify the prefix for input data. Expects 7z compressed Wikipedia XML " + "files, available at https://dumps.wikimedia.org/enwiki/latest/.") +flags.DEFINE_string( + "wiki_revision_vocab_file", "", + "Specify a wordpieces vocabulary with which to encode the text. Will " + "generate one from data if not specified.") + +flags.DEFINE_integer( + "wiki_revision_max_examples_per_shard", 0, + "Use this to set a cap on examples per shard. " + "0 is no cap.") + +# Data filtration heuristics: +flags.DEFINE_integer("wiki_revision_max_page_size_exp", 26, + "Exponent for 2**X byte cap on page size.") +flags.DEFINE_float( + "wiki_revision_max_equal_to_diff_ratio", 0, + "Max ratio between count of equal, diff chars for generated " + "examples. Ratio of 1 means examples with more diff chars " + "than equal chars will be tossed out.") +flags.DEFINE_float( + "wiki_revision_revision_skip_factor", 1.5, + "If >1, process only logarithmically many revisions. " + "This avoids blowup in runtime due to many-revision pages. " + "See wiki_revision_utils.include_revision for details.") +flags.DEFINE_float("wiki_revision_percent_identical_examples", 0.04, + "Percent of generated examples for which source == target.") +flags.DEFINE_bool( + "wiki_revision_introduce_errors", True, "Add errors to the data." + "See wiki_revision_utils.introduce_errors for details.") + + +@registry.register_problem +class WikiRevision(text_problems.Text2TextProblem): + """Old segment -> revised segment. 
+ + Data filtration heuristics: + wiki_revision_max_page_size_exp: + pages above this # of bytes are thrown out + + wiki_revision_revision_skip_factor: + rate of logarithmic downsampling of revision history list + + wiki_revision_percent_identical_examples: + how many identitcal examples to admit, as percent of total examples + + wiki_revision_introduce_errors: + whether or not to introduce spelling-type errors on the source side + + wiki_revision_max_equal_to_diff_ratio: + whether or not to introduce spelling-type errors on the source side + + + Vocab size=32k + Maximum input/target length = 1024 wordpiece tokens + """ + num_identity_examples = 0 + num_total_examples = 0 + num_identity_examples = 0 + num_pages = 0 + num_revisions_total = 0 + num_revisions_admitted = 0 + num_examples_thrown_out_identity = 0 + num_examples_thrown_out_too_long = 0 + num_examples_thrown_out_edit_distance = 0 + num_examples_with_introduced_error = 0 + num_introduced_errors = 0 + num_source_tokens = 0 + num_target_tokens = 0 + corpus_files = None + + @property + def approx_vocab_size(self): + return 2**15 # 32K + + @property + def strip(self): + """Whether to strip wikipedia-stuff to get plain text.""" + return True + + @property + def wiki_revision_skip_factor(self): + """If this value is >1.0, process only logarithmically many revisions.""" + return FLAGS.wiki_revision_revision_skip_factor + + @property + def max_segment_length(self): + """Maximum number of input/target wordpiece tokens.""" + return 256 + + @property + def max_examples_per_shard(self): + """Maximum number of examples to generate per shard. 0=unlimited.""" + return FLAGS.wiki_revision_max_examples_per_shard + + def aggregate_job_stats(self): + # Aggregate job stats for output. + stat = [] + # Run stats. + stat.append("Flags for job:\n" + "Dev shards: {}\n" + "Train shards: {}\n" + "Revision skip factor: {}\n" + "Max page size: 2**{}\n" + "Introduce errors: {}\n" + "Max edit ratio: {}\n" + "Percent Identical Examples: {}\n" + "".format(FLAGS.wiki_revision_num_dev_shards, + FLAGS.wiki_revision_num_train_shards, + FLAGS.wiki_revision_revision_skip_factor, + FLAGS.wiki_revision_max_page_size_exp, + FLAGS.wiki_revision_introduce_errors, + FLAGS.wiki_revision_max_equal_to_diff_ratio, + FLAGS.wiki_revision_percent_identical_examples)) + + # File stats. + stat.append("corpus files: {}\n" + "\tnames: {}\n" + "\tpages per input file: {:.1f}\n" + "".format( + len(self.corpus_files), self.corpus_files, + (0 if not self.corpus_files else + self.num_pages / len(self.corpus_files)))) + # Page stats. + stat.append( + "pages processed: {}\n" + "\trevisions per page: {:.2f}, total: {}\n" + "\trevisions admitted per page: {:.2f}, percent of total: {:.2f}\n" + "".format( + self.num_pages, (0 if not self.num_pages else + self.num_revisions_total / self.num_pages), + self.num_revisions_total, + (0 if not self.num_pages else + self.num_revisions_admitted / self.num_pages), + (0 if not self.num_revisions_total else + 100 * self.num_revisions_admitted / self.num_revisions_total))) + # Revision stats. + stat.append( + "revisions admitted: {}\n" + "\texamples generated per revision: {:.2f}\n" + "".format(self.num_revisions_admitted, + (0 if not self.num_revisions_admitted else + self.num_total_examples / self.num_revisions_admitted))) + # Example stats. 
+ stat.append( + "examples generated: {}\n" + "\twith error introduced: {}, percent of total: {:.2f}\n" + "\ttotal errors introduced: {}, errors per errorred example: {:.2f}\n" + "\texamples thrown out: {}\n" + "\t\ttoo long: {}\n" + "\t\tidentity: {}\n" + "\t\tedit distance: {}\n" + "\tremaining identity examples: {}\n" + "\tratio identity (actual, desired): {:.3f}, {}\n" + "".format( + self.num_total_examples, self.num_examples_with_introduced_error, + (0 if not self.num_total_examples else 100 * + self.num_examples_with_introduced_error / self.num_total_examples), + self.num_introduced_errors, + (0 if not self.num_examples_with_introduced_error else + self.num_introduced_errors / + self.num_examples_with_introduced_error), + self.num_examples_thrown_out_too_long + + self.num_examples_thrown_out_identity + + self.num_examples_thrown_out_edit_distance, + self.num_examples_thrown_out_too_long, + self.num_examples_thrown_out_identity, + self.num_examples_thrown_out_edit_distance, + self.num_identity_examples, + (0 if not self.num_total_examples else + self.num_identity_examples / self.num_total_examples), + FLAGS.wiki_revision_percent_identical_examples)) + # Token stats. + stat.append("tokens generated: {}\n" + "\tsource: {}\n" + "\ttarget: {}\n" + "\tper example: {:.2f}\n" + "\t\tsource: {:.2f}\n" + "\t\ttarget: {:.2f}\n" + "".format(self.num_source_tokens + self.num_target_tokens, + self.num_source_tokens, self.num_target_tokens, + (0 if not self.num_total_examples else + (self.num_source_tokens + self.num_target_tokens) / + self.num_total_examples), + (0 if not self.num_total_examples else + self.num_source_tokens / self.num_total_examples), + (0 if not self.num_total_examples else + self.num_target_tokens / self.num_total_examples))) + return "\n".join(stat) + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + + if task_id == -1 or task_id is None: + for i in range(FLAGS.wiki_revision_num_train_shards + + FLAGS.wiki_revision_num_dev_shards): + self.generate_data(data_dir, tmp_dir, i) + return + + tf.logging.info( + "Flags for job (task_id {}): " + "Dev shards: {}, Train shards: {}, " + "Revision skip factor: {}, Max page size: 2**{}, Introduce errors: {}," + "Percent Identical Examples: {}" + "".format(task_id, FLAGS.wiki_revision_num_dev_shards, + FLAGS.wiki_revision_num_train_shards, + FLAGS.wiki_revision_revision_skip_factor, + FLAGS.wiki_revision_max_page_size_exp, + FLAGS.wiki_revision_introduce_errors, + FLAGS.wiki_revision_percent_identical_examples)) + + if FLAGS.wiki_revision_vocab_file: + encoder = wiki_revision_utils.get_encoder_from_vocab( + FLAGS.wiki_revision_vocab_file) + else: + encoder = wiki_revision_utils.get_or_generate_vocabulary( + data_dir, tmp_dir, FLAGS.wiki_revision_data_prefix, + FLAGS.wiki_revision_max_page_size_exp, self.approx_vocab_size, + self.strip) + + random.seed(123) + if task_id < FLAGS.wiki_revision_num_train_shards: + out_file = self.training_filepaths( + data_dir, FLAGS.wiki_revision_num_train_shards, + shuffled=False)[task_id] + else: + out_file = self.dev_filepaths( + data_dir, FLAGS.wiki_revision_num_dev_shards, + shuffled=False)[task_id - FLAGS.wiki_revision_num_train_shards] + + tf.logging.info("Generating files for path: %s", out_file) + self.corpus_files = wiki_revision_utils.corpus_files_for_shard( + task_id, FLAGS.wiki_revision_num_train_shards, + FLAGS.wiki_revision_num_dev_shards, FLAGS.wiki_revision_data_prefix) + example_generator = self.generator(encoder, self.corpus_files, tmp_dir) + + packed_example_generator = 
self._maybe_pack_examples(example_generator) + generator_utils.generate_files(packed_example_generator, [out_file]) + generator_utils.shuffle_dataset([out_file]) + + tf.logging.info( + "Job stats: identity examples: {}, total examples {}, ratio: {}".format( + self.num_identity_examples, self.num_total_examples, + (1 + self.num_identity_examples) / (1 + self.num_total_examples))) + + job_stats_string = self.aggregate_job_stats() + out_dir, filename = out_file.replace("-unshuffled", "").rsplit("/", 1) + stats_prefix = "/stats_" + stats_file_path = "".join([out_dir, stats_prefix, filename]) + if tf.gfile.Exists( + stats_file_path) and tf.gfile.Open(stats_file_path).size() != 0: + tf.logging.info("Skipping writing stats because output file exists.") + else: + with tf.gfile.Open(stats_file_path, "w") as out: + tf.logging.info("Writing job stats to {}".format(stats_file_path)) + out.write(job_stats_string) + + tf.logging.info(job_stats_string) + + def generator(self, encoder, corpus_files, tmp_dir): + for page in wiki_revision_utils.corpus_page_generator( + corpus_files, tmp_dir, FLAGS.wiki_revision_max_page_size_exp): + self.num_pages += 1 + examples = self.page_to_examples(page, encoder) + for x in examples: + yield x + if self.num_total_examples % 100000 == 0: + tf.logging.info( + u"page count={} num_total_examples={} id={} title={}".format( + self.num_pages, self.num_total_examples, page["id"], + page["title"])) + if (self.max_examples_per_shard and + self.num_total_examples >= self.max_examples_per_shard): + tf.logging.info( + "Examples per shard {} >= max_examples_per_shard {}. Shutting down." + .format(self.num_total_examples, self.max_examples_per_shard)) + break + tf.logging.info( + "Total pages: {}, total examples: {}, examples per page: {}".format( + self.num_pages, self.num_total_examples, 0 if not self.num_pages + else self.num_total_examples / self.num_pages)) + + def page_to_examples(self, page, encoder): + revisions = page["revisions"] + self.num_revisions_total += len(revisions) + if len(revisions) < 2: + return [] + revisions = [ + wiki_revision_utils.get_text(r) + for n, r in enumerate(revisions) + if wiki_revision_utils.include_revision( + n, self.wiki_revision_skip_factor) or n + 1 == len(revisions) + ] + self.num_revisions_admitted += len(revisions) + + ret = [] + for i in range(len(revisions) - 1): + old_revision = revisions[i] + new_revision = revisions[i + 1] + + if FLAGS.wiki_revision_introduce_errors: + old_revision_text, num_added_err = wiki_revision_utils.introduce_errors( + revisions[i]) + if num_added_err: + self.num_introduced_errors += num_added_err + self.num_examples_with_introduced_error += 1 + else: + old_revision_text = revisions[i] + new_revision_text = revisions[i + 1] + if encoder: + # Encode text into list of ids, if a text encoder is present. + old_revision = encoder.encode(old_revision_text) + new_revision = encoder.encode(new_revision_text) + else: + # Retain text (as list of characters), if a text encoder is not present. + old_revision = old_revision_text + new_revision = new_revision_text + ret.extend( + self.make_examples( + encoder, + old_revision, + new_revision, + max_length=self.max_segment_length, + percent_identical_examples=FLAGS + .wiki_revision_percent_identical_examples)) + return ret + + def make_examples(self, + encoder, + old_snapshot, + new_snapshot, + max_length=1024, + percent_identical_examples=0.01, + max_length_distance=0): + """Produce training examples based on a pair of snapshots. 
+ + Aligns the snapshots, then chops at a random subset of the alignment points + to create (old snippet -> new snippet) examples. + + Most negative examples (those with no changes) are discarded, but we + keep some of them, maintaining a proportion in the final data + determined by percent_identical_examples. + + Args: + encoder: the subword text encoder + old_snapshot: a list of ids + new_snapshot: a list of ids + max_length: an integer. Maximum length of "inputs" and "targets". + percent_identical_examples: a float + max_length_distance: an integer. Max token edit dist for admitted examples + + Returns: + a list of feature dictionaries. The dictionaries have + "inputs" and "targets" populated. text_encoder.EOS is appended to both. + """ + ret = [] + eos_sequence = [text_encoder.EOS_ID] + # Pick a per-token cut probability with a log-uniform distribution between + # 1/4 and 1/(max_length / 2) + bound1 = -math.log(4.0) + bound2 = -math.log(max_length / 2.0) + cut_prob = math.exp(random.random() * (bound2 - bound1) + bound1) + opcodes = wiki_revision_utils.fast_match_sequences(old_snapshot, + new_snapshot) + cut_points = [(0, 0)] + for tag, i1, i2, j1, j2 in opcodes: + if tag == "equal": + for i in range(i1, i2 + 1): + if random.random() < cut_prob: + cut_points.append((i, i + j1 - i1)) + cut_points.append((len(old_snapshot), len(new_snapshot))) + src_tgt_pairs = [] + for cut_number in range(len(cut_points) - 1): + i1, j1 = cut_points[cut_number] + i2, j2 = cut_points[cut_number + 1] + old_segment = old_snapshot[i1:i2] + new_segment = new_snapshot[j1:j2] + src_tgt_pairs.append((old_segment, new_segment)) + + src_tgt_pairs, thrown_edit_count = wiki_revision_utils.edit_distance_filter( + wiki_revision_utils.throw_empty_pairs(src_tgt_pairs), + FLAGS.wiki_revision_max_equal_to_diff_ratio) + + self.num_examples_thrown_out_edit_distance += thrown_edit_count + + for source, target in src_tgt_pairs: + # Add EOS segment. + old_segment = source + eos_sequence + new_segment = target + eos_sequence + if len(old_segment) <= max_length and len(new_segment) <= max_length: + if max_length_distance and (abs(len(old_segment) - len(new_segment)) > + max_length_distance): + self.num_examples_thrown_out_edit_distance += 1 + continue + if old_segment == new_segment: + # If current proportion of identity is below target + # percent_identical_examples, then roll for a 50% chance to add an + # identitical example. Random roll preserves nondeterminism. + # percent_identical_examples, then add identitical example. + # Random roll preserves nondeterminism in selecting identity examples. 
+ if (((self.num_identity_examples) / (1 + self.num_total_examples)) > + percent_identical_examples) or random.random() > 0.5: + self.num_examples_thrown_out_identity += 1 + continue + else: + self.num_identity_examples += 1 + self.num_total_examples += 1 + self.num_source_tokens += len(old_segment) - 1 + self.num_target_tokens += len(new_segment) - 1 + ret.append({"inputs": old_segment, "targets": new_segment}) + else: + self.num_examples_thrown_out_too_long += 1 + + return ret + + def eval_metrics(self): + return [ + metrics.Metrics.ACC, + metrics.Metrics.ACC_TOP5, + metrics.Metrics.ACC_PER_SEQ, + metrics.Metrics.NEG_LOG_PERPLEXITY, + ] + + @property + def invert_prob(self): + """Ratio of e^2 positive forward to backward examples.""" + return 1.0 / (1.0 + math.exp(2.0)) + + +@registry.register_problem +class WikiRevisionPacked1k(WikiRevision): + """Packed version for TPU.""" + + @property + def packed_length(self): + return 1024 + + +@registry.register_problem +class WikiRevisionPacked256(WikiRevision): + """Packed version for TPU.""" + + @property + def packed_length(self): + return 256 + + @property + def max_segment_length(self): + return 256 diff --git a/tensor2tensor/data_generators/wiki_revision_utils.py b/tensor2tensor/data_generators/wiki_revision_utils.py new file mode 100644 index 000000000..4df263d6b --- /dev/null +++ b/tensor2tensor/data_generators/wiki_revision_utils.py @@ -0,0 +1,682 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilties for data generation for Wikipedia Revision problem. +""" + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import os +import random +import re +import subprocess + +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import text_encoder + +import tensorflow.compat.v1 as tf + + +def to_unicode(s): + return s.decode("utf-8") + + +def include_revision(revision_num, skip_factor=1.1): + """Decide whether to include a revision. + + If the number of revisions is large, we exclude some revisions to avoid + a quadratic blowup in runtime, since the article is likely also large. + + We make the ratio between consecutive included revision numbers + appproximately equal to "factor". + + Args: + revision_num: an integer + skip_factor: a floating point number >= 1.0 + + Returns: + a boolean + """ + if skip_factor <= 1.0: + return True + return (int(math.log1p(revision_num) / math.log(skip_factor)) != int( + math.log(revision_num + 2.0) / math.log(skip_factor))) + + +def file_page_generator(my_file, max_page_size=2**28): + """Read wikipedia pages from a history dump. + + Since some pages can be terabytes in size (with all the revisions), + we limit page size to max_page_size bytes. + + Args: + my_file: an open file object. 
+    max_page_size: an integer
+
+  Yields:
+    strings
+  """
+  page_start = "  <page>\n"
+  page_end = "  </page>\n"
+  chunk_size = max_page_size
+  leftovers = ""
+  while True:
+    chunk = my_file.read(chunk_size)
+    try:
+      chunk = to_unicode(chunk)
+    except UnicodeDecodeError:
+      chunk = ""
+    if not chunk:
+      break
+    chunk = leftovers + chunk
+    current_pos = 0
+    while True:
+      start_pos = chunk.find(page_start, current_pos)
+      if start_pos == -1:
+        break
+      end_pos = chunk.find(page_end, start_pos)
+      if end_pos == -1:
+        if len(chunk) - start_pos > max_page_size:
+          leftovers = ""
+        else:
+          leftovers = chunk[start_pos:]
+        break
+      raw_page = chunk[start_pos + len(page_start):end_pos]
+      if len(raw_page) < max_page_size:
+        ret = parse_page(raw_page)
+        if ret:
+          yield ret
+      current_pos = end_pos + len(page_end)
+
+
+def get_title(page):
+  """Extract the title from a page.
+
+  Args:
+    page: a string
+  Returns:
+    a string
+  """
+  start_pos = page.find("<title>")
+  end_pos = page.find("</title>")
+  assert start_pos != -1
+  assert end_pos != -1
+  start_pos += len("<title>")
+  return page[start_pos:end_pos]
+
+
+def get_id(page):
+  """Extract the id from a page.
+
+  Args:
+    page: a string
+  Returns:
+    an integer
+  """
+  start_pos = page.find("<id>")
+  end_pos = page.find("</id>")
+  assert start_pos != -1
+  assert end_pos != -1
+  start_pos += len("<id>")
+  return int(page[start_pos:end_pos])
+
+
+def get_revisions(page):
+  """Extract the revisions of a page.
+
+  Args:
+    page: a string
+  Returns:
+    a list of strings
+  """
+  start_string = "    <revision>\n"
+  end_string = "    </revision>\n"
+  ret = []
+  current_pos = 0
+  while True:
+    start_pos = page.find(start_string, current_pos)
+    if start_pos == -1:
+      break
+    end_pos = page.find(end_string, start_pos)
+    assert end_pos != -1
+    ret.append(page[start_pos + len(start_string):end_pos])
+    current_pos = end_pos + len(end_string)
+  return ret
+
+
+def parse_page(raw_page):
+  """Create a dictionary with title, id, and list of revisions.
+
+  The dictionary contains:
+    "title": a string
+    "id": an integer
+    "revisions": a list of strings
+
+  Args:
+    raw_page: a string
+
+  Returns:
+    a dictionary, or None in the case of an error.
+  """
+  ret = {"title": get_title(raw_page), "id": get_id(raw_page)}
+  if ":" in ret["title"]:
+    return None
+  ret["revisions"] = get_revisions(raw_page)
+  return ret
+
+
+def maybe_copy_file_to_directory(source_filepath, target_directory):
+  """Copy a file to a directory if it is not already there.
+
+  Returns the target filepath.
+
+  Args:
+    source_filepath: a string
+    target_directory: a string
+
+  Returns:
+    a string
+  """
+  if not tf.gfile.Exists(target_directory):
+    tf.logging.info("Creating directory %s" % target_directory)
+    os.mkdir(target_directory)
+  target_filepath = os.path.join(target_directory,
+                                 os.path.basename(source_filepath))
+  if not tf.gfile.Exists(target_filepath):
+    tf.logging.info("Copying %s to %s" % (source_filepath, target_filepath))
+    tf.gfile.Copy(source_filepath, target_filepath)
+    statinfo = os.stat(target_filepath)
+    tf.logging.info("Successfully copied %s, %s bytes." % (target_filepath,
+                                                           statinfo.st_size))
+  else:
+    tf.logging.info("Not copying, file already found: %s" % target_filepath)
+  return target_filepath
+
+
+def corpus_page_generator(corpus_files, tmp_dir, max_page_size_exp):
+  """Generate pages from a list of .7z encoded history dumps.
+ + Args: + corpus_files: a list of strings + tmp_dir: a string + max_page_size_exp: an integer + + Yields: + strings + """ + for remote_filepath in corpus_files: + + filepath = maybe_copy_file_to_directory(remote_filepath, tmp_dir) + tf.logging.info("Reading from " + filepath) + + command = ["7z", "x", "-so", filepath] + tf.logging.info("Running command: %s", command) + + p = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=-1) + + for page in file_page_generator(p.stdout, 2**max_page_size_exp): + yield page + + +def get_text(revision, strip=True): + """Extract the text from a revision. + + Args: + revision: a string + strip: a boolean + + Returns: + a string + """ + # text start tag looks like "<text ..otherstuff>" + start_pos = revision.find("<text") + assert start_pos != -1 + end_tag_pos = revision.find(">", start_pos) + assert end_tag_pos != -1 + end_tag_pos += len(">") + end_pos = revision.find("</text>") + if end_pos == -1: + ret = "" + else: + ret = revision[end_tag_pos:end_pos] + if strip: + ret = strip_text(ret) + return ret + + +def strip_text(text): + """Strip wikipedia-stuff out of text, making it mostly prose. + + The reason for this is to learn a model that is good at editing prose. + + Args: + text: a string + + Returns: + a string + """ + return _remove_boring_lines( + _remove_triple_quotes( + _remove_double_brackets( + _remove_references(_remove_curly_braces(text))))) + + +def _find_and_replace(text, start_string, end_string, replace_fn): + """Remove everything found between instances of start_string and end_string. + + Replace each such instance with replace_fn(removed_text) + + e.g. _find_and_replace("the [[fat]] cat [[sat]]", "[[", "]]", lambda x: x) + = "the fat cat sat" + + Args: + text: a string + start_string: a string + end_string: a string + replace_fn: a unary function from string to string + + Returns: + a string + """ + ret = "" + current_pos = 0 + while True: + start_pos = text.find(start_string, current_pos) + if start_pos == -1: + ret += text[current_pos:] + break + ret += text[current_pos:start_pos] + end_pos = text.find(end_string, start_pos + len(start_string)) + if end_pos == -1: + break + ret += replace_fn(text[start_pos + len(start_string):end_pos]) + current_pos = end_pos + len(end_string) + return ret + + +def _remove_references(text): + return _find_and_replace(text, "<ref", "</ref>", lambda s: "") + + +def _remove_triple_quotes(text): + return _find_and_replace(text, "'''", "'''", lambda s: s) + + +def _remove_curly_braces(text): + """Remove everything in curly braces. + + Curly braces may be nested, so we keep track of depth. + + Args: + text: a string + Returns: + a string + """ + current_pos = 0 + depth = 0 + ret = "" + for match in re.finditer("[{}]", text): + if depth == 0: + ret += text[current_pos:match.start()] + depth += 1 if text[match.start()] == "{" else -1 + current_pos = match.end() + if depth != 0: + # Many articles have mismatched braces, but it still seems better to remove + # them than not. + pass + else: + ret += text[current_pos:] + return ret + + +def _remove_double_brackets(text): + """Remove double brackets, but leave the viewable text. + + Args: + text: a string + Returns: + a string + """ + + def replacement_fn(s): + if ":" in s: + # this is probably a category or something like that. + return "" + # keep the part after the bar. 
+ bar_pos = s.find("|") + if bar_pos == -1: + return s + return s[bar_pos + 1:] + + return _find_and_replace(text, "[[", "]]", replacement_fn) + + +def _remove_boring_lines(text): + """Remove lines that do not start with a letter or a quote. + + From inspecting the data, this seems to leave in most prose and remove + most weird stuff. + + Args: + text: a string + Returns: + a string + """ + lines = text.split("\n") + filtered = [line for line in lines if re.match("[a-zA-z\"\']", line)] + return "\n".join(filtered) + + +def all_corpus_files(data_prefix): + return sorted(tf.gfile.Glob(data_prefix + "*")) + + +def corpus_files_for_shard(shard_num, train_shards, dev_shards, data_prefix): + corpus_files = [ + filename for i, filename in enumerate(all_corpus_files(data_prefix)) + if i % (train_shards + dev_shards) == shard_num + ] + tf.logging.info("Corpus files for shard %s: %s", shard_num, corpus_files) + + assert shard_num < (train_shards + dev_shards) + return corpus_files + + +def vocab_filename(approx_vocab_size, strip): + return "vocab.wiki_revision%s.%d" % (".strip" if strip else "", + approx_vocab_size) + + +def get_or_generate_vocabulary(data_dir, + tmp_dir, + data_prefix, + max_page_size_exp, + approx_vocab_size=32768, + strip=True): + """Get or generate the vocabulary. + + Args: + data_dir: a string + tmp_dir: a string + data_prefix: a string + max_page_size_exp: an integer + approx_vocab_size: an integer + strip: a boolean + + Returns: + a TextEncoder + """ + num_pages_for_vocab_generation = approx_vocab_size // 3 + vocab_file = vocab_filename(approx_vocab_size, strip) + + def my_generator(data_prefix): + """Line generator for vocab.""" + count = 0 + for page in corpus_page_generator( + all_corpus_files(data_prefix)[::-1], tmp_dir, max_page_size_exp): + revisions = page["revisions"] + if revisions: + text = get_text(revisions[-1], strip=strip) + yield text + count += 1 + if count % 100 == 0: + tf.logging.info("reading pages for vocab %d" % count) + if count > num_pages_for_vocab_generation: + break + + return generator_utils.get_or_generate_vocab_inner(data_dir, vocab_file, + approx_vocab_size, + my_generator(data_prefix)) + + +def get_encoder_from_vocab(vocab_filepath): + """Get encoder from vocab file. + + If vocab is not found in output dir, it will be copied there by + copy_vocab_to_output_dir to clarify the vocab used to generate the data. + + Args: + vocab_filepath: path to vocab, either local or cns + + Returns: + A SubwordTextEncoder vocabulary object. None if the output_parallel_text + is set. + """ + if not tf.gfile.Exists(vocab_filepath): + raise ValueError("Vocab file does not exist: {}.".format(vocab_filepath)) + + tf.logging.info("Found vocab file: %s", vocab_filepath) + encoder = text_encoder.SubwordTextEncoder(vocab_filepath) + return encoder + + +def throw_empty_pairs(src_tgt_pairs): + """Filter [src,tgt] tuple from input list of pairs if either element is empty. + + Args: + src_tgt_pairs: list of (src,tgt) pairs + + Returns: + subset of input pair list for which all elements are non-empty + """ + return [x for x in src_tgt_pairs if x[0] and x[1]] + + +def edit_distance_filter(source_target_input, max_equal_to_diff_ratio=0): + """Filter out examples that exceed max_edit_ratio between source and target. 
+ + Args: + source_target_input: a list of [source, target] pairs + max_equal_to_diff_ratio: cutoff for ratio of equal chars / diff chars + between source and target + + Returns: + source_target_output: filtered subset of [source, target] input pairs + thrown_out_count: number of examples filtered out + """ + thrown_out_count = 0 + source_target_output = [] + + if not max_equal_to_diff_ratio: + return source_target_input, thrown_out_count + + for src_tgt in source_target_input: + opcodes = fast_match_sequences(*src_tgt) + diff_char_count = 0 + equal_char_count = 0 + for tag, i1, i2, j1, j2 in opcodes: + if tag == "diff": + # max() prevents double-counting substitutions. + diff_char_count += max(i2 - i1, j2 - j1) + else: + equal_char_count += i2 - i1 + if diff_char_count <= max_equal_to_diff_ratio * equal_char_count: + source_target_output.append(src_tgt) + else: + thrown_out_count += 1 + return source_target_output, thrown_out_count + + +def introduce_errors(s, + corruption_rate=3e-3, + infill_marker="|?|", + max_infill_len=8): + """Artificially add spelling errors and infill markers. + + This function should be applied to the inputs of a correction model. + + The artificial errors are particularly useful to train a network to + correct spelling when the training data does not contain many + natural errors. + + Also replaces some substrings with an "infill" marker. e.g. + "the fat cat sat on the mat" -> "the fat ca??? the mat" + + This causes the trained model to learn infilling (predicting what text + to insert at the current cursor position). + + Args: + s: a string (the uncorrupted text) + corruption_rate: a floating point value. Probability of introducing an + error/infill at each character. + infill_marker: a string + max_infill_len: an optional integer - maximum number of characters to remove + and replace by an infill marker. None means no infilling. + + Returns: + a string + """ + num_errors = 0 + ret = [] + operations = [ + "delete", # delete a character + "insert", # insert a random character from the input string + "replace", # replace a character with a random character from + # the input string + "transpose", # transpose two adjacent characters + ] + if max_infill_len: + operations.append("infill") + pos = 0 + while pos < len(s): + if random.random() >= corruption_rate: + ret.append(s[pos]) + pos += 1 + continue + num_errors += 1 + operation = operations[random.randint(0, len(operations) - 1)] + if operation == "delete": + pos += 1 + elif operation == "insert": + ret.append(s[random.randint(0, len(s) - 1)]) + elif operation == "replace": + ret.append(s[random.randint(0, len(s) - 1)]) + pos += 1 + elif operation == "transpose": + ret.append(s[pos + 1] if pos + 1 < len(s) else "") + ret.append(s[pos]) + pos += 2 + else: + assert operation == "infill" + ret.append(infill_marker) + pos += random.randint(0, max_infill_len) + return "".join(ret), num_errors + + +def fast_match_sequences(a, + b, + a_start=0, + a_end=None, + b_start=0, + b_end=None, + min_match_length=3, + max_recursion_depth=128): + """Compute diffs between two sequences. + + This function is similar in functionality and spirit to + difflib.SequenceMatcher.get_opcodes, but it seems to run faster. + + if a_start, a_end, b_start, b_end are specified, then we compute diffs of + the segments a[a_start:a_end] and b[b_start:b_end]. Returned indices + are relative to the full sequence. + + We try to match the longest matching segments first, but due to heuristics + in finding the matches, this is not guaranteed. 
+ + Matching segments shorter than min_match_length are counted as part of the + surrounding differing segments, unless they are at the beginning or end of + both sequences. This helps eliminate junk matches. + + Args: + a: a sequence + b: a sequence + a_start: an optional integer + a_end: an optional integer + b_start: an optional integer + b_end: an optional integer + min_match_length: an integer + max_recursion_depth: an integer - avoids crashes in weird corner cases + involving pairs of long repetitive sequences. + + Returns: + a list of 5-tuples (tag, i1, i2, j1, j2). + Each tuple represents the alignment of segment a[i1:i2] with b[j1:j2]. + tag is either "equal" or "diff". Note that the tags differ from those + returned by difflib.SequenceMatcher.get_opcodes. + """ + if a_end is None: + a_end = len(a) + if b_end is None: + b_end = len(b) + if a_start == a_end and b_start == b_end: + return [] + if a_start == a_end or b_start == b_end: + return [("diff", a_start, a_end, b_start, b_end)] + # Compute an index from value to first occurrence in the b segment. + # Technically, we should index and explore all occurrences of a value, + # but that might be much slower. + b_index = {} + for j in range(b_end - 1, b_start - 1, -1): + b_index[b[j]] = j + # we will look for the longest match we can find. + max_match_length = 0 + a_pos = a_start + while a_pos < a_end: + val = a[a_pos] + b_pos = b_index.get(val) + if b_pos is None: + a_pos += 1 + continue + else: + a_match_start = a_pos + a_match_end = a_pos + 1 + b_match_start = b_pos + b_match_end = b_pos + 1 + while (a_match_start > a_start and b_match_start > b_start and + a[a_match_start - 1] == b[b_match_start - 1]): + a_match_start -= 1 + b_match_start -= 1 + while (a_match_end < a_end and b_match_end < b_end and + a[a_match_end] == b[b_match_end]): + a_match_end += 1 + b_match_end += 1 + # Compute the length of the matching segment. We prefer the longest. + match_length = a_match_end - a_match_start + # Extra credit for matching at the beginning or end of the sequence. + if a_match_start == 0 and b_match_start == 0: + match_length += min_match_length + if a_match_end == len(a) and b_match_end == len(b): + match_length += min_match_length + if match_length > max_match_length: + max_match_length = match_length + best_match = (a_match_start, a_match_end, b_match_start, b_match_end) + # advance a_pos to the end of this match to avoid wasting time + # rediscovering this match. + a_pos = a_match_end + if max_match_length < min_match_length or max_recursion_depth == 0: + return [("diff", a_start, a_end, b_start, b_end)] + a_match_start, a_match_end, b_match_start, b_match_end = best_match + return (fast_match_sequences( + a, b, a_start, a_match_start, b_start, b_match_start, min_match_length, + max_recursion_depth - 1) + [ + ("equal", a_match_start, a_match_end, b_match_start, b_match_end) + ] + fast_match_sequences(a, b, a_match_end, a_end, b_match_end, b_end, + min_match_length, max_recursion_depth - 1)) diff --git a/tensor2tensor/data_generators/wikifact/README.md b/tensor2tensor/data_generators/wikifact/README.md new file mode 100644 index 000000000..024ad72ac --- /dev/null +++ b/tensor2tensor/data_generators/wikifact/README.md @@ -0,0 +1,4 @@ +# Assessing the Factual Accuracy of Generated Text + +This directory will contain the code and scripts to generate data and train +models from the paper *Assessing the Factual Accuracy of Generated Text*. 
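Note on the alignment utilities added in `wiki_revision_utils.py` above: `fast_match_sequences` returns a list of `(tag, i1, i2, j1, j2)` 5-tuples, where `tag` is `"equal"` or `"diff"` and the indices delimit aligned spans of the two input sequences; `WikiRevision.make_examples` then cuts at a random subset of the `"equal"` spans to form source/target pairs. The sketch below is not part of this change: it shows one way such opcodes can be consumed to pull out aligned old/new edit fragments. It uses Python's standard `difflib` as a stand-in matcher and a hypothetical `toy_opcodes` helper, so the exact spans it produces are illustrative only.

```python
import difflib


def toy_opcodes(a, b):
  # Stand-in for fast_match_sequences: collapse difflib's opcode tags
  # ("replace"/"delete"/"insert"/"equal") down to the "equal"/"diff"
  # scheme used by the utilities above. Illustrative only.
  ops = []
  for tag, i1, i2, j1, j2 in difflib.SequenceMatcher(None, a, b).get_opcodes():
    ops.append(("equal" if tag == "equal" else "diff", i1, i2, j1, j2))
  return ops


# Toy "old revision" and "new revision" sequences (here, lists of characters).
old = list("the fat cat sat on the mat")
new = list("the fat cats sat on a mat")

edit_pairs = []
for tag, i1, i2, j1, j2 in toy_opcodes(old, new):
  if tag == "diff":
    # Each differing span is a candidate (old fragment -> new fragment) edit.
    edit_pairs.append(("".join(old[i1:i2]), "".join(new[j1:j2])))

print(edit_pairs)  # a short list of (old_span, new_span) pairs
```

The real pipeline works on token ids rather than characters and keeps the surrounding `"equal"` context in each example, but the opcode-consumption pattern is the same.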
diff --git a/tensor2tensor/data_generators/wikisum/README.md b/tensor2tensor/data_generators/wikisum/README.md new file mode 100644 index 000000000..0692a19eb --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/README.md @@ -0,0 +1,312 @@
+# Generating Wikipedia by Summarizing Long Sequences
+
+This directory contains the code and scripts to generate the dataset from the
+paper [Generating Wikipedia by Summarizing Long
+Sequences](https://arxiv.org/abs/1801.10198). The task is to generate a
+Wikipedia article based on the contents of the cited references in that article
+and the top 10 Google search results for the article's title.
+
+There are 2 sources for the reference URLs used:
+
+1. [CommonCrawl](http://commoncrawl.org/), an open-source crawl of the web. The
+   advantage of using CommonCrawl is that the dataset is perfectly reproducible.
+   However, there is limited coverage of the reference URLs.
+1. Live web fetches. Coverage is considerably increased, but the content is
+   subject to change.
+
+This document provides instructions for producing both datasets.
+
+## Support files
+
+Some files that are used in dataset generation have already been generated and
+uploaded to Google Cloud Storage as `gs://tensor2tensor-data/wikisum`.
+
+**URLs:** The dataset contains ~90M URLs total (~2.3M Wikipedia articles, each
+with ~40 reference URLs). The URLs in the dataset are available in sharded JSON
+files here: `gs://tensor2tensor-data/wikisum/wiki_urls/`.
+
+**Wikipedia Articles:** We have processed the Wikipedia articles slightly to
+extract the title, section breaks, and section headings. The processed Wikipedia
+content is available in sharded `TFRecord` files containing serialized
+`tensorflow.Example` protocol buffers here:
+`gs://tensor2tensor-data/wikisum/wiki_content/`. The sharding is determined by a
+hash of the Wikipedia article's title. The `Example`s contain features `[url,
+title, section_titles, section_texts]`.
+
+**CommonCrawl References Index:** To enable efficiently extracting the reference
+URLs from CommonCrawl, we provide a JSON file per CommonCrawl file which maps a
+reference URL contained in that CommonCrawl file to a list of shard ids:
+`gs://tensor2tensor-data/wikisum/commoncrawl_metadata/`. These shards are the
+ones that contain one or more Wikipedia articles that cite this reference. The
+scripts in this directory will use this information to efficiently join the
+references with their Wikipedia articles.
+
+*Note*: You can use [`gsutil`](https://cloud.google.com/storage/docs/gsutil) to
+view the support files.
+
+## Data generation
+
+Data generation will first extract reference content (from either CommonCrawl or
+the web), then generate a vocabulary, join the references with their Wikipedia
+articles, run TF-IDF to rank reference paragraphs for a given article, and then
+encode the references and the Wikipedia article with the vocabulary and write
+the encoded training or evaluation example out to disk.
+
+The output of data generation is a set of `TFRecord` files containing serialized
+`tensorflow.Example` protocol buffers, with feature keys `"inputs"` and
+`"targets"`. The inputs are the reference tokens, and the targets are the
+Wikipedia article tokens.
+
+In both cases, you must use multiple machines to extract references and produce
+the final data to disk because of the size of the data. See `parallel_launch.py`,
+which is a script that will launch N machines in parallel on GCP.
You can use it +as a guide if you'd like to launch on other infrastructure. + +There are 3 jobs to run: + +1. Extract references: `get_references_commoncrawl.py` for `WikisumCommoncrawl` + and `get_references_web.py` for `WikisumWeb`. +1. Build vocabulary (single-machine): `generate_vocab.py` +1. Produce Examples: `produce_examples.py` + +With 1,000 machines with a good internet connection, data generation takes well +under 24 hours. + +## Setup if using `parallel_launch.py` to launch on Google Cloud Platform + +First, [install the `gcloud` CLI](https://cloud.google.com/sdk/downloads). + +``` +# Initialize the CLI +gcloud init + +# Login +gcloud auth login + +# Update the CLI +gcloud components update + +# Set the default project and zone +gcloud config set core/project myproject +gcloud config set compute/zone us-central1-c +``` + +You'll also need to request the requisite +[quotas](https://console.cloud.google.com/iam-admin/quotas) in the zone you'll +be launching the machines in (whatever default zone you set above): + +* In-use IP addresses: 1,000 +* Internal IP addresses: 1,000 +* Persistent Disk Standard (GB): 10,000 +* CPUs: 4,000 + +**Running the commands below will launch instances on Google Cloud Platform and +you will incur charges.** If any of the commands go bad, immediately delete any +stranded instances. `delete_instances.sh` helps you delete instances in bulk +from the command-line, or you can delete many instances at once from the +[GCP Console](https://console.cloud.google.com/). + +### Cost estimates + +These are rough (and **not** guaranteed) estimates of cost if you were to launch +on GCP. + +Pricing is taken from +[here](https://cloud.google.com/compute/pricing#custommachinetypepricing). + +* `WikisumCommoncrawl` + * `get_references_commoncrawl`: $50 (1k machines, 1 CPU, 2G memory, 1 hour) + * `produce_examples`: $25 (1k machines, 1 CPU, 3G memory, 30 minutes) +* `WikisumWeb` + * `get_references_web`: $600 (1k machines, 4 CPU, 4G memory, 4 hours) + * `produce_examples`: $25 (1k machines, 1 CPU, 3G memory, 30 minutes) + +## Commands to generate `WikisumCommoncrawl` + +``` +pip install tensor2tensor -U --user + +# Set to your own GCS bucket +BUCKET=gs://my-gcs-bucket/wikisum_commoncrawl + +# Extract references from CommonCrawl +python -m tensor2tensor.data_generators.wikisum.parallel_launch \ + --num_instances=1000 \ + --cpu=1 --mem=2 \ + --name=wikisum-cc-refs \ + --log_dir=$BUCKET/logs \ + --setup_command="pip install tensor2tensor tensorflow -U -q --user" \ + --command_prefix="python -m tensor2tensor.data_generators.wikisum.get_references_commoncrawl --num_tasks=1000 --out_dir=$BUCKET/wiki_references --task_id" + +# Generate vocabulary file +python -m tensor2tensor.data_generators.wikisum.generate_vocab \ + --out_dir=$BUCKET/data \ + --refs_dir=$BUCKET/wiki_references \ + --for_commoncrawl + +# Produce examples +python -m tensor2tensor.data_generators.wikisum.parallel_launch \ + --num_instances=1000 \ + --cpu=1 --mem=3 \ + --name=wikisum-cc-produce \ + --log_dir=$BUCKET/logs \ + --setup_command="pip install tensor2tensor tensorflow -U -q --user" \ + --command_prefix="python -m tensor2tensor.data_generators.wikisum.produce_examples --out_dir=$BUCKET/data --refs_dir=$BUCKET/wiki_references --num_tasks=1000 --for_commoncrawl --task_id" + +# Validate data +python -m tensor2tensor.data_generators.wikisum.validate_data \ + --out_dir=$BUCKET/data \ + --for_commoncrawl +``` + +## Commands to generate `WikisumWeb` + +``` +pip install tensor2tensor -U --user + +# Set to your 
own GCS bucket
+BUCKET=gs://my-gcs-bucket/wikisum_web
+
+# Fetch references from web
+python -m tensor2tensor.data_generators.wikisum.parallel_launch \
+  --num_instances=1000 \
+  --cpu=4 --mem=4 \
+  --name=wikisum-web-refs \
+  --log_dir=$BUCKET/logs \
+  --setup_command="pip3 install tensorflow tensor2tensor aiohttp cchardet aiodns bs4 -U -q --user" \
+  --command_prefix="python3 -m tensor2tensor.data_generators.wikisum.get_references_web --out_dir=$BUCKET/wiki_references --shard_id"
+
+# Generate vocabulary file
+python -m tensor2tensor.data_generators.wikisum.generate_vocab \
+  --out_dir=$BUCKET/data \
+  --refs_dir=$BUCKET/wiki_references
+
+# Produce examples
+python -m tensor2tensor.data_generators.wikisum.parallel_launch \
+  --num_instances=1000 \
+  --cpu=1 --mem=3 \
+  --name=wikisum-web-produce \
+  --log_dir=$BUCKET/logs \
+  --setup_command="pip install tensor2tensor tensorflow -U -q --user" \
+  --command_prefix="python -m tensor2tensor.data_generators.wikisum.produce_examples --out_dir=$BUCKET/data --refs_dir=$BUCKET/wiki_references --num_tasks=1000 --task_id"
+
+# Validate data
+python -m tensor2tensor.data_generators.wikisum.validate_data \
+  --out_dir=$BUCKET/data
+```
+
+## Training
+
+**TODO(rsepassi)**: Put actual results achieved on `wikisum_web` and/or
+`wikisum_commoncrawl` and with what `hparams_set`.
+
+```
+PROBLEM=wikisum_web  # or wikisum_commoncrawl
+t2t-trainer \
+  --problem=$PROBLEM \
+  --model=transformer \
+  --hparams_set=transformer_base \
+  --train_steps=250000 \
+  --eval_steps=100 \
+  --data_dir=$DATA_DIR \
+  --output_dir=$TRAIN_DIR
+```
+
+
+## Dataset Metadata
+The following table is necessary for this dataset to be indexed by search
+engines such as <a href="https://g.co/datasetsearch">Google Dataset Search</a>.
+<div itemscope itemtype="http://schema.org/Dataset">
+<table>
+  <tr>
+    <th>property</th>
+    <th>value</th>
+  </tr>
+  <tr>
+    <td>name</td>
+    <td><code itemprop="name">wikisum</code></td>
+  </tr>
+  <tr>
+    <td>alternateName</td>
+    <td><code itemprop="alternateName">WikisumCommonCrawl</code></td>
+  </tr>
+  <tr>
+    <td>alternateName</td>
+    <td><code itemprop="alternateName">WikisumWeb</code></td>
+  </tr>
+  <tr>
+    <td>alternateName</td>
+    <td><code itemprop="alternateName">wikisum_commoncrawl</code></td>
+  </tr>
+  <tr>
+    <td>alternateName</td>
+    <td><code itemprop="alternateName">wikisum_web</code></td>
+  </tr>
+  <tr>
+    <td>url</td>
+    <td><code itemprop="url">https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/wikisum</code></td>
+  </tr>
+  <tr>
+    <td>sameAs</td>
+    <td><code itemprop="sameAs">https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/data_generators/wikisum</code></td>
+  </tr>
+  <tr>
+    <td>description</td>
+    <td><code itemprop="description">The dataset from the
+paper [Generating Wikipedia by Summarizing Long
+Sequences](https://arxiv.org/abs/1801.10198). The task is to generate a
+Wikipedia article based on the contents of the cited references in that article
+and the top 10 Google search results for the article's title.\n
+\n
+There are 2 sources for the reference URLs used:
+\n
+1. [CommonCrawl](http://commoncrawl.org/), an open-source crawl of the web. The
+   advantage of using CommonCrawl is that the dataset is perfectly reproducible.
+   However, there is limited coverage of the reference URLs.
+1. Live web fetches.
Coverage is considerably increased, but the content is + subject to change.\n +\n +The dataset includes:\n +\n +**URLs:** The dataset contains ~90M URLs total (~2.3M Wikipedia articles, each +with ~40 reference URLs). The URLs in the dataset are available in sharded JSON +files.\n +\n +**Wikipedia Articles:** We have processed the Wikipedia articles slightly to +extract the title, section breaks, and section headings. The processed Wikipedia +content is available in sharded `TFRecord` files containing serialized +`tensorflow.Example` protocol buffers.\n +\n +**CommonCrawl References Index:** To enable efficiently extracting the reference +URLs from CommonCrawl, we provide a JSON file per CommonCrawl file which maps a +reference URL contained in that CommonCrawl file to a list of shard ids. +These shards are the ones that contain one or more Wikipedia articles that cite +this reference.</code></td> + </tr> + <tr> + <td>citation</td> + <td><code itemprop="citation">https://identifiers.org/arxiv:1801.10198</code></td> + </tr> + <tr> + <td>provider</td> + <td> + <div itemscope itemtype="http://schema.org/Organization" itemprop="provider"> + <table> + <tr> + <th>property</th> + <th>value</th> + </tr> + <tr> + <td>name</td> + <td><code itemprop="name">Google</code></td> + </tr> + <tr> + <td>sameAs</td> + <td><code itemprop="sameAs">https://en.wikipedia.org/wiki/Google</code></td> + </tr> + </table> + </div> + </td> + </tr> +</table> +</div> diff --git a/tensor2tensor/data_generators/wikisum/__init__.py b/tensor2tensor/data_generators/wikisum/__init__.py new file mode 100644 index 000000000..ff174dd63 --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tensor2tensor/data_generators/wikisum/delete_instances.sh b/tensor2tensor/data_generators/wikisum/delete_instances.sh new file mode 100755 index 000000000..c35e48d8d --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/delete_instances.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +# Delete Google Compute Engine instances with naming structure $NAME-$INDEX +# (e.g. machines created with parallel_launch.py). +# Example usage: +# delete_instances.sh fetch-ref-urls 1000 + +NAME=$1 +MAX=$2 +MIN=${3:-0} + +LOG_F=/tmp/delete-$NAME-logs.txt + +echo "Deleting $MAX instances starting with $NAME-$MIN" + +for i in $(seq $MIN $MAX) +do + gcloud compute instances delete --quiet $NAME-$i > $LOG_F 2>&1 & + if [[ $(( i % 100 )) == 0 ]] + then + # Give it some room to breathe every 100 + sleep 30 + fi +done + +echo "Delete commands launched. Logs redirected to $LOG_F" diff --git a/tensor2tensor/data_generators/wikisum/generate_vocab.py b/tensor2tensor/data_generators/wikisum/generate_vocab.py new file mode 100644 index 000000000..5769a650d --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/generate_vocab.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Generate vocab from references and wikis.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators.wikisum import wikisum + +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("out_dir", None, "Directory to write vocab to.") +flags.DEFINE_string("wikis_dir", + "gs://tensor2tensor-data/wikisum/wiki_content/", + "Directory with wiki_content.tfrecords shards.") +flags.DEFINE_string("refs_dir", None, + "Directory with process_X folders with reference shards.") +flags.DEFINE_bool("for_commoncrawl", False, + "Whether to use WikisumCommoncrawl or WikisumWeb.") + + +def main(_): + if FLAGS.for_commoncrawl: + problem = wikisum.WikisumCommoncrawl() + else: + problem = wikisum.WikisumWeb() + problem.generate_vocab(FLAGS.out_dir, FLAGS.wikis_dir, FLAGS.refs_dir) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/data_generators/wikisum/get_references_commoncrawl.py b/tensor2tensor/data_generators/wikisum/get_references_commoncrawl.py new file mode 100644 index 000000000..2f19ca09d --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/get_references_commoncrawl.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Extract references from CommonCrawl files.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tempfile + +from tensor2tensor.data_generators.wikisum import utils +from tensor2tensor.data_generators.wikisum import wikisum + +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_integer("num_tasks", 1000, "Number of parallel tasks.") +flags.DEFINE_integer("task_id", 0, "Task id in a parallel run.") +flags.DEFINE_string("metadata_dir", + "gs://tensor2tensor-data/wikisum/commoncrawl_metadata/", + "Path to metadata files specifying what references are in " + "which CommonCrawl files.") +flags.DEFINE_string("out_dir", None, "Directory to write references to.") +flags.DEFINE_string("commoncrawl_wet_dir", None, + "Path to CommonCrawl wet.gz files locally. 
If not " + "provided, will download.") + + +def main(_): + assert FLAGS.out_dir + assert FLAGS.metadata_dir + out_dir = os.path.join(FLAGS.out_dir, "process_%d" % FLAGS.task_id) + tf.gfile.MakeDirs(out_dir) + + with utils.timing("get_refs_commoncrawl"): + # Get all WET files + if FLAGS.commoncrawl_wet_dir: + wet_files = tf.gfile.Glob( + os.path.join(FLAGS.commoncrawl_wet_dir, "*.wet.gz")) + else: + tmp_dir = tempfile.gettempdir() + wet_files = list( + utils.wet_download_urls(utils.WET_PATHS_BY_DATE["0917"], tmp_dir)) + + # Shard and select this task's work + wet_files.sort() + wet_files = utils.shard(wet_files, FLAGS.num_tasks)[FLAGS.task_id] + tf.logging.info("Sharded out WET files. Processing %d files", + len(wet_files)) + + wikisum.extract_references_from_wets(wet_files, FLAGS.metadata_dir, out_dir) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/data_generators/wikisum/get_references_web.py b/tensor2tensor/data_generators/wikisum/get_references_web.py new file mode 100644 index 000000000..3bd8a69a4 --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/get_references_web.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# pylint: disable=line-too-long +r"""Fetch reference URLs from all groups for a single shard id. + +Because of an SSL memory leak in Python 3.5, fetching too many URLs in the same +Python process will OOM. This script wraps get_references_web_single_group.py +and calls it through subprocess for each group in the shard, where each group is +~5k URLs. + +Launch with parallel_launch.py + +Each job should finish in ~5 hours with the settings below. + +GCS_BUCKET=gs://my-bucket +python parallel_launch.py \ + --num_instances=1000 \ + --cpu=4 \ + --mem=4 \ + --name=get-refs-web \ + --code_dir=./ \ + --log_dir=$GCS_BUCKET/logs \ + --setup_command="pip3 install aiohttp cchardet aiodns bs4 -q --user" \ + --command_prefix="python3 wikisum/get_references_web.py --out_dir=$GCS_BUCKET/wiki_references --shard_id" +""" +# pylint: enable=line-too-long +import math +import os +import subprocess as sp + +from tensor2tensor.data_generators.wikisum import get_references_web_single_group as fetch +from tensor2tensor.data_generators.wikisum import utils + +import tensorflow.compat.v1 as tf + + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string( + "command", + "python3 -m " + "tensor2tensor.data_generators.wikisum.get_references_web_single_group", + "Command to run get_references_web_single_group, without flags.") + + +def main(_): + shard_urls = fetch.get_urls_for_shard(FLAGS.urls_dir, FLAGS.shard_id) + num_groups = int(math.ceil(len(shard_urls) / fetch.URLS_PER_CLIENT)) + tf.logging.info("Launching get_references_web_single_group sequentially for " + "%d groups in shard %d. 
Total URLs: %d", + num_groups, FLAGS.shard_id, len(shard_urls)) + command_prefix = FLAGS.command.split() + [ + "--urls_dir=%s" % FLAGS.urls_dir, + "--shard_id=%d" % FLAGS.shard_id, + "--debug_num_urls=%d" % FLAGS.debug_num_urls, + ] + with utils.timing("all_groups_fetch"): + for i in range(num_groups): + command = list(command_prefix) + out_dir = os.path.join(FLAGS.out_dir, "process_%d" % i) + command.append("--out_dir=%s" % out_dir) + command.append("--group_id=%d" % i) + try: + # Even on 1 CPU, each group should finish within an hour. + sp.check_call(command, timeout=60*60) + except sp.TimeoutExpired: + tf.logging.error("Group %d timed out", i) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/data_generators/wikisum/get_references_web_single_group.py b/tensor2tensor/data_generators/wikisum/get_references_web_single_group.py new file mode 100644 index 000000000..18208d742 --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/get_references_web_single_group.py @@ -0,0 +1,307 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Fetch reference URLs for a single group_id within a single shard_id. + +See get_references_web.py to fetch URLs for all groups in within a single +shard_id. + +Requires Python 3.5 +pip3 install aiohttp cchardet aiodns bs4 tensorflow +""" + +import datetime +import json +import math +import multiprocessing +import os +import random + +import asyncio +import aiohttp +import tensorflow as tf + +from tensor2tensor.data_generators.wikisum import html +from tensor2tensor.data_generators.wikisum import utils + + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("urls_dir", "gs://tensor2tensor-data/wikisum/wiki_urls/", + "Directory with wiki_urls.json files.") +flags.DEFINE_string("out_dir", None, "Directory to write reference files.") +flags.DEFINE_integer("max_parallel_requests", 50, + "Number of web requests to make in parallel.") + +# Identify which URLs to fetch +flags.DEFINE_integer("shard_id", 0, "ID of URL shard to process.") +flags.DEFINE_integer("group_id", 0, "ID of group within the shard to process.") + +flags.DEFINE_bool("log_samples", False, + "Whether to write out samples of the text extraction.") +flags.DEFINE_integer("log_every", 1000, + "How often to log and write out samples.") +flags.DEFINE_integer("debug_num_urls", 0, + "If >0, limits number of URLs fetched per input shard. " + "For debugging purposes only.") + + +WIKI_URLS_FILE = "wiki_urls.json-%05d-of-01000" +REF_SHARD_FILE = "references.tfrecords.gz-%05d-of-01000" + +# Note that this program leaks memory, likely due to a bug in Python's SSL +# implementation that leaks sockets. This constant is used here and in +# get_references_web.py to limit the number of requests made by a single +# Python process. The more requests made, the more memory required due to the +# leak. +# TODO(rsepassi): Document memory impact of changing this. 
+URLS_PER_CLIENT = 5000 + + +def concat_tfrecord_files(fnames, out_fname, rm_after=True): + with tf.gfile.Open(out_fname, "wb") as out_f: + for fname in fnames: + with tf.gfile.Open(fname, "rb") as in_f: + while True: + read = in_f.read(1000) + if not read: + break + out_f.write(read) + if rm_after: + tf.gfile.Remove(fname) + + +def shard(items, num_shards): + """Split items into num_shards groups.""" + sharded = [] + num_per_shard = len(items) // num_shards + start = 0 + for _ in range(num_shards): + sharded.append(items[start:start + num_per_shard]) + start += num_per_shard + + remainder = len(items) % num_shards + start = len(items) - remainder + for i in range(remainder): + sharded[i].append(items[start + i]) + + assert sum([len(fs) for fs in sharded]) == len(items) + return sharded + + +def mp_get_text(url, html): + return url, html.get_text_from_html(html) + + +def encode(s): + return bytes(s, "utf-8") + + +def make_example_from_ref(url, ref): + try: + url = encode(url) + ref = encode(ref) + except UnicodeEncodeError: + return None + + features = { + "url": + tf.train.Feature(bytes_list=tf.train.BytesList(value=[url])), + "content": + tf.train.Feature( + bytes_list=tf.train.BytesList(value=[ref])), + } + return tf.train.Example(features=tf.train.Features(feature=features)) + + +def tfrecord_fname(out_dir, shard_id, idx=None): + fname = os.path.join(out_dir, REF_SHARD_FILE % shard_id) + if idx is not None: + fname += ".%d" % idx + return fname + + +def make_tfrecord_writer(fname): + opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP) + return tf.python_io.TFRecordWriter(fname, opts) + + +def write_ref_content(url, ref, f): + if not ref: + return False + ex = make_example_from_ref(url, ref) + if ex is None: + return False + f.write(ex.SerializeToString()) + return True + + +async def fetch_url(http://github.com/url,%20session,%20side_data): + text = None + try: + async with session.get(url, timeout=10, verify_ssl=False) as response: + if response.status == 200: + text = await response.text() + else: + tf.logging.error("Status %d, url: %s", response.status, url) + except: + # Request can fail for many reasons. 
+ pass + + return text, side_data + + +async def throttled_fetch_url(http://github.com/url,%20sem,%20session,%20side_data): + async with sem: + return await fetch_url(http://github.com/url,%20session,%20side_data) + + +async def fetch_urls(urls, + out_fname, + logging_fnames=None): + tasks = [] + connector = aiohttp.TCPConnector(limit_per_host=1) + async with aiohttp.ClientSession( + connector=connector, cookie_jar=aiohttp.DummyCookieJar()) as session: + # Async fetch the urls + sem = asyncio.Semaphore(FLAGS.max_parallel_requests) + for url in urls: + side_data = {"url": url} + task = asyncio.ensure_future( + throttled_fetch_url(http://github.com/url,%20sem,%20session,%20side_data)) + tasks.append(task) + tf.logging.info("Async requested %d urls", len(urls)) + + # Setup output files + file_handles = [] + out_f = make_tfrecord_writer(out_fname) + file_handles.append(out_f) + + logging_fnames = logging_fnames or {} + + samples_f = None + if "samples" in logging_fnames: + samples_f = tf.gfile.Open(logging_fnames["samples"], "w") + file_handles.append(samples_f) + + refs_written = [0] # Made a list so can be mutated + + def text_extraction_callback(callback_arg): + url, text = callback_arg + written = write_ref_content(url, text, out_f) + if not written: + return + if not refs_written[0] % FLAGS.log_every: + timestamp = datetime.datetime.now().strftime("%H:%M") + tf.logging.info("%s: Wrote ref %d in group", timestamp, refs_written[0]) + if samples_f is not None: + samples_f.write(url) + samples_f.write("\n") + samples_f.write(text) + samples_f.write("\n\n---\n\n") + refs_written[0] += 1 + + try: + # Process each URL as it comes in. + # Using a multiprocessing Pool because the text extraction is expensive + # and so we distribute across cores. 
+ pool = multiprocessing.Pool() + results = [] + for task in asyncio.as_completed(tasks): + html, side_data = await task + url = side_data["url"] + if not html: + continue + res = pool.apply_async(mp_get_text, (url, html), {}, + text_extraction_callback) + results.append(res) + for res in results: + try: + res.get(timeout=10) + except multiprocessing.TimeoutError: + pass + finally: + for f in file_handles: + f.close() + + return refs_written[0] + + +def get_urls_per_shard(urls_files): + total_urls = 0 + per_shard = {} + for urls_file in urls_files: + ref_urls = set() + shard_id = int(os.path.basename(urls_file)[15:20]) + with tf.gfile.Open(urls_file) as f: + wiki_urls = json.loads(f.read()) + for _, wiki_info in wiki_urls.items(): + ref_urls |= set(wiki_info["refs"]) + + per_shard[shard_id] = list(ref_urls) + total_urls += len(ref_urls) + return per_shard, total_urls + + +def get_urls_for_shard(urls_dir, shard_id): + urls_file = os.path.join(urls_dir, WIKI_URLS_FILE % shard_id) + urls_per_shard, _ = get_urls_per_shard([urls_file]) + assert len(urls_per_shard) == 1 + return urls_per_shard[shard_id] + + +def get_urls_for_shard_group(urls_dir, shard_id, group_id): + shard_urls = get_urls_for_shard(urls_dir, shard_id) + + # Deterministic sort and shuffle to prepare for sharding + shard_urls.sort() + random.seed(123) + random.shuffle(shard_urls) + groups = shard(shard_urls, int(math.ceil(len(shard_urls) / URLS_PER_CLIENT))) + group_urls = groups[group_id] + if FLAGS.debug_num_urls: + group_urls = group_urls[:FLAGS.debug_num_urls] + return group_urls + + +def main(_): + urls = get_urls_for_shard_group( + FLAGS.urls_dir, FLAGS.shard_id, FLAGS.group_id) + tf.logging.info("Fetching %d URLs for shard %d, group %d", + len(urls), FLAGS.shard_id, FLAGS.group_id) + + tf.gfile.MakeDirs(FLAGS.out_dir) + out_fname = tfrecord_fname(FLAGS.out_dir, FLAGS.shard_id) + + with utils.timing("group_fetch"): + logging_fnames = {} + if FLAGS.log_samples: + logging_fnames["samples"] = os.path.join( + FLAGS.out_dir, "samples.%d.txt" % FLAGS.shard_id) + loop = asyncio.get_event_loop() + num_written = loop.run_until_complete(asyncio.ensure_future( + fetch_urls(urls, + out_fname, + logging_fnames))) + + tf.logging.info("Total URLs: %d", len(urls)) + tf.logging.info("Num written: %d", num_written) + tf.logging.info("Coverage: %.1f", (num_written / len(urls)) * 100) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/data_generators/wikisum/html.py b/tensor2tensor/data_generators/wikisum/html.py new file mode 100644 index 000000000..2cf300b1d --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/html.py @@ -0,0 +1,78 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Utils to parse HTML content into plaintext.""" + +import bs4 + + +def get_text_from_html(html): + """Returns a plaintext representation of HTML content.""" + + try: + soup = bs4.BeautifulSoup(html, "html.parser") + except: # pylint: disable=bare-except + # Some docs don't parse + return "" + # Remove script and style tags + for s in soup(["script", "style"]): + s.decompose() + return "\n".join([s for s in _soup_strings(soup)]) + + +def _soup_strings(soup): + """Return text strings in soup.""" + paragraph_tags = set([ + "caption", "details", "h1", "h2", "h3", "h4", "h5", "h6", "li", "p", "td", + "div", "span" + ]) + + skip_children = None + for descendant in soup.descendants: + # If we've treated a tag as a contiguous paragraph, don't re-emit the + # children (see below). + if skip_children is not None: + try: + in_skip = descendant in skip_children # pylint: disable=unsupported-membership-test + except RecursionError: # pylint: disable=undefined-variable + # Possible for this check to hit a nasty infinite recursion because of + # BeautifulSoup __eq__ checks. + in_skip = True + if in_skip: + continue + else: + skip_children = None + + # Treat some tags as contiguous paragraphs, regardless of other tags nested + # inside (like <a> or <b>). + if isinstance(descendant, bs4.Tag): + if descendant.name in paragraph_tags: + if descendant.find_all(paragraph_tags): + # If there are nested paragraph tags, don't treat it as a single + # contiguous tag. + continue + skip_children = list(descendant.descendants) + text = " ".join(descendant.get_text(" ", strip=True).split()) + if text: + yield text + continue + + if (isinstance(descendant, bs4.Comment) or + not isinstance(descendant, bs4.NavigableString)): + continue + + text = " ".join(descendant.strip().split()) + if text: + yield text diff --git a/tensor2tensor/data_generators/wikisum/parallel_launch.py b/tensor2tensor/data_generators/wikisum/parallel_launch.py new file mode 100644 index 000000000..0cdc8403a --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/parallel_launch.py @@ -0,0 +1,293 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# pylint: disable=line-too-long +r"""Launch a script in parallel on GCP. + +For each instance (`--num_instances`), the script will copy the code in +`--code_dir` to the instance, run `--setup_command` and then run +`--command_prefix` joined with the task's id or a line in +`--per_instance_suffix_file`. + +Note that the machines will attempt to down themselves on completion or failure. +If they do not, you can delete them manually or use delete_instances.sh to +delete many at once. 
+ +Example usage: + +``` +BUCKET=gs://my-bucket +python parallel_launch.py \ + --num_instances=1000 \ + --cpu=4 --mem=4 \ + --name=wikisum-refs-web \ + --code_dir=./ \ + --log_dir=$BUCKET/refs_logs \ + --setup_command="pip3 install aiohttp cchardet aiodns bs4 -q --user" \ + --command_prefix="python3 wikisum/get_references_web.py --out_dir=$BUCKET/wiki_references --shard_id" +``` +""" +# pylint: enable=line-too-long + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import multiprocessing as mp +import os +import socket +import subprocess as sp +import time + +from tensor2tensor.utils import cloud_mlengine as cloud +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + + +flags.DEFINE_integer("num_instances", None, "Number of instances to launch.") +flags.DEFINE_string("name", None, "Instance name prefix.") +flags.DEFINE_string("log_dir", None, "GCS bucket to copy logs out to.") +flags.DEFINE_string("code_dir", None, "Directory to copy.") +flags.DEFINE_string("setup_command", None, "Setup command to run.") +flags.DEFINE_string("command_prefix", None, "Command to run, prefix.") +flags.DEFINE_string("per_instance_suffix_file", None, + "Command to run, suffix per instance. If None, suffix will " + "be instance id.") +flags.DEFINE_integer("cpu", 1, "Number of CPUs per instance.") +flags.DEFINE_integer("mem", 4, "Memory in GB per instance.") +flags.DEFINE_integer("num_threads", 48, + "Number of threads to use to spin up jobs.") +flags.DEFINE_bool("debug_keep_up", False, + "If True, will keep the machine up. num_instances must be 1.") +flags.DEFINE_string("instance_ids", None, + "Comma-separated list of integer instance ids to launch. " + "Useful if some failed on a previous run and you only want " + "to rerun specific tasks.") + + +DELETE = "gcloud compute instances delete {name}" +DELETE_SELF = ("gcloud compute instances delete $(hostname) --quiet " + "--zone={zone}") +CREATE_INSTANCE = ("gcloud compute instances create {instance_name} " + "--custom-cpu {cpu} --custom-memory {mem} " + "--custom-extensions " + "--image-project=ml-images --image-family=tf-1-7 " + "--scopes=cloud-platform") +COPY_CODE = "gcloud compute scp --recurse {local_dir} {instance_name}:~/" +SSH = "gcloud compute ssh {instance_name} --command" +SCREEN = "screen -dmS test bash -c \"{command}\"" +DEFAULT_ZONE = "gcloud config get-value compute/zone" +LOGS = "> ~/logs-{task_id}.txt 2>&1; gsutil cp ~/logs-{task_id}.txt {bucket}" + + +def remote_run(cmd, instance_name, detach=False, retries=1): + """Run command on GCS instance, optionally detached.""" + if detach: + cmd = SCREEN.format(command=cmd) + args = SSH.format(instance_name=instance_name).split() + args.append(cmd) + for i in range(retries + 1): + try: + if i > 0: + tf.logging.info("Retry %d for %s", i, args) + return sp.check_call(args) + except sp.CalledProcessError as e: + if i == retries: + raise e + + +def default_zone(): + return cloud.shell_output(DEFAULT_ZONE).strip() + + +@contextlib.contextmanager +def safe_socket(timeout=2): + s = socket.socket() + s.settimeout(timeout) + try: + yield s + finally: + s.close() + + +def wait_for_ssh(ip): + """Wait for SSH to be available at given IP address.""" + for _ in range(12): + with safe_socket() as s: + try: + s.connect((ip, 22)) + return True + except socket.timeout: + pass + time.sleep(10) + return False + + +def create_instance(instance_name, cpu=1, mem=4): + tf.logging.info("Creating instance %s", 
instance_name) + out = cloud.shell_output(CREATE_INSTANCE, instance_name=instance_name, + cpu=cpu, mem=mem) + return out.split("\n")[1:-1][0].split()[8] + + +def list_vm_names_and_ips(): + list_out = cloud.shell_output(cloud.LIST_VM) + lines = [l.split() for l in list_out.split("\n")[1:-1]] + names_and_ips = [(l[0].strip(), l[-2].strip()) for l in lines] + return names_and_ips + + +def shell_run_with_retry(cmd, retries=1, **kwargs): + for i in range(retries + 1): + try: + if i > 0: + tf.logging.info("Retry %d for %s", i, cmd) + cloud.shell_run(cmd, **kwargs) + return + except sp.CalledProcessError as e: + if i == retries: + raise e + + +def delete_instance(instance_name): + cloud.shell_run(DELETE, name=instance_name) + + +def launch_instance(instance_name, + command, + existing_ip=None, + cpu=1, + mem=4, + code_dir=None, + setup_command=None): + """Launch a GCE instance.""" + # Create instance + ip = existing_ip or create_instance(instance_name, cpu=cpu, mem=mem) + tf.logging.info("Waiting for SSH %s", instance_name) + ready = wait_for_ssh(ip) + if not ready: + raise ValueError("Instance %s never ready for SSH" % instance_name) + + # Copy code + if code_dir: + shell_run_with_retry(COPY_CODE, retries=2, + local_dir=code_dir, instance_name=instance_name) + + # Run setup + if setup_command: + tf.logging.info("Running setup on %s", instance_name) + remote_run(setup_command, instance_name) + + # Run command + tf.logging.info("Running command on %s", instance_name) + remote_run(command, instance_name, detach=True) + + +def main(_): + assert FLAGS.num_instances + assert FLAGS.name + zone = default_zone() + assert zone + + code_dir = None + if FLAGS.code_dir: + code_dir = os.path.abspath(os.path.expanduser(FLAGS.code_dir)) + + # Suffixes per instance + if FLAGS.per_instance_suffix_file: + with tf.gfile.Open(FLAGS.per_instance_suffix_file) as f: + suffixes = [l.strip() for l in f.readlines()] + else: + suffixes = list(range(FLAGS.num_instances)) + assert len(suffixes) == FLAGS.num_instances + + vm_info = list_vm_names_and_ips() + vm_names = list(zip(*vm_info))[0] if vm_info else [] + + pool = mp.Pool(FLAGS.num_threads) + async_results = [] + + assert FLAGS.log_dir + log_dir = os.path.join(FLAGS.log_dir, FLAGS.name) + tf.gfile.MakeDirs(log_dir) + assert log_dir.startswith("gs://") + if not log_dir.endswith("/"): + log_dir += "/" + # Write a test file to make sure gcloud GCS APIs are enabled + test_filename = os.path.join(log_dir, "check_write") + with tf.gfile.Open(test_filename, "w") as f: + f.write("testing GCS write") + tf.gfile.Remove(test_filename) + + instance_ids = list(range(FLAGS.num_instances)) + if FLAGS.instance_ids: + instance_ids = [int(i) for i in FLAGS.instance_ids.split(",")] + tf.logging.info("Launching %d instances", len(instance_ids)) + + for i in instance_ids: + instance_name = "%s-%d" % (FLAGS.name, i) + existing_ip = (vm_info[vm_names.index(instance_name)][1] + if instance_name in vm_names else None) + logging = LOGS.format(task_id=i, bucket=log_dir) if log_dir else "" + delete = DELETE_SELF.format(zone=zone) + if FLAGS.debug_keep_up: + assert len(instance_ids) == 1 + delete = "" + command = "{prefix} {suffix} {logging}; {delete}".format( + prefix=FLAGS.command_prefix, + suffix=suffixes[i], + delete=delete, + logging=logging) + args = (instance_name, command, existing_ip, + FLAGS.cpu, FLAGS.mem, code_dir, + FLAGS.setup_command) + res = pool.apply_async(launch_instance, args) + async_results.append((res, instance_name, i)) + + failed = [] + for res, instance_name, i in 
async_results: + try: + res.get() + except Exception as e: # pylint: disable=broad-except + failed.append((instance_name, i)) + tf.logging.error("Failed to launch task %s due to exception %s", + instance_name, str(e)) + + results = [] + if failed: + ids_for_flag = ",".join([str(i) for i in list(zip(*failed))[1]]) + tf.logging.error("Failed to launch %d jobs. Tasks: %s. " + "Attempting delete in case they are still up. Rerun with " + "--instance_ids='%s' to attempt relaunch.", + len(failed), str(failed), ids_for_flag) + for instance_name, _ in failed: + res = pool.apply_async(delete_instance, (instance_name,)) + results.append(res) + + for res in results: + try: + res.get() + except: # pylint: disable=bare-except + pass + + tf.logging.info("Launching complete.") + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/data_generators/wikisum/produce_examples.py b/tensor2tensor/data_generators/wikisum/produce_examples.py new file mode 100644 index 000000000..25cad27a5 --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/produce_examples.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Produce examples given a vocab, wikis, references, and dataset URLs.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from six.moves import range +from tensor2tensor.data_generators.wikisum import utils +from tensor2tensor.data_generators.wikisum import wikisum + +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_integer("num_tasks", 1000, "Number of parallel tasks.") +flags.DEFINE_integer("task_id", 0, "Task id in a parallel run.") +flags.DEFINE_string("out_dir", None, "Directory to write to.") +flags.DEFINE_string("wikis_dir", + "gs://tensor2tensor-data/wikisum/wiki_content/", + "Directory with wiki_content.tfrecords.") +flags.DEFINE_string("refs_dir", None, "Directory with process_X dirs") +flags.DEFINE_string("urls_dir", "gs://tensor2tensor-data/wikisum/wiki_urls/", + "Directory with wiki_urls.json") +flags.DEFINE_string("vocab_dir", None, "Directory with vocab file") +flags.DEFINE_bool("for_commoncrawl", False, + "Whether to use WikisumCommoncrawl or WikisumWeb.") + + +def main(_): + if FLAGS.for_commoncrawl: + problem = wikisum.WikisumCommoncrawl() + else: + problem = wikisum.WikisumWeb() + + out_filepaths = problem.out_filepaths(FLAGS.out_dir) + out_filepaths = utils.shard(out_filepaths, FLAGS.num_tasks)[FLAGS.task_id] + + if not FLAGS.vocab_dir: + FLAGS.vocab_dir = FLAGS.out_dir + + shard_ids = utils.shard(list(range(utils.NUM_SHARDS)), + FLAGS.num_tasks)[FLAGS.task_id] + + with utils.timing("produce_examples"): + wikisum.produce_examples( + shard_ids=shard_ids, + wikis_dir=FLAGS.wikis_dir, + refs_dir=FLAGS.refs_dir, + urls_dir=FLAGS.urls_dir, + vocab_path=os.path.join(FLAGS.vocab_dir, problem.vocab_filename), + 
out_filepaths=out_filepaths) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/data_generators/wikisum/test_data/para_bad1.txt b/tensor2tensor/data_generators/wikisum/test_data/para_bad1.txt new file mode 100644 index 000000000..b15107bd9 --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/test_data/para_bad1.txt @@ -0,0 +1,11 @@ +kolkata ward no 97 37 +you are here : india » west bengal » kolkata » kolkata +this paragraph too short +a | b | c | d | e | f | g | h | i | j | k | l | m | n | o | p | q | r | s | t | u | v | w | x | y | z +123 123 123 123 985 9880 1230 0980 . 12398 . +- 5 . 7 % - 5 . 2 % - 15 . 1 % 4 . 7 % - 13 . 3 % +http : / / www . bbc . co . uk / sport / football / 24351521 +no . - 26 beadon street . +{ { / playpopup } } { { ^ playpopup } } { { # playinvideopage } } { { / playinvideopage } } { { ^ playinvideopage } } { { / playinvideopage } } { { / playpopup } } <p> { { # playpopup } } { { / playpopup } } { { ^ playpopup } } { { # playinvideopage } } { { / playinvideopage } } { { ^ playinvideopage } } { { / playinvideopage } } { { / playpopup } } { { genre } } +denham , samuel coulter , sally 133 oct 28 1819 +browse by diff --git a/tensor2tensor/data_generators/wikisum/test_data/para_good1.txt b/tensor2tensor/data_generators/wikisum/test_data/para_good1.txt new file mode 100644 index 000000000..99f78ef45 --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/test_data/para_good1.txt @@ -0,0 +1,15 @@ +this is a very good paragraph . it even has two sentences . +the castle that was soon to figure so largely in lee’s life lay fourteen miles +to the southwest of where he sat perched atop his tank . topped with storybook +crenelations and accompanied by a rich history , schloss itter , as it’s called +in german , was first mentioned in land records as early as 1240 . since then , +itter has passed through a number of hands . after germany’s march 1938 +annexation of austria , the castle’s robust construction and relatively remote +location attracted the attention of the notoriously secretive nazis . within +months of absorbing austria into the greater reich , the german government +requisitioned castle itter for unspecified “official use”—which included housing +for several months in 1942 an organization called the “german association for +combating the dangers of tobacco . ” on february 7 , 1943 , it fell into new +hands yet again , for on that day , the structure and all its outbuildings were +requisitioned by the wehrmacht on behalf of the ss . +the url for the site is http : / / www . bbc . co . uk / sport / football / 24351521 . diff --git a/tensor2tensor/data_generators/wikisum/utils.py b/tensor2tensor/data_generators/wikisum/utils.py new file mode 100644 index 000000000..f45566e22 --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/utils.py @@ -0,0 +1,269 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Wikisum data generation utilities.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import contextlib +import datetime +import gzip +import os +import re +import urllib + +import tensorflow.compat.v1 as tf + +# pylint: disable=g-import-not-at-top +# To maintain compatibility with Python 2 and 3 +try: + import cStringIO as StringIO +except ImportError: + import io as StringIO +# pylint: enable=g-import-not-at-top + + +# Each entry is a URL to the wet.paths.gz file for that CommonCrawl dump. +WET_PATHS_BY_DATE = { + '0917': ('https://commoncrawl.s3.amazonaws.com/crawl-data/CC-MAIN-2017-39/' + 'wet.paths.gz'), +} + +S3_HTTP_PREFIX = 'https://commoncrawl.s3.amazonaws.com/' +NUM_SHARDS = 1000 +METADTA_SUFFIX = '.metadata.json' + + + +def readahead(path): + return path + + +class WETHeader(collections.namedtuple('WETHeader', ['url', 'length'])): + URI_HEADER = 'WARC-Target-URI: ' + LENGTH_HEADER = 'Content-Length: ' + + @classmethod + def read(cls, f): + """Read header from file. Headers end with length and then 1 blank line.""" + url = None + + line = f.readline() + if not line: + # EOF + return None + while not line.startswith(cls.LENGTH_HEADER): + if line.startswith(cls.URI_HEADER): + url = line[len(cls.URI_HEADER):].strip() + line = f.readline() + + # Consume empty separator + f.readline() + + # Read content + length = int(line.split(':')[1]) + + return cls(url, length) + + +class WETRecord(collections.namedtuple('WETRecord', ['url', 'content'])): + + @classmethod + def read(cls, f): + """Read WETRecord from file. Records end with 2 blank lines.""" + header = WETHeader.read(f) + if header is None: + # EOF + return None + content = f.read(header.length) + + # Consume empty separators + f.readline() + f.readline() + + return cls(header.url, content) + + +def wet_records_from_file_obj(f, take_ownership=False): + """Iterate through records in WET file object.""" + while True: + record = WETRecord.read(f) + + if record is None: + break + + if not record.url: + continue + + yield record + + if take_ownership: + f.close() + + +def wet_records(wet_filepath): + """Generate WETRecords from filepath.""" + if wet_filepath.endswith('.gz'): + fopen = gzip.open + else: + fopen = tf.gfile.GFile + + with fopen(wet_filepath) as f: + for record in wet_records_from_file_obj(f): + yield record + + +def download(url, download_dir): + outname = os.path.join(download_dir, os.path.basename(url)) + if tf.gfile.Exists(outname): + print('Found %s, skipping download' % outname) + return outname + inprogress = outname + '.incomplete' + print('Downloading %s' % url) + inprogress, _ = urllib.urlretrieve(url, inprogress) + tf.gfile.Rename(inprogress, outname) + return outname + + +def wet_download_urls(wet_paths_url, tmp_dir, rm_after=True): + paths_gz = download(wet_paths_url, tmp_dir) + with gzip.open(paths_gz) as f: + path = f.readline() + while path: + download_path = S3_HTTP_PREFIX + path[:-1] + yield download_path + path = f.readline() + if rm_after: + tf.gfile.Remove(paths_gz) + + +def wet_records_from_url(http://github.com/download_url,%20tmp_dir,%20rm_after=True): + wet_gz = download(download_url, tmp_dir) + try: + for wet_record in wet_records(wet_gz): + yield wet_record + finally: + if rm_after: + tf.gfile.Remove(wet_gz) + + +class DummyPool(object): + + def __init__(self, processes=None): + pass + + def apply_async(self, fn, args=None): + args = args or tuple() + return DummyResult(fn(*args)) 
+ + def map(self, fn, arg_list): + return [fn(a) for a in arg_list] + + +class DummyResult(object): + + def __init__(self, result): + self.result = result + + def get(self): + return self.result + + +def shard(items, num_shards): + """Split items into num_shards groups.""" + sharded = [] + num_per_shard = len(items) // num_shards + start = 0 + for _ in range(num_shards): + sharded.append(items[start:start + num_per_shard]) + start += num_per_shard + + remainder = len(items) % num_shards + start = len(items) - remainder + for i in range(remainder): + sharded[i].append(items[start + i]) + + assert sum([len(fs) for fs in sharded]) == len(items) + return sharded + + +def gzip_memfile(fname): + with tf.gfile.Open(readahead(fname)) as f: + memfile = StringIO.StringIO(f.read()) + return gzip.GzipFile(fileobj=memfile) + + +_SOME_ALPHA_RE = re.compile(r'[A-Za-z]+') +_ONLY_ALPHA_RE = re.compile(r'^[A-Za-z]*$') + + +def filter_paragraph(p): + """Simple filter to remove obviously bad paragraphs (bad text extraction). + + Note this needs to run very quickly as it is applied to every paragraph + in the corpus, so nothing fancy! This whole method should be linear + expected time in len(p). + + Args: + p: string, paragraph + + Returns: + True if we should remove the paragraph. + """ + # Expect a minimum number of words. + tokens = p.split() + if len(tokens) < 6: + return True + + # Require some letters. + if not re.search(_SOME_ALPHA_RE, p): + return True + + # Keep this one at the end, probably the most complicated logic. + # We try to detect sentences, which should have a minimum of 3 tokens + # with only alphabetic characters. + last = 0 + found_sentence = False + num_alpha = 0 + for i, x in enumerate(tokens): + if x == '.': + if i - last > 3 and num_alpha >= 3: + found_sentence = True + break + last = i + num_alpha = 0 + if re.match(_ONLY_ALPHA_RE, x): + num_alpha += 1 + if not found_sentence: + return True + + return False + + +@contextlib.contextmanager +def timing(name=''): + """Log start, end, and duration.""" + start = datetime.datetime.now() + timestamp = start.strftime('%H:%M') + tf.logging.info('Starting job [%s] at %s', name, timestamp) + yield + end = datetime.datetime.now() + timestamp = end.strftime('%H:%M') + tf.logging.info('Finished job [%s] at %s', name, timestamp) + duration = end - start + duration_mins = duration.total_seconds() / 60 + tf.logging.info('Total time [%s] (m): %d', name, int(duration_mins)) diff --git a/tensor2tensor/data_generators/wikisum/utils_test.py b/tensor2tensor/data_generators/wikisum/utils_test.py new file mode 100644 index 000000000..559889d39 --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/utils_test.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""Tests for tensor2tensor.data_generators.wikisum.utils."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+from tensor2tensor.data_generators.wikisum import utils
+
+import tensorflow.compat.v1 as tf
+
+pkg_dir = os.path.abspath(__file__)
+pkg_dir, _ = os.path.split(pkg_dir)
+_TESTDATA = os.path.join(pkg_dir, "test_data")
+
+
+def _get_testdata(filename):
+  with tf.io.gfile.GFile(filename) as f:
+    return f.read()
+
+
+class UtilsTest(tf.test.TestCase):
+
+  def test_filter_paragraph(self):
+    for bad in tf.io.gfile.glob(os.path.join(_TESTDATA, "para_bad*.txt")):
+      for p in _get_testdata(bad).split("\n"):
+        self.assertTrue(utils.filter_paragraph(p),
+                        msg="Didn't filter %s" % p)
+    for good in tf.io.gfile.glob(os.path.join(_TESTDATA, "para_good*.txt")):
+      # Good paragraphs are hard-wrapped across lines, so check the whole file.
+      p = _get_testdata(good)
+      self.assertFalse(utils.filter_paragraph(p), msg="Filtered %s" % p)
+
+
+if __name__ == "__main__":
+  tf.test.main()
diff --git a/tensor2tensor/data_generators/wikisum/validate_data.py b/tensor2tensor/data_generators/wikisum/validate_data.py new file mode 100644 index 000000000..b03bef990 --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/validate_data.py @@ -0,0 +1,173 @@
+# coding=utf-8
+# Copyright 2023 The Tensor2Tensor Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""Aggregate stats from produce_examples.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import os + +import numpy as np + +import six +from six.moves import zip +from tensor2tensor.data_generators.wikisum import wikisum + +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("out_dir", None, "Directory with data and stats files.") +flags.DEFINE_bool("for_commoncrawl", False, + "Whether to use WikisumCommoncrawl or WikisumWeb.") +flags.DEFINE_bool("rm_per_shard_stats", True, + "Whether to remove the per-shard stats files after writing " + "out the aggregated stats.") + + +def aggregate_stats(stats_files): + """Aggregate stats in per-shard stats files.""" + all_stats = {} + for fname in stats_files: + with tf.gfile.Open(fname) as f: + stats = json.loads(f.read()) + for k, v in six.iteritems(stats): + if k not in all_stats: + if isinstance(v, list): + all_stats[k] = [] + else: + all_stats[k] = 0 + + if isinstance(v, list): + all_stats[k].extend(v) + else: + all_stats[k] += v + + stats = all_stats + ref_coverage = float(stats["total_found_refs"]) / stats["total_original_refs"] + len_bounds = [0, 2, 10, 100, 1000, 5000, 10000, 20000, 50000, 100000, 1000000] + len_counts, len_bounds = np.histogram(stats["ref_lengths"], len_bounds) + len_dist = len_counts.astype(np.float32) / len_counts.sum() + wiki_coverage = (float(stats["num_wikis_written"]) / + stats["total_original_wikis"]) + wikis_skipped_no_ref = (float(stats["wikis_skipped_no_refs"]) / + stats["total_original_wikis"]) + wikis_skipped_no_lead = (float(stats["wikis_skipped_short_lead"]) / + stats["total_original_wikis"]) + wiki_ref_coverage = [ + float(found) / orig for found, orig + in zip(stats["wiki_found_refs"], stats["wiki_original_refs"]) if found + ] + coverage_bounds = np.arange(21).astype(np.float32) / 20 + coverage_counts, coverage_bounds = np.histogram(wiki_ref_coverage, + coverage_bounds) + coverage_dist = coverage_counts.astype(np.float32) / coverage_counts.sum() + + agg_stats = dict( + total_original_wikis=stats["total_original_wikis"], + total_original_refs=stats["total_original_refs"], + wiki_coverage=wiki_coverage, + wikis_skipped_no_ref=wikis_skipped_no_ref, + wikis_skipped_no_lead=wikis_skipped_no_lead, + overall_ref_coverage=ref_coverage, + per_wiki_ref_coverage_dist=list((coverage_dist * 100).astype(int)), + per_wiki_ref_coverage_bounds=list((coverage_bounds * 100).astype(int)), + ref_len_dist=list((len_dist * 100).astype(int)), + ref_len_bounds=list(len_bounds), + ) + return agg_stats + + +def filename_to_task_id(fname): + """Map filename to the task id that created it assuming 1k tasks.""" + # This matches the order and size in WikisumBase.out_filepaths + fname = os.path.basename(fname) + shard_id_increment = { + "train": 0, + "dev": 800, + "test": 900, + } + parts = fname.split("-") + split = parts[1] + shard_id = parts[2] + task_id = int(shard_id) + shard_id_increment[split] + return task_id + + +def get_length(fname): + return tf.gfile.Stat(fname).length + + +def validate_data_files(problem, data_files, min_size): + """Validate presence and minimum size of files.""" + # Check that all files are present + data_dir = os.path.split(data_files[0])[0] + out_filepaths = problem.out_filepaths(data_dir) + missing_filepaths = set(out_filepaths) - set(data_files) + if missing_filepaths: + tf.logging.error("Missing %d data files", len(missing_filepaths)) + + # Check that each file is at least 
100M + too_small = [] + for data_file in data_files: + length = get_length(data_file) + if length < min_size: + too_small.append(data_file) + if too_small: + tf.logging.error("%d files too small", len(too_small)) + + bad_files = too_small + list(missing_filepaths) + return bad_files + + +def main(_): + if FLAGS.for_commoncrawl: + problem = wikisum.WikisumCommoncrawl() + else: + problem = wikisum.WikisumWeb() + prefix = problem.dataset_filename() + data_files = tf.gfile.Glob(os.path.join(FLAGS.out_dir, "%s*" % prefix)) + missing_files = validate_data_files( + problem, data_files, + min_size=(60 if FLAGS.for_commoncrawl else 120) * 1e6) + + task_ids = [filename_to_task_id(fname) for fname in missing_files] + ids_for_flag = ",".join([str(i) for i in task_ids]) + tf.logging.error("You should (re)generate %d of the data files. " + "Rerun produce_examples with --instance_ids='%s'.", + len(missing_files), ids_for_flag) + + # Compute and write out aggregated stats + stats_files = tf.gfile.Glob(os.path.join(FLAGS.out_dir, "stats*")) + agg_stats = aggregate_stats(stats_files) + if not FLAGS.for_commoncrawl: + coverage = agg_stats["overall_ref_coverage"] * 100 + if not coverage > 80: + tf.logging.error("Overall reference coverage is expected to be > 80%. " + "It is %0.1f. You may want to rerun get_references_web.", + coverage) + with tf.gfile.Open( + os.path.join(FLAGS.out_dir, "stats.json"), "w") as f: + f.write(json.dumps(agg_stats)) + if FLAGS.rm_per_shard_stats and not missing_files: + for fname in stats_files: + tf.gfile.Remove(fname) + + +if __name__ == "__main__": + tf.app.run() diff --git a/tensor2tensor/data_generators/wikisum/wikisum.py b/tensor2tensor/data_generators/wikisum/wikisum.py new file mode 100644 index 000000000..f2e2c8c13 --- /dev/null +++ b/tensor2tensor/data_generators/wikisum/wikisum.py @@ -0,0 +1,557 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
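As a worked example of the shard-to-task mapping used by `validate_data.py` above: the 0/800/900 offsets come from `filename_to_task_id`, while the file name and directory below are hypothetical placeholders (illustration only, not part of this diff).

```python
from tensor2tensor.data_generators.wikisum import validate_data

# With train/dev/test offsets of 0/800/900, dev shard 3 maps to task 803.
# "wikisum_web" and "/tmp/t2t-data" are assumed names for illustration.
task_id = validate_data.filename_to_task_id(
    "/tmp/t2t-data/wikisum_web-dev-00003-of-00100")
assert task_id == 803
```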
+ +"""Wikipedia Summarization Problems.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import json +import math +import os +import re +import string +import tempfile + +import six +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import tokenizer +from tensor2tensor.data_generators.wikisum import utils as cc_utils +from tensor2tensor.layers import modalities +from tensor2tensor.utils import metrics +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + +PROCESS_FOLDER_PREFIX = "process" +REF_SHARD_FILE_PREFIX = "references.tfrecords.gz" +REF_SHARD_FILE = REF_SHARD_FILE_PREFIX + "-%05d-of-01000" + +# Support files +BASE_SUPPORT_DIR = "gs://tensor2tensor-data/wikisum" +WIKI_CONTENT_DIR = os.path.join(BASE_SUPPORT_DIR, "wiki_content") +WIKI_URLS_DIR = os.path.join(BASE_SUPPORT_DIR, "wiki_urls") +WET_METADATA_DIR = os.path.join(BASE_SUPPORT_DIR, "commoncrawl_metadata") +WIKI_CONTENT_FILE = "wiki_content.tfrecords-%05d-of-01000" +WIKI_URLS_FILE = "wiki_urls.json-%05d-of-01000" + +EOT = "<EOT>" # end-of-title string +_MIN_REFS = 1 +_MIN_LEADSECTION_TOKENS = 1 + + +class WikisumBase(problem.Problem): + """Base class for Wikisum problems.""" + + def example_reading_spec(self): + data_fields = { + "inputs": tf.VarLenFeature(tf.int64), + "targets": tf.VarLenFeature(tf.int64), + "section_boundaries": tf.VarLenFeature(tf.int64), + } + data_items_to_decoders = None + return (data_fields, data_items_to_decoders) + + @property + def target_vocab_size(self): + return 2**15 + + @property + def vocab_filename(self): + return "vocab.%s.%d" % (self.dataset_filename(), self.target_vocab_size) + + def feature_encoders(self, data_dir): + vocab_filename = os.path.join(data_dir, self.vocab_filename) + encoder = text_encoder.SubwordTextEncoder(vocab_filename) + # Shared encoder for inputs and targets + return {"inputs": encoder, "targets": encoder} + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.stop_at_eos = True + + p.vocab_size = { + "inputs": self._encoders["inputs"].vocab_size, + "targets": self._encoders["targets"].vocab_size, + } + p.modality = { + "inputs": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.SYMBOL, + } + + def eval_metrics(self): + return super(WikisumBase, self).eval_metrics() + [ + metrics.Metrics.ROUGE_2_F, metrics.Metrics.ROUGE_L_F + ] + + def generate_lines_for_vocab(self, wikis_dir, refs_dir, max_chars=10**7): + total_chars = 0 + ref_files_by_shard = _references_files_by_shard(refs_dir) + for shard_id in range(cc_utils.NUM_SHARDS): + # Wikipedia articles + for wiki in _wiki_articles(shard_id, wikis_dir): + yield _normalize_text(wiki.title) + EOT + for section in wiki.sections: + yield _format_title(_normalize_text(section.title)) + yield _normalize_text(section.text) + total_chars += len(section.title) + total_chars += len(section.text) + + # References + for i, content in enumerate( + six.itervalues(_references_content(ref_files_by_shard[shard_id]))): + for line in content.split("\n"): + if line: + yield _normalize_text(line) + total_chars += len(line) + + # Make sure we use at least 1k references + if i >= 1000 and total_chars >= max_chars: + break + + if total_chars >= max_chars: + tf.logging.info("Seen enough chars: %d; finished.", max_chars) + break + 
tf.logging.info("Built vocabulary using %d chars", total_chars) + + def generate_vocab(self, data_dir, wikis_dir, refs_dir): + # Produce a SubwordTextEncoder from a subset of the data + return generator_utils.get_or_generate_vocab_inner( + data_dir, self.vocab_filename, self.target_vocab_size, + self.generate_lines_for_vocab(wikis_dir, refs_dir)) + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + tf.logging.warn("See wikisum/README.md for instructions to generate data.") + + def out_filepaths(self, data_dir): + train_shards = 800 + dev_shards = 100 + test_shards = 100 + train_filepaths = self.training_filepaths( + data_dir, train_shards, shuffled=True) + dev_filepaths = self.dev_filepaths(data_dir, dev_shards, shuffled=True) + test_filepaths = self.test_filepaths(data_dir, test_shards, shuffled=True) + out_filepaths = train_filepaths + dev_filepaths + test_filepaths + out_filepaths.sort() + assert len(out_filepaths) == cc_utils.NUM_SHARDS + return out_filepaths + + +@registry.register_problem +class WikisumCommoncrawl(WikisumBase): + """Wikipedia references->article summarization task based on CommonCrawl.""" + pass + + +@registry.register_problem +class WikisumWeb(WikisumBase): + """Wikipedia references->article summarization task based on web data.""" + pass + + +@registry.register_problem +class WikisumCommoncrawlLeadSection(WikisumCommoncrawl): + """Wikipedia references->lead section summarization task.""" + + def preprocess_example(self, example, mode, hparams): + example["targets"] = _truncate_to_lead_section(example) + return super(WikisumCommoncrawlLeadSection, self).preprocess_example( + example, mode, hparams) + + def dataset_filename(self): + return WikisumCommoncrawl.name + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + tf.logging.warn("Problem %s reuses data from problem %s", self.name, + WikisumCommoncrawl.name) + + +@registry.register_problem +class WikisumWebLeadSection(WikisumWeb): + """Wikipedia references->lead section summarization task.""" + + def preprocess_example(self, example, mode, hparams): + example["targets"] = _truncate_to_lead_section(example) + return super(WikisumWebLeadSection, self).preprocess_example( + example, mode, hparams) + + def dataset_filename(self): + return WikisumWeb.name + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + tf.logging.warn("Problem %s reuses data from problem %s", self.name, + WikisumWeb.name) + + +def make_ref_shard_files(out_dir): + tf.gfile.MakeDirs(out_dir) + opts = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.GZIP) + files = [ + tf.python_io.TFRecordWriter( + os.path.join(out_dir, REF_SHARD_FILE % i), opts) + for i in range(cc_utils.NUM_SHARDS) + ] + return files + + +def _truncate_to_lead_section(example): + wiki = example["targets"] + lead_boundary = example["section_boundaries"][0] + # Concat a new EOS to the lead since the original one gets truncated. 
+ lead = tf.concat((wiki[:lead_boundary], [text_encoder.EOS_ID]), 0) + return lead + + +def _make_example_from_record(record): + features = { + "url": + tf.train.Feature(bytes_list=tf.train.BytesList(value=[record.url])), + "content": + tf.train.Feature( + bytes_list=tf.train.BytesList(value=[record.content])), + } + return tf.train.Example(features=tf.train.Features(feature=features)) + + +def _shard_id_for_file(sharded_filename): + suffix = "00000-of-00000" + parts = sharded_filename[-len(suffix):].split("-") + assert len(parts) == 3 + return int(parts[0]) + + +def _references_files_by_shard(refs_dir): + process_dirs = _process_folders(refs_dir) + shards = collections.defaultdict(list) + for d in process_dirs: + ref_files = tf.gfile.Glob(os.path.join(d, REF_SHARD_FILE_PREFIX) + "*") + for f in ref_files: + shards[_shard_id_for_file(f)].append(f) + return shards + + +def _references_content(ref_files): + """Returns dict<str ref_url, str ref_content>.""" + example_spec = { + "url": tf.FixedLenFeature([], tf.string), + "content": tf.FixedLenFeature([], tf.string), + } + data = {} + for ex in generator_utils.tfrecord_iterator( + ref_files, gzipped=True, example_spec=example_spec): + data[ex["url"]] = text_encoder.to_unicode(ex["content"]) + return data + + +def _wiki_urls_for_shard(shard_id, urls_dir=None): + """Urls for chunk: dict<str wiki_url, list<str> ref_urls>.""" + urls_dir = urls_dir or WIKI_URLS_DIR + urls_filepath = os.path.join(urls_dir, WIKI_URLS_FILE % shard_id) + with tf.gfile.GFile(urls_filepath) as f: + return json.loads(f.read()) + + +class WikipediaSection( + collections.namedtuple("WikipediaSection", ["title", "text"])): + pass + + +class WikipediaArticle( + collections.namedtuple("WikipediaArticle", ["url", "title", "sections"])): + pass + + +def _wiki_articles(shard_id, wikis_dir=None): + """Generates WikipediaArticles from GCS that are part of shard shard_id.""" + if not wikis_dir: + wikis_dir = WIKI_CONTENT_DIR + with tf.Graph().as_default(): + dataset = tf.data.TFRecordDataset( + cc_utils.readahead( + os.path.join(wikis_dir, WIKI_CONTENT_FILE % shard_id)), + buffer_size=16 * 1000 * 1000) + + def _parse_example(ex_ser): + """Parse serialized Example containing Wikipedia article content.""" + features = { + "url": tf.VarLenFeature(tf.string), + "title": tf.VarLenFeature(tf.string), + "section_titles": tf.VarLenFeature(tf.string), + "section_texts": tf.VarLenFeature(tf.string), + } + ex = tf.parse_single_example(ex_ser, features) + for k in ex.keys(): + ex[k] = ex[k].values + ex["url"] = ex["url"][0] + ex["title"] = ex["title"][0] + return ex + + dataset = dataset.map(_parse_example, num_parallel_calls=32) + dataset = dataset.prefetch(100) + record_it = dataset.make_one_shot_iterator().get_next() + + with tf.Session() as sess: + while True: + try: + ex = sess.run(record_it) + except tf.errors.OutOfRangeError: + break + + sections = [ + WikipediaSection(title=text_encoder.to_unicode(title), + text=text_encoder.to_unicode(text)) + for title, text in zip(ex["section_titles"], ex["section_texts"]) + ] + yield WikipediaArticle( + url=text_encoder.to_unicode(ex["url"]), + title=text_encoder.to_unicode(ex["title"]), + sections=sections) + + +def _token_counts(text, token_set=None): + counts = collections.defaultdict(int) + for token in tokenizer.encode(text_encoder.native_to_unicode(text)): + if token_set and token not in token_set: + continue + counts[token] += 1 + return counts + + +def _normalize_text(text): + text = text.lower() + # Space around punctuation + text = 
re.sub("[%s]" % re.escape(string.punctuation), r" \g<0> ", text) + text = re.sub(r"\s+", " ", text) + text = text.strip() + return text + + +def _tokens_to_score(tokens): + return {t for t in tokens if re.search("[a-z0-9]", t)} + + +def rank_reference_paragraphs(wiki_title, references_content, normalize=True): + """Rank and return reference paragraphs by tf-idf score on title tokens.""" + normalized_title = _normalize_text(wiki_title) + title_tokens = _tokens_to_score( + set(tokenizer.encode(text_encoder.native_to_unicode(normalized_title)))) + ref_paragraph_info = [] + doc_counts = collections.defaultdict(int) + for ref in references_content: + for paragraph in ref.split("\n"): + normalized_paragraph = _normalize_text(paragraph) + if cc_utils.filter_paragraph(normalized_paragraph): + # Skip paragraph + continue + counts = _token_counts(normalized_paragraph, title_tokens) + for token in title_tokens: + if counts[token]: + doc_counts[token] += 1 + content = normalized_paragraph if normalize else paragraph + info = {"content": content, "counts": counts} + ref_paragraph_info.append(info) + + for info in ref_paragraph_info: + score = 0. + for token in title_tokens: + term_frequency = info["counts"][token] + inv_doc_frequency = ( + float(len(ref_paragraph_info)) / max(doc_counts[token], 1)) + score += term_frequency * math.log(inv_doc_frequency) + info["score"] = score + + ref_paragraph_info.sort(key=lambda el: el["score"], reverse=True) + return [info["content"] for info in ref_paragraph_info] + + +def produce_examples(shard_ids, wikis_dir, refs_dir, urls_dir, vocab_path, + out_filepaths): + """Produce examples from shard_ids to out_filepaths.""" + # * Join the Wikipedia articles with their references + # * Run Tf-idf to sort reference paragraphs + # * Encode the Wikipedia and reference text with the vocabulary + # * Write out TFRecords of tensorflow.Example + tf.logging.info("Processing %d input shards into %d output files.", + len(shard_ids), len(out_filepaths)) + + vocab = text_encoder.SubwordTextEncoder(vocab_path) + eot_ids = vocab.encode(EOT) + + def example_generator(): + """Generate Example dicts.""" + stats = dict(total_original_wikis=0, total_original_refs=0, + total_found_refs=0, ref_lengths=[], wiki_original_refs=[], + wiki_found_refs=[], wikis_skipped_no_refs=0, + wikis_skipped_short_lead=0, num_wikis_written=0) + ref_files_by_shard = _references_files_by_shard(refs_dir) + for shard_id in shard_ids: + tf.logging.info("Processing shard %d", shard_id) + wiki_urls = _wiki_urls_for_shard(shard_id, urls_dir) + tf.logging.info("Loaded wiki URLs for shard") + refs_content = _references_content(ref_files_by_shard[shard_id]) + tf.logging.info("Loaded reference content for shard") + for i, wiki in enumerate(_wiki_articles(shard_id, wikis_dir)): + if not i % 1000: + tf.logging.info("Processing wiki index %d for shard %d", i, shard_id) + stats["total_original_wikis"] += 1 + + # Get reference content + wiki_ref_content = [] + ref_urls = wiki_urls[wiki.url]["refs"] + stats["total_original_refs"] += len(ref_urls) + stats_wiki_original_refs = len(ref_urls) + stats_wiki_found_refs = 0 + for ref_url in ref_urls: + ref_content = refs_content.get(ref_url) + if not ref_content: + continue + stats["total_found_refs"] += 1 + stats["ref_lengths"].append(len(ref_content)) + stats_wiki_found_refs += 1 + wiki_ref_content.append(ref_content) + + stats["wiki_original_refs"].append(stats_wiki_original_refs) + stats["wiki_found_refs"].append(stats_wiki_found_refs) + if not wiki_ref_content or 
len(wiki_ref_content) < _MIN_REFS: + # No/few refs were found + stats["wikis_skipped_no_refs"] += 1 + continue + + # Rank reference paragraphs with TFIDF + wiki_title = _normalize_text(wiki.title) + ranked_paragraphs = rank_reference_paragraphs(wiki_title, + wiki_ref_content) + + # Construct inputs from Wiki title and references + inputs = [] + inputs.extend(vocab.encode(wiki_title)) + inputs.extend(eot_ids) + for paragraph in ranked_paragraphs: + if len(inputs) >= 1e6: + break + paragraph += " " + inputs.extend(vocab.encode(paragraph)) + + # Construct targets from article sections + targets, section_boundaries = _encode_wiki_sections( + wiki.sections, vocab) + + # Skip if lead section is too short + if (not section_boundaries or + section_boundaries[0] < _MIN_LEADSECTION_TOKENS): + stats["wikis_skipped_short_lead"] += 1 + continue + + inputs.append(text_encoder.EOS_ID) + targets.append(text_encoder.EOS_ID) + + stats["num_wikis_written"] += 1 + yield { + "inputs": inputs, + "targets": targets, + "section_boundaries": section_boundaries, + } + + tf.logging.info("Total: %d, Skipped: %d", + stats["num_wikis_written"], + stats["total_original_wikis"] - stats["num_wikis_written"]) + tf.logging.info("Total refs: %d, Skipped refs: %d", + stats["total_found_refs"], + stats["total_original_refs"] - stats["total_found_refs"]) + stats_fname = os.path.join(os.path.split(out_filepaths[0])[0], + "stats.%d.json" % shard_ids[0]) + with tf.gfile.Open(stats_fname, "w") as f: + f.write(json.dumps(stats)) + + generator_utils.generate_files(example_generator(), out_filepaths) + + +def _format_title(title): + return " == %s == " % title + + +def _encode_wiki_sections(sections, vocab): + """Encodes sections with vocab. Returns ids and section boundaries.""" + ids = [] + section_boundaries = [] + for i, section in enumerate(sections): + if i > 0: + # Skip including article title + ids.extend(vocab.encode(_format_title(_normalize_text(section.title)))) + ids.extend(vocab.encode(_normalize_text(section.text))) + section_boundaries.append(len(ids)) + + return ids, section_boundaries + + +def _process_folders(tmp_dir): + return tf.gfile.Glob(os.path.join(tmp_dir, PROCESS_FOLDER_PREFIX) + "*") + + +def extract_references_from_wets(wet_files, metadata_dir, out_dir, + tmp_dir=None): + """Extract references from WET files into sharded output files.""" + # Setup output files + shard_files = make_ref_shard_files(out_dir) + + num_refs = 0 + for i, wet_file in enumerate(wet_files): + num_refs_in_wet = 0 + tf.logging.info("Processing file %d", i) + + # Read metadata file + metadata_fname = os.path.join( + metadata_dir, os.path.basename(wet_file)) + cc_utils.METADTA_SUFFIX + with tf.gfile.Open(cc_utils.readahead(metadata_fname)) as f: + wet_metadata = json.loads(f.read()) + + if not wet_metadata: + # No references in this WET file + continue + + if wet_file.startswith("http"): + # download + if not tmp_dir: + tmp_dir = tempfile.gettempdir() + record_gen = cc_utils.wet_records_from_url(http://github.com/wet_file,%20tmp_dir) + else: + # local + record_gen = cc_utils.wet_records_from_file_obj( + cc_utils.gzip_memfile(wet_file), take_ownership=True) + + for wet_record in record_gen: + shard_ids = wet_metadata.get(wet_record.url) + if not shard_ids: + # URL not in dataset + continue + + # Serialize and write out + ex = _make_example_from_record(wet_record) + ex_str = ex.SerializeToString() + for shard_id in shard_ids: + shard_files[shard_id].write(ex_str) + num_refs += 1 + num_refs_in_wet += 1 + + tf.logging.info("Wrote 
out %d references for this WET", num_refs_in_wet) + + tf.logging.info("Wrote out %d references total", num_refs) + + # Cleanup + for shard_file in shard_files: + shard_file.close() diff --git a/tensor2tensor/data_generators/wikitext103.py b/tensor2tensor/data_generators/wikitext103.py new file mode 100644 index 000000000..4d8ec3957 --- /dev/null +++ b/tensor2tensor/data_generators/wikitext103.py @@ -0,0 +1,203 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for wikitext-103. + +Wikitext-103: Long term dependency language modeling dataset +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import os +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +def _build_vocab(filename, vocab_dir, vocab_name): + """Reads a file to build a vocabulary. + + Args: + filename: file to read list of words from. + vocab_dir: directory where to save the vocabulary. + vocab_name: vocab file name. + + Returns: + text encoder. + """ + vocab_path = os.path.join(vocab_dir, vocab_name) + if not tf.gfile.Exists(vocab_path): + with tf.gfile.GFile(filename, "r") as f: + data = f.read().split() + counter = collections.Counter(data) + count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) + words, _ = list(zip(*count_pairs)) + encoder = text_encoder.TokenTextEncoder(None, vocab_list=words) + encoder.store_to_file(vocab_path) + else: + encoder = text_encoder.TokenTextEncoder(vocab_path) + return encoder + + +def _maybe_download_corpus(tmp_dir, vocab_type): + """Download and unpack the corpus. + + Args: + tmp_dir: directory containing dataset. + vocab_type: which vocabulary are we using. + + Returns: + The list of names of files. 
+ """ + if vocab_type == text_problems.VocabType.CHARACTER: + + dataset_url = ("https://s3.amazonaws.com/research.metamind.io/wikitext" + "/wikitext-103-raw-v1.zip") + dir_name = "wikitext-103-raw" + else: + dataset_url = ("https://s3.amazonaws.com/research.metamind.io/wikitext" + "/wikitext-103-v1.zip") + dir_name = "wikitext-103" + + fname = os.path.basename(dataset_url) + compressed_filepath = generator_utils.maybe_download(tmp_dir, fname, + dataset_url) + zip_ref = zipfile.ZipFile(compressed_filepath, "r") + zip_ref.extractall(tmp_dir) + zip_ref.close() + + files = os.path.join(tmp_dir, dir_name, "*") + train_file, valid_file, test_file = None, None, None + for f in tf.gfile.Glob(files): + fname = os.path.basename(f) + if "train" in fname: + train_file = f + elif "valid" in fname: + valid_file = f + elif "test" in fname: + test_file = f + + assert train_file, "Training file not found" + assert valid_file, "Validation file not found" + assert test_file, "Testing file not found" + + return train_file, valid_file, test_file + + +@registry.register_problem +class LanguagemodelWikitext103(text_problems.Text2SelfProblem): + """Wikitext103 dataset token-level.""" + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }, { + "split": problem.DatasetSplit.TEST, + "shards": 1, + }] + + @property + def is_generate_per_split(self): + return True + + @property + def vocab_type(self): + return text_problems.VocabType.TOKEN + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + train_file, valid_file, test_file = _maybe_download_corpus( + tmp_dir, self.vocab_type) + + if dataset_split == problem.DatasetSplit.TRAIN: + filepath = train_file + if self.vocab_type == text_problems.VocabType.TOKEN: + _build_vocab(train_file, data_dir, self.vocab_filename) + + elif dataset_split == problem.DatasetSplit.EVAL: + filepath = valid_file + + elif dataset_split == problem.DatasetSplit.TEST: + filepath = test_file + + def _generate_samples(): + with tf.gfile.GFile(filepath, "r") as f: + for line in f: + line = " ".join(line.strip().split()) + if line: + yield {"targets": line} + + return _generate_samples() + + +@registry.register_problem +class LanguagemodelWikitext103Characters(LanguagemodelWikitext103): + """Wikitext-103, character-level.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + +@registry.register_problem +class LanguagemodelWikitext103L4k(LanguagemodelWikitext103): + """Wikitext-103, token-level, with examples up to 4,096 tokens long.""" + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + samples_by_line = super(LanguagemodelWikitext103L4k, + self).generate_samples(data_dir, tmp_dir, + dataset_split) + + def _generate_samples(): + tokens = [] + for sample in samples_by_line: + sample_tokens = sample["targets"].split() + if len(tokens) + len(sample_tokens) < self.sequence_length: + tokens.extend(sample_tokens) + else: + yield {"targets": " ".join(tokens)} + tokens = sample_tokens + + return _generate_samples() + + def max_length(self, model_hparams): + return model_hparams.split_to_length or self.sequence_length + + @property + def sequence_length(self): + """Length of each example (in tokens).""" + return 4096 + + +@registry.register_problem +class LanguagemodelWikitext103L16k(LanguagemodelWikitext103L4k): + """Wikitext-103, token-level, with examples up to 16,384 tokens long.""" + + @property + def 
sequence_length(self): + """Length of each example (in tokens).""" + return 16384 diff --git a/tensor2tensor/data_generators/wmt.py b/tensor2tensor/data_generators/wmt.py deleted file mode 100644 index 0be28ab73..000000000 --- a/tensor2tensor/data_generators/wmt.py +++ /dev/null @@ -1,271 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Data generators for WMT data-sets.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os -import tarfile - -# Dependency imports - -from tensor2tensor.data_generators import generator_utils -from tensor2tensor.data_generators import text_encoder - -import tensorflow as tf - - -def character_generator(source_path, target_path, eos=None): - """Generator for sequence-to-sequence tasks that just uses characters. - - This generator assumes the files at source_path and target_path have - the same number of lines and yields dictionaries of "inputs" and "targets" - where inputs are characters from the source lines converted to integers, - and targets are characters from the target lines, also converted to integers. - - Args: - source_path: path to the file with source sentences. - target_path: path to the file with target sentences. - eos: integer to append at the end of each sequence (default: None). - - Yields: - A dictionary {"inputs": source-line, "targets": target-line} where - the lines are integer lists converted from characters in the file lines. - """ - eos_list = [] if eos is None else [eos] - with tf.gfile.GFile(source_path, mode="r") as source_file: - with tf.gfile.GFile(target_path, mode="r") as target_file: - source, target = source_file.readline(), target_file.readline() - while source and target: - source_ints = [ord(c) for c in source.strip()] + eos_list - target_ints = [ord(c) for c in target.strip()] + eos_list - yield {"inputs": source_ints, "targets": target_ints} - source, target = source_file.readline(), target_file.readline() - - -def token_generator(source_path, target_path, token_vocab, eos=None): - """Generator for sequence-to-sequence tasks that uses tokens. - - This generator assumes the files at source_path and target_path have - the same number of lines and yields dictionaries of "inputs" and "targets" - where inputs are token ids from the " "-split source (and target, resp.) lines - converted to integers using the token_map. - - Args: - source_path: path to the file with source sentences. - target_path: path to the file with target sentences. - token_vocab: text_encoder.TextEncoder object. - eos: integer to append at the end of each sequence (default: None). - - Yields: - A dictionary {"inputs": source-line, "targets": target-line} where - the lines are integer lists converted from tokens in the file lines. 
- """ - eos_list = [] if eos is None else [eos] - with tf.gfile.GFile(source_path, mode="r") as source_file: - with tf.gfile.GFile(target_path, mode="r") as target_file: - source, target = source_file.readline(), target_file.readline() - while source and target: - source_ints = token_vocab.encode(source.strip()) + eos_list - target_ints = token_vocab.encode(target.strip()) + eos_list - yield {"inputs": source_ints, "targets": target_ints} - source, target = source_file.readline(), target_file.readline() - - -def _get_wmt_ende_dataset(directory, filename): - """Extract the WMT en-de corpus `filename` to directory unless it's there.""" - train_path = os.path.join(directory, filename) - if not (tf.gfile.Exists(train_path + ".de") and - tf.gfile.Exists(train_path + ".en")): - # We expect that this file has been downloaded from: - # https://drive.google.com/open?id=0B_bZck-ksdkpM25jRUN2X2UxMm8 and placed - # in `directory`. - corpus_file = os.path.join(directory, "wmt16_en_de.tar.gz") - with tarfile.open(corpus_file, "r:gz") as corpus_tar: - corpus_tar.extractall(directory) - return train_path - - -def ende_bpe_token_generator(tmp_dir, train): - """Instance of token generator for the WMT en->de task, training set.""" - dataset_path = ("train.tok.clean.bpe.32000" - if train else "newstest2013.tok.bpe.32000") - train_path = _get_wmt_ende_dataset(tmp_dir, dataset_path) - token_path = os.path.join(tmp_dir, "vocab.bpe.32000") - token_vocab = text_encoder.TokenTextEncoder(vocab_filename=token_path) - return token_generator(train_path + ".en", train_path + ".de", token_vocab, 1) - - -_ENDE_TRAIN_DATASETS = [ - [ - "http://data.statmt.org/wmt16/translation-task/training-parallel-nc-v11.tgz", # pylint: disable=line-too-long - ("training-parallel-nc-v11/news-commentary-v11.de-en.en", - "training-parallel-nc-v11/news-commentary-v11.de-en.de") - ], - [ - "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz", - ("commoncrawl.de-en.en", "commoncrawl.de-en.de") - ], - [ - "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz", - ("training/europarl-v7.de-en.en", "training/europarl-v7.de-en.de") - ], -] -_ENDE_TEST_DATASETS = [ - [ - "http://data.statmt.org/wmt16/translation-task/dev.tgz", - ("dev/newstest2013.en", "dev/newstest2013.de") - ], -] - -_ENFR_TRAIN_DATASETS = [ - [ - "http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz", - ("commoncrawl.fr-en.en", "commoncrawl.fr-en.fr") - ], - [ - "http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz", - ("training/europarl-v7.fr-en.en", "training/europarl-v7.fr-en.fr") - ], - [ - "http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz", - ("training/news-commentary-v9.fr-en.en", - "training/news-commentary-v9.fr-en.fr") - ], - [ - "http://www.statmt.org/wmt10/training-giga-fren.tar", - ("giga-fren.release2.fixed.en.gz", "giga-fren.release2.fixed.fr.gz") - ], - [ - "http://www.statmt.org/wmt13/training-parallel-un.tgz", - ("un/undoc.2000.fr-en.en", "un/undoc.2000.fr-en.fr") - ], -] -_ENFR_TEST_DATASETS = [ - [ - "http://data.statmt.org/wmt16/translation-task/dev.tgz", - ("dev/newstest2013.en", "dev/newstest2013.fr") - ], -] - - -def _compile_data(tmp_dir, datasets, filename): - """Concatenate all `datasets` and save to `filename`.""" - filename = os.path.join(tmp_dir, filename) - lang1_lines, lang2_lines = [], [] - for dataset in datasets: - url = dataset[0] - compressed_filename = os.path.basename(url) - compressed_filepath = 
os.path.join(tmp_dir, compressed_filename) - - lang1_filename, lang2_filename = dataset[1] - lang1_filepath = os.path.join(tmp_dir, lang1_filename) - lang2_filepath = os.path.join(tmp_dir, lang2_filename) - - if not os.path.exists(compressed_filepath): - generator_utils.maybe_download(tmp_dir, compressed_filename, url) - if not os.path.exists(lang1_filepath) or not os.path.exists(lang2_filepath): - mode = "r:gz" if "gz" in compressed_filepath else "r" - with tarfile.open(compressed_filepath, mode) as corpus_tar: - corpus_tar.extractall(tmp_dir) - if ".gz" in lang1_filepath: - new_filepath = lang1_filepath.strip(".gz") - generator_utils.gunzip_file(lang1_filepath, new_filepath) - lang1_filepath = new_filepath - if ".gz" in lang2_filepath: - new_filepath = lang2_filepath.strip(".gz") - generator_utils.gunzip_file(lang2_filepath, new_filepath) - lang2_filepath = new_filepath - with tf.gfile.GFile(lang1_filepath, mode="r") as lang1_file: - with tf.gfile.GFile(lang2_filepath, mode="r") as lang2_file: - lang1_file_lines = lang1_file.readlines() - lang2_file_lines = lang2_file.readlines() - assert len(lang1_file_lines) == len(lang2_file_lines), lang1_filepath - lang1_lines.extend(lang1_file_lines) - lang2_lines.extend(lang2_file_lines) - - write_chunk_size = 10000 - assert len(lang1_lines) == len(lang2_lines) - with tf.gfile.GFile(filename + ".lang1", mode="w") as lang1_file: - i = 0 - while i <= len(lang1_lines): - for line in lang1_lines[i * write_chunk_size:(i + 1) * write_chunk_size]: - lang1_file.write(line) - i += 1 - for line in lang1_lines[i * write_chunk_size:]: - lang1_file.write(line) - with tf.gfile.GFile(filename + ".lang2", mode="w") as lang2_file: - i = 0 - while i <= len(lang2_lines): - for line in lang2_lines[i * write_chunk_size:(i + 1) * write_chunk_size]: - lang2_file.write(line) - i += 1 - for line in lang2_lines[i * write_chunk_size:]: - lang2_file.write(line) - return filename - - -def ende_wordpiece_token_generator(tmp_dir, train, vocab_size): - symbolizer_vocab = generator_utils.get_or_generate_vocab( - tmp_dir, "tokens.vocab.%d" % vocab_size, vocab_size) - datasets = _ENDE_TRAIN_DATASETS if train else _ENDE_TEST_DATASETS - tag = "train" if train else "dev" - data_path = _compile_data(tmp_dir, datasets, "wmt_ende_tok_%s" % tag) - return token_generator(data_path + ".lang1", data_path + ".lang2", - symbolizer_vocab, 1) - - -def ende_character_generator(tmp_dir, train): - datasets = _ENDE_TRAIN_DATASETS if train else _ENDE_TEST_DATASETS - tag = "train" if train else "dev" - data_path = _compile_data(tmp_dir, datasets, "wmt_ende_chr_%s" % tag) - return character_generator(data_path + ".lang1", data_path + ".lang2", 1) - - -def enfr_wordpiece_token_generator(tmp_dir, train, vocab_size): - """Instance of token generator for the WMT en->fr task.""" - symbolizer_vocab = generator_utils.get_or_generate_vocab( - tmp_dir, "tokens.vocab.%d" % vocab_size, vocab_size) - datasets = _ENFR_TRAIN_DATASETS if train else _ENFR_TEST_DATASETS - tag = "train" if train else "dev" - data_path = _compile_data(tmp_dir, datasets, "wmt_enfr_tok_%s" % tag) - return token_generator(data_path + ".lang1", data_path + ".lang2", - symbolizer_vocab, 1) - - -def enfr_character_generator(tmp_dir, train): - """Instance of character generator for the WMT en->fr task.""" - datasets = _ENFR_TRAIN_DATASETS if train else _ENFR_TEST_DATASETS - tag = "train" if train else "dev" - data_path = _compile_data(tmp_dir, datasets, "wmt_enfr_chr_%s" % tag) - return character_generator(data_path + ".lang1", data_path + 
".lang2", 1) - - -def parsing_character_generator(tmp_dir, train): - filename = "parsing_%s" % ("train" if train else "dev") - text_filepath = os.path.join(tmp_dir, filename + ".text") - tags_filepath = os.path.join(tmp_dir, filename + ".tags") - return character_generator(text_filepath, tags_filepath, 1) - - -def parsing_token_generator(tmp_dir, train, vocab_size): - symbolizer_vocab = generator_utils.get_or_generate_vocab( - tmp_dir, "tokens.vocab.%d" % vocab_size, vocab_size) - filename = "parsing_%s" % ("train" if train else "dev") - text_filepath = os.path.join(tmp_dir, filename + ".text") - tags_filepath = os.path.join(tmp_dir, filename + ".tags") - return token_generator(text_filepath, tags_filepath, symbolizer_vocab, 1) diff --git a/tensor2tensor/data_generators/wmt_test.py b/tensor2tensor/data_generators/wmt_test.py deleted file mode 100644 index 7121e3d8a..000000000 --- a/tensor2tensor/data_generators/wmt_test.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""WMT generators test.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import io -import os -import tempfile - -# Dependency imports - -import six -from tensor2tensor.data_generators import wmt - -import tensorflow as tf - - -class WMTTest(tf.test.TestCase): - - def testCharacterGenerator(self): - # Generate a trivial source and target file. - tmp_dir = self.get_temp_dir() - (_, tmp_file_path) = tempfile.mkstemp(dir=tmp_dir) - with io.open(tmp_file_path + ".src", "wb") as src_file: - src_file.write("source1\n") - src_file.write("source2\n") - with io.open(tmp_file_path + ".tgt", "wb") as tgt_file: - tgt_file.write("target1\n") - tgt_file.write("target2\n") - - # Call character generator on the generated files. - results_src, results_tgt = [], [] - for dictionary in wmt.character_generator(tmp_file_path + ".src", - tmp_file_path + ".tgt"): - self.assertEqual(sorted(list(dictionary)), ["inputs", "targets"]) - results_src.append(dictionary["inputs"]) - results_tgt.append(dictionary["targets"]) - - # Check that the results match the files. - self.assertEqual(len(results_src), 2) - self.assertEqual("".join([six.int2byte(i) - for i in results_src[0]]), "source1") - self.assertEqual("".join([six.int2byte(i) - for i in results_src[1]]), "source2") - self.assertEqual("".join([six.int2byte(i) - for i in results_tgt[0]]), "target1") - self.assertEqual("".join([six.int2byte(i) - for i in results_tgt[1]]), "target2") - - # Clean up. - os.remove(tmp_file_path + ".src") - os.remove(tmp_file_path + ".tgt") - os.remove(tmp_file_path) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensor2tensor/data_generators/wnli.py b/tensor2tensor/data_generators/wnli.py new file mode 100644 index 000000000..b56746ba0 --- /dev/null +++ b/tensor2tensor/data_generators/wnli.py @@ -0,0 +1,119 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Data generators for the Winograd NLI dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import zipfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + +EOS = text_encoder.EOS + + +@registry.register_problem +class WinogradNLI(text_problems.TextConcat2ClassProblem): + """Winograd NLI classification problems.""" + + # Link to data from GLUE: https://gluebenchmark.com/tasks + _WNLI_URL = ("https://firebasestorage.googleapis.com/v0/b/" + "mtl-sentence-representations.appspot.com/o/" + "data%2FWNLI.zip?alt=media&token=068ad0a0-ded7-" + "4bd7-99a5-5e00222e0faf") + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 1, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2**13 # 8k vocab suffices for this small dataset. + + @property + def vocab_filename(self): + return "vocab.wnli.%d" % self.approx_vocab_size + + @property + def num_classes(self): + return 2 + + def class_labels(self, data_dir): + del data_dir + # Note this binary classification is different from usual MNLI. 
+ return ["not_entailment", "entailment"] + + def _maybe_download_corpora(self, tmp_dir): + wnli_filename = "WNLI.zip" + wnli_finalpath = os.path.join(tmp_dir, "WNLI") + if not tf.gfile.Exists(wnli_finalpath): + zip_filepath = generator_utils.maybe_download( + tmp_dir, wnli_filename, self._WNLI_URL) + zip_ref = zipfile.ZipFile(zip_filepath, "r") + zip_ref.extractall(tmp_dir) + zip_ref.close() + + return wnli_finalpath + + def example_generator(self, filename): + for idx, line in enumerate(tf.gfile.Open(filename, "rb")): + if idx == 0: continue # skip header + line = text_encoder.to_unicode_utf8(line.strip()) + _, s1, s2, l = line.split("\t") + inputs = [s1, s2] + yield { + "inputs": inputs, + "label": int(l) + } + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + wnli_dir = self._maybe_download_corpora(tmp_dir) + if dataset_split == problem.DatasetSplit.TRAIN: + filesplit = "train.tsv" + else: + filesplit = "dev.tsv" + + filename = os.path.join(wnli_dir, filesplit) + for example in self.example_generator(filename): + yield example + + +@registry.register_problem +class WinogradNLICharacters(WinogradNLI): + """Winograd NLI classification problems, character level""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.EN_NLI diff --git a/tensor2tensor/data_generators/wsj_parsing.py b/tensor2tensor/data_generators/wsj_parsing.py index a2dda4d9d..86107e0a8 100644 --- a/tensor2tensor/data_generators/wsj_parsing.py +++ b/tensor2tensor/data_generators/wsj_parsing.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,13 +15,66 @@ """Data generators for parsing data-sets.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + import os -# Dependency imports +from absl import flags +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + +flags.DEFINE_string("parsing_path", "", "Path to parsing files in tmp_dir.") + -from tensor2tensor.data_generators import generator_utils +FLAGS = flags.FLAGS + + +@registry.register_problem +class WsjParsing(text_problems.Text2textTmpdir): + """Generate vocabulary and training data for parsing. 
+ """ -import tensorflow as tf + # These files are used for vocab generation + TRAIN_FILES = ("wsj.train.text.txt", "wsj.train.tags.txt") + + # These files are used for generating encoded samples + TRAIN_FILES_TREE = "wsjTrain.trees" + EVAL_FILES_TREE = "wsjEval.trees" + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + del data_dir + is_training = dataset_split == problem.DatasetSplit.TRAIN + tree_file = self.TRAIN_FILES_TREE if is_training else self.EVAL_FILES_TREE + tree_file_path = os.path.join(tmp_dir, tree_file) + with tf.gfile.GFile(tree_file_path, mode="r") as cur_tree_file: + for line in cur_tree_file: + (words, tags) = words_and_tags_from_wsj_tree(line) + yield {"inputs": words, "targets": tags} + + def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split): + generator = self.generate_samples(data_dir, tmp_dir, dataset_split) + encoder = self.get_or_create_vocab(data_dir, tmp_dir) + return text_problems.text2text_generate_encoded(generator, encoder, + has_inputs=self.has_inputs) + + def generate_text_for_vocab(self, data_dir, tmp_dir): + files = [os.path.join(tmp_dir, f) for f in self.TRAIN_FILES] + inputs_file, targets_file = files + for i, sample in enumerate(text_problems.text2text_txt_iterator(inputs_file, + targets_file + )): + yield sample["inputs"] + yield sample["targets"] + if self.max_samples_for_vocab and (i + 1) >= self.max_samples_for_vocab: + break + + @property + def max_samples_for_vocab(self): + return 1000 def words_and_tags_from_wsj_tree(tree_string): @@ -56,11 +110,11 @@ def token_generator(tree_path, source_token_vocab, target_token_vocab, This generator assumes the files at source_path and target_path have the same number of lines and yields dictionaries of "inputs" and "targets" - where inputs and targets are token ids from source and taret lines + where inputs and targets are token ids from source and target lines converted to integers using the token_map. Args: - tree_path: path to the file with wsj format trees, one per line. + tree_path: path to the file with WSJ format trees, one per line. source_token_vocab: GenericVocabulary object for source vocabulary. target_token_vocab: GenericVocabulary object for target vocabulary. eos: integer to append at the end of each sequence (default: None). @@ -80,30 +134,34 @@ def token_generator(tree_path, source_token_vocab, target_token_vocab, tree_line = tree_file.readline() -def parsing_token_generator(tmp_dir, train, source_vocab_size, +def parsing_token_generator(data_dir, tmp_dir, train, source_vocab_size, target_vocab_size): """Generator for parsing as a sequence-to-sequence task that uses tokens. - This generator assumes the files parsing_{train,dev}.wsj, which contain trees - in wsj format and wsj_{source,target}.tokens.vocab.<vocab_size> exist in - tmp_dir. + This generator assumes the files parsing_{train,dev}.trees, which contain + trees in WSJ format. Args: - tmp_dir: path to the file with source sentences. - train: path to the file with target sentences. + data_dir: path to the data directory. + tmp_dir: path to temporary storage directory. + train: whether we're training or not. source_vocab_size: source vocab size. target_vocab_size: target vocab size. Returns: A generator to a dictionary of inputs and outputs. 
""" - source_symbolizer_vocab = generator_utils.get_or_generate_vocab( - tmp_dir, "wsj_source.tokens.vocab.%d" % source_vocab_size, - source_vocab_size) - target_symbolizer_vocab = generator_utils.get_or_generate_vocab( - tmp_dir, "wsj_target.tokens.vocab.%d" % target_vocab_size, - target_vocab_size) - filename = "parsing_%s.trees" % ("train" if train else "dev") - tree_filepath = os.path.join(tmp_dir, filename) - return token_generator(tree_filepath, source_symbolizer_vocab, - target_symbolizer_vocab, 1) + # TODO(lukaszkaiser): Correct these calls to generate vocabularies. No data + # sources are being passed. + del (data_dir, tmp_dir, train, source_vocab_size, target_vocab_size) + assert False, "Vocabulary generation not implemented" + # source_symbolizer_vocab = generator_utils.get_or_generate_vocab( + # data_dir, tmp_dir, "wsj_source.vocab.%d" % source_vocab_size, + # source_vocab_size) + # target_symbolizer_vocab = generator_utils.get_or_generate_vocab( + # data_dir, tmp_dir, "wsj_target.vocab.%d" % target_vocab_size, + # target_vocab_size) + # filename = "%s_%s.trees" % (FLAGS.parsing_path, "train" if train else "dev") + # tree_filepath = os.path.join(tmp_dir, filename) + # return token_generator(tree_filepath, source_symbolizer_vocab, + # target_symbolizer_vocab, 1) diff --git a/tensor2tensor/data_generators/yelp_full.py b/tensor2tensor/data_generators/yelp_full.py new file mode 100644 index 000000000..5606fd745 --- /dev/null +++ b/tensor2tensor/data_generators/yelp_full.py @@ -0,0 +1,106 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Yelp dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tarfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_problem +class SentimentYelpFull(text_problems.Text2ClassProblem): + """Yelp dataset.""" + URL = "https://s3.amazonaws.com/fast-ai-nlp/yelp_review_full_csv.tgz" + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2**13 # 8k vocab suffices for this small dataset. 
+ + @property + def num_classes(self): + return 5 + + def class_labels(self, data_dir): + del data_dir + return ["1", "2", "3", "4", "5"] + + def doc_generator(self, yelp_dir, dataset, include_label=False): + + file_path = os.path.join(yelp_dir, dataset + ".csv") + with tf.gfile.Open(file_path) as yelp_f: + lines = yelp_f.readlines() + for line in lines: + label = line[1] + doc = line[5:-2].strip() + if include_label: + yield doc, label + else: + yield doc + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generate examples.""" + # Download and extract + compressed_filename = os.path.basename(self.URL) + download_path = generator_utils.maybe_download(tmp_dir, compressed_filename, + self.URL) + yelp_dir = os.path.join(tmp_dir, "yelp_review_full_csv") + if not tf.gfile.Exists(yelp_dir): + with tarfile.open(download_path, "r:gz") as tar: + tar.extractall(tmp_dir) + + # Generate examples + train = dataset_split == problem.DatasetSplit.TRAIN + dataset = "train" if train else "test" + for doc, label in self.doc_generator(yelp_dir, dataset, include_label=True): + yield { + "inputs": doc, + "label": int(label), + } + + +@registry.register_problem +class SentimentYelpFullCharacters(SentimentYelpFull): + """Yelp dataset, character level.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.EN_CHR_SENT diff --git a/tensor2tensor/data_generators/yelp_polarity.py b/tensor2tensor/data_generators/yelp_polarity.py new file mode 100644 index 000000000..e108d6ce8 --- /dev/null +++ b/tensor2tensor/data_generators/yelp_polarity.py @@ -0,0 +1,106 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Yelp dataset.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tarfile +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_problem +class SentimentYelpPolarity(text_problems.Text2ClassProblem): + """Yelp dataset.""" + URL = "https://s3.amazonaws.com/fast-ai-nlp/yelp_review_polarity_csv.tgz" + + @property + def is_generate_per_split(self): + return True + + @property + def dataset_splits(self): + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 10, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + @property + def approx_vocab_size(self): + return 2**13 # 8k vocab suffices for this small dataset. 
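`SentimentYelpFull.doc_generator` above (and the identical method in `SentimentYelpPolarity` below) parses each raw CSV line positionally rather than with a CSV reader. A small sketch of what that slicing does on an invented line (illustration only, not taken from the dataset):

```python
# Each CSV row has the form: "<label>","<review text>"\n
# line[1] picks out the label digit; line[5:-2] drops the leading
# '"<label>","' prefix and the trailing '"' plus newline.
line = '"2","Crust is not good."\n'
label = line[1]           # -> "2"
doc = line[5:-2].strip()  # -> "Crust is not good."
assert label == "2"
assert doc == "Crust is not good."
```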
+ + @property + def num_classes(self): + return 2 + + def class_labels(self, data_dir): + del data_dir + return ["1", "2"] + + def doc_generator(self, yelp_dir, dataset, include_label=False): + + file_path = os.path.join(yelp_dir, dataset + ".csv") + with tf.gfile.Open(file_path) as yelp_f: + lines = yelp_f.readlines() + for line in lines: + label = line[1] + doc = line[5:-2].strip() + if include_label: + yield doc, label + else: + yield doc + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + """Generate examples.""" + # Download and extract + compressed_filename = os.path.basename(self.URL) + download_path = generator_utils.maybe_download(tmp_dir, compressed_filename, + self.URL) + yelp_dir = os.path.join(tmp_dir, "yelp_review_polarity_csv") + if not tf.gfile.Exists(yelp_dir): + with tarfile.open(download_path, "r:gz") as tar: + tar.extractall(tmp_dir) + + # Generate examples + train = dataset_split == problem.DatasetSplit.TRAIN + dataset = "train" if train else "test" + for doc, label in self.doc_generator(yelp_dir, dataset, include_label=True): + yield { + "inputs": doc, + "label": int(label), + } + + +@registry.register_problem +class SentimentYelpPolarityCharacters(SentimentYelpPolarity): + """Yelp dataset, character level.""" + + @property + def vocab_type(self): + return text_problems.VocabType.CHARACTER + + def global_task_id(self): + return problem.TaskID.EN_CHR_SENT diff --git a/tensor2tensor/docs/distributed_training.md b/tensor2tensor/docs/distributed_training.md deleted file mode 100644 index be3726f06..000000000 --- a/tensor2tensor/docs/distributed_training.md +++ /dev/null @@ -1,68 +0,0 @@ -# Distributed Training - -The `t2t-trainer` supports both synchronous and asynchronous distributed -training. - -T2T uses TensorFlow Estimators and so distributed training is configured with -the `TF_CONFIG` environment variable that is read by the -[RunConfig](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/learn/python/learn/estimators/run_config.py) -along with a set of flags. - -## `TF_CONFIG` - -Both workers and parameter servers must have the `TF_CONFIG` environment -variable set. - -The `TF_CONFIG` environment variable is a json-encoded string with the addresses -of the workers and parameter servers (in the `'cluster'` key) and the -identification of the current task (in the `'task'` key). - -For example: - -``` -cluster = { - 'ps': ['host1:2222', 'host2:2222'], - 'worker': ['host3:2222', 'host4:2222', 'host5:2222'] -} -os.environ['TF_CONFIG'] = json.dumps({ - 'cluster': cluster, - 'task': {'type': 'worker', 'index': 1} -}) -``` - -## Command-line flags - -The following T2T command-line flags must also be set on the workers for -distributed training: - -- `--master=$ADDRESS` -- `--worker_replicas=$NUM_WORKERS` -- `--worker_gpu=$NUM_GPUS_PER_WORKER` -- `--worker_id=$WORKER_ID` -- `--ps_replicas=$NUM_PS` -- `--ps_gpu=$NUM_GPUS_PER_PS` -- `--schedule=train` -- `--sync`, if you want synchronous training, i.e. for there to be a single - master worker coordinating the work across "ps" jobs (yes, the naming is - unfortunate). If not set, then each worker operates independently while - variables are shared on the parameter servers. - -Parameter servers only need `--schedule=run_std_server`. 
- -## Utility to produce `TF_CONFIG` and flags - -[`bin/make_tf_configs.py`](https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/bin/make_tf_configs.py)) -generates the `TF_CONFIG` json strings and the above-mentioned command-line -flags for the workers and parameter servers. - -## Command-line flags for eval jobs - -Eval jobs should set the following flags and do not need the `TF_CONFIG` -environment variable to be set as the eval jobs run locally and do not -communicate to the other jobs (the eval jobs read the model checkpoints that the -trainer writes out): - -- `--schedule=continuous_eval_on_train_data` or - `--schedule=continuous_eval` (for test data) -- `--worker_job='/job:localhost'` -- `--output_dir=$TRAIN_DIR` diff --git a/tensor2tensor/envs/__init__.py b/tensor2tensor/envs/__init__.py new file mode 100644 index 000000000..9833a8ba3 --- /dev/null +++ b/tensor2tensor/envs/__init__.py @@ -0,0 +1,24 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Environments defined in T2T. Imports here force registration.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.envs import gym_env_problem +from tensor2tensor.envs import tic_tac_toe_env +from tensor2tensor.envs import tic_tac_toe_env_problem diff --git a/tensor2tensor/envs/env_problem.py b/tensor2tensor/envs/env_problem.py new file mode 100644 index 000000000..4d25e9459 --- /dev/null +++ b/tensor2tensor/envs/env_problem.py @@ -0,0 +1,654 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base class for envs that store their history. + +EnvProblem subclasses Problem and also implements the Gym interface (step, +reset, render, close, seed) +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl import logging +from gym.core import Env +import numpy as np +import six +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.envs import gym_spaces_utils +from tensor2tensor.envs import trajectory +from tensor2tensor.layers import modalities +from tensor2tensor.utils import contrib +import tensorflow.compat.v1 as tf + +# Names for data fields in stored tf.Examples. 
+TIMESTEP_FIELD = "timestep" +ACTION_FIELD = "action" +RAW_REWARD_FIELD = "raw_reward" +PROCESSED_REWARD_FIELD = "reward" +DONE_FIELD = "done" +OBSERVATION_FIELD = "observation" + + +class EnvProblem(Env, problem.Problem): + """Base class of an env which generates data like a problem class. + + EnvProblem is both a gym Env and a Problem, since it subclasses both. + + Conceptually it contains `batch_size` environments on which step (and reset) + are called. The data that is generated by the repeated application of step and + reset is stored within this class and is persisted on disk when we call + `generate_data` on it. + + Subclasses *should* override the following functions: + - initialize_environments + - observation_space + - action_space + - reward_range + - _reset + - _step + - _render + + In addition, they should ovveride the following functions, which are used in + the `hparams` function to return modalities and vocab_sizes. + - input_modality + - input_vocab_size + - target_modality + - target_vocab_size + - action_modality + - reward_modality + + NON NATIVELY BATCHED ENVS: + + The implementation for cases where the env is not batched by default is + `gym_env_problem.GymEnvProblem`. + + NATIVELY BATCHED ENVS: + + If however, our env is a neural network, which can be batched by default, we + should: + + # 1 - Give it a gym style interface, by overriding observation_space and + action_space. + + # 2 - Override `_reset` and `_step` to do the reset and step in a natively + batched manner. + + # 3 - More generally any function that iterates over the self._env list will + need to be overridden, ex: `_verify_same_spaces` and `initialize_environments` + + KNOWN LIMITATIONS: + + - observation_space and action_space should be subclasses of gym.spaces + - not all subclasses of gym.spaces are supported + + """ + + def __init__(self, + batch_size=None, + discrete_rewards=True, + parallelism=1, + **env_kwargs): + """Initializes this class by creating the envs and managing trajectories. + + Args: + batch_size: (int or None) How many envs to make in the non natively + batched mode. + discrete_rewards: (bool) whether to round the rewards to the nearest + integer. + parallelism: (int) If this is greater than one then we run the envs in + parallel using multi-threading. + **env_kwargs: (dict) Additional kwargs to pass to the environments. + """ + + # Call the super's ctor. + problem.Problem.__init__(self, was_reversed=False, was_copy=False) + + # An env generates data when it is given actions by an agent which is either + # a policy or a human -- this is supposed to be the `id` of the agent. + # + # In practice, this is used only to store (and possibly retrieve) history + # to an appropriate directory. + self._agent_id = "default" + + # If set, we discretize the rewards and treat them as integers. + self._discrete_rewards = discrete_rewards + + # A data structure to hold the `batch_size` currently active trajectories + # and also the ones that are completed, i.e. done. + self._trajectories = None + + self._batch_size = None + + self._parallelism = None + # The parallelism is passes in via env_kwargs because it will be used by + # `GymEnvProblem` to paralellize env actions across a batch. + env_kwargs["parallelism"] = parallelism + + if batch_size is not None: + self.initialize(batch_size=batch_size, **env_kwargs) + + @property + def batch_size(self): + # TODO(afrozm): I've added this here since it is being used in a lot of + # places in ppo_learner.py -- re-evaluate if needed. 
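To make the natively batched case described above concrete, here is a minimal hypothetical sketch (not part of the library) of a subclass whose whole batch of state is one NumPy array; the class name and its dynamics are invented purely for illustration.

```python
import numpy as np
from gym import spaces

from tensor2tensor.envs import env_problem


class ToyBatchedEnvProblem(env_problem.EnvProblem):
  """Hypothetical natively batched env: one float of state per batch element."""

  def initialize_environments(self, batch_size=1, **kwargs):
    # The whole batch lives in a single array; no per-env objects are needed.
    self._state = np.zeros(batch_size, dtype=np.float32)

  @property
  def observation_space(self):
    return spaces.Box(low=-2.0, high=2.0, shape=(), dtype=np.float32)

  @property
  def action_space(self):
    return spaces.Discrete(2)

  @property
  def reward_range(self):
    return (-1, 1)

  def _reset(self, indices):
    # Vectorized reset of only the requested batch rows.
    self._state[indices] = 0.0
    return self._state[indices].copy()

  def _step(self, actions):
    # Vectorized step over the whole batch: action 1 moves up, 0 moves down.
    self._state += np.where(actions == 1, 0.1, -0.1).astype(np.float32)
    rewards = np.sign(self._state)
    dones = np.abs(self._state) >= 1.0
    infos = [{} for _ in actions]
    return self._state.copy(), rewards, dones, infos
```

Such a subclass is then driven like any other `EnvProblem`, e.g. `env = ToyBatchedEnvProblem(batch_size=4)`, `env.reset()`, `env.step(np.array([1, 0, 1, 1]))`.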
+ return self._batch_size + + @property + def trajectories(self): + return self._trajectories + + @trajectories.setter + def trajectories(self, trajectories_): + assert self.trajectories.batch_size == trajectories_.batch_size + self._trajectories = trajectories_ + + def initialize(self, batch_size=1, **kwargs): + self.initialize_environments(batch_size=batch_size, **kwargs) + + self._batch_size = batch_size + + # This data structure stores the history of each env. + # + # NOTE: Even if the env is a NN and can step in all batches concurrently, it + # is still valuable to store the trajectories separately. + self._trajectories = trajectory.BatchTrajectory(batch_size=batch_size) + + # Assert that *all* the above are now set, we should do this since + # subclasses can override `initialize_environments`. + self.assert_common_preconditions() + assert self.observation_space is not None + assert self.action_space is not None + assert self.reward_range is not None + + def initialize_environments(self, batch_size=1, parallelism=1, **kwargs): + """Initializes the environments. + + Args: + batch_size: (int) Number of envs to initialize. + parallelism: (int) If this is greater than one then we allow the + implementation to use multi-threading to step the envs. + **kwargs: (dict) Any additional args needed to initialize the envs. + """ + raise NotImplementedError + + def assert_common_preconditions(self): + pass + + @property + def observation_space(self): + raise NotImplementedError + + @property + def observation_spec(self): + """The spec for reading an observation stored in a tf.Example.""" + return gym_spaces_utils.gym_space_spec(self.observation_space) + + def process_observations(self, observations): + """Processes observations prior to saving in the trajectories. + + Args: + observations: (np.ndarray) observations to be processed. + + Returns: + processed observation + + """ + return observations + + @property + def action_space(self): + raise NotImplementedError + + @property + def action_spec(self): + """The spec for reading an observation stored in a tf.Example.""" + return gym_spaces_utils.gym_space_spec(self.action_space) + + @property + def action_modality(self): + raise NotImplementedError + + @property + def num_actions(self): + """Returns the number of actions in a discrete action space.""" + return gym_spaces_utils.cardinality(self.action_space) + + @property + def reward_range(self): + # We clip rewards to this range before processing them further, as described + # in `process_rewards`. + raise NotImplementedError + + @property + def is_reward_range_finite(self): + min_reward, max_reward = self.reward_range + return (min_reward != -np.inf) and (max_reward != np.inf) + + @property + def discrete_rewards(self): + return self._discrete_rewards + + def process_rewards(self, rewards): + """Clips the rewards, optionally rounds them and casts to integer. + + Args: + rewards: numpy array of raw (float) rewards. + + Returns: + processed_rewards: numpy array of np.int64 + """ + + min_reward, max_reward = self.reward_range + + # Clips at min and max reward. + rewards = np.clip(rewards, min_reward, max_reward) + + if self._discrete_rewards: + # Round to (nearest) int and convert to integral type. + rewards = np.around(rewards, decimals=0).astype(np.int64) + return rewards + + @property + def is_processed_rewards_discrete(self): + """Returns true if `self.process_rewards` returns discrete rewards.""" + + # Subclasses can override, but it should match their self.process_rewards. 
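A small worked example of the reward processing above, assuming `reward_range == (-1, 1)` and `discrete_rewards == True`; the rounded `np.int64` output is also what the dtype check in `is_processed_rewards_discrete` relies on.

```python
import numpy as np

# What process_rewards effectively computes under the assumptions above.
raw = np.array([2.3, -0.4, 0.6], dtype=np.float32)
clipped = np.clip(raw, -1, 1)                                # [ 1. , -0.4,  0.6]
processed = np.around(clipped, decimals=0).astype(np.int64)  # [ 1,  0,  1]
```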
+ + # This check is a little hackily. + return self.process_rewards(0.0).dtype == np.int64 + + @property + def num_rewards(self): + """Returns the number of distinct rewards. + + Returns: + Returns None if the reward range is infinite or the processed rewards + aren't discrete, otherwise returns the number of distinct rewards. + """ + + # Pre-conditions: reward range is finite. + # : processed rewards are discrete. + if not self.is_reward_range_finite: + logging.warn("Infinite reward range, `num_rewards returning None`") + return None + if not self.is_processed_rewards_discrete: + logging.warn( + "Processed rewards are not discrete, `num_rewards` returning None") + return None + + min_reward, max_reward = self.reward_range + return max_reward - min_reward + 1 + + @property + def input_modality(self): + raise NotImplementedError + + @property + def reward_modality(self): + raise NotImplementedError + + @property + def input_vocab_size(self): + raise NotImplementedError + + @property + def target_modality(self): + raise NotImplementedError + + @property + def target_vocab_size(self): + raise NotImplementedError + + @property + def unwrapped(self): + return self + + def seed(self, seed=None): + return [seed] + + def close(self): + pass + + def _reset(self, indices): + """Resets environments at indices shouldn't pre-process or record. + + Args: + indices: list of indices of underlying envs to call reset on. + + Returns: + np.ndarray of stacked observations from the reset-ed envs. + """ + raise NotImplementedError + + def truncate(self, indices=None, num_to_keep=1): + """Truncates trajectories at the specified indices.""" + + if indices is None: + indices = np.arange(self.batch_size) + + self.trajectories.truncate_trajectories(indices, num_to_keep=num_to_keep) + + def reset(self, indices=None): + """Resets environments at given indices. + + Subclasses should override _reset to do the actual reset if something other + than the default implementation is desired. + + NOTE: With `indices` as None the recorded trajectories are also erased since + the expecation is that we want to re-use the whole env class from + scratch. + + Args: + indices: Indices of environments to reset. If None all envs are reset as + well as trajectories are erased. + + Returns: + Batch of initial observations of reset environments. + """ + + if indices is None: + self.trajectories.reset_batch_trajectories() + indices = np.arange(self.batch_size) + + # If this is empty (not None) then don't do anything, no env was done. + if indices.size == 0: + logging.warning( + "`reset` called with empty indices array, this is a no-op.") + return None + + # Pre-conditions: common_preconditions, see `assert_common_preconditions`. + self.assert_common_preconditions() + + observations = self._reset(indices) + processed_observations = self.process_observations(observations) + + # Record history. + self.trajectories.reset(indices, processed_observations) + + return processed_observations + + def _render(self, indices, mode="human"): + """Renders the environments with the given mode on the specified indices. + + Args: + indices: array of indices. + mode: rendering mode. + + Returns: + a list of return values from the environments rendered. + """ + raise NotImplementedError + + def render(self, indices=None, mode="human"): + """Renders the environments with the given mode on the specified indices. + + Args: + indices: array of indices, calls render on everything if indices is None. + mode: rendering mode. 
+ + Returns: + a list of return values from the environments rendered. + """ + + if indices is None: + indices = np.arange(self.batch_size) + return self._render(indices, mode) + + def _step(self, actions): + """Takes a step in all environments, shouldn't pre-process or record. + + Args: + actions: (np.ndarray) with first dimension equal to the batch size. + + Returns: + a tuple of stacked raw observations, raw rewards, dones and infos. + """ + raise NotImplementedError + + def step(self, actions, infos=None): + """Takes a step in all environments. + + Subclasses should override _step to do the actual reset if something other + than the default implementation is desired. + + Args: + actions: Batch of actions. + infos: (optional) a dictionary of keys and values, where all the values + have the first dimension as batch_size. + + Returns: + (preprocessed_observations, processed_rewards, dones, env_infos). + """ + # Pre-conditions: common_preconditions, see `assert_common_preconditions`. + # : len(actions) == len(self._envs) + self.assert_common_preconditions() + assert self.batch_size == len(actions) + + observations, raw_rewards, dones, env_infos = self._step(actions) + + # Process rewards. + raw_rewards = raw_rewards.astype(np.float32) + processed_rewards = self.process_rewards(raw_rewards) + + # Process observations. + processed_observations = self.process_observations(observations) + + # Record history. + self.trajectories.step(processed_observations, raw_rewards, + processed_rewards, dones, actions, + infos=infos) + + return processed_observations, processed_rewards, dones, env_infos + + def example_reading_spec(self): + """Data fields to store on disk and their decoders.""" + + # Subclasses can override and/or extend. + + processed_reward_type = tf.float32 + if self.is_processed_rewards_discrete: + processed_reward_type = tf.int64 + + data_fields = { + TIMESTEP_FIELD: tf.FixedLenFeature((1,), tf.int64), + RAW_REWARD_FIELD: tf.FixedLenFeature((1,), tf.float32), + PROCESSED_REWARD_FIELD: tf.FixedLenFeature((1,), processed_reward_type), + DONE_FIELD: tf.FixedLenFeature((1,), tf.int64), # we wrote this as int. + + # Special treatment because we need to determine type and shape, also + # enables classes to override. + OBSERVATION_FIELD: self.observation_spec, + ACTION_FIELD: self.action_spec, + } + + data_items_to_decoders = { + field: contrib.slim().tfexample_decoder.Tensor(field) + for field in data_fields + } + + return data_fields, data_items_to_decoders + + def hparams(self, defaults, model_hparams): + # Usually when using the environment in a supervised setting, given the + # observation we are predicting the reward. + p = defaults + + # Have to add these the 'proper' way, otherwise __str__ doesn't show them. + if "modality" not in p: + p.add_hparam("modality", {}) + if "vocab_size" not in p: + p.add_hparam("vocab_size", {}) + + # TODO(afrozm): Document what all of these keys are and are supposed to do. 
+ p.modality.update({ + "inputs": self.input_modality, + "targets": self.target_modality, + "input_reward": self.reward_modality, + "target_reward": self.reward_modality, + "input_action": self.action_modality, + "target_action": self.action_modality, + "target_policy": modalities.ModalityType.IDENTITY, + "target_value": modalities.ModalityType.IDENTITY, + }) + + p.vocab_size.update({ + "inputs": self.input_vocab_size, + "targets": self.target_vocab_size, + "input_reward": self.num_rewards, + "target_reward": self.num_rewards, + "input_action": self.num_actions, + "target_action": self.num_actions, + "target_policy": None, + "target_value": None, + }) + + p.input_space_id = problem.SpaceID.GENERIC + p.target_space_id = problem.SpaceID.GENERIC + + @property + def agent_id(self): + return self._agent_id + + @agent_id.setter + def agent_id(self, agent_id): + # Lets us call agent_id with integers that we increment. + agent_id = str(agent_id) + # We use `-` in self.dataset_filename, disallow it here for convenience. + if "-" in agent_id: + raise ValueError("agent_id shouldn't have - in it.") + self._agent_id = agent_id + + def dataset_filename(self): + return "{}-{}".format(self.name, self.agent_id) + + @property + def num_shards(self): + return { + problem.DatasetSplit.TRAIN: 10, + problem.DatasetSplit.EVAL: 1, + } + + def _generate_time_steps(self, trajectory_list): + """A generator to yield single time-steps from a list of trajectories.""" + for single_trajectory in trajectory_list: + assert isinstance(single_trajectory, trajectory.Trajectory) + + # Skip writing trajectories that have only a single time-step -- this + # could just be a repeated reset. + + if single_trajectory.num_time_steps <= 1: + continue + + for index, time_step in enumerate(single_trajectory.time_steps): + + # The first time-step doesn't have reward/processed_reward, if so, just + # setting it to 0.0 / 0 should be OK. + raw_reward = time_step.raw_reward + if not raw_reward: + raw_reward = 0.0 + + processed_reward = time_step.processed_reward + if not processed_reward: + processed_reward = 0 + + action = time_step.action + if action is None: + # The last time-step doesn't have action, and this action shouldn't be + # used, gym's spaces have a `sample` function, so let's just sample an + # action and use that. + action = self.action_space.sample() + action = gym_spaces_utils.gym_space_encode(self.action_space, action) + + if six.PY3: + # py3 complains that, to_example cannot handle np.int64 ! + + action_dtype = self.action_space.dtype + if action_dtype in [np.int64, np.int32]: + action = list(map(int, action)) + elif action_dtype in [np.float64, np.float32]: + action = list(map(float, action)) + + # same with processed_reward. + processed_reward = int(processed_reward) + + assert time_step.observation is not None + + yield { + TIMESTEP_FIELD: [index], + ACTION_FIELD: + action, + # to_example errors on np.float32 + RAW_REWARD_FIELD: [float(raw_reward)], + PROCESSED_REWARD_FIELD: [processed_reward], + # to_example doesn't know bools + DONE_FIELD: [int(time_step.done)], + OBSERVATION_FIELD: + gym_spaces_utils.gym_space_encode(self.observation_space, + time_step.observation), + } + + def generate_data(self, data_dir, tmp_dir, task_id=-1): + # List of files to generate data in. + # NOTE: We don't want to shuffle, so we mark the files as shuffled. 
+ files_list = [] + for split, num_shards in self.num_shards.items(): + files_list.extend(self.data_filepaths(split, data_dir, num_shards, True)) + + # At this point some trajectories haven't finished. However we still want to + # write those down. + + # A simple way of doing this is to call `self.reset()` here, this will make + # all the envs take one (extra) step, but would be a clean way to do it. + # + # self.reset() + + self.trajectories.complete_all_trajectories() + + # Write the completed data into these files + + num_completed_trajectories = self.trajectories.num_completed_trajectories + num_shards = len(files_list) + if num_completed_trajectories < num_shards: + logging.warning( + "Number of completed trajectories [%d] is less than " + "the number of shards [%d], some shards maybe empty.", + num_completed_trajectories, num_shards) + + for i, f in enumerate(files_list[:num_completed_trajectories]): + # Start at index i of completed trajectories and take every `num_shards` + # trajectory. This ensures that the data is approximately a balanced + # partition of completed trajectories, also because of the above slicing + # of files_list, i will be a valid index into completed_trajectories. + trajectories_to_write = self.trajectories.completed_trajectories[ + i::num_shards] + + # Convert each trajectory from `trajectories_to_write` to a sequence of + # time-steps and then send that generator to `generate_files`. + + # `cycle_every_n` isn't needed since file list given to it is a singleton. + generator_utils.generate_files( + self._generate_time_steps(trajectories_to_write), [f]) + + def print_state(self): + for t in self.trajectories.trajectories: + print("---------") + if not t.is_active: + print("trajectory isn't active.") + continue + last_obs = t.last_time_step.observation + print(str(last_obs)) diff --git a/tensor2tensor/envs/env_problem_utils.py b/tensor2tensor/envs/env_problem_utils.py new file mode 100644 index 000000000..32d5499ca --- /dev/null +++ b/tensor2tensor/envs/env_problem_utils.py @@ -0,0 +1,276 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities to deal with EnvProblem.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import time + +import gym +import numpy as np + +from tensor2tensor.envs import gym_env_problem +from tensor2tensor.envs import rendered_env_problem +from tensor2tensor.rl import gym_utils + + +def done_indices(dones): + """Calculates the indices where dones has True.""" + return np.argwhere(dones).squeeze(axis=1) + + +def play_env_problem_randomly(env_problem, num_steps): + """Plays the env problem by randomly sampling actions for `num_steps`.""" + # Reset all environments. + env_problem.reset() + + # Play all environments, sampling random actions each time. + for _ in range(num_steps): + # Sample batch_size actions from the action space and stack them. 
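Looking back at `generate_data` above, the strided slice `completed_trajectories[i::num_shards]` spreads the completed trajectories across shard files approximately evenly; a tiny illustration:

```python
# Illustration only: 7 completed trajectories written into 3 shard files.
completed = list(range(7))
num_shards = 3
shards = [completed[i::num_shards] for i in range(num_shards)]
# shards == [[0, 3, 6], [1, 4], [2, 5]]
```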
+ actions = np.stack([ + env_problem.action_space.sample() for _ in range(env_problem.batch_size) + ]) + + # Execute actions, observations are stored in `env_problem`. + _, _, dones, _ = env_problem.step(actions) + + # Get the indices where we are done and reset those. + env_problem.reset(indices=done_indices(dones)) + + +def get_completed_trajectories_from_env(env, + n_trajectories, + raw_trajectory=False): + """Returns completed `n_trajectories` from `env`.""" + + # Just the raw trajectories. + if raw_trajectory: + return env.trajectories.completed_trajectories[:n_trajectories] + + # The numpy version of the above. + completed_trajectories = [] + for trajectory in env.trajectories.completed_trajectories[:n_trajectories]: + completed_trajectories.append(trajectory.as_numpy) + return completed_trajectories + + +def play_env_problem_with_policy(env, + policy_fun, + num_trajectories=1, + max_timestep=None, + reset=True, + state=None, + rng=None, + temperature=1.0, + boundary=32, + len_history_for_policy=32, + num_to_keep=1, + abort_fn=None, + raw_trajectory=False): + """Plays the given env with the policy function to collect trajectories. + + Args: + env: environment object, should be a subclass of env_problem.EnvProblem. + policy_fun: callable, taking in observations((B, RT) + OBS) and returning + back log-probabilities (B, AT, A). + num_trajectories: int, number of trajectories to collect. + max_timestep: int or None, if not None or a negative number, we cut any + trajectory that exceeds this time put it in the completed bin, and *dont* + reset the env. + reset: bool, true if we want to reset the envs. The envs are also reset if + max_max_timestep is None or < 0. + state: the state for `policy_fn`. + rng: jax rng, splittable. + temperature: float, temperature used in Gumbel sampling. + boundary: int, pad the sequences to the multiples of this number. + len_history_for_policy: int or None, the maximum history to keep for + applying the policy on. If None, use the whole history. + num_to_keep: int, while truncating trajectory how many time-steps to keep. + abort_fn: callable, If not None, then at every step call and abort the + trajectory collection if it returns True, if so reset the env and return + None. + raw_trajectory: bool, if True a list of trajectory.Trajectory objects is + returned, otherwise a list of numpy representations of + `trajectory.Trajectory` is returned. + + Returns: + A tuple, (trajectories, number of completed trajectories). Where + trajectories is a list of triples of (observation, action, reward) ndarrays. + """ + + def gumbel_sample(log_probs): + """Gumbel sampling.""" + u = np.random.uniform(low=1e-6, high=1.0 - 1e-6, size=log_probs.shape) + g = -np.log(-np.log(u)) + return np.argmax((log_probs / temperature) + g, axis=-1) + + # We need to reset all environments, if we're coming here the first time. + if reset or max_timestep is None or max_timestep <= 0: + env.reset() + else: + # Clear completed trajectories held internally. + env.trajectories.clear_completed_trajectories() + + num_done_trajectories = 0 + + policy_application_total_time = 0 + env_actions_total_time = 0 + bare_env_run_time = 0 + while env.trajectories.num_completed_trajectories < num_trajectories: + # Check if we should abort and return nothing. + if abort_fn and abort_fn(): + # We should also reset the environment, since it will have some + # trajectories (complete and incomplete) that we want to discard. 
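The `gumbel_sample` helper above uses the Gumbel-max trick: adding independent Gumbel(0, 1) noise to the temperature-scaled log-probabilities and taking the argmax draws a sample from the corresponding softmax distribution. A small sanity-check sketch (illustration only, not a library test):

```python
import numpy as np

# With temperature 1.0, the empirical frequencies of Gumbel-max samples
# should approach the underlying probabilities.
log_probs = np.log(np.array([0.7, 0.2, 0.1]))
u = np.random.uniform(low=1e-6, high=1.0 - 1e-6, size=(100000, 3))
g = -np.log(-np.log(u))
samples = np.argmax(log_probs + g, axis=-1)
empirical = np.bincount(samples, minlength=3) / samples.size
# empirical is close to [0.7, 0.2, 0.1]
```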
+ env.reset() + return None, 0, {}, state + + # Get all the observations for all the active trajectories. + # Shape is (B, RT) + OBS + # Bucket on whatever length is needed. + padded_observations, lengths = env.trajectories.observations_np( + boundary=boundary, + len_history_for_policy=len_history_for_policy) + + B = padded_observations.shape[0] # pylint: disable=invalid-name + + assert B == env.batch_size + assert (B,) == lengths.shape + + t1 = time.time() + log_probs, value_preds, state, rng = policy_fun( + padded_observations, lengths, state=state, rng=rng) + policy_application_total_time += (time.time() - t1) + + assert B == log_probs.shape[0] + + actions = gumbel_sample(log_probs) + if isinstance(env.action_space, gym.spaces.Discrete): + actions = np.squeeze(actions, axis=1) + + # Step through the env. + t1 = time.time() + _, _, dones, env_infos = env.step( + actions, + infos={ + "log_prob_actions": log_probs, + "value_predictions": value_preds, + }) + env_actions_total_time += (time.time() - t1) + bare_env_run_time += sum( + info["__bare_env_run_time__"] for info in env_infos) + + # Count the number of done trajectories, the others could just have been + # truncated. + num_done_trajectories += np.sum(dones) + + # Get the indices where we are done ... + done_idxs = done_indices(dones) + + # ... and reset those. + t1 = time.time() + if done_idxs.size: + env.reset(indices=done_idxs) + env_actions_total_time += (time.time() - t1) + + if max_timestep is None or max_timestep < 1: + continue + + # Are there any trajectories that have exceeded the time-limit we want. + lengths = env.trajectories.trajectory_lengths + exceeded_time_limit_idxs = done_indices(lengths > max_timestep) + + # If so, reset these as well. + t1 = time.time() + if exceeded_time_limit_idxs.size: + # This just cuts the trajectory, doesn't reset the env, so it continues + # from where it left off. + env.truncate(indices=exceeded_time_limit_idxs, num_to_keep=num_to_keep) + env_actions_total_time += (time.time() - t1) + + # We have the trajectories we need, return a list of triples: + # (observations, actions, rewards) + completed_trajectories = get_completed_trajectories_from_env( + env, num_trajectories, raw_trajectory=raw_trajectory) + + timing_info = { + "trajectory_collection/policy_application": policy_application_total_time, + "trajectory_collection/env_actions": env_actions_total_time, + "trajectory_collection/env_actions/bare_env": bare_env_run_time, + } + timing_info = {k: round(1000 * v, 2) for k, v in timing_info.items()} + + return completed_trajectories, num_done_trajectories, timing_info, state + + +def make_env(batch_size=1, + env_problem_name="", + resize=True, + resize_dims=(105, 80), + max_timestep="None", + clip_rewards=True, + parallelism=1, + use_tpu=False, + num_actions=None, + rendered_env=True, + **env_kwargs): + """Creates the env.""" + + if clip_rewards: + env_kwargs.update({"reward_range": (-1, 1), "discrete_rewards": True}) + else: + env_kwargs.update({"discrete_rewards": False}) + + # TODO(henrykm) - below someone linked "resize" with "abnormality" + # Probably we need more nuanced concept of "abnormality" + # decoupled from "resize". Currently the resize flag implies + # that we switch from a generic env to a wrapped env. + # Overall this file and gym_utils.py look like good candidates + # for a refactor. + + # No resizing needed, so let's be on the normal EnvProblem. 
+ if not resize: # None or False + return gym_env_problem.GymEnvProblem( + base_env_name=env_problem_name, + batch_size=batch_size, + parallelism=parallelism, + **env_kwargs) + + try: + max_timestep = int(max_timestep) + except Exception: # pylint: disable=broad-except + max_timestep = None + + wrapper_fn = functools.partial( + gym_utils.gym_env_wrapper, **{ + "rl_env_max_episode_steps": max_timestep, + "maxskip_env": True, + "rendered_env": rendered_env, + "rendered_env_resize_to": resize_dims, + "sticky_actions": False, + "output_dtype": np.int32 if use_tpu else None, + "num_actions": num_actions, + }) + + return rendered_env_problem.RenderedEnvProblem( + base_env_name=env_problem_name, + batch_size=batch_size, + parallelism=parallelism, + rendered_env=rendered_env, + env_wrapper_fn=wrapper_fn, + **env_kwargs) diff --git a/tensor2tensor/envs/env_problem_utils_test.py b/tensor2tensor/envs/env_problem_utils_test.py new file mode 100644 index 000000000..333863d08 --- /dev/null +++ b/tensor2tensor/envs/env_problem_utils_test.py @@ -0,0 +1,101 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for env_problem_utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from tensor2tensor.envs import env_problem_utils +from tensor2tensor.envs import gym_env_problem +from tensor2tensor.envs import tic_tac_toe_env # pylint: disable=unused-import +from tensor2tensor.envs import tic_tac_toe_env_problem + +import tensorflow.compat.v1 as tf + + +class EnvProblemUtilsTest(tf.test.TestCase): + + def test_play_env_problem_randomly(self): + batch_size = 5 + num_steps = 100 + + ep = tic_tac_toe_env_problem.TicTacToeEnvProblem() + ep.initialize(batch_size=batch_size) + + env_problem_utils.play_env_problem_randomly(ep, num_steps) + + # We've played num_steps * batch_size steps + everytime we get 'done' we + # create another step + batch_size number of pending steps. + self.assertEqual( + num_steps * batch_size + len(ep.trajectories.completed_trajectories) + + batch_size, ep.trajectories.num_time_steps) + + def test_play_env_problem_with_policy(self): + env = gym_env_problem.GymEnvProblem( + base_env_name="CartPole-v0", batch_size=2, reward_range=(-1, 1)) + + # Let's make sure that at-most 4 observations come to the policy function. + len_history_for_policy = 4 + + def policy_fun(observations, lengths, state=None, rng=None): + del lengths + b = observations.shape[0] + # Assert that observations from time-step len_history_for_policy onwards + # are zeros. + self.assertTrue( + np.all(observations[:, len_history_for_policy:, ...] == 0)) + self.assertFalse( + np.all(observations[:, :len_history_for_policy, ...] 
== 0)) + a = env.action_space.n + p = np.random.uniform(size=(b, 1, a)) + p = np.exp(p) + p = p / np.sum(p, axis=-1, keepdims=True) + return np.log(p), np.mean(p, axis=-1), state, rng + + max_timestep = 15 + num_trajectories = 2 + trajectories, _, _, _ = env_problem_utils.play_env_problem_with_policy( + env, + policy_fun, + num_trajectories=num_trajectories, + max_timestep=max_timestep, + len_history_for_policy=len_history_for_policy) + + self.assertEqual(num_trajectories, len(trajectories)) + + # Check shapes within trajectories. + traj = trajectories[0] + T = traj[1].shape[0] # pylint: disable=invalid-name + self.assertEqual((T + 1, 4), traj[0].shape) # (4,) is OBS + self.assertEqual((T,), traj[2].shape) + self.assertEqual(T, len(traj[4]["log_prob_actions"])) + self.assertEqual(T, len(traj[4]["value_predictions"])) + self.assertLessEqual(T, max_timestep) + + traj = trajectories[1] + T = traj[1].shape[0] # pylint: disable=invalid-name + self.assertEqual((T + 1, 4), traj[0].shape) + self.assertEqual((T,), traj[2].shape) + self.assertEqual(T, len(traj[4]["log_prob_actions"])) + self.assertEqual(T, len(traj[4]["value_predictions"])) + self.assertLessEqual(T, max_timestep) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/envs/gym_env_problem.py b/tensor2tensor/envs/gym_env_problem.py new file mode 100644 index 000000000..f5856a485 --- /dev/null +++ b/tensor2tensor/envs/gym_env_problem.py @@ -0,0 +1,334 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base class for envs that store their history. + +EnvProblem subclasses Problem and also implements the Gym interface (step, +reset, render, close, seed) +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import multiprocessing.pool +import time +from absl import logging +import gym +import numpy as np +from tensor2tensor.envs import env_problem +from tensor2tensor.envs import trajectory + + +# This is a compatibility shim introduced to support NumPy 1.24. See: +# https://numpy.org/neps/nep-0034-infer-dtype-is-object.html +def _stack(xs): + try: + return np.stack(xs) + except ValueError: + return np.stack(np.asarray(xs, dtype=object)) + + +class GymEnvProblem(env_problem.EnvProblem): + """An EnvProblem implemented as a batch of gym envs. + + This implementation should work well for cases where the env is not batched by + default ex: any gym env. In this case we create `batch_size` number of envs + and store them in a list. Any function then that interacts with the envs, like + reset, step or close goes over the env list to do the needful, ex: when reset + is called with specific indices we reset only those indices, etc. + + The usage of this class will look like the following: + + # 1. Creates and initializes the env_problem. + ep = env_problem.EnvProblem(...) + + # 2. One needs to call reset() at the start, this resets all envs. + ep.reset() + + # 3. 
Call step with actions for all envs, i.e. len(action) = batch_size + obs, rewards, dones, infos = ep.step(actions) + + # 4. Figure out which envs got done and reset only those. + ep.reset(indices=env_problem_utils.done_indices(dones)) + + # 5. Go back to Step #3 to further interact with the env or just dump the + # generated data to disk by calling: + ep.generate_data(...) + + # 6. If we now need to use this object again to play a few more iterations + # perhaps with a different batch size or maybe not recording the data, then + # we need to re-initialize environments and do some book-keeping, call: + ep.initialize_environments(batch_size) + + # 7. Go back to Step #2, i.e. reset all envs. + + NOTE: Look at `EnvProblemTest.test_interaction_with_env` and/or + `EnvProblemTest.test_generate_data` + + NOTE: We rely heavily that the underlying environments expose a gym style + interface, i.e. in addition to reset(), step() and close() we have access to + the following properties: observation_space, action_space, reward_range. + """ + + def __init__(self, + base_env_name=None, + env_wrapper_fn=None, + reward_range=None, + **kwargs): + """Initializes this class by creating the envs and managing trajectories. + + Args: + base_env_name: (string) passed to `gym.make` to make the underlying + environment. + env_wrapper_fn: (callable(env): env) Applies gym wrappers to the base + environment. + reward_range: (tuple(number, number) or None) the first element is the + minimum reward and the second is the maximum reward, used to clip and + process the raw reward in `process_rewards`. If None, this is inferred + from the inner environments. + **kwargs: (dict) Arguments passed to the base class. + """ + # Name for the base environment, will be used in `gym.make` in + # the default implementation of `initialize_environments`. + self._base_env_name = base_env_name + + # An env generates data when it is given actions by an agent which is either + # a policy or a human -- this is supposed to be the `id` of the agent. + # + # In practice, this is used only to store (and possibly retrieve) history + # to an appropriate directory. + self._agent_id = "default" + + # We clip rewards to this range before processing them further, as described + # in `process_rewards`. + self._reward_range = reward_range + + # Initialize the environment(s). + + # This can either be a list of environments of len `batch_size` or this can + # be a Neural Network, in which case it will be fed input with first + # dimension = `batch_size`. + self._envs = None + self._pool = None + + self._env_wrapper_fn = env_wrapper_fn + + # Call the super's ctor. It will use some of the member fields, so we call + # it in the end. + super(GymEnvProblem, self).__init__(**kwargs) + + @property + def base_env_name(self): + return self._base_env_name + + def _verify_same_spaces(self): + """Verifies that all the envs have the same observation and action space.""" + + # Pre-conditions: self._envs is initialized. + + if self._envs is None: + raise ValueError("Environments not initialized.") + + if not isinstance(self._envs, list): + logging.warning("Not checking observation and action space " + "compatibility across envs, since there is just one.") + return + + # NOTE: We compare string representations of observation_space and + # action_space because compositional classes like space.Tuple don't return + # true on object comparison. 
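The numbered usage sketch in the class docstring above can be made concrete; a minimal hedged example of the interaction loop, assuming gym's `CartPole-v0` is available (the tests later in this change drive the class the same way):

```python
import numpy as np

from tensor2tensor.envs import env_problem_utils
from tensor2tensor.envs import gym_env_problem

env = gym_env_problem.GymEnvProblem(
    base_env_name="CartPole-v0", batch_size=4, reward_range=(-1, 1))
env.reset()  # resets all underlying envs

for _ in range(100):
  # One random action per env in the batch.
  actions = np.stack(
      [env.action_space.sample() for _ in range(env.batch_size)])
  _, _, dones, _ = env.step(actions)
  # Reset only the envs whose episodes just finished.
  env.reset(indices=env_problem_utils.done_indices(dones))
```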
+ + if not all( + str(env.observation_space) == str(self.observation_space) + for env in self._envs): + err_str = ("All environments should have the same observation space, but " + "don't.") + logging.error(err_str) + # Log all observation spaces. + for i, env in enumerate(self._envs): + logging.error("Env[%d] has observation space [%s]", i, + env.observation_space) + raise ValueError(err_str) + + if not all( + str(env.action_space) == str(self.action_space) for env in self._envs): + err_str = "All environments should have the same action space, but don't." + logging.error(err_str) + # Log all action spaces. + for i, env in enumerate(self._envs): + logging.error("Env[%d] has action space [%s]", i, env.action_space) + raise ValueError(err_str) + + def initialize_environments(self, + batch_size=1, + parallelism=1, + per_env_kwargs=None, + **kwargs): + """Initializes the environments. + + Args: + batch_size: (int) Number of `self.base_env_name` envs to initialize. + parallelism: (int) If this is greater than one then we run the envs in + parallel using multi-threading. + per_env_kwargs: (list or None) An optional list of dictionaries to pass to + gym.make. If not None, length should match `batch_size`. + **kwargs: (dict) Kwargs to pass to gym.make. + """ + assert batch_size >= 1 + if per_env_kwargs is not None: + assert batch_size == len(per_env_kwargs) + else: + per_env_kwargs = [{} for _ in range(batch_size)] + + # By now `per_env_kwargs` is a list of dictionaries of size batch_size. + # The individual dictionaries maybe empty. + + def union_dicts(dict1, dict2): + """Union `dict1` and `dict2`.""" + copy_dict1 = copy.copy(dict1) + copy_dict1.update(dict2) + return copy_dict1 + + self._envs = [ + gym.make(self.base_env_name, + **union_dicts(kwargs, env_kwarg)) + for env_kwarg in per_env_kwargs + ] + self._parallelism = parallelism + self._pool = multiprocessing.pool.ThreadPool(self._parallelism) + if self._env_wrapper_fn is not None: + self._envs = list(map(self._env_wrapper_fn, self._envs)) + + self._verify_same_spaces() + + # If self.reward_range is None, i.e. this means that we should take the + # reward range of the env. + if self.reward_range is None: + self._reward_range = self._envs[0].reward_range + + # This data structure stores the history of each env. + # + # NOTE: Even if the env is a NN and can step in all batches concurrently, it + # is still valuable to store the trajectories separately. + self._trajectories = trajectory.BatchTrajectory(batch_size=batch_size) + + def assert_common_preconditions(self): + # Asserts on the common pre-conditions of: + # - self._envs is initialized. + # - self._envs is a list. 
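Note that `union_dicts` above merges the per-env kwargs over the shared `gym.make` kwargs, so values from the second dictionary win on conflicting keys; a quick illustration with made-up keys:

```python
# Hypothetical kwargs, for illustration of union_dicts only.
shared_kwargs = {"done_action": 0, "frameskip": 4}
per_env = {"done_action": 2}
# union_dicts(shared_kwargs, per_env) == {"done_action": 2, "frameskip": 4}
```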
+ assert self._envs + assert isinstance(self._envs, list) + + @property + def observation_space(self): + return self._envs[0].observation_space + + @property + def action_space(self): + return self._envs[0].action_space + + @property + def reward_range(self): + return self._reward_range + + def seed(self, seed=None): + if not self._envs: + logging.info("`seed` called on non-existent envs, doing nothing.") + return None + + if not isinstance(self._envs, list): + logging.warning("`seed` called on non-list envs, doing nothing.") + return None + + logging.warning( + "Called `seed` on EnvProblem, calling seed on the underlying envs.") + for env in self._envs: + env.seed(seed) + + return super(GymEnvProblem, self).seed(seed=seed) + + def close(self): + if not self._envs: + logging.info("`close` called on non-existent envs, doing nothing.") + return + + if not isinstance(self._envs, list): + logging.warning("`close` called on non-list envs, doing nothing.") + return + + # Call close on all the envs one by one. + for env in self._envs: + env.close() + + def _reset(self, indices): + """Resets environments at indices shouldn't pre-process or record. + + Args: + indices: list of indices of underlying envs to call reset on. + + Returns: + np.ndarray of stacked observations from the reset-ed envs. + """ + # This returns a numpy array with first dimension `len(indices)` and the + # rest being the dimensionality of the observation. + + num_envs_to_reset = len(indices) + observations = [None] * num_envs_to_reset + + def reset_at(idx): + observations[idx] = self._envs[indices[idx]].reset() + + if self._parallelism > 1: + self._pool.map(reset_at, range(num_envs_to_reset)) + else: + for i in range(num_envs_to_reset): + reset_at(i) + + return _stack(observations) + + def _step(self, actions): + """Takes a step in all environments, shouldn't pre-process or record. + + Args: + actions: (np.ndarray) with first dimension equal to the batch size. + + Returns: + a tuple of stacked raw observations, raw rewards, dones and infos. + """ + assert len(actions) == len(self._envs) + + observations = [None] * self.batch_size + rewards = [None] * self.batch_size + dones = [None] * self.batch_size + infos = [{} for _ in range(self.batch_size)] + + def apply_step(i): + t1 = time.time() + observations[i], rewards[i], dones[i], infos[i] = self._envs[i].step( + actions[i]) + t2 = time.time() + infos[i]["__bare_env_run_time__"] = t2 - t1 + + if self._parallelism > 1: + self._pool.map(apply_step, range(self.batch_size)) + else: + for i in range(self.batch_size): + apply_step(i) + + # Convert each list (observations, rewards, ...) into np.array and return a + # tuple. + return tuple(map(_stack, [observations, rewards, dones, infos])) diff --git a/tensor2tensor/envs/gym_env_problem_test.py b/tensor2tensor/envs/gym_env_problem_test.py new file mode 100644 index 000000000..5ac72b6e3 --- /dev/null +++ b/tensor2tensor/envs/gym_env_problem_test.py @@ -0,0 +1,438 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.envs.gym_env_problem.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import gym +from gym.spaces import Box +from gym.spaces import Discrete +import numpy as np +from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem +from tensor2tensor.envs import env_problem +from tensor2tensor.envs import env_problem_utils +from tensor2tensor.envs import gym_env_problem +from tensor2tensor.layers import modalities +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class GymEnvProblemTest(tf.test.TestCase): + + def setUp(self): + self.tmp_dir = os.path.join(tf.test.get_temp_dir(), "tmp_dir") + tf.gfile.MakeDirs(self.tmp_dir) + + def tearDown(self): + tf.gfile.DeleteRecursively(self.tmp_dir) + + def test_setup(self): + ep = gym_env_problem.GymEnvProblem( + base_env_name="CartPole-v0", batch_size=5) + # Checks that environments were created and they are `batch_size` in number. + ep.assert_common_preconditions() + + # Expectations on the observation space. + observation_space = ep.observation_space + self.assertIsInstance(observation_space, Box) + self.assertEqual(observation_space.shape, (4,)) + self.assertEqual(observation_space.dtype, np.float32) + + # Expectations on the action space. + action_space = ep.action_space + self.assertTrue(isinstance(action_space, Discrete)) + self.assertEqual(action_space.shape, ()) + self.assertEqual(action_space.dtype, np.int64) + self.assertEqual(ep.num_actions, 2) + + # Reward range is infinite here. + self.assertFalse(ep.is_reward_range_finite) + + def test_reward_range(self): + # Passing reward_range=None means take the reward range of the underlying + # environment as the reward range. + ep = gym_env_problem.GymEnvProblem( + base_env_name="FrozenLake-v1", batch_size=5, reward_range=None) + ep.assert_common_preconditions() + + # Assert reward range is finite here. + self.assertTrue(ep.is_reward_range_finite) + + # Assert that it is as expected of the underlying environment, since reward_ + self.assertEqual(0, ep.reward_range[0]) + self.assertEqual(1, ep.reward_range[1]) + + def test_default_processed_rewards_discrete(self): + # This differs in the above because it has a Tuple observation space. + ep = gym_env_problem.GymEnvProblem( + base_env_name="KellyCoinflip-v0", batch_size=5, reward_range=None) + ep.assert_common_preconditions() + + # Assert reward range is finite here. + self.assertTrue(ep.is_reward_range_finite) + + # Assert that it is as expected of the underlying environment. + reward_range = ep.reward_range + self.assertEqual(0, reward_range[0]) + + # Google's version of Gym has maxWealth, vs max_wealth externally. + max_wealth = getattr(ep._envs[0], "maxWealth", + getattr(ep._envs[0], "max_wealth", None)) + self.assertIsNotNone(max_wealth) + self.assertEqual(max_wealth, reward_range[1]) + + # Check that the processed rewards are discrete. + self.assertTrue(ep.is_processed_rewards_discrete) + + # Assert on the number of rewards. + self.assertEqual(ep.num_rewards, reward_range[1] - reward_range[0] + 1) + + def test_interaction_with_env(self): + batch_size = 5 + reward_range = (-1, 1) + ep = gym_env_problem.GymEnvProblem( + base_env_name="KellyCoinflip-v0", + batch_size=batch_size, + reward_range=reward_range) + + # Resets all environments. 
+ ep.reset() + + # Let's play a few steps. + nsteps = 100 + num_trajectories_completed = 0 + num_timesteps_completed = 0 + # If batch_done_at_step[i] = j then it means that i^th env last got done at + # step = j. + batch_done_at_step = np.full(batch_size, -1) + for i in range(nsteps): + # Sample batch_size actions from the action space and stack them (since + # that is the expected type). + actions = np.stack([ep.action_space.sample() for _ in range(batch_size)]) + + _, _, dones, _ = ep.step(actions) + + # Do the book-keeping on number of trajectories completed and expect that + # it matches ep's completed number. + + num_done = sum(dones) + num_trajectories_completed += num_done + + self.assertEqual(num_trajectories_completed, + len(ep.trajectories.completed_trajectories)) + + # Get the indices where we are done ... + done_indices = env_problem_utils.done_indices(dones) + + # ... and reset those. + ep.reset(indices=done_indices) + + # If nothing got done, go on to the next step. + if done_indices.size == 0: + # i.e. this is an empty array. + continue + + # See when these indices were last done and calculate how many time-steps + # each one took to get done. + num_timesteps_completed += sum(i + 1 - batch_done_at_step[done_indices]) + batch_done_at_step[done_indices] = i + + # This should also match the number of time-steps completed given by ep. + num_timesteps_completed_ep = sum( + ct.num_time_steps for ct in ep.trajectories.completed_trajectories) + self.assertEqual(num_timesteps_completed, num_timesteps_completed_ep) + + # Reset the trajectories. + ep.trajectories.reset_batch_trajectories() + self.assertEqual(0, len(ep.trajectories.completed_trajectories)) + + def read_tfrecord_dataset(self, filenames, ep): + # Read the dataset at `filenames` into a tf.data.Dataset and returns the + # number of time-steps (just the number of records in the dataset) and the + # number of trajectories. + + last_timestep = -1 + num_time_steps = 0 + num_trajectories = 0 + for ex in generator_utils.tfrecord_iterator( + filenames, example_spec=ep.example_reading_spec()[0]): + num_time_steps += 1 + this_timestep = ex[env_problem.TIMESTEP_FIELD][0] + if 1 + last_timestep != this_timestep: + num_trajectories += 1 + self.assertEqual(0, this_timestep) + last_timestep = this_timestep + num_trajectories += 1 + + return num_trajectories, num_time_steps + + def play_env(self, + env=None, + nsteps=100, + base_env_name=None, + batch_size=5, + reward_range=None): + """Creates `GymEnvProblem` with the given arguments and plays it randomly. + + Args: + env: optional env. + nsteps: plays the env randomly for nsteps. + base_env_name: passed to GymEnvProblem's init. + batch_size: passed to GymEnvProblem's init. + reward_range: passed to GymEnvProblem's init. + + Returns: + tuple of gym_env_problem, number of trajectories done, + number of trajectories done in the last step. + """ + + if env is None: + env = gym_env_problem.GymEnvProblem( + base_env_name=base_env_name, + batch_size=batch_size, + reward_range=reward_range) + # Usually done by a registered subclass, we do this manually in the test. + env.name = base_env_name + + # Reset all environments. + env.reset() + + # Play for some steps to generate data. + num_dones = 0 + num_dones_in_last_step = 0 + for _ in range(nsteps): + # Sample actions. + actions = np.stack([env.action_space.sample() for _ in range(batch_size)]) + # Step through it. + _, _, dones, _ = env.step(actions) + # Get the indices where we are done ... 
+ done_indices = env_problem_utils.done_indices(dones) + # ... and reset those. + env.reset(indices=done_indices) + # count the number of dones we got, in this step and overall. + num_dones_in_last_step = sum(dones) + num_dones += num_dones_in_last_step + + return env, num_dones, num_dones_in_last_step + + def test_generate_data(self): + base_env_name = "CartPole-v0" + batch_size = 5 + reward_range = (-1, 1) + nsteps = 100 + ep, num_dones, num_dones_in_last_step = self.play_env( + base_env_name=base_env_name, + batch_size=batch_size, + reward_range=reward_range, + nsteps=nsteps) + + # This is because every num_dones starts a new trajectory, and a further + # batch_size are active at the last step when we call generate_data, but + # the ones that got done in the last step (these have only one time-step in + # their trajectory) will be skipped. + expected_num_trajectories = num_dones + batch_size - num_dones_in_last_step + + # Similar logic as above, nsteps * batch_size overall `step` calls are made. + expected_num_time_steps = ( + nsteps * batch_size) + num_dones + batch_size - num_dones_in_last_step + + # Dump the completed data to disk. + ep.generate_data(self.tmp_dir, self.tmp_dir) + + # Read the written files and assert on the number of time steps. + training_filenames = ep.training_filepaths( + self.tmp_dir, ep.num_shards[problem.DatasetSplit.TRAIN], True) + dev_filenames = ep.dev_filepaths( + self.tmp_dir, ep.num_shards[problem.DatasetSplit.EVAL], True) + + training_trajectories, training_timesteps = self.read_tfrecord_dataset( + training_filenames, ep) + dev_trajectories, dev_timesteps = self.read_tfrecord_dataset( + dev_filenames, ep) + + # This tests what we wrote on disk matches with what we computed. + self.assertEqual(expected_num_time_steps, + training_timesteps + dev_timesteps) + self.assertEqual(expected_num_trajectories, + training_trajectories + dev_trajectories) + + def test_problem_dataset_works(self): + + # We need to derive this class to set the required methods. + class TestEnv(gym_env_problem.GymEnvProblem): + name = "TestEnv" + + @property + def input_modality(self): + return modalities.ModalityType.REAL_L2_LOSS + + @property + def input_vocab_size(self): + return None + + @property + def target_modality(self): + return modalities.ModalityType.SYMBOL_WEIGHTS_ALL + + @property + def target_vocab_size(self): + return 2 + + @property + def action_modality(self): + return modalities.ModalityType.SYMBOL_WEIGHTS_ALL + + @property + def reward_modality(self): + return modalities.ModalityType.SYMBOL_WEIGHTS_ALL + + base_env_name = "CartPole-v0" + batch_size = 5 + reward_range = (-1, 1) + + env = TestEnv( + base_env_name=base_env_name, + batch_size=batch_size, + reward_range=reward_range) + + nsteps = 100 + ep, _, _ = self.play_env(env=env, nsteps=nsteps) + + # Dump the completed data to disk. + ep.generate_data(self.tmp_dir, self.tmp_dir) + + # Read the actual files and count the trajectories and time-steps. + dev_filenames = ep.dev_filepaths( + self.tmp_dir, ep.num_shards[problem.DatasetSplit.EVAL], True) + dev_trajectories, dev_timesteps = self.read_tfrecord_dataset( + dev_filenames, ep) + + # Count them using a tf.data.Dataset. 
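As a quick illustration (separate from the diff itself), the trajectory/time-step accounting that `test_generate_data` above relies on can be checked with a tiny standalone computation; the counts below are made up for the example:

```python
# Hypothetical counts, illustrating the accounting used in test_generate_data.
nsteps, batch_size = 100, 5
num_dones = 37                 # episodes that finished during the 100 steps
num_dones_in_last_step = 2     # of those, episodes that finished on the last step

# Every done starts a new trajectory, and a further batch_size are still active
# when generate_data is called; the ones done in the very last step have only a
# single (reset) time-step and are skipped.
expected_num_trajectories = num_dones + batch_size - num_dones_in_last_step

# nsteps * batch_size observations come from `step` calls, plus one reset
# observation for every trajectory that actually gets written.
expected_num_time_steps = nsteps * batch_size + expected_num_trajectories

print(expected_num_trajectories)   # 40
print(expected_num_time_steps)     # 540
```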
+ dev_dataset = ep.dataset(tf_estimator.ModeKeys.EVAL, data_dir=self.tmp_dir) + + last_timestep = -1 + dev_timesteps_ds = 0 + dev_trajectories_ds = 0 + iterator = dev_dataset.make_one_shot_iterator() + next_element = iterator.get_next() + with tf.Session() as session: + while True: + try: + tf_example_dict = session.run(next_element) + + # We have a time-step. + dev_timesteps_ds += 1 + + this_timestep = tf_example_dict[env_problem.TIMESTEP_FIELD][ + 0] # [0] since every value in tf_example_dict is an array/list. + if 1 + last_timestep != this_timestep: + dev_trajectories_ds += 1 + self.assertEqual(0, this_timestep) + last_timestep = this_timestep + except tf.errors.OutOfRangeError: + dev_trajectories_ds += 1 + break + + # Make sure that they agree. + self.assertEqual(dev_trajectories, dev_trajectories_ds) + self.assertEqual(dev_timesteps, dev_timesteps_ds) + + def test_resets_properly(self): + base_env_name = "CartPole-v0" + batch_size = 5 + reward_range = (-1, 1) + nsteps = 100 + + env = gym_env_problem.GymEnvProblem( + base_env_name=base_env_name, + batch_size=batch_size, + reward_range=reward_range) + env.name = base_env_name + + num_dones = 0 + while num_dones == 0: + env, num_dones, _ = self.play_env(env=env, + nsteps=nsteps, + batch_size=batch_size, + reward_range=reward_range) + + # Some completed trajectories have been generated. + self.assertGreater(env.trajectories.num_completed_trajectories, 0) + + # This should clear the env completely of any state. + env.reset() + + # Assert that there aren't any completed trajectories in the env now. + self.assertEqual(env.trajectories.num_completed_trajectories, 0) + + def test_per_env_kwargs(self): + + # Creating a dummy class where we specify the action at which the env + # returns done. + class TestPerEnvKwargsEnv(gym.Env): + """Test environment with the `done action` specified.""" + + action_space = Discrete(3) + observation_space = Box(low=-1.0, high=1.0, shape=()) + + def __init__(self, done_action=0): + self._done_action = done_action + + def _generate_ob(self): + return self.observation_space.sample() + + def step(self, action): + done = self._done_action == action + reward = 1 if done else 0 + return (self._generate_ob(), reward, done, {}) + + def reset(self): + return self._generate_ob() + + # Registering it with gym. + test_env_name = "TestPerEnvKwargsEnv-v0" + gym.envs.register(id=test_env_name, entry_point=TestPerEnvKwargsEnv) + + # Creating a batch of those with different done actions. + base_env_name = test_env_name + batch_size = 2 + reward_range = (-1, 1) + per_env_kwargs = [{"done_action": 1}, {"done_action": 2}] + + env = gym_env_problem.GymEnvProblem( + base_env_name=base_env_name, + batch_size=batch_size, + reward_range=reward_range, + per_env_kwargs=per_env_kwargs) + + _ = env.reset() + + # Finally querying the done actions. + + _, _, d, _ = env.step(np.array([0, 0])) + self.assertFalse(d[0]) + self.assertFalse(d[1]) + + _, _, d, _ = env.step(np.array([1, 2])) + self.assertTrue(d[0]) + self.assertTrue(d[1]) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/envs/gym_spaces_utils.py b/tensor2tensor/envs/gym_spaces_utils.py new file mode 100644 index 000000000..ceca77dab --- /dev/null +++ b/tensor2tensor/envs/gym_spaces_utils.py @@ -0,0 +1,113 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Few utility functions to deal with gym spaces. + +gym.spaces.Box and gym.spaces.Discrete are easiest to support. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from gym.spaces import Box +from gym.spaces import Discrete + +import numpy as np +import tensorflow.compat.v1 as tf + + +def box_space_spec(box_space, tf_dtype): + return tf.FixedLenFeature(box_space.shape, tf_dtype) + + +def discrete_space_spec(discrete_space, tf_dtype): + del discrete_space # this is not needed. + return tf.FixedLenFeature((1,), tf_dtype) + + +def gym_space_spec(gym_space): + """Returns a reading spec of a gym space. + + NOTE: Only implemented currently for Box and Discrete. + + Args: + gym_space: instance of gym.spaces whose spec we want. + + Returns: + Reading spec for that space. + + Raises: + NotImplementedError: For spaces whose reading spec we haven't implemented. + """ + # First try to determine the type. + try: + tf_dtype = tf.as_dtype(gym_space.dtype) + except TypeError as e: + tf.logging.error("Cannot convert space's type [%s] to tf.dtype", + gym_space.dtype) + raise e + + # Now hand it over to the specialized functions. + if isinstance(gym_space, Box): + return box_space_spec(gym_space, tf_dtype) + elif isinstance(gym_space, Discrete): + return discrete_space_spec(gym_space, tf_dtype) + else: + raise NotImplementedError + + +def gym_space_encode(gym_space, observation): + # We should return something that generator_utils.to_example can consume. + if isinstance(gym_space, Discrete): + return [observation] + + if isinstance(gym_space, Box): + return observation.reshape(-1).tolist() + + raise NotImplementedError + + +def cardinality(gym_space): + """Number of elements that can be represented by the space. + + Makes the most sense for Discrete or Box type with integral dtype, ex: number + of actions in an action space. + + Args: + gym_space: The gym space. + + Returns: + np.int64 number of observations that can be represented by this space, or + returns None when this doesn't make sense, i.e. float boxes etc. + + Raises: + NotImplementedError when a space's cardinality makes sense but we haven't + implemented it. + """ + + if (gym_space.dtype == np.float32) or (gym_space.dtype == np.float64): + tf.logging.warn("Returning None for a float gym space's cardinality: %s", + gym_space) + return None + + if isinstance(gym_space, Discrete): + return gym_space.n + + if isinstance(gym_space, Box): + # Construct a box with all possible values in this box and take a product. + return np.prod(gym_space.high - gym_space.low + 1) + + raise NotImplementedError diff --git a/tensor2tensor/envs/gym_spaces_utils_test.py b/tensor2tensor/envs/gym_spaces_utils_test.py new file mode 100644 index 000000000..b68f62723 --- /dev/null +++ b/tensor2tensor/envs/gym_spaces_utils_test.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for gym_spaces_utils.py.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from gym.spaces import Box +from gym.spaces import Discrete +import numpy as np +from tensor2tensor.envs import gym_spaces_utils +import tensorflow.compat.v1 as tf + + +class GymSpacesUtilsTest(tf.test.TestCase): + + def test_discrete_space_spec(self): + discrete_space = Discrete(100) + spec = gym_spaces_utils.gym_space_spec(discrete_space) + self.assertIsInstance(spec, tf.FixedLenFeature) + self.assertEqual(spec.dtype, tf.int64) + self.assertListEqual(list(spec.shape), [1]) + + def test_box_space_spec(self): + box_space = Box(low=0, high=10, shape=[5, 6], dtype=np.float32) + spec = gym_spaces_utils.gym_space_spec(box_space) + self.assertIsInstance(spec, tf.FixedLenFeature) + self.assertEqual(spec.dtype, tf.float32) + self.assertListEqual(list(spec.shape), [5, 6]) + + def test_discrete_space_encode(self): + discrete_space = Discrete(100) + value = discrete_space.sample() + encoded_value = gym_spaces_utils.gym_space_encode(discrete_space, value) + self.assertListEqual([value], encoded_value) + + def test_box_space_encode(self): + box_space = Box(low=0, high=10, shape=[2], dtype=np.int64) + value = np.array([2, 3]) + encoded_value = gym_spaces_utils.gym_space_encode(box_space, value) + self.assertListEqual([2, 3], encoded_value) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/envs/mujoco_problems.py b/tensor2tensor/envs/mujoco_problems.py new file mode 100644 index 000000000..53f82887a --- /dev/null +++ b/tensor2tensor/envs/mujoco_problems.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
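Before the Mujoco problems, a minimal usage sketch (separate from the diff) of the `gym_spaces_utils` helpers added above; it assumes a working `gym` and `tensor2tensor` installation and mirrors the behavior of `gym_space_spec`, `gym_space_encode`, and `cardinality`:

```python
from gym.spaces import Box, Discrete
import numpy as np

from tensor2tensor.envs import gym_spaces_utils

disc = Discrete(4)
box = Box(low=0, high=10, shape=(2,), dtype=np.int64)

# Reading specs: Discrete -> FixedLenFeature((1,), tf.int64),
# Box -> FixedLenFeature(box.shape, <tf dtype of the box>).
print(gym_spaces_utils.gym_space_spec(disc))
print(gym_spaces_utils.gym_space_spec(box))

# Encoding: Discrete samples are wrapped in a list, Box samples are flattened.
print(gym_spaces_utils.gym_space_encode(disc, disc.sample()))
print(gym_spaces_utils.gym_space_encode(box, box.sample()))

# Cardinality: n for Discrete, product of per-dimension ranges for integer
# Boxes (here (10 - 0 + 1) ** 2 = 121), and None for float Boxes.
print(gym_spaces_utils.cardinality(disc))   # 4
print(gym_spaces_utils.cardinality(box))    # 121
```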
+ +"""Mujoco Gym environments.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +from tensor2tensor.envs import rendered_env_problem +from tensor2tensor.layers import modalities +from tensor2tensor.rl import gym_utils +from tensor2tensor.utils import registry + + + +@registry.register_env_problem +class ReacherEnvProblem(rendered_env_problem.RenderedEnvProblem): + """Mujoco's reacher environment.""" + + def __init__(self): + base_env_name = "Reacher-v2" + wrapper_fn = functools.partial( + gym_utils.gym_env_wrapper, **{ + "rl_env_max_episode_steps": -1, + "maxskip_env": False, + "rendered_env": True, + "rendered_env_resize_to": None, # Do not resize frames + "sticky_actions": False, + "output_dtype": None, + "num_actions": None, + }) + super(ReacherEnvProblem, self).__init__( + base_env_name=base_env_name, env_wrapper_fn=wrapper_fn) + + @property + def input_modality(self): + return modalities.ModalityType.VIDEO + + @property + def target_modality(self): + return modalities.ModalityType.VIDEO + + @property + def action_modality(self): + return modalities.ModalityType.IDENTITY + + @property + def reward_modality(self): + return modalities.ModalityType.IDENTITY + + @property + def input_vocab_size(self): + return 256 + + @property + def target_vocab_size(self): + return 256 diff --git a/tensor2tensor/envs/mujoco_problems_test.py b/tensor2tensor/envs/mujoco_problems_test.py new file mode 100644 index 000000000..a9c53c608 --- /dev/null +++ b/tensor2tensor/envs/mujoco_problems_test.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.envs.mujoco_problems.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from tensor2tensor.envs import env_problem_utils +from tensor2tensor.envs import mujoco_problems # pylint: disable=unused-import +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + + +class ReacherEnvProblemTest(tf.test.TestCase): + + def test_registration_and_interaction_with_env_problem(self): + batch_size = 5 + # This ensures that registration has occurred. + ep = registry.env_problem("reacher_env_problem", batch_size=batch_size) + ep.reset() + num_done = 0 + nsteps = 100 + for _ in range(nsteps): + actions = np.stack([ep.action_space.sample() for _ in range(batch_size)]) + obs, rewards, dones, infos = ep.step(actions) + + # Assert that things are happening batchwise. 
+ self.assertEqual(batch_size, len(obs)) + self.assertEqual(batch_size, len(rewards)) + self.assertEqual(batch_size, len(dones)) + self.assertEqual(batch_size, len(infos)) + + done_indices = env_problem_utils.done_indices(dones) + ep.reset(done_indices) + num_done += sum(dones) + + # Assert that something got done atleast, + self.assertGreater(num_done, 0) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/envs/rendered_env_problem.py b/tensor2tensor/envs/rendered_env_problem.py new file mode 100644 index 000000000..b32c48226 --- /dev/null +++ b/tensor2tensor/envs/rendered_env_problem.py @@ -0,0 +1,139 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Base class for env problems with RGB array as observation space.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import png +import six +from tensor2tensor.data_generators import video_utils +from tensor2tensor.envs import env_problem +from tensor2tensor.envs import gym_env_problem +from tensor2tensor.utils import contrib +import tensorflow.compat.v1 as tf + +_IMAGE_ENCODED_FIELD = "image/encoded" +_IMAGE_FORMAT_FIELD = "image/format" +_IMAGE_HEIGHT_FIELD = "image/height" +_IMAGE_WIDTH_FIELD = "image/width" +_FRAME_NUMBER_FIELD = "frame_number" + +_FORMAT = "png" + + +class RenderedEnvProblem(gym_env_problem.GymEnvProblem, + video_utils.VideoProblem): + """An `EnvProblem` when observations are RGB arrays. + + This takes care of wrapping a rendered gym environment to behave like a + `VideoProblem`. This class assumes the underlying gym environment is either a + `gym_utils.RenderedEnv` or it natively returns rendered scene for + observations. i.e. 
the underlying gym environment should have a + `Box` observation space with the following shape: [frame_height, frame_width, + channels] + + Note: The method resolution order for this class is: + `RenderedEnvProblem`, `EnvProblem`, `Env`, `VideoProblem`, `Problem` + """ + + def __init__(self, *args, **kwargs): + """Initialize by calling both parents' constructors.""" + gym_env_problem.GymEnvProblem.__init__(self, *args, **kwargs) + video_utils.VideoProblem.__init__(self) + + def initialize_environments(self, + batch_size=1, + parallelism=1, + rendered_env=True, + per_env_kwargs=None, + **kwargs): + gym_env_problem.GymEnvProblem.initialize_environments( + self, batch_size=batch_size, parallelism=parallelism, + per_env_kwargs=per_env_kwargs, **kwargs) + # Assert the underlying gym environment has correct observation space + if rendered_env: + assert len(self.observation_spec.shape) == 3 + + def example_reading_spec(self): + """Return a mix of env and video data fields and decoders.""" + slim = contrib.slim() + video_fields, video_decoders = ( + video_utils.VideoProblem.example_reading_spec(self)) + env_fields, env_decoders = ( + gym_env_problem.GymEnvProblem.example_reading_spec(self)) + + # Remove raw observations field since we want to capture them as videos. + env_fields.pop(env_problem.OBSERVATION_FIELD) + env_decoders.pop(env_problem.OBSERVATION_FIELD) + + # Add frame number spec and decoder. + env_fields[_FRAME_NUMBER_FIELD] = tf.FixedLenFeature((1,), tf.int64) + env_decoders[_FRAME_NUMBER_FIELD] = slim.tfexample_decoder.Tensor( + _FRAME_NUMBER_FIELD) + + # Add video fields and decoders + env_fields.update(video_fields) + env_decoders.update(video_decoders) + return env_fields, env_decoders + + def _generate_time_steps(self, trajectory_list): + """Transforms time step observations to frames of a video.""" + for time_step in gym_env_problem.GymEnvProblem._generate_time_steps( + self, trajectory_list): + # Convert the rendered observations from numpy to png format. + frame_np = np.array(time_step.pop(env_problem.OBSERVATION_FIELD)) + frame_np = frame_np.reshape( + [self.frame_height, self.frame_width, self.num_channels]) + # TODO(msaffar) Add support for non RGB rendered environments + frame = png.from_array(frame_np, "RGB", info={"bitdepth": 8}) + frame_buffer = six.BytesIO() + frame.save(frame_buffer) + + # Put the encoded frame back. + time_step[_IMAGE_ENCODED_FIELD] = [frame_buffer.getvalue()] + time_step[_IMAGE_FORMAT_FIELD] = [_FORMAT] + time_step[_IMAGE_HEIGHT_FIELD] = [self.frame_height] + time_step[_IMAGE_WIDTH_FIELD] = [self.frame_width] + + # Add the frame number + time_step[_FRAME_NUMBER_FIELD] = time_step[env_problem.TIMESTEP_FIELD] + yield time_step + + @property + def num_channels(self): + return self.observation_spec.shape[2] + + @property + def frame_height(self): + return self.observation_spec.shape[0] + + @property + def frame_width(self): + return self.observation_spec.shape[1] + + @property + def total_number_of_frames(self): + """Upper bound on the total number of frames across all environments. + + This is used to decide sharding. See `VideoProblem.total_number_of_frames` + for more details. + + Returns: + number of frames among all examples in the dataset. 
+ """ + return self.trajectories.num_time_steps diff --git a/tensor2tensor/envs/rendered_env_problem_test.py b/tensor2tensor/envs/rendered_env_problem_test.py new file mode 100644 index 000000000..c7e816434 --- /dev/null +++ b/tensor2tensor/envs/rendered_env_problem_test.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.envs.rendered_env_problem.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.envs import env_problem +from tensor2tensor.envs import env_problem_utils +from tensor2tensor.envs import rendered_env_problem +from tensor2tensor.envs.mujoco_problems import ReacherEnvProblem +import tensorflow.compat.v1 as tf + + +class RenderedEnvProblemTest(tf.test.TestCase): + + def test_generate_timesteps(self): + env = ReacherEnvProblem() + env.initialize(batch_size=2) + env_problem_utils.play_env_problem_randomly(env, num_steps=5) + env.trajectories.complete_all_trajectories() + + frame_number = 0 + for time_step in env._generate_time_steps( + env.trajectories.completed_trajectories): + # original observation should not be in time_step + self.assertNotIn(env_problem.OBSERVATION_FIELD, time_step) + # validate frame + self.assertIn(rendered_env_problem._IMAGE_ENCODED_FIELD, time_step) + self.assertIn(rendered_env_problem._IMAGE_HEIGHT_FIELD, time_step) + self.assertIn(rendered_env_problem._IMAGE_WIDTH_FIELD, time_step) + self.assertIn(rendered_env_problem._IMAGE_FORMAT_FIELD, time_step) + self.assertIn(rendered_env_problem._FRAME_NUMBER_FIELD, time_step) + + decoded_frame = tf.image.decode_png( + time_step[rendered_env_problem._IMAGE_ENCODED_FIELD][0]) + + decoded_frame = self.evaluate(decoded_frame) + + self.assertListEqual( + [env.frame_height, env.frame_width, env.num_channels], + list(decoded_frame.shape)) + self.assertListEqual([rendered_env_problem._FORMAT], + time_step[rendered_env_problem._IMAGE_FORMAT_FIELD]) + self.assertListEqual([frame_number], + time_step[rendered_env_problem._FRAME_NUMBER_FIELD]) + self.assertListEqual([env.frame_width], + time_step[rendered_env_problem._IMAGE_WIDTH_FIELD]) + self.assertListEqual([env.frame_height], + time_step[rendered_env_problem._IMAGE_HEIGHT_FIELD]) + frame_number += 1 + frame_number %= 6 + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/envs/tic_tac_toe_env.py b/tensor2tensor/envs/tic_tac_toe_env.py new file mode 100644 index 000000000..795bcd1b2 --- /dev/null +++ b/tensor2tensor/envs/tic_tac_toe_env.py @@ -0,0 +1,244 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Gym Tic-Tac-Toe environment. + +Environment acts like the second player and first player is either environment +or the agent. The environment follows a random policy for now. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import gym +from gym import spaces +from gym.utils import seeding +import numpy as np + +from tensor2tensor.data_generators import problem +from tensor2tensor.layers import modalities +from tensor2tensor.rl import gym_utils + + +def encode_pos(i, j): + """Encodes a pair (i, j) as a scalar position on the board.""" + return 3 * i + j + + +def decode_pos(pos): + """Decoes a scalar position on the board as a pair (i, j).""" + return pos // 3, pos % 3 + + +def get_open_spaces(board): + """Given a representation of the board, returns a list of open spaces.""" + open_spaces = [] + for i in range(3): + for j in range(3): + if board[i][j] == 0: + open_spaces.append(encode_pos(i, j)) + return open_spaces + + +def get_reward_and_done(board): + """Given a representation of the board, returns reward and done.""" + # Returns (reward, done) where: + # reward: -1 means lost, +1 means win, 0 means draw or continuing. + # done: True if the game is over, i.e. someone won or it is a draw. + + # Sum all rows ... + all_sums = [np.sum(board[i, :]) for i in range(3)] + # ... all columns + all_sums.extend([np.sum(board[:, i]) for i in range(3)]) + # and both diagonals. + all_sums.append(np.sum([board[i, i] for i in range(3)])) + all_sums.append(np.sum([board[i, 2 - i] for i in range(3)])) + + if -3 in all_sums: + return -1, True + + if 3 in all_sums: + return 1, True + + done = True + if get_open_spaces(board): + done = False + + return 0, done + + +# TODO(afrozm): This should eventually subclass Problem. +class TicTacToeEnv(gym.Env): + """Simple TicTacToe Env, starts the game randomly half of the time.""" + + def __init__(self, strict=False): + self.strict = strict + + # What about metadata and spec? + self.reward_range = (-1.0, 1.0) + + # Action space -- 9 positions that we can chose to mark. + self.action_space = spaces.Discrete(9) + + # Observation space -- this hopefully does what we need. + self.observation_space = spaces.Box( + low=-1, high=1, shape=(3, 3), dtype=np.int64) + + # Set the seed. + self.np_random = None + self.seed() + + # Start the game. + self.board_state = None + self.done = False + self.reset() + + def seed(self, seed=None): + self.np_random, seed = seeding.np_random(seed) + return [seed] + + # TODO(afrozm): Parametrize by some policy so that the env plays in an optimal + # way. + def play_random_move(self): + # Select open spaces. + open_spaces = get_open_spaces(self.board_state) + + if not open_spaces: + return False + + # Choose a space and mark it. + pos = self.np_random.choice(open_spaces) + i, j = decode_pos(pos) + + self.board_state[i, j] = -1 + + def reset(self): + self.board_state = np.zeros((3, 3), dtype=np.int64) + + # We"ll start with a 50% chance. + if self.np_random.choice([0, 1]) == 0: + self.play_random_move() + + # Return the observation. 
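As an illustrative aside (separate from the diff), the board encoding and win detection in `TicTacToeEnv` above can be exercised directly; the board below is a made-up position in which the agent (+1) has completed the top row:

```python
import numpy as np
from tensor2tensor.envs import tic_tac_toe_env as ttt

# Positions are flattened row-major: (i, j) <-> 3 * i + j.
assert ttt.encode_pos(1, 2) == 5
assert ttt.decode_pos(5) == (1, 2)

# Agent marks are +1, environment marks are -1, empty cells are 0.
board = np.array([[1, 1, 1],
                  [-1, -1, 0],
                  [0, 0, 0]], dtype=np.int64)

# The top row sums to +3, so the agent has won and the game is done.
reward, done = ttt.get_reward_and_done(board)
assert reward == 1 and done
```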
+ return self.board_state + + def render(self, mode="human"): + # Unused. + del mode + board_str = "" + for i in range(3): + for j in range(3): + pos = self.board_state[i, j] + if pos == -1: + board_str += "x" + elif pos == 0: + board_str += "-" + else: + board_str += "o" + board_str += "\n" + return board_str + + def step(self, action): + # Are we already done? + if self.strict: + assert not self.done + + # Action has to belong to the action state. + assert self.action_space.contains(action) + + # Is it a legitimate move, i.e. is that position open to play? + is_legit_move = action in get_open_spaces(self.board_state) + + # Shouldn"t be an illegal action -- is a noop if not strict. + if self.strict: + assert is_legit_move + + # If strict mode is off, then let this be a noop and env not play either. + if not is_legit_move: + return self.board_state, 0, False, {} + + # This is a legit move, perform the action and check if done, etc etc. + i, j = decode_pos(action) + self.board_state[i, j] = 1 + reward, done = get_reward_and_done(self.board_state) + + if done: + self.done = True + return self.board_state, reward, True, {} + + # If not done already, play our move. + self.play_random_move() + reward, done = get_reward_and_done(self.board_state) + self.done = done + return self.board_state, reward, self.done, {} + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = { + "inputs": modalities.ModalityType.IDENTITY_SYMBOL, + "targets": modalities.ModalityType.IDENTITY_SYMBOL, + } + p.vocab_size = { + "inputs": 3, # since at each box, the input is either x, o or -. + # nevermind that we have a 3x3 box. + "targets": 3, # -1, 0, 1 + } + p.input_space_id = 0 # problem.SpaceID.GENERIC + p.target_space_id = 0 # problem.SpaceID.GENERIC + + +# TODO(afrozm): Figure out how to get rid of this. +class DummyPolicyProblemTTT(problem.Problem): + """Dummy Problem for running the policy.""" + + def __init__(self): + super(DummyPolicyProblemTTT, self).__init__() + self._ttt_env = TicTacToeEnv() + + def hparams(self, defaults, model_hparams): + # Update the env's hparams. + self._ttt_env.hparams(defaults, model_hparams) + # Do these belong here? + defaults.modality.update({ + "input_action": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, + "input_reward": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, + "target_action": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, + "target_reward": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, + "target_policy": modalities.ModalityType.IDENTITY, + "target_value": modalities.ModalityType.IDENTITY, + }) + defaults.vocab_size.update({ + "input_action": self.num_actions, + "input_reward": 3, # -1, 0, +1 ? + "target_action": self.num_actions, + "target_reward": 3, # -1, 0, +1 ? + "target_policy": None, + "target_value": None, + }) + + @property + def num_actions(self): + return self._ttt_env.action_space.n + + +def register(): + # Register this with gym. + unused_tictactoe_id, unused_tictactoe_env = gym_utils.register_gym_env( + "tensor2tensor.envs.tic_tac_toe_env:TicTacToeEnv", version="v0") + + +# TODO(afrozm): Fix the registration and make it automatic. +register() diff --git a/tensor2tensor/envs/tic_tac_toe_env_problem.py b/tensor2tensor/envs/tic_tac_toe_env_problem.py new file mode 100644 index 000000000..4f99516ff --- /dev/null +++ b/tensor2tensor/envs/tic_tac_toe_env_problem.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""TicTacToeEnvProblem wraps the TicTacToeEnv in an EnvProblem.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.envs import gym_env_problem +from tensor2tensor.layers import modalities +from tensor2tensor.utils import registry + + +@registry.register_env_problem +class TicTacToeEnvProblem(gym_env_problem.GymEnvProblem): + """Plays `batch_size` games of tic-tac-toe.""" + + def __init__(self): + super(TicTacToeEnvProblem, self).__init__( + base_env_name="T2TEnv-TicTacToeEnv-v0", + reward_range=(-1, 1)) + + @property + def input_modality(self): + return modalities.ModalityType.IDENTITY_SYMBOL + + @property + def input_vocab_size(self): + # Since a box can be either x or o or empty. + return 3 + + @property + def target_modality(self): + return modalities.ModalityType.IDENTITY_SYMBOL + + @property + def target_vocab_size(self): + # Since reward is either -1 or 0 or +1. + return 3 + + @property + def action_modality(self): + return modalities.ModalityType.SYMBOL_WEIGHTS_ALL diff --git a/tensor2tensor/envs/tic_tac_toe_env_problem_test.py b/tensor2tensor/envs/tic_tac_toe_env_problem_test.py new file mode 100644 index 000000000..6aac41db2 --- /dev/null +++ b/tensor2tensor/envs/tic_tac_toe_env_problem_test.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.envs.tic_tac_toe_env_problem.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from tensor2tensor.envs import env_problem_utils +from tensor2tensor.envs import tic_tac_toe_env # pylint: disable=unused-import +from tensor2tensor.envs import tic_tac_toe_env_problem # pylint: disable=unused-import +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf + + +class TicTacToeEnvProblemTest(tf.test.TestCase): + + def test_registration_and_interaction_with_env_problem(self): + batch_size = 5 + # This ensures that registration has occurred. + ep = registry.env_problem("tic_tac_toe_env_problem", batch_size=batch_size) + ep.reset() + num_done, num_lost, num_won, num_draw = 0, 0, 0, 0 + nsteps = 100 + for _ in range(nsteps): + actions = np.stack([ep.action_space.sample() for _ in range(batch_size)]) + obs, rewards, dones, infos = ep.step(actions) + + # Assert that things are happening batchwise. 
+ self.assertEqual(batch_size, len(obs)) + self.assertEqual(batch_size, len(rewards)) + self.assertEqual(batch_size, len(dones)) + self.assertEqual(batch_size, len(infos)) + + done_indices = env_problem_utils.done_indices(dones) + ep.reset(done_indices) + num_done += sum(dones) + for r, d in zip(rewards, dones): + if not d: + continue + if r == -1: + num_lost += 1 + elif r == 0: + num_draw += 1 + elif r == 1: + num_won += 1 + else: + raise ValueError("reward should be -1, 0, 1 but is {}".format(r)) + + # Assert that something got done atleast, without that the next assert is + # meaningless. + self.assertGreater(num_done, 0) + + # Assert that things are consistent. + self.assertEqual(num_done, num_won + num_lost + num_draw) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/envs/tic_tac_toe_env_test.py b/tensor2tensor/envs/tic_tac_toe_env_test.py new file mode 100644 index 000000000..f277d94a2 --- /dev/null +++ b/tensor2tensor/envs/tic_tac_toe_env_test.py @@ -0,0 +1,82 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.envs.tic_tac_toe_env.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.envs import tic_tac_toe_env as ttt_env +import tensorflow.compat.v1 as tf + + +class TicTacToeEnvTest(tf.test.TestCase): + + def test_start(self): + ttt = ttt_env.TicTacToeEnv(strict=True) + self.assertFalse(ttt.done) + + # At max one move may have been played by the env. + spaces = ttt_env.get_open_spaces(ttt.board_state) + num_open_spaces = len(spaces) + # i.e. either 8 or 9 + self.assertGreater(num_open_spaces, 7) + + # Play a move + observation, reward, done, unused_info = ttt.step(spaces[0]) + + # The environment should also have played a move. + spaces = ttt_env.get_open_spaces(observation) + self.assertEqual(num_open_spaces - 2, len(spaces)) + + # Since at-max 3 moves have been played, the game can't end. + self.assertEqual(reward, 0) + self.assertFalse(done) + + def test_env_actions(self): + # Environment keeps taking actions and not us, we should eventually lose. + ttt = ttt_env.TicTacToeEnv(strict=True) + for _ in range(9): + ttt.play_random_move() + if ttt.done: + break + + reward, done = ttt_env.get_reward_and_done(ttt.board_state) + self.assertEqual(-1, reward) + self.assertTrue(done) + + def test_keep_playing(self): + ttt = ttt_env.TicTacToeEnv(strict=False) + done = False + while not done: + # sample an action from the action space. + action = ttt.action_space.sample() + # play it -- could be a no-op since we don't see if positions are empty. 
+ unused_observation, reward, done, unused_info = ttt.step(action) + + # done is True, so either: + # we won + # env won or + # no space left + + we_won = reward == 1 + env_won = reward == -1 + space = bool(ttt_env.get_open_spaces(ttt.board_state)) + self.assertTrue(we_won or env_won or not space) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/envs/time_step.py b/tensor2tensor/envs/time_step.py new file mode 100644 index 000000000..58649652a --- /dev/null +++ b/tensor2tensor/envs/time_step.py @@ -0,0 +1,76 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""TimeStep is a simple class that holds the information seen at a time-step. + +Let: +r_t = Reward(s_{t-1}, a_{t-1}, s_t) - reward for getting into a state. +d_t = Done(s_t) - is this state terminal. +a_t = Action performed at state s_t +i_t = (optional) Dictionary of key, value pairs of miscellaneous data. + +Then the sequence of states, actions and rewards looks like the following: + +s0, a0/i0 s1/r1/d1, a1/i1 s2/r2/d2, a2/i2 s3/r3/d3, ... + +TimeStep holds (s_t, d_t, r_t, a_t, i_t). + +NOTE: When we call step on an environment at time-step t, we supply a_t and in +return the env gives us s_{t+1}, d_{t+1}, r_{t+1} + +So, we'd have to add the actions a_t/i_t to the current time-step, but add the +observations, rewards and dones to a new time-step. + +NOTE: wrt `info` - A good solution could be to have two additional fields in +TimeStep - structured algo_info (a namedtuple, possibly different for every +algorithm, or None if we don't use any) and unstructured env_info (a dict).)) +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + + +class TimeStep( + collections.namedtuple( + "TimeStep", + ["observation", "done", "raw_reward", "processed_reward", "action", + "info"])): + """This class represents the time-step as mentioned above.""" + + def replace(self, **kwargs): + """Exposes the underlying namedtuple replace.""" + + # NOTE: This RETURNS a NEW time-step with the replacements, i.e. doesn't + # modify self, since namedtuple is immutable. + + # This allows this to be called like ts.replace(action=a, raw_reward=r) etc. + + return self._replace(**kwargs) + + @classmethod + def create_time_step(cls, + observation=None, + done=False, + raw_reward=None, + processed_reward=None, + action=None, + info=None): + """Creates a TimeStep with both rewards and actions as optional.""" + + return cls(observation, done, raw_reward, processed_reward, action, + info) diff --git a/tensor2tensor/envs/time_step_test.py b/tensor2tensor/envs/time_step_test.py new file mode 100644 index 000000000..2b67639ef --- /dev/null +++ b/tensor2tensor/envs/time_step_test.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.envs.time_step.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.envs import time_step + +import tensorflow.compat.v1 as tf + + +class TimeStepTest(tf.test.TestCase): + + def test_create_time_step(self): + ts = time_step.TimeStep.create_time_step( + observation=1, done=True, raw_reward=1.0, processed_reward=1, action=1, + info={1: 1, 2: 4}) + + self.assertEqual(1, ts.observation) + self.assertTrue(ts.done) + self.assertNear(1.0, ts.raw_reward, 1e-6) + self.assertEqual(1, ts.processed_reward) + self.assertEqual(1, ts.action) + self.assertEqual({1: 1, 2: 4}, ts.info) + + def test_replace(self): + ts = time_step.TimeStep.create_time_step(observation=1, action=1) + self.assertFalse(ts.done) + + tsr = ts.replace(action=2, done=True, info={1: 1, 2: 4}) + + # Asert that ts didn't change. + self.assertFalse(ts.done) + self.assertEqual(1, ts.observation) + self.assertEqual(1, ts.action) + + # But tsr is as expected. + self.assertTrue(tsr.done) + self.assertEqual(1, tsr.observation) # unchanged + self.assertEqual(2, tsr.action) # changed + self.assertEqual({1: 1, 2: 4}, tsr.info) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/envs/trajectory.py b/tensor2tensor/envs/trajectory.py new file mode 100644 index 000000000..91b580893 --- /dev/null +++ b/tensor2tensor/envs/trajectory.py @@ -0,0 +1,576 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Trajectory manages a sequence of TimeSteps. + +BatchTrajectory manages a batch of trajectories, also keeping account of +completed trajectories. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +import pickle +import re +import sys +import time +from absl import logging +import cloudpickle +import numpy as np +from tensor2tensor.envs import time_step +import tensorflow.compat.v1 as tf + +TRAJECTORY_FILE_FORMAT = r"trajectory_epoch_{epoch}_env_id_{env_id}_temperature_{temperature}_r_{r}.pkl" + + +def get_pickle_module(): + if sys.version_info[0] < 3: + return cloudpickle + return pickle + + +class Trajectory(object): + """Basically a list of TimeSteps with convenience methods.""" + + def __init__(self, time_steps=None): + # Contains a list of time steps. 
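To make the alignment convention from the `TimeStep` docstring above concrete, a minimal sketch (separate from the diff, with made-up values) of how an action attaches to the current time-step while the next observation, reward, and done start a new one:

```python
from tensor2tensor.envs import time_step

# s_0: only an observation; action and rewards are filled in later.
ts0 = time_step.TimeStep.create_time_step(observation=[0.0, 0.0])

# Acting with a_0 = 1 attaches the action to the current time-step.  `replace`
# returns a new namedtuple and does not mutate ts0.
ts0 = ts0.replace(action=1)

# s_1, r_1 and d_1 returned by the environment start the next time-step.
ts1 = time_step.TimeStep.create_time_step(
    observation=[0.1, -0.2], raw_reward=1.0, processed_reward=1, done=False)
```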
+ if time_steps is None: + self._time_steps = [] + else: + self._time_steps = time_steps + + def __str__(self): + if not self.time_steps: + return "Trajectory[]" + return "Trajectory[{}]".format(", ".join(str(ts) for ts in self.time_steps)) + + def add_time_step(self, **create_time_step_kwargs): + """Creates a time-step and appends it to the list. + + Args: + **create_time_step_kwargs: Forwarded to + time_step.TimeStep.create_time_step. + """ + ts = time_step.TimeStep.create_time_step(**create_time_step_kwargs) + assert isinstance(ts, time_step.TimeStep) + self._time_steps.append(ts) + + def change_last_time_step(self, **replace_time_step_kwargs): + """Replace the last time-steps with the given kwargs.""" + + # Pre-conditions: self._time_steps shouldn't be empty. + assert self._time_steps + self._time_steps[-1] = self._time_steps[-1].replace( + **replace_time_step_kwargs) + + def truncate(self, num_to_keep=1): + """Truncate trajectories, keeping the last `num_to_keep` time-steps.""" + + # We return `ts_copy` back to the truncator. + ts_copy = self._time_steps[:] + + # We keep the last few observations. + self._time_steps = self._time_steps[-num_to_keep:] + + # NOTE: We will need to set the rewards to 0, to eliminate double counting. + for i in range(self.num_time_steps): + self._time_steps[i] = self._time_steps[i].replace( + raw_reward=0, processed_reward=0) + + return Trajectory(time_steps=ts_copy) + + @property + def last_time_step(self): + # Pre-conditions: self._time_steps shouldn't be empty. + assert self._time_steps + return self._time_steps[-1] + + @property + def num_time_steps(self): + return len(self._time_steps) + + @property + def is_active(self): + return bool(self.num_time_steps) + + @property + def time_steps(self): + return self._time_steps + + @property + def done(self): + return self.is_active and self.last_time_step.done + + # TODO(afrozm): Add discounting and rewards-to-go when it makes sense. + @property + def reward(self): + """Returns a tuple of sum of raw and processed rewards.""" + raw_rewards, processed_rewards = 0, 0 + for ts in self.time_steps: + # NOTE: raw_reward and processed_reward are None for the first time-step. + if ts.raw_reward is not None: + raw_rewards += ts.raw_reward + if ts.processed_reward is not None: + processed_rewards += ts.processed_reward + return raw_rewards, processed_rewards + + @property + def observations_np(self): + return np.stack([ts.observation for ts in self.time_steps]) + + def last_n_observations_np(self, n=None): + if n is not None: + n = -n # pylint: disable=invalid-unary-operand-type + return np.stack([ts.observation for ts in self.time_steps[n:]]) + + @property + def actions_np(self): + # The last action is None, so let's skip it. + return np.stack([ts.action for ts in self.time_steps[:-1]]) + + @property + def info_np(self): + if not self.time_steps or not self.time_steps[0].info: + return None + info_np_dict = {} + for info_key in self.time_steps[0].info: + # Same as actions, the last info is missing, so we skip it. + info_np_dict[info_key] = np.stack( + [ts.info[info_key] for ts in self.time_steps[:-1]]) + return info_np_dict + + @property + def rewards_np(self): + # The first reward is None, so let's skip it. 
+ return np.stack([ts.processed_reward for ts in self.time_steps[1:]]) + + @property + def raw_rewards_np(self): + return np.stack([ts.raw_reward for ts in self.time_steps[1:]]) + + @property + def as_numpy(self): + # TODO(afrozm): Return a named tuple here, ex: TrajectoryArrays + return (self.observations_np, self.actions_np, self.rewards_np, + self.raw_rewards_np, self.info_np) + + +class BatchTrajectory(object): + """Basically a batch of active trajectories and a list of completed ones.""" + + def __init__(self, + batch_size=1, + trajectories=None, + completed_trajectories=None): + self.batch_size = batch_size + + # Stores trajectories that are currently active, i.e. aren't done or reset. + self._trajectories = trajectories or [ + Trajectory() for _ in range(self.batch_size) + ] + + # Stores trajectories that are completed. + # NOTE: We don't track the index this came from, as it's not needed, right? + self._completed_trajectories = completed_trajectories or [] + + def reset_batch_trajectories(self): + self.__init__(batch_size=self.batch_size) + + def __str__(self): + string = "BatchTrajectory[" + for i, t in enumerate(self.trajectories): + string += "Trajectory {} = {}\n".format(i, str(t)) + for i, t in enumerate(self.completed_trajectories): + string += "Completed Trajectory {} = {}\n".format(i, str(t)) + return string + "]" + + @property + def trajectories(self): + return self._trajectories + + @property + def completed_trajectories(self): + return self._completed_trajectories + + def clear_completed_trajectories(self, num=None): + """Clear the first `num` completed trajectories, or all if num is None.""" + if num is None: + self._completed_trajectories = [] + else: + self._completed_trajectories = self._completed_trajectories[num:] + + def _complete_trajectory(self, trajectory, index): + """Completes the given trajectory at the given index.""" + + assert isinstance(trajectory, Trajectory) + + # This *should* be the case. + assert trajectory.last_time_step.action is None + + # Add to completed trajectories. + self._completed_trajectories.append(trajectory) + + # Make a new one to replace it. + self._trajectories[index] = Trajectory() + + def truncate_trajectories(self, indices, num_to_keep=1): + """Truncate trajectories at specified indices. + + This puts the truncated trajectories in the completed list and makes new + trajectories with the observation from the trajectory that was truncated at + the same index. + + Args: + indices: iterable with the indices to truncate. + num_to_keep: int, number of last time-steps to keep while truncating. + """ + for index in indices: + trajectory = self._trajectories[index] + assert trajectory.is_active, "Trajectory to truncate can't be inactive." + + # Now `trajectory` just consists of the last `num_to_keep` observations + # and actions. Rewards are zeroed out. + # The old data is placed in `old_trajectory`. + old_trajectory = trajectory.truncate(num_to_keep=num_to_keep) + + # We put the old data in _completed_trajectories. + self._completed_trajectories.append(old_trajectory) + + def reset(self, indices, observations): + """Resets trajectories at given indices and populates observations. + + Reset can either be called right at the beginning, when there are no + time-steps, or to reset a currently active trajectory. + + If resetting a currently active trajectory then we save it in + self._completed_trajectories. + + Args: + indices: 1-D np.ndarray stating the indices to reset. 
+ observations: np.ndarray of shape (indices len, obs.shape) of observations + """ + + # Pre-conditions: indices, observations are np arrays. + # : indices is one-dimensional. + # : their first dimension (batch) is the same. + assert isinstance(indices, np.ndarray) + assert len(indices.shape) == 1 + assert isinstance(observations, np.ndarray) + assert indices.shape[0] == observations.shape[0] + + for index, observation in zip(indices, observations): + trajectory = self._trajectories[index] + + # Are we starting a new trajectory at the given index? + if not trajectory.is_active: + # Then create a new time-step here with the given observation. + trajectory.add_time_step(observation=observation) + # That's all we need to do here. + continue + + # If however we are resetting a currently active trajectory then we need + # to put that in self._completed_trajectories and make a new trajectory + # with the current observation. + + # TODO(afrozm): Should we mark these are done? Or is the done=False and + # this being the last time-step in the trajectory good enough to recognize + # that this was reset? + + # Mark trajectory as completed and move into completed_trajectories. + self._complete_trajectory(trajectory, index) + + # Put the observation in the newly created trajectory. + # TODO(afrozm): Add 0 reward. + self._trajectories[index].add_time_step(observation=observation) + + def complete_all_trajectories(self): + """Essentially same as reset, but we don't have observations.""" + for index in range(self.batch_size): + trajectory = self._trajectories[index] + # TODO(pkozakowski): This assertion breaks something in SimPLe trajectory + # collection code - we're probably doing something wrong there. Commenting + # out the assertion as a temporary measure. + # assert trajectory.is_active + if trajectory.is_active: + self._complete_trajectory(trajectory, index) + + def step(self, + observations, + raw_rewards, + processed_rewards, + dones, + actions, + infos=None): + """Record the information obtained from taking a step in all envs. + + Records (observation, rewards, done) in a new time-step and actions in the + current time-step. + + If any trajectory gets done, we move that trajectory to + completed_trajectories. + + Args: + observations: ndarray of first dimension self.batch_size, which has the + observations after we've stepped, i.e. s_{t+1} where t is the current + state. + raw_rewards: ndarray of first dimension self.batch_size containing raw + rewards i.e. r_{t+1}. + processed_rewards: ndarray of first dimension self.batch_size containing + processed rewards. i.e. r_{t+1} + dones: ndarray of first dimension self.batch_size, containing true at an + index if that env is done, i.e. d_{t+1} + actions: ndarray of first dimension self.batch_size, containing actions + applied at the current time-step, which leads to the observations + rewards and done at the next time-step, i.e. a_t + infos: (optional) a dictionary of keys and values, where all the values + have the first dimension as self.batch_size. + """ + # Pre-conditions + assert isinstance(observations, np.ndarray) + assert isinstance(raw_rewards, np.ndarray) + assert isinstance(processed_rewards, np.ndarray) + assert isinstance(dones, np.ndarray) + assert isinstance(actions, np.ndarray) + if infos: + assert isinstance(infos, dict) + + # We assume that we step in all envs, i.e. not like reset where we can reset + # some envs and not others. 
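As an aside (separate from the diff), the reset/step bookkeeping described in the docstrings above can be seen end to end in a small sketch; the shapes and values are made up:

```python
import numpy as np
from tensor2tensor.envs import trajectory

bt = trajectory.BatchTrajectory(batch_size=2)

# reset() seeds each active trajectory with its first observation (s_0).
bt.reset(indices=np.arange(2), observations=np.zeros((2, 4)))

# step() records a_t on the current time-step and opens a new time-step with
# (s_{t+1}, r_{t+1}, d_{t+1}).  Env 1 finishes here, so its trajectory moves to
# completed_trajectories and a fresh, inactive one takes its place.
bt.step(observations=np.ones((2, 4)),
        raw_rewards=np.array([0.5, 1.0]),
        processed_rewards=np.array([1, 1]),
        dones=np.array([False, True]),
        actions=np.array([0, 1]))

assert bt.num_completed_trajectories == 1
assert not bt.trajectories[1].is_active   # needs a reset before stepping again
```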
+ assert self.batch_size == observations.shape[0] + assert self.batch_size == raw_rewards.shape[0] + assert self.batch_size == processed_rewards.shape[0] + assert self.batch_size == dones.shape[0] + assert self.batch_size == actions.shape[0] + if infos: + for _, v in infos.items(): + assert self.batch_size == len(v) + + def extract_info_at_index(infos, index): + if not infos: + return None + return {k: v[index] for k, v in infos.items()} + + for index in range(self.batch_size): + trajectory = self._trajectories[index] + + # NOTE: If the trajectory isn't active, that means it doesn't have any + # time-steps in it, but we are in step, so the assumption is that it has + # a prior observation from which we are stepping away from. + + # TODO(afrozm): Let's re-visit this if it becomes too restrictive. + assert trajectory.is_active + + # To this trajectory's last time-step, set actions. + trajectory.change_last_time_step( + action=actions[index], info=extract_info_at_index(infos, index)) + + # Create a new time-step to add observation, done & rewards (no actions). + trajectory.add_time_step( + observation=observations[index], + done=dones[index], + raw_reward=raw_rewards[index], + processed_reward=processed_rewards[index]) + + # If the trajectory is completed, i.e. dones[index] == True, then we + # account for it right-away. + if dones[index]: + self._complete_trajectory(trajectory, index) + + # NOTE: The new trajectory at `index` is going to be in-active and + # `reset` should be called on it. + assert not self._trajectories[index].is_active + + @staticmethod + def _trajectory_lengths(trajectories): + return np.array([t.num_time_steps for t in trajectories]) + + @property + def num_completed_time_steps(self): + """Returns the number of time-steps in completed trajectories.""" + + return sum(BatchTrajectory._trajectory_lengths(self.completed_trajectories)) + + @property + def num_time_steps(self): + """Returns the number of time-steps in completed and incomplete trajectories.""" + + num_time_steps = sum(BatchTrajectory._trajectory_lengths(self.trajectories)) + return num_time_steps + self.num_completed_time_steps + + @property + def trajectory_lengths(self): + return BatchTrajectory._trajectory_lengths(self.trajectories) + + @property + def num_completed_trajectories(self): + """Returns the number of completed trajectories.""" + return len(self.completed_trajectories) + + # TODO(afrozm): Take in an already padded observation ndarray and just append + # the last time-step and adding more padding if needed. + def observations_np(self, boundary=20, len_history_for_policy=20): + """Pads the observations in all the trajectories and returns them. + + Args: + boundary: integer, Observations will be padded to (n * boundary) + 1 where + n is an integer. + len_history_for_policy: int, For each trajectory return only the last + `len_history_for_policy` observations. Set to None for all the + observations. 
+ + Returns: + padded_observations: (self.batch_size, n * boundary + 1) + OBS + """ + list_observations_np_ts = [ + t.last_n_observations_np(n=len_history_for_policy) + for t in self.trajectories + ] + # Every element in `list_observations_np_ts` is shaped (t,) + OBS + OBS = list_observations_np_ts[0].shape[1:] # pylint: disable=invalid-name + + trajectory_lengths = np.stack( + [obs.shape[0] for obs in list_observations_np_ts]) + + t_max = max(trajectory_lengths) + # t_max is rounded to the next multiple of `boundary` + boundary = int(boundary) + bucket_length = boundary * int(np.ceil(float(t_max) / boundary)) + + def padding_config(obs): + # We're padding the first axis only, since that is the time-step. + num_to_pad = bucket_length + 1 - obs.shape[0] + return [(0, num_to_pad)] + [(0, 0)] * len(OBS) + + return np.stack([ + np.pad(obs, padding_config(obs), "constant") + for obs in list_observations_np_ts + ]), trajectory_lengths + + @staticmethod + def parse_trajectory_file_name(trajectory_file_name): + """Parse out the trajectory file's groups and return to caller.""" + base_trajectory_file_name = os.path.basename(trajectory_file_name) + trajectory_file_regexp = TRAJECTORY_FILE_FORMAT.format( + epoch="(.*)", + env_id="(.*)", + temperature="(.*)", + r="(.*)", + ) + compiled_regexp = re.compile(trajectory_file_regexp) + r = compiled_regexp.match(base_trajectory_file_name) + if not r: + return None + g = r.groups() + if len(g) is not compiled_regexp.groups: + return None + # epoch, env_id, temp, random string + try: + epoch = int(g[0]) + env_id = int(g[1]) + temperature = float(g[2]) + random_string = g[3] + except ValueError: + logging.error("Trajectory file name isn't parseable: %s", + base_trajectory_file_name) + return None + return epoch, env_id, temperature, random_string + + @staticmethod + def load_from_directory(trajectory_dir, + epoch=None, + temperature=None, + n_trajectories=None, + up_sample=False, + sleep_time_secs=0.1, + max_tries=100, + wait_forever=False): + """Load trajectories from specified dir and epoch. + + Args: + trajectory_dir: (string) directory to find trajectories. + epoch: (int) epoch for which to load trajectories, if None we don't filter + on an epoch. + temperature: (float) this is used to filter the trajectory files, if None + we don't filter on temperature. + n_trajectories: (int) This is the batch size of the returned + BatchTrajectory object if one is returned. If set to None, then the + number of trajectories becomes the batch size. If set to some number, + then we wait for those many trajectory files to be available. + up_sample: (bool) If there are fewer than required (n_trajectories) number + of incomplete trajectories, then we upsample to make up the numbers. + sleep_time_secs: (float) Sleep time, to wait for min_trajectories. We + exponentially back-off this up till a maximum of 10 seconds. + max_tries: (int) The number of tries to get min_trajectories trajectories. + wait_forever: (bool) If true, overrides max_tries and waits forever. + + Returns: + A BatchTrajectory object with all the constraints satisfied or None. + """ + + # Modify the format to get a glob with desired epoch and temperature. 
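+ # For example, with epoch and temperature both None this produces a glob of
+ # the form "trajectory_epoch_*_env_id_*_temperature_*_r_*.pkl" (matching the
+ # file names handled by parse_trajectory_file_name above).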
+ trajectory_file_glob = TRAJECTORY_FILE_FORMAT.format( + epoch=epoch if epoch is not None else "*", + env_id="*", + temperature=temperature if temperature is not None else "*", + r="*", + ) + + trajectory_files = tf.io.gfile.glob( + os.path.join(trajectory_dir, trajectory_file_glob)) + + if n_trajectories: + # We need to get `n_trajectories` number of `trajectory_files`. + # This works out to a maximum ~3hr waiting period. + while ((max_tries > 0 or wait_forever) and + len(trajectory_files) < n_trajectories): + logging.info( + "Sleeping for %s seconds while waiting for %s trajectories, found " + "%s right now.", sleep_time_secs, n_trajectories, + len(trajectory_files)) + time.sleep(sleep_time_secs) + max_tries -= 1 + sleep_time_secs = min(10.0, sleep_time_secs * 2) + trajectory_files = tf.io.gfile.glob( + os.path.join(trajectory_dir, trajectory_file_glob)) + + # We can't get the required number of files and we can't up-sample either. + if (len(trajectory_files) < n_trajectories) and not up_sample: + return None + + # Sample up or down as the case maybe. + trajectory_files = list( + np.random.choice(trajectory_files, n_trajectories)) + + # We read and load all the files, revisit if this becomes a problem. + trajectories_buffer = [] + for trajectory_file in trajectory_files: + with tf.io.gfile.GFile(trajectory_file, "rb") as f: + trajectory = get_pickle_module().load(f) + assert isinstance(trajectory, Trajectory) + trajectories_buffer.append(trajectory) + + if not trajectories_buffer: + return None + + # If n_trajectories wasn't set, then set to the number of trajectories we're + # returning. + n_trajectories = n_trajectories or len(trajectories_buffer) + + # Construct and return a new BatchTrajectory object. + return BatchTrajectory( + batch_size=n_trajectories, + trajectories=[Trajectory() for _ in range(n_trajectories)], + completed_trajectories=trajectories_buffer) diff --git a/tensor2tensor/envs/trajectory_test.py b/tensor2tensor/envs/trajectory_test.py new file mode 100644 index 000000000..af0652d57 --- /dev/null +++ b/tensor2tensor/envs/trajectory_test.py @@ -0,0 +1,566 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.envs.trajectory.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +from tensor2tensor.envs import time_step +from tensor2tensor.envs import trajectory +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1.io import gfile + + +class TrajectoryTest(tf.test.TestCase): + + def test_empty_trajectory(self): + t = trajectory.Trajectory() + self.assertFalse(t.is_active) + self.assertEqual(0, t.num_time_steps) + self.assertFalse(t.done) + + def test_add_time_step(self): + t = trajectory.Trajectory() + t.add_time_step(observation=1, done=True) + + # Test that the trajectory is now active. 
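+ # (A Trajectory becomes active as soon as it holds at least one time-step;
+ # an empty Trajectory reports is_active == False.)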
+ self.assertTrue(t.is_active) + + added_t = t.last_time_step + self.assertEqual(1, added_t.observation) + self.assertTrue(added_t.done) + self.assertIsNone(None, added_t.raw_reward) + self.assertIsNone(None, added_t.processed_reward) + self.assertIsNone(None, added_t.action) + + self.assertEqual(1, t.num_time_steps) + + def test_change_last_time_step(self): + t = trajectory.Trajectory() + t.add_time_step(observation=1, done=False) + t.add_time_step(observation=1, done=True) + self.assertTrue(t.is_active) + + num_ts_old = t.num_time_steps + self.assertEqual(2, num_ts_old) + + # Assert on what the last time-step is currently. + ts = t.last_time_step + self.assertEqual(1, ts.observation) + self.assertTrue(ts.done) + self.assertEqual(None, ts.action) + + # Change the last time-step. + t.change_last_time_step(done=False, action=5) + + # Assert that it changed. + ts = t.last_time_step + self.assertEqual(1, ts.observation) # unchanged, since we didn't change it. + self.assertFalse(ts.done) # was True earlier + self.assertEqual(5, ts.action) # was None earlier + + # Assert on the number of steps remaining the same as before. + self.assertEqual(num_ts_old, t.num_time_steps) + + def test_reward(self): + t = trajectory.Trajectory() + # first time-step doesn't have rewards, since they are on entering a state. + t.add_time_step( + observation=1, raw_reward=None, processed_reward=None, done=False) + t.add_time_step( + observation=2, raw_reward=2, processed_reward=200, done=False) + t.add_time_step( + observation=3, raw_reward=3, processed_reward=300, done=True) + + raw_reward, processed_reward = t.reward + + self.assertEqual(5, raw_reward) + self.assertEqual(500, processed_reward) + + def test_observation_np(self): + t = trajectory.Trajectory() + ts = 5 + shape = (3, 4) + for _ in range(ts): + t.add_time_step(observation=np.random.uniform(size=shape), done=False) + + self.assertEqual((ts,) + shape, t.observations_np.shape) + + def test_truncate_and_last_n_observations_np(self): + t = trajectory.Trajectory() + ts = 5 + shape = (3, 4) + for _ in range(ts): + t.add_time_step(observation=np.random.uniform(size=shape), done=False) + + original_obs = np.copy(t.observations_np) + self.assertEqual((ts,) + shape, original_obs.shape) + + # Now let's just get the observations from the last 2 steps. + num_to_keep = 2 + truncated_original_obs = original_obs[-num_to_keep:, ...] + + # Let's get the last `num_to_keep` observations + last_n_observations_np = np.copy(t.last_n_observations_np(n=num_to_keep)) + + # Now truncate the trajectory and get the same. + _ = t.truncate(num_to_keep=num_to_keep) + truncated_np = np.copy(t.observations_np) + + # These should be the expected length. + self.assertEqual((2,) + shape, last_n_observations_np.shape) + self.assertEqual((2,) + shape, truncated_np.shape) + + # Test the last `num_to_keep` are the same. + self.assertAllEqual(truncated_np, truncated_original_obs) + self.assertAllEqual(last_n_observations_np, truncated_original_obs) + + def test_as_numpy(self): + t = trajectory.Trajectory() + shape = (3, 4) + + # We'll have `ts` observations and `ts-1` actions and rewards. + ts = 5 + num_actions = 6 + observations = np.random.uniform(size=(ts,) + shape) + actions = np.random.choice(range(num_actions), size=(ts - 1,)) + rewards = np.random.choice([-1, 0, 1], size=(ts - 1,)) + squares = np.arange(ts - 1)**2 + cubes = np.arange(ts - 1)**3 + + def get_info(i): + return {"sq": squares[i], "cu": cubes[i]} + + # First time-step has no reward. 
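+ # (Rewards are recorded on entering a state, so the reward matching
+ # actions[0] is attached to the next time-step rather than this one.)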
+ t.add_time_step( + observation=observations[0], + done=False, + action=actions[0], + info=get_info(0)) + for i in range(1, ts - 1): + t.add_time_step( + observation=observations[i], + done=False, + raw_reward=rewards[i - 1], + processed_reward=rewards[i - 1], + action=actions[i], + info=get_info(i)) + # Last time-step has no action. + t.add_time_step( + observation=observations[-1], + done=False, + raw_reward=rewards[-1], + processed_reward=rewards[-1]) + + traj_np = t.as_numpy + + self.assertAllEqual(observations, traj_np[0]) + self.assertAllEqual(actions, traj_np[1]) + self.assertAllEqual(rewards, traj_np[2]) + + self.assertAllEqual(squares, traj_np[4]["sq"]) + self.assertAllEqual(cubes, traj_np[4]["cu"]) + + +class BatchTrajectoryTest(tf.test.TestCase): + + BATCH_SIZE = 10 + OBSERVATION_SHAPE = (3, 4) + + def get_random_observations_rewards_actions_dones(self, batch_size=None): + batch_size = batch_size or self.BATCH_SIZE + # Random observations, rewards, actions, done of the expected shape. + observations = np.random.rand(*((batch_size,) + self.OBSERVATION_SHAPE)) + raw_rewards = np.random.randn(batch_size) + actions = np.random.randn(batch_size) + # 40% change of being done. + dones = np.random.random((batch_size,)) > 0.6 + + return observations, raw_rewards, actions, dones + + def test_creation(self): + bt = trajectory.BatchTrajectory(batch_size=self.BATCH_SIZE) + + self.assertEqual(self.BATCH_SIZE, len(bt.trajectories)) + self.assertEqual(0, bt.num_completed_trajectories) + + def test_reset_all(self): + bt = trajectory.BatchTrajectory(batch_size=self.BATCH_SIZE) + + indices = np.arange(self.BATCH_SIZE) + observations, _, _, _ = self.get_random_observations_rewards_actions_dones() + + # Call reset. + bt.reset(indices, observations) + + # Assert that all trajectories are active and not done (reset never marks + # anything as done). + self.assertTrue(all(t.is_active for t in bt.trajectories)) + self.assertEqual(0, bt.num_completed_trajectories) + + def test_num_time_steps(self): + bt = trajectory.BatchTrajectory(batch_size=self.BATCH_SIZE) + + self.assertEqual(0, bt.num_completed_time_steps) + self.assertEqual(0, bt.num_time_steps) + + def test_reset_some(self): + bt = trajectory.BatchTrajectory(batch_size=self.BATCH_SIZE) + + indices = np.arange(self.BATCH_SIZE // 2) + observations, _, _, _ = self.get_random_observations_rewards_actions_dones( + batch_size=self.BATCH_SIZE // 2) + + # Just reset the first half. + bt.reset(indices, observations) + + # So first half are active, rest aren't. + self.assertTrue( + all(t.is_active for t in bt.trajectories[:self.BATCH_SIZE // 2])) + self.assertTrue( + all(not t.is_active for t in bt.trajectories[self.BATCH_SIZE // 2:])) + + # Nothing is done anyways. + self.assertEqual(0, bt.num_completed_trajectories) + + def test_truncate(self): + batch_size = 1 + bt = trajectory.BatchTrajectory(batch_size=batch_size) + + indices = np.arange(batch_size) + observations, _, _, _ = ( + self.get_random_observations_rewards_actions_dones( + batch_size=batch_size)) + + # Have to call reset first. + bt.reset(indices, observations) + + # Take a few steps. + ts = 5 + for _ in range(ts): + (observations, rewards, actions, + dones) = self.get_random_observations_rewards_actions_dones( + batch_size=batch_size) + dones[...] 
= False + bt.step(observations, rewards, rewards, dones, actions) + + self.assertEqual(0, bt.num_completed_trajectories) + + num_to_keep = 2 + bt.truncate_trajectories(indices, num_to_keep=num_to_keep) + + self.assertEqual(batch_size, bt.num_completed_trajectories) + + # Assert they are all active. + # Since the last `num_to_keep` observations were duplicated. + self.assertTrue(all(t.is_active for t in bt.trajectories)) + + orig_obs = bt.completed_trajectories[0].observations_np + # + 1 because of the initial reset + self.assertEqual(ts + 1, orig_obs.shape[0]) + + trunc_obs = bt.trajectories[0].observations_np + self.assertEqual(num_to_keep, trunc_obs.shape[0]) + self.assertEqual(num_to_keep, bt.trajectories[0].num_time_steps) + + # Test that the observations are the same. + self.assertAllEqual(orig_obs[-num_to_keep:, ...], trunc_obs) + + def test_step(self): + bt = trajectory.BatchTrajectory(batch_size=self.BATCH_SIZE) + + indices = np.arange(self.BATCH_SIZE) + observations, _, _, _ = self.get_random_observations_rewards_actions_dones() + + # Have to call reset first. + bt.reset(indices, observations) + + # Create some fake data for calling step. + new_observations, raw_rewards, actions, dones = ( + self.get_random_observations_rewards_actions_dones()) + processed_rewards = raw_rewards.astype(np.int64) + + # Force mark the first one as done anyways, so that there is something to + # test. + dones[0] = True + + num_done = sum(dones) + self.assertLessEqual(1, num_done) # i.e. num_done is atleast 1. + + num_not_done = len(dones) - num_done + + # Finally call step. + bt.step(new_observations, raw_rewards, processed_rewards, dones, actions) + + # Expect to see `num_done` number of completed trajectories. + self.assertEqual(num_done, bt.num_completed_trajectories) + + # Expect to see that the rest are marked as active. + num_active = sum(t.is_active for t in bt.trajectories) + self.assertEqual(num_not_done, num_active) + + def test_desired_placement_of_rewards_and_actions(self): + batch_size = 1 + bt = trajectory.BatchTrajectory(batch_size=batch_size) + + indices = np.arange(batch_size) + observations, _, _, _ = self.get_random_observations_rewards_actions_dones( + batch_size=batch_size) + + # Have to call reset first. + bt.reset(indices, observations) + + # Create some fake data for calling step. + new_observations, raw_rewards, actions, _ = ( + self.get_random_observations_rewards_actions_dones( + batch_size=batch_size)) + processed_rewards = raw_rewards.astype(np.int64) + dones = np.full(batch_size, False) + + # Call step. + bt.step(new_observations, raw_rewards, processed_rewards, dones, actions) + + # Assert that nothing is done, since dones is False + self.assertEqual(0, bt.num_completed_trajectories) + + # The only trajectory is active. + self.assertEqual(batch_size, len(bt.trajectories)) + t = bt.trajectories[0] + self.assertTrue(t.is_active) + self.assertEqual(2, t.num_time_steps) + + ts = t.time_steps + + # Now assert on placements + + # i.e. the old observation/done is first and the new one comes later. + self.assertAllEqual(observations[0], ts[0].observation) + self.assertAllEqual(new_observations[0], ts[1].observation) + + self.assertEqual(False, ts[0].done) + self.assertEqual(False, ts[1].done) + + # Similarly actions went to the first time-step. + self.assertEqual(actions[0], ts[0].action) + self.assertIsNone(ts[1].action) + + # However make sure reward went into the second time-step and not the first. 
+ self.assertNear(raw_rewards[0], ts[1].raw_reward, 1e-6) + self.assertIsNone(ts[0].raw_reward) + + # Similarly with processed_rewards. + self.assertEqual(processed_rewards[0], ts[1].processed_reward) + self.assertIsNone(ts[0].processed_reward) + + def test_observations_np(self): + bt = trajectory.BatchTrajectory(batch_size=self.BATCH_SIZE) + indices = np.arange(self.BATCH_SIZE) + observations, _, _, _ = self.get_random_observations_rewards_actions_dones() + + # Have to call reset first. + bt.reset(indices, observations) + + # Number of time-steps now looks like the following: + # (1, 1, 1, 1, 1, 1, 1, 1, 1, 1) + lengths = np.full((self.BATCH_SIZE,), 1) + + ts = 5 + for _ in range(ts): + (observations, rewards, actions, + dones) = self.get_random_observations_rewards_actions_dones() + dones[...] = False + bt.step(observations, rewards, rewards, dones, actions) + + # Number of time-steps now looks like the following: + # (6, 6, 6, 6, 6, 6, 6, 6, 6, 6) + lengths = lengths + ts + + # Now let's mark the first two as done. + observations, _, _, _ = self.get_random_observations_rewards_actions_dones( + batch_size=2) + bt.reset(np.array([0, 1]), observations) + + # Number of time-steps now looks like the following: + # (1, 1, 6, 6, 6, 6, 6, 6, 6, 6) + lengths[0] = lengths[1] = 1 + + for _ in range(ts): + (observations, rewards, actions, + dones) = self.get_random_observations_rewards_actions_dones() + dones[...] = False + bt.step(observations, rewards, rewards, dones, actions) + + # Number of time-steps now looks like the following: + # (6, 6, 11, 11, 11, 11, 11, 11, 11, 11) + lengths = lengths + ts + + boundary = 20 + len_history_for_policy = 40 + + padded_obs_np, padded_lengths = bt.observations_np( + boundary=boundary, len_history_for_policy=len_history_for_policy) + + # The lengths are what we expect them to be. + self.assertAllEqual(lengths, padded_lengths) + + # The padded_observations are the shape we expect them to be. + self.assertEqual((self.BATCH_SIZE, boundary + 1) + self.OBSERVATION_SHAPE, + padded_obs_np.shape) + + # Let's now request the last n = [1, 2 * boundary) steps for the history. + for len_history_for_policy in range(1, 2 * boundary): + # The expected lengths will now be: + truncated_lengths = [min(l, len_history_for_policy) for l in lengths] + + padded_obs_np, padded_lengths = bt.observations_np( + boundary=boundary, len_history_for_policy=len_history_for_policy) + + self.assertAllEqual(truncated_lengths, padded_lengths) + + # This shouldn't change, since even if we request lengths > boundary + 1 + # there are no trajectories that long. + self.assertEqual((self.BATCH_SIZE, boundary + 1) + self.OBSERVATION_SHAPE, + padded_obs_np.shape) + + # Let's do 10 more steps (to go on the other side of the boundary. + ts = 10 + for _ in range(ts): + (observations, rewards, actions, + dones) = self.get_random_observations_rewards_actions_dones() + dones[...] = False + bt.step(observations, rewards, rewards, dones, actions) + + # Number of time-steps now looks like the following: + # (16, 16, 21, 21, 21, 21, 21, 21, 21, 21) + lengths = lengths + ts + + len_history_for_policy = 40 + padded_obs_np, padded_lengths = bt.observations_np( + boundary=boundary, len_history_for_policy=len_history_for_policy) + + # The lengths are what we expect them to be. + self.assertAllEqual(lengths, padded_lengths) + + # The padded_observations are the shape we expect them to be. 
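+ # (The longest trajectory now has 21 observations, so t_max is rounded up to
+ # 2 * boundary = 40 and one extra step is kept, giving 41 along the time
+ # axis.)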
+ self.assertEqual( + (self.BATCH_SIZE, (2 * boundary) + 1) + self.OBSERVATION_SHAPE, + padded_obs_np.shape) + + # Test that the padding is the only part that is all 0s. + # NOTE: There is almost 0 probability that the random observation is all 0s. + zero_obs = np.full(self.OBSERVATION_SHAPE, 0.) + for b in range(self.BATCH_SIZE): + # The first lengths[b] will be actual data, rest is 0s. + for ts in range(lengths[b]): + self.assertFalse(np.all(zero_obs == padded_obs_np[b][ts])) + + for ts in range(lengths[b], len(padded_obs_np[b])): + self.assertAllEqual(zero_obs, padded_obs_np[b][ts]) + + def test_parse_trajectory_file_name(self): + self.assertEqual( + (12, 13, 1.0, "abc"), + trajectory.BatchTrajectory.parse_trajectory_file_name( + "/tmp/trajectory_epoch_000012_env_id_000013_temperature_1.0_r_abc.pkl" + )) + + self.assertIsNone( + trajectory.BatchTrajectory.parse_trajectory_file_name( + "/tmp/trajectory_epoch_000012_env_id_000013.pkl")) + + def test_load_from_directory(self): + output_dir = self.get_temp_dir() + + epochs = [0, 1, 2] + env_ids = [0, 1, 2] + temperatures = [0.5, 1.0] + random_strings = ["a", "b"] + + # Write some trajectories. + # There are 3x3x2x2 (36) trajectories, and of them 3x2x2 (12) are done. + for epoch in epochs: + for env_id in env_ids: + for temperature in temperatures: + for random_string in random_strings: + traj = trajectory.Trajectory(time_steps=[ + time_step.TimeStep( + observation=epoch, + done=(epoch == 0), + raw_reward=1.0, + processed_reward=1.0, + action=env_id, + info={}) + ]) + + trajectory_file_name = trajectory.TRAJECTORY_FILE_FORMAT.format( + epoch=epoch, + env_id=env_id, + temperature=temperature, + r=random_string) + + with gfile.GFile( + os.path.join(output_dir, trajectory_file_name), "w") as f: + trajectory.get_pickle_module().dump(traj, f) + + # Load everything and check. + bt = trajectory.BatchTrajectory.load_from_directory(output_dir) + + self.assertIsInstance(bt, trajectory.BatchTrajectory) + self.assertEqual(36, bt.num_completed_trajectories) + self.assertEqual(36, bt.batch_size) + + bt = trajectory.BatchTrajectory.load_from_directory(output_dir, epoch=0) + self.assertEqual(12, bt.num_completed_trajectories) + self.assertEqual(12, bt.batch_size) + + # Get 100 trajectories, but there aren't any. + bt = trajectory.BatchTrajectory.load_from_directory( + output_dir, epoch=0, n_trajectories=100, max_tries=0) + self.assertIsNone(bt) + + bt = trajectory.BatchTrajectory.load_from_directory( + output_dir, epoch=0, temperature=0.5) + self.assertEqual(6, bt.num_completed_trajectories) + self.assertEqual(6, bt.batch_size) + + bt = trajectory.BatchTrajectory.load_from_directory(output_dir, epoch=1) + self.assertEqual(12, bt.num_completed_trajectories) + self.assertEqual(12, bt.batch_size) + + # Constraints cannot be satisfied. + bt = trajectory.BatchTrajectory.load_from_directory( + output_dir, epoch=1, n_trajectories=100, up_sample=False, max_tries=0) + self.assertIsNone(bt) + + # Constraints can be satisfied. 
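+ # (With up_sample=True the 12 trajectories from epoch 1 are re-sampled with
+ # replacement until the requested 100 are available.)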
+ bt = trajectory.BatchTrajectory.load_from_directory( + output_dir, epoch=1, n_trajectories=100, up_sample=True, max_tries=0) + self.assertEqual(100, bt.num_completed_trajectories) + self.assertEqual(100, bt.batch_size) + + bt = trajectory.BatchTrajectory.load_from_directory( + output_dir, epoch=1, n_trajectories=10) + self.assertEqual(10, bt.num_completed_trajectories) + self.assertEqual(10, bt.batch_size) + + gfile.rmtree(output_dir) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/insights/README.md b/tensor2tensor/insights/README.md new file mode 100644 index 000000000..65ca95d59 --- /dev/null +++ b/tensor2tensor/insights/README.md @@ -0,0 +1,76 @@ +# Tensor2Tensor Insights + +The Insights packages provides an interactive webservice for understanding the +inner workings of a Tensor2Tensor model. It will provide a series of +visualizations extracted from a requested T2T model that informs model developers +and model users on how to improve or best utilize a model. + +## Dependencies + +Before using the Insights server, you must install [Bower](https://bower.io/) +which we use to manage our web component dependencies. You can easily install +this with the [Node Package Manager](https://www.npmjs.com/). + +## Setup Instructions + +After training a model, such as according to the Quick Start guide, you can run +the `t2t-insights-server` binary and begin querying it. + +First, prepare the bower dependencies by navigating into the +`tensor2tensor/insights/polymer` directory and running `bower install`: + +``` +pushd tensor2tensor/insights/polymer +bower install +popd +``` + +The models run by server is then configured by a JSON version of the +InsightsConfiguration protocol buffer. Using the model trained in the Quick +Start guide, a sample configuration would be: + +``` + { + "configuration": [{ + "source_language": "en", + "target_language": "de", + "label": "transformers_wmt32k", + "transformer": { + "model": "transformer", + "model_dir": "/tmp/t2t/train", + "data_dir": "/tmp/t2t/data", + "hparams": "", + "hparams_set": "transformer_base_single_gpu", + "problem": "translate_ende_wmt32k" + } + }], + "language": [{ + "code": "en", + "name": "English" + },{ + "code": "de", + "name": "German" + }] + } +``` + +With that saved to `configuration.json`, run the following: + +``` +t2t-insights-server \ + --configuration=configuration.json \ + --static_path=`pwd`/tensor2tensor/insights/polymer +``` + +This will bring up a minimal [Flask](http://flask.pocoo.org/) REST service +served by a [GUnicorn](http://gunicorn.org/) HTTP Server. + +## Features to be developed + +This is a minimal web server. We are in the process of adding additional +exciting features that give insight into a model's behavior: + + * Integrating a multi-head attention visualization. + * Registering multiple models to compare their behavior. + * Indexing training data to find examples related to a current query. + * Tracking interesting query + translation pairs for deeper analysis. diff --git a/tensor2tensor/insights/__init__.py b/tensor2tensor/insights/__init__.py new file mode 100644 index 000000000..ff174dd63 --- /dev/null +++ b/tensor2tensor/insights/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tensor2tensor/insights/graph.py b/tensor2tensor/insights/graph.py new file mode 100644 index 000000000..fc2eb577d --- /dev/null +++ b/tensor2tensor/insights/graph.py @@ -0,0 +1,155 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Graph representation for building decoding graph visualizations.""" + + +class Vertex(object): + """Vertex stores in and out edge connections to other Vertex instances. + + The Vertex class supports serialization to a JSON data format expected by the + client side representation. When serializing, it generates the following + fields: + in_edge_index: The list of directed edge indices into the Vertex. + out_edge_index: The list of directed edge indices from the Vertex. + """ + + def __init__(self, idx): + """Initialize the Vertex. + + Args: + idx: The index of the vertex. + """ + self.idx = idx + self.in_edges = [] + self.out_edges = [] + + def to_dict(self): + """Returns a simplified dictionary representing the Vertex. + + Returns: + A dictionary that can easily be serialized to JSON. + """ + return { + "in_edge_index": self.in_edges, + "out_edge_index": self.out_edges, + } + + +class Edge(object): + """Edge stores edge details connecting two Vertex instances. + + The Edge class supports serialization to a JSON data format expected by the + client side representation. When serializing, it generates the following + fields: + source_index: The source Vertex index for this Edge. + target_index: The target Vertex index for this Edge. + data: Arbitrary data for this Edge. + """ + + def __init__(self, idx): + """Initialize the Edge. + + Args: + idx: The index of the Edge. + """ + self.idx = idx + self.source = -1 + self.target = -1 + self.data = {} + + def to_dict(self): + """Returns a simplified dictionary representing the Vertex. + + Returns: + A dictionary that can easily be serialized to JSON. + """ + return { + "source_index": self.source, + "target_index": self.target, + "data": self.data, + } + + def __str__(self): + return str(self.to_dict()) + + +class Graph(object): + """A directed graph that can easily be JSON serialized for visualization. + + When serializing, it generates the following fields: + edge: The list of all serialized Edge instances. + node: The list of all serialized Vertex instances. + """ + + def __init__(self): + self.vertices = [] + self.edges = [] + self.vertex_map = {} + + def new_vertex(self): + """Creates and returns a new vertex. + + Returns: + A new Vertex instance with a unique index. 
+ """ + vertex = Vertex(len(self.vertices)) + self.vertices.append(vertex) + return vertex + + def get_vertex(self, key): + """Returns or Creates a Vertex mapped by key. + + Args: + key: A string reference for a vertex. May refer to a new Vertex in which + case it will be created. + + Returns: + A the Vertex mapped to by key. + """ + if key in self.vertex_map: + return self.vertex_map[key] + vertex = self.new_vertex() + self.vertex_map[key] = vertex + return vertex + + def add_edge(self, source, target): + """Returns a new edge connecting source and target vertices. + + Args: + source: The source Vertex. + target: The target Vertex. + + Returns: + A new Edge linking source to target. + """ + edge = Edge(len(self.edges)) + self.edges.append(edge) + source.out_edges.append(edge.idx) + target.in_edges.append(edge.idx) + edge.source = source.idx + edge.target = target.idx + return edge + + def to_dict(self): + """Returns a simplified dictionary representing the Graph. + + Returns: + A dictionary that can easily be serialized to JSON. + """ + return { + "node": [v.to_dict() for v in self.vertices], + "edge": [e.to_dict() for e in self.edges] + } diff --git a/tensor2tensor/insights/insight_configuration.proto b/tensor2tensor/insights/insight_configuration.proto new file mode 100644 index 000000000..6a1656eac --- /dev/null +++ b/tensor2tensor/insights/insight_configuration.proto @@ -0,0 +1,55 @@ +syntax = "proto3"; + +package tensor2tensor; + +// Configures the Neural Machine Translation Insight Frontend with a set of +// supported query processors and languages. +message InsightConfiguration { + // Specifies zero or more models to inspect. + repeated QueryProcessorConfiguration configuration = 1; + + // Specifies language codes and display names. + repeated Language language = 2; +} + +// A displayable language name. +message Language { + // The BCP-47 Language code. + string code = 1; + // The language's display name. + string name = 2; +} + +// Configures a QueryProcessor and registers it with the Insight Frontend when +// responding to analysis queries. +message QueryProcessorConfiguration { + // The model's BCP-47 source language code. + string source_language = 1; + // The model's BCP-47 target language code. + string target_language = 2; + // A short label for the model. + string label = 3; + // The QueryProcessor to use. By default we just use the TransformerModel. + string query_processor = 4; + + // Configuration for the TransformerModel. + TransformerConfiguration transformer = 5; +} + +// Specifies the parameters for a trained Transformer model to inspect. These +// parameters match those in t2t-trainer and t2t-decoder. +message TransformerConfiguration { + // The model type. + string model = 1; + // The trained model directory. + string model_dir = 2; + // The data directory for the model. + string data_dir = 3; + + // The hyperparameter set for running the model. + string hparams_set = 4; + // Overriding hyperparameters. + string hparams = 5; + // The problem sets over which this model was trained and configured. + string problems = 6; +} diff --git a/tensor2tensor/insights/polymer/.bowerrc b/tensor2tensor/insights/polymer/.bowerrc new file mode 100644 index 000000000..b316080f0 --- /dev/null +++ b/tensor2tensor/insights/polymer/.bowerrc @@ -0,0 +1,3 @@ +{ + "directory": "." 
+} diff --git a/tensor2tensor/insights/polymer/attention_visualization/attention-visualization.html b/tensor2tensor/insights/polymer/attention_visualization/attention-visualization.html new file mode 100644 index 000000000..02db7fe09 --- /dev/null +++ b/tensor2tensor/insights/polymer/attention_visualization/attention-visualization.html @@ -0,0 +1,130 @@ +<!-- +@license +Copyright 2018 The Tensor2Tensor Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +--> + +<link rel="import" href="http://github.com/polymer/polymer.html"> + +<link rel="import" href="http://github.com/iron-flex-layout/iron-flex-layout-classes.html"> +<link rel="import" href="http://github.com/iron-icon/iron-icon.html"> +<link rel="import" href="http://github.com/iron-icons/iron-icons.html"> + +<link rel="import" href="http://github.com/paper-icon-button/paper-icon-button.html"> +<link rel="import" href="http://github.com/paper-slider/paper-slider.html"> + +<dom-module id="attention-visualization"> + <template> + <custom-style> + <style is="custom-style" include="iron-flex iron-flex-alignment"></style> + </custom-style> + <style> + .background { + fill: #eee; + } + + svg *::selection { + background: transparent; + } + + rect.selection { + fill: transparent; + stroke: #333; + stroke-dasharray: 4px; + stroke-opacity: 0.5; + } + + rect.cell-border { + stroke: #eee; + stroke-width: 0.3px; + } + + rect.cell-selected { + stroke: rgb(51, 102, 153); + stroke-width: 0.5px; + } + + g.cell-group { + pointer-events: all; + } + + g.cell-hover rect { + stroke: #f00; + stroke-width: 1px; + } + + text.mono { + fill: #aaa; + } + + text.text-highlight { + fill: #c00; + } + + text.weight-label { + fill: #ffffff; + font-size: 16px; + stroke: #ffffff; + } + + text.text-selected { + fill: #000; + } + + .svg-container { + display: inline-block; + overflow: hidden; + padding-bottom: 100%; + position: relative; + vertical-align: top; + width: 100%; + } + + .svg-content-responsive { + display: inline-block; + left: 0px; + position: absolute; + top: 10px; + } + + #tooltip { + background-color: white; + border-radius: 10px; + box-shadow: 4px 4px 10px rgba(0, 0, 0, 0.4); + height: auto; + padding: 10px; + pointer-events: none; + position: absolute; + width: auto; + z-index: 10; + } + + #tooltip.hidden { + display: none; + } + + </style> + <div class="layout horizontal"> + <paper-icon-button id="home" on-tap="reset_" icon=home></paper-icon-button> + <paper-slider id="slider" min=0 max=100 value="{{zoomDepth_}}"></paper-slider> + </div> + <div id="tooltip" class="hidden"> + <span>{{selectedProbability}}<span> + </div> + + <div id="chart"> + </div> + </template> + <script src="http://github.com/attention-visualization.js"></script> +</dom-module> diff --git a/tensor2tensor/insights/polymer/attention_visualization/attention-visualization.js b/tensor2tensor/insights/polymer/attention_visualization/attention-visualization.js new file mode 100644 index 000000000..4e9c6e862 --- /dev/null +++ 
b/tensor2tensor/insights/polymer/attention_visualization/attention-visualization.js @@ -0,0 +1,317 @@ +/** + * @license + * Copyright 2018 The Tensor2Tensor Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * `<attention-visualization>` presents a heatmap of input-output associations. + * + * The heat map association shows source to target word association strengths + * according to some method. + * + * ### Usage + * + * <attention-visualization data="[[data]]"></attention-visualization> + */ +class AttentionVisualization extends Polymer.Element { + constructor() { + super(); + + /** + * D3.js DOM element. + * @private + */ + this.container_ = undefined; + /** + * @private + */ + this.margin_ = { + top: 150, + bottom: 50, + right: 10, + left: 100 + }; + /** + * D3.js DOM element. + * @private + */ + this.svg_ = undefined; + /** + * D3.js DOM element. + * @private + */ + this.vis_ = undefined; + /** + * D3.js DOM element. + * @private + */ + this.zoom_ = undefined; + } + + /** + * @return {string} The component name. + */ + static get is() { + return 'attention-visualization'; + } + + /** + * @return {!Object} The component properties. + */ + static get properties() { + return { + /** + * @type {AttentionData} + */ + data: { + type: Object, + observer: 'dataUpdated_', + }, + /** + * @type {number} + */ + zoomDepth_: { + type: Number, + }, + }; + } + + /** + * @return {!Array<string>} The component observers. + */ + static get observers() { + return [ + 'zoomDepthChanged_(zoomDepth_)', + ]; + } + + /** + * Sets the default zoom depth. + * @override + */ + ready() { + super.ready(); + this.set('zoomDepth_', 20); + } + + /** + * Sets the zoom state based on the updated depth. + * @param {number} zoomDepth the zoom depth. + * @private + */ + zoomDepthChanged_(zoomDepth) { + if (!this.container_) { return; } + + if (zoomDepth == 0) { + zoomDepth = 0.000001; + } + let transform = d3.zoomTransform(this.vis_.node()).scale(zoomDepth / 20.0); + this.container_.attr("transform", transform); + } + + /** + * Updates the heatmap. + * @param {!AttentionData} newData the new alignment data. + * @private + */ + dataUpdated_(newData) { + // Create the bounding areas and margins for the heatmap. + let cellDimension = 40; + let sourceTokens = newData.source_tokens; + let targetTokens = newData.target_tokens; + + // Convert the attention weights to cell objects which also give access to + // the row and column indices. + let mapCells = newData.weights.map(function(d, i) { + return { + value: d, + row: Math.floor(i / targetTokens.length), + col: i % targetTokens.length + }; + }); + + // Create the color scale. + let colorScale = d3.scaleQuantile().domain([0.0, 1.0]).range([ + '#cccccc', '#b2b2b2', '#999999', '#7f7f7f', + '#666666', '#4c4c4c', '#333333', '#191919' + ]); + + this.zoom_ = d3.zoom().scaleExtent([1, 10]).on('zoom', zoomed.bind(this)); + + d3.select(this.$.chart).selectAll("*").remove(); + + // Create the bounding div and svgs which will contain all details. 
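+ // (The svg lives inside a .svg-container div whose width and padding-bottom
+ // CSS keep the heatmap square and responsive to the surrounding layout.)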
+ this.svg_ = d3.select(this.$.chart) + .append('div') + .classed('svg-container', true) + .append('svg') + .attr('width', '100%') + .attr('height', '100%') + .classed('svg-content-responsive', true); + + this.vis_ = this.svg_.append('g') + .attr('transform', + 'translate(' + this.margin_.left + ',' + this.margin_.top + ')') + .call(this.zoom_) + .on('dblclick.zoom', null) + .on('wheel.zoom', null); + + // Create a bounding rectangle upon which zooming and panning will take + // place. + this.vis_.append('rect') + .attr('width', '100%') + .attr('height', '100%') + .style('fill', 'none') + .style('pointer-events', 'all'); + + this.container_ = this.vis_.append('g'); + + // Initiate the panning and/or zooming. + function zoomed() { + this.container_.attr("transform", + d3.event.transform.scale(this.zoomDepth_ / 20.0)); + } + + // Place the source tokens along the vertical axis. Each token has an id + // based on it's index. + var sourceLabels = this.container_.append('g'); + + sourceLabels.selectAll('.source-label') + .data(sourceTokens) + .enter() + .append('text') + .text(function(d) { + return d; + }) + .style('text-anchor', 'end') + .attr( + 'id', + function(d, i) { + return 'row-' + i; + }) + .attr('class', 'source-label mono') + .attr('transform', 'translate(-6,' + cellDimension / 1.5 + ')') + .attr('x', 0) + .attr('y', function(d, i) { + return i * cellDimension; + }); + + var targetLabels = this.container_.append('g'); + + // Place the target tokens along the horizontal axis. Each token has an id + // based on it's index. + targetLabels.selectAll('.target-label') + .data(targetTokens) + .enter() + .append('text') + .text(function(d) { + return d; + }) + .style('text-anchor', 'left') + .attr( + 'id', + function(d, i) { + return 'col-' + i; + }) + .attr('class', 'target-label mono') + .attr( + 'transform', 'translate(' + cellDimension / 2 + ',-6) rotate(-90)') + .attr( + 'y', + function(d, i) { + return i * cellDimension; + }) + .attr('x', 0); + + // Create the heat map and populate with cells. Each cell will + // highlight when hovered over. Additionally, the column and row tokens + // will highlight to make clear which tokens are being observed. Lastly, + // each cell will trigger a popup showing details of the alignment state. + var heatMap = this.container_.append('g'); + + // Group the rectangle and text elements and capture the mouse events from + // both so that the rectangle can be highlighted when it's in focus. + let cellGroup = heatMap.selectAll('.cell') + .data(mapCells) + .enter() + .append('g') + .attr('class', 'cell-group') + .on('mouseover', function(d, i) { + // Highlight the newly hovered over cell and it's row/column + // tokens. + d3.select(this).classed('cell-hover', true); + sourceLabels.select('#row-' + d.row) + .classed('text-highlight', true); + targetLabels.select('#col-' + d.col) + .classed('text-highlight', true); + }) + .on('mouseout', function(d) { + // Clear all highlighting. + d3.select(this).classed('cell-hover', false); + + sourceLabels.select('#row-' + d.row) + .classed('text-highlight', false); + targetLabels.select('#col-' + d.col) + .classed('text-highlight', false); + }); + + // Add the rectangles for each cell. 
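+ // (Each rect is positioned at (col, row) * cellDimension and filled from the
+ // quantile color scale defined above; darker cells indicate higher weights.)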
+ cellGroup + .append('rect') + .attr( + 'id', + function(d, i) { + return 'cell-' + i; + }) + .attr('class', 'cell cell-border') + .attr( + 'x', + function(d) { + return d.col * cellDimension; + }) + .attr( + 'y', + function(d) { + return d.row * cellDimension; + }) + .attr('width', cellDimension) + .attr('height', cellDimension) + .style( + 'fill', + function(d) { + return colorScale(d.value); + }); + + // Add the text for each cell. + cellGroup + .append('text') + .text(function(d) { return d.value.toFixed(2); }) + .attr('class', 'weight weight-label') + .attr('x', function(d) { return 5 + (d.col * cellDimension); }) + .attr('y', function(d) { return 25 + (d.row * cellDimension); }); + } + + /** + * Resets the pan and zoom state. + * @private + */ + reset_() { + if (!this.svg_) { return; } + this.vis_.call(this.zoom_.transform, d3.zoomIdentity); + this.set('zoomDepth_', 20); + } +} + +customElements.define(AttentionVisualization.is, AttentionVisualization); diff --git a/tensor2tensor/insights/polymer/bower.json b/tensor2tensor/insights/polymer/bower.json new file mode 100644 index 000000000..da1f4aaed --- /dev/null +++ b/tensor2tensor/insights/polymer/bower.json @@ -0,0 +1,80 @@ +{ + "name": "tensor2tensor-insights", + "homepage": "https://github.com/tensorflow/tensor2tensor", + "description": "Components for analyzing tensor2tensor neural machine translation models.", + "main": "index.html", + "keywords": [ + "neural", + "machine", + "translation" + ], + "authors": [ + "kstevens@google.com" + ], + "license": "Apache 2.0", + "private": true, + "ignore": [ + "**/.*", + "node_modules", + "bower_components", + "test", + "tests" + ], + "dependencies": { + "app-layout": "PolymerElements/app-layout#2.0.4", + "app-route": "PolymerElements/app-route#2.0.3", + "d3": "d3#4.12.2", + "iron-a11y-keys": "PolymerElements/iron-a11y-keys#2.0.0", + "iron-ajax": "PolymerElements/iron-ajax#2.0.0", + "iron-flex-layout": "PolymerElements/iron-flex-layout#2.0.0", + "iron-icon": "PolymerElements/iron-icon#2.0.0", + "iron-icons": "PolymerElements/iron-icons#2.0.0", + "iron-list": "PolymerElements/iron-list#2.0.0", + "iron-pages": "PolymerElements/iron-pages#2.0.0", + "iron-selector": "PolymerElements/iron-selector#2.0.0", + "neon-animation": "PolymerElements/neon-animation#2.0.0", + "paper-button": "PolymerElements/paper-button#2.0.0", + "paper-card": "PolymerElements/paper-card#2.0.0", + "paper-dialog": "PolymerElements/paper-dialog#2.0.0", + "paper-dropdown-menu": "PolymerElements/paper-dropdown-menu#2.0.0", + "paper-icon-button": "PolymerElements/paper-icon-button#2.0.0", + "paper-input": "PolymerElements/paper-input#2.0.0", + "paper-item": "PolymerElements/paper-item#2.0.0", + "paper-listbox": "PolymerElements/paper-listbox#2.0.0", + "paper-slider": "PolymerElements/paper-slider#2.0.0", + "paper-tabs": "PolymerElements/paper-tabs#2.0.0", + "paper-toggle-button": "PolymerElements/paper-toggle-button#2.0.0", + "paper-tooltip": "PolymerElements/paper-tooltip#2.0.0", + "paper-progress": "PolymerElements/paper-progress#2.0.0", + "polymer": "polymer/polymer#v2.3.1" + }, + "resolutions": { + "webcomponentsjs": "^v1.0.19", + "polymer": "^v2.3.1", + "app-route": "^2.0.3", + "app-layout": "^2.0.4", + "iron-location": "1 - 2", + "iron-selector": "^2.0.0", + "neon-animation": "^2.0.0", + "iron-icon": "^2.0.0", + "iron-pages": "^2.0.0", + "iron-icons": "^2.0.0", + "paper-icon-button": "^2.0.0", + "paper-item": "^2.0.0", + "iron-flex-layout": "^2.0.0", + "paper-listbox": "^2.0.0", + "iron-a11y-keys": "^2.0.0", + 
"paper-dialog": "^2.0.0", + "iron-ajax": "^2.0.0", + "paper-progress": "^2.0.0", + "paper-dropdown-menu": "^2.0.0", + "paper-tabs": "^2.0.0", + "paper-input": "^2.0.0", + "paper-toggle-button": "^2.0.0", + "paper-slider": "^2.0.0", + "iron-list": "^2.0.0", + "paper-card": "^2.0.0", + "paper-tooltip": "^2.0.0", + "iron-overlay-behavior": "^2.2.0" + } +} diff --git a/tensor2tensor/insights/polymer/common-types.js b/tensor2tensor/insights/polymer/common-types.js new file mode 100644 index 000000000..9abdfa9af --- /dev/null +++ b/tensor2tensor/insights/polymer/common-types.js @@ -0,0 +1,164 @@ +/** + * @license + * Copyright 2018 The Tensor2Tensor Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * @fileoverview A set of shared types that will be replaced by js proto types. + * @externs + */ + +/** + * A typedef for a nlp.nmt.mt_debug_fe.LanguageConfiguration message. + * This can't be converted to javascript yet because it transitively depends on + * tensorflow protos that can't be converted to javascript. + * TODO(kstevens): Remove this typedef when we remove the dependency on + * non-convertible tensorflow protos. + * @typedef {{ + * code: string, + * name: string, + * hidden: ?boolean, + * }} + */ +let Language; + +/** + * A typedef for a nlp.nmt.mt_debug_fe.SerializedConfiguration message. + * This can't be converted to javascript yet because it transitively depends on + * tensorflow protos that can't be converted to javascript. + * TODO(kstevens): Remove this typedef when we remove the dependency on + * non-convertible tensorflow protos. 
+ * @typedef {{ + * id: string, + * target: string, + * source_language: Language, + * target_language: Language, + * }} + */ +let Model; + +/** + * @typedef {{ + * name: string, + * localProbability: number, + * cumalitiveProbability: number, + * attention: Array<number>, + * children: Array<TreeNode>, + * }} + */ +let TreeNode; + +/** + * @typedef {{ + * source_tokens: Array<string>, + * target_tokens: Array<string>, + * weights: !Array<number> + * }} + */ +let AttentionData; + +/** + * @typedef {{ + * label: string, + * label_id: number, + * log_probability: number, + * total_log_probability: number, + * score: number, + * parent_id: number, + * }} + */ +let Candidate; + +/** + * @typedef {{ + * id: number, + * stepIndex: number, + * candidate: !Candidate, + * children: !Array<InteractiveNode>, + * }} + */ +let InteractiveNode; + +/** + * @typedef {{ + * step_name: string, + * segment: !Array<!{ + * text: string, + * }> + * }} + */ +let QueryProcessingRewriteStep; + +/** + * @typedef {{ + * source_processing: !Array<!QueryProcessingRewriteStep>, + * target_processing: !Array<!QueryProcessingRewriteStep>, + * }} + */ +let QueryProcessingVisualization; + +/** + * @typedef {{ + * in_edge_index: !Array<number>, + * out_edge_index: !Array<number>, + * }} + */ +let BeamSearchNode; + +/** + * @typedef {{ + * label_id: number, + * label: string, + * log_probability: number, + * total_log_probability: number, + * score: number, + * completed: boolean, + * }} + */ +let BeamSearchCandidate; + +/** + * @typedef {{ + * source_index: number, + * target_index: number, + * data: !BeamSearchCandidate, + * }} + */ +let BeamSearchEdge; + +/** +/** + * @typedef {{ + * node: !Array<!BeamSearchNode>, + * edge: !Array<!BeamSearchEdge>, + * }} + */ +let SearchGraphVisualization; + +/** + * @typedef {{ + * candidate_list: !Array<{ + * candidate: !Array<!BeamSearchCandidate>, + * }>, + * }} + */ +let GenerateCandidateResponse; + +/** + * @typedef {{ + * session_id: number, + * }} + */ +let StartTranslationResponse; diff --git a/tensor2tensor/insights/polymer/explore_view/explore-view.html b/tensor2tensor/insights/polymer/explore_view/explore-view.html new file mode 100644 index 000000000..9d40ea551 --- /dev/null +++ b/tensor2tensor/insights/polymer/explore_view/explore-view.html @@ -0,0 +1,155 @@ +<!-- +@license +Copyright 2018 The Tensor2Tensor Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+--> +<link rel="import" href="http://github.com/polymer/polymer.html"> + +<link rel="import" href="http://github.com/app-route/app-location.html"> +<link rel="import" href="http://github.com/app-route/app-route.html"> + +<link rel="import" href="http://github.com/iron-a11y-keys/iron-a11y-keys.html"> +<link rel="import" href="http://github.com/iron-ajax/iron-ajax.html"> +<link rel="import" href="http://github.com/iron-flex-layout/iron-flex-layout-classes.html"> +<link rel="import" href="http://github.com/iron-icon/iron-icon.html"> +<link rel="import" href="http://github.com/iron-icons/iron-icons.html"> +<link rel="import" href="http://github.com/iron-list/iron-list.html"> + +<link rel="import" href="http://github.com/paper-icon-button/paper-icon-button.html"> +<link rel="import" href="http://github.com/paper-input/paper-input.html"> +<link rel="import" href="http://github.com/paper-toggle-button/paper-toggle-button.html"> +<link rel="import" href="http://github.com/paper-progress/paper-progress.html"> + +<link rel="import" href="http://github.com/query_card/query-card.html"> +<link rel="import" href="http://github.com/translation_result/translation-result.html"> + +<dom-module id="explore-view"> + <template> + <style include="iron-flex iron-flex-alignment iron-flex-reverse"> + :host { + padding: 24px; + @apply --layout-vertical; + @apply --layout-center; + } + + query-card { + margin: 0px; + width: 90%; + } + + div.rule { + @apply --layout-vertical; + } + + paper-progress { + --paper-progress-active-color: #4285f4; + --paper-progress-height: 10px; + width: 90%; + } + + translation-result { + margin: 12px 0px; + } + + paper-input { + padding: 0px 6px; + } + + paper-icon-button#clear { + color: var(--paper-red-300); + --paper-icon-button-ink-color: var(--paper-red-a100); + height: 23px; + padding: 0px 4px; + width: 23px; + } + + paper-icon-button#translate { + background-color: #4d90fe; + color: #fff; + border-radius: 50%; + } + + #result-list { + margin: 24px 0px; + width: 90%; + } + </style> + <!-- + Extract the query information from the url if it exists. + --> + <app-route + route="{{subroute}}" + pattern="/:query" + tail="{{tailRoute}}" + data="{{queryData}}"> + </app-route> + + <query-card + route="{{route}}" + url="/api/list_models" + sub-route="{{subroute}}" + model="{{model_}}"> + <!-- + Include a text area and actionable button for sending translations. 
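+ Pressing enter inside the input (via iron-a11y-keys) or tapping the
+ translate button both invoke translate_().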
+ --> + <div id="search-bar" class="layout horizontal center-center"> + <paper-input class="flex" value="{{query_}}" label="translate" id="input"> + </paper-input> + <iron-a11y-keys target="{{input}}" keys="enter" on-keys-pressed="translate_"> + </iron-a11y-keys> + <paper-icon-button id="translate" on-tap="translate_" icon="translate" title="translate"> + </paper-icon-button> + </div> + <div id="extra"> + <h4>Rapid Response</h4> + <template is="dom-repeat" items="{{rules_}}"> + <div class="rule"> + <span on-tap="deleteRule_"> + <iron-icon icon="remove-circle-outline"> + </iron-icon> + Rule + </span> + <paper-input label="Source" value="{{item.source}}" type="text"></paper-input> + <paper-input label="Bad Target" value="{{item.bad_translations}}" type="text"></paper-input> + <paper-input value="{{item.good_translations}}" label="Good Target" type="text"></paper-input> + <paper-input value="{{item.attention_threshold}}" label="Threshold" type="number"></paper-input> + </div> + </template> + <span on-tap="addRule_"> + <iron-icon icon="add-circle-outline"> + </iron-icon> + Rule + </span> + </div> + </query-card> + + <paper-progress id="loading" indeterminate disabled="[[!fetchingResult]]"> + </paper-progress> + + <div id="result-list" class="layout vertical vertical-reverse"> + <template is="dom-repeat" items="[[results]]" as="result"> + <translation-result result="[[result]]"></translation-result> + </template> + </div> + + <iron-ajax + id="translateAjax" + url="{{url}}" + handle-as="json" + on-response="handleTranslationResponse_"> + </iron-ajax> + </template> + <script src="http://github.com/d3/d3.js"></script> + <script src="http://github.com/explore-view.js"></script> +</dom-module> diff --git a/tensor2tensor/insights/polymer/explore_view/explore-view.js b/tensor2tensor/insights/polymer/explore_view/explore-view.js new file mode 100644 index 000000000..ee78d47ed --- /dev/null +++ b/tensor2tensor/insights/polymer/explore_view/explore-view.js @@ -0,0 +1,211 @@ +/** + * @license + * Copyright 2018 The Tensor2Tensor Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * `<explore-view>` Presents a view for debuging translations. + * + * This provides an interactive interface for querying a backend service to + * fetch detailed analysis of a translation process. Each result will be + * provided as a stack. + * + * ### Usage + * + * <explore-view></explore-view> + */ +class ExploreView extends Polymer.Element { + /** + * @return {string} The component name. + */ + static get is() { + return 'explore-view'; + } + + /** + * @return {!Object} The component properties. 
+ */ + static get properties() { + return { + route: { + type: Object, + }, + /** + * @type {!Array<!{ + * source: string, + * bad_translations: string, + * good_translations: string, + * attention_threshold: number + * }>} + */ + rules_: { + type: Array, + }, + /** + * @type {?Model} + */ + model_: { + type: Object + }, + /** + * @type {string} + */ + query_: { + type: Object, + } + }; + } + + /** + * @return {!Array<string>} The component observers. + */ + static get observers() { + return [ + 'modelChanged_(queryData, model_)', + ]; + } + + /** + * @override + */ + ready() { + super.ready(); + this.set('rules_', []); + this.set('fetchingResult', false); + } + + /** + * Noop + * @public + */ + refresh() { + // Noop + } + + /** + * Resets the results when a model changes and triggers a query automatically + * if one exists. + * @param {?{query: string}} queryData The current route data. + * @param {?Model} model Unused, but needed for triggering. + * @private + */ + modelChanged_(queryData, model) { + if (queryData && queryData.query) { + // Compose the query from the querydata field and the path in the rest of + // the route. If the link includes an escaped "/" app-route splits the + // query and remaining path on that escaped "/". So query appears to not + // include the rest of the intended query. + let query = unescape(queryData.query) + this.get('tailRoute').path; + this.set('query_', query); + this.translate_(); + } + this.set('results', []); + this.set('rules_', []); + } + + /** + * Sends a translation request to the server. + * @private + */ + translate_() { + if (!this.model_ || !this.model_.id) { + return; + } + + var params = { + 'source': this.query_, + 'id': this.model_.id, + 'sl': this.model_.source_language.code, + 'tl': this.model_.target_language.code, + }; + var paramList = this.createBodyValue_(params); + this.set('url', '/debug?' + paramList); + this.set('fetchingResult', true); + this.$.translateAjax.generateRequest(); + } + + /** + * Returns a string with all the query parameters composed together. This + * also serializes the rapid response rules provided. + * @param {!Object} params The params to combine. + * @returns {string} The params collapsed together. + * @private + */ + createBodyValue_(params) { + // Add the key value body parts. + var bodyParts = []; + for (var param in params) { + var value = window.encodeURIComponent(params[param]); + bodyParts.push(param + "=" + value); + } + + // Add the rapid response rules. + for (var i = 0; i < this.rules_.length; ++i) { + var rule = this.rules_[i]; + var value = + 'src_lang: "' + this.model_.source_language.code + '" ' + + 'trg_lang: "' + this.model_.target_language.code + '" ' + + 'source: "' + rule['source'] + '" ' + + 'bad_translations: "' + rule.bad_translations + '" ' + + 'good_translations: "' + rule.good_translations + '" ' + + 'attention_threshold: ' + rule.attention_threshold; + bodyParts.push('rule=' + window.encodeURIComponent(value)); + } + + // Combine everything together. + return bodyParts.join('&'); + } + + /** + * Adds the translation response to the list of results. + * @param {!Event} event The event object from the `response` event. This is + * required to access the current response, as there are timing issues when + * accessing the latest response with iron-ajax's `last-response` attribute. 
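Aside (not part of the patch): the `translate_` and `createBodyValue_` methods above compose a plain GET query string for the `/debug` endpoint, with one repeated `rule=` parameter per rapid-response rule. A rough Python sketch of the same composition follows; the parameter names (`source`, `id`, `sl`, `tl`, `rule`) and the rule text layout are taken from this file, while the host and the presence of a running backend are assumptions.

```python
# Sketch: build the /debug query string the way explore-view.js does.
from urllib.parse import urlencode


def build_debug_url(host, query, model, rules=()):
  params = [
      ("source", query),
      ("id", model["id"]),
      ("sl", model["source_language"]["code"]),
      ("tl", model["target_language"]["code"]),
  ]
  # Each rapid-response rule is serialized as a text-format-like string and
  # repeated under the "rule" key, mirroring createBodyValue_().
  for rule in rules:
    value = (
        'src_lang: "%s" trg_lang: "%s" source: "%s" '
        'bad_translations: "%s" good_translations: "%s" '
        "attention_threshold: %s" % (
            model["source_language"]["code"],
            model["target_language"]["code"],
            rule["source"], rule["bad_translations"],
            rule["good_translations"], rule["attention_threshold"]))
    params.append(("rule", value))
  return host + "/debug?" + urlencode(params)


print(build_debug_url(
    "http://localhost:8010",  # Placeholder host.
    "hello world",
    {"id": "my_model",
     "source_language": {"code": "en"},
     "target_language": {"code": "de"}}))
```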
+ * @private + */ + handleTranslationResponse_(event) { + this.set('fetchingResult', false); + this.push('results', { + response: event.detail.response, + query: this.query_, + model: this.model_, + }); + } + + /** + * Adds a new rapid response rule to be filled out. + * @private + */ + addRule_() { + this.push('rules_', { + source: '', + bad_translations: '', + good_translations: '', + attention_threshold: 0.9, + }); + } + + /** + * Deletes a rapid response rule. + * @param {Event} e The event in the dom repeat template element. + * @private + */ + deleteRule_(e) { + let model = e.model; + this.splice('rules_', model.index, 1); + } +} + +customElements.define(ExploreView.is, ExploreView); diff --git a/tensor2tensor/insights/polymer/graph_visualization/graph-visualization.html b/tensor2tensor/insights/polymer/graph_visualization/graph-visualization.html new file mode 100644 index 000000000..152e7612a --- /dev/null +++ b/tensor2tensor/insights/polymer/graph_visualization/graph-visualization.html @@ -0,0 +1,186 @@ +<!-- +@license +Copyright 2018 The Tensor2Tensor Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +--> +<link rel="import" href="http://github.com/polymer/polymer.html"> + +<link rel="import" href="http://github.com/iron-ajax/iron-ajax.html"> +<link rel="import" href="http://github.com/iron-flex-layout/iron-flex-layout-classes.html"> +<link rel="import" href="http://github.com/iron-icon/iron-icon.html"> +<link rel="import" href="http://github.com/iron-icons/av-icons.html"> +<link rel="import" href="http://github.com/iron-icons/iron-icons.html"> + +<link rel="import" href="http://github.com/paper-button/paper-button.html"> +<link rel="import" href="http://github.com/paper-icon-button/paper-icon-button.html"> +<link rel="import" href="http://github.com/paper-slider/paper-slider.html"> +<link rel="import" href="http://github.com/paper-tooltip/paper-tooltip.html"> + +<dom-module id="graph-visualization"> + <template> + <style include="iron-flex iron-flex-alignment"> + #chart { + border: 1px #ccc solid; + position: relative; + } + + #help { + position: absolute; + top: 10px; + right: 10px; + z-index: 1; + } + + .background { + fill: #eee; + } + + line { + stroke: #000; + stroke-width: 1.5px; + } + + rect { + fill: transparent; + } + + .node circle { + cursor: pointer; + fill: #fff; + stroke: steelblue; + stroke-width: 1.5px; + } + + g.selected circle { + fill: lightsteelblue; + } + + .node text { + font-size: 12px; + } + + path.link { + fill: none; + stroke-width: 1.5px; + } + + circle.terminal { + fill: lightsteelblue; + } + + circle.nonterminal { + fill: #fff; + } + + text { + fill: #222; + } + + g.fade circle, + g.fade text { + opacity: 0.1; + } + + .svg-container { + display: inline-block; + position: relative; + width: 100%; + padding-bottom: 100%; + vertical-align: top; + overflow: hidden; + } + + .svg-content-responsive { + display: inline-block; + position: absolute; + top: 10px; + left: 0px; + } + + 
#info { + background: #fff; + border: 1px solid #bce8f1; + border-radius: 5px; + position: absolute; + right: 12px; + top: 12px; + z-index: 10; + } + + #info .header { + background-color: #bce8f1; + border-color: #bce8f1; + color: #31708f; + } + + #info .header, + #info .details { + padding: 12px 6px; + } + </style> + <div class="layout horizontal"> + <paper-icon-button id="home" on-tap="reset_" icon=home></paper-icon-button> + <paper-slider id="slider" min=0 max=100 value="{{zoomDepth_}}"></paper-slider> + <div class="flex"></div> + <iron-pages selected="[[stepMode]]" attr-for-selected="name"> + <div name="view"> + <paper-button raised on-tap="startStepMode_">Start Step Decoding</paper-button> + </div> + <div name="edit"> + <paper-icon-button on-tap="step_" icon=av:play-arrow></paper-icon-button> + <paper-button raised on-tap="exitStepMode_">Exit Step Decoding</paper-button> + </div> + </iron-pages> + </div> + <div id="chart"> + <div id="info"> + <div class="header"> + Node Details + </div> + <div class="details"> + <div>Token: <span>[[currentName]]</span></div> + <div>Token Probability: <span>[[currentProbability]]</span></div> + <div>Total Probability: <span>[[currentTotalProbability]]</span></div> + <div>Score: <span>[[score]]</span></div> + </div> + </div> + </div> + + <iron-ajax + id="startAjax" + url="/api/remote_decoder_start" + method="POST" + body="[[startBody]]" + handle-as="json" + on-error="handleStartError_" + on-response="handleStartResponse_" + last-response="{{startResponse_}}"> + </iron-ajax> + + <iron-ajax + id="generateAjax" + url="/api/remote_decoder_generate" + method="POST" + body="[[generateBody]]" + params="[[generateParams]]" + handle-as="json" + on-error="handleGenerateError_" + on-response="handleGenerateResponse_" + last-response="{{generateResponse_}}"> + </iron-ajax> + + </template> + <script src="http://github.com/graph-visualization.js"></script> +</dom-module> + diff --git a/tensor2tensor/insights/polymer/graph_visualization/graph-visualization.js b/tensor2tensor/insights/polymer/graph_visualization/graph-visualization.js new file mode 100644 index 000000000..c273c348f --- /dev/null +++ b/tensor2tensor/insights/polymer/graph_visualization/graph-visualization.js @@ -0,0 +1,828 @@ +/** + * @license + * Copyright 2018 The Tensor2Tensor Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * `<graph-visualization>` Presents a beam search decoding graph. + * + * The Beam Search decoding graph visualizes the entire search space of a + * sequence generation model. Each layer in the graph displays a decoding step + * with nodes in that layer representing generated candidates. If supported by + * the backend server, the graph can enter interactive mode where candidates can + * be selected for each generation step. 
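Aside (not part of the patch): the component described above turns a beam-search graph into a nested tree before rendering it with d3. A minimal Python sketch of that conversion is shown below, using the field names that appear in `dataUpdated_` and `addChildToTree_` later in this file (`node`, `edge`, `in_edge_index`, `out_edge_index`, `target_index`, and each edge's `data` candidate); the exact proto layout beyond those fields is an assumption.

```python
import math


def graph_to_tree(graph):
  """Converts a search-graph dict into a nested tree, as dataUpdated_ does."""
  nodes, edges = graph["node"], graph["edge"]
  # The root is the node with out-edges but no in-edges.
  root_index = next(i for i, n in enumerate(nodes)
                    if not n["in_edge_index"] and n["out_edge_index"])
  root = {"name": "", "children": []}

  def add_child(tree, edge_index):
    edge = edges[edge_index]
    candidate = edge["data"]
    if not candidate["label"]:
      # Terminal sink: nothing to add below the parent.
      return
    child = {
        "name": candidate["label"],
        "local_probability": math.exp(candidate["log_probability"]),
        "children": [],
    }
    tree["children"].append(child)
    for out_index in nodes[edge["target_index"]]["out_edge_index"]:
      add_child(child, out_index)

  for out_index in nodes[root_index]["out_edge_index"]:
    add_child(root, out_index)
  return root


example = {
    "node": [{"in_edge_index": [], "out_edge_index": [0]},
             {"in_edge_index": [0], "out_edge_index": []}],
    "edge": [{"target_index": 1,
              "data": {"label": "Hallo", "log_probability": -0.1}}],
}
print(graph_to_tree(example))
```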
+ * + * + * ### Usage + * + * <graph-visualization data="[[data]]"></graph-visualization> + */ +class GraphVisualization extends Polymer.Element { + constructor() { + super(); + + /** + * @private + */ + this.svg_ = undefined; + /** + * @private + */ + this.vis_ = undefined; + + /** + * @type {!TreeNode} + * @private + */ + this.rootTree_ = { + name: '', + localProbability: 0, + cumalitiveProbability: 0, + score: 0, + attention: [], + children: [], + }; + /** + * @type {!InteractiveNode} + * @private + */ + this.interactiveRoot_ = { + id: this.nodeId_, + stepIndex: 0, + candidate: { + label: '<s>', + label_id: 1, + log_probability: 0, + total_log_probability: 0, + score: 0, + parent_id: 0 + }, + children: [], + }; + /** + * @type {Array<!InteractiveNode>} + * @private + */ + this.selectedNodes_ = []; + /** + * @private + */ + this.stepNodes_ = []; + + /** + * Metadata for navigating nodes. + * @private + */ + this.nodeId_ = 0; + + /** + * D3.js helper object. + * @private + */ + this.partition_ = undefined; + /** + * D3.js helper object. + * @private + */ + this.zoom_ = undefined; + + /** + * D3.js DOM element. + * @private + */ + this.container_ = undefined; + } + + /** + * @return {string} The component name. + */ + static get is() { + return 'graph-visualization'; + } + + /** + * @return {!Object} The component properties. + */ + static get properties() { + return { + /** + * @type {!SearchGraphVisualization} + */ + data: { + type: Object, + observer: 'dataUpdated_', + }, + /** + * @type {!Model} + */ + model: { + type: Object, + }, + /** + * @type {string} + */ + query: { + type: String, + }, + /** + * @type {number} + */ + zoomDepth_: { + type: Number, + value: 20, + }, + /** + * @type {!StartTranslationResponse} + */ + startResponse_: { + type: Object, + }, + /** + * @type {!GenerateCandidateResponse} + */ + generateResponse_: { + type: Object, + }, + }; + } + + /** + * @return {!Array<string>} The component observers. + */ + static get observers() { + return [ + 'zoomDepthChanged_(zoomDepth_)', + ]; + } + + /** + * Sets the default zoom depth. + * @override + */ + ready() { + super.ready(); + + this.set('zoomDepth_', 20); + this.set('stepMode', 'view'); + } + + /** + * Sets the zoom state based on the updated depth. + * @param {number} zoomDepth the zoom depth. + * @private + */ + zoomDepthChanged_(zoomDepth) { + if (!this.svg_) { + return; + } + + if (zoomDepth == 0) { + zoomDepth = 0.000001; + } + let transform = d3.zoomTransform(this.svg_.node()).scale(zoomDepth / 20.0); + this.vis_.attr("transform", transform); + } + + /** + * Converts the NMT Graph JSON format to a nested tree heirachy and plots the + * tree as a collapsible tree visualization. + * @private + */ + dataUpdated_() { + // We need to determine two key nodes in the graph: + // Root: This is the node with no in links and some out links. + // Term: This is the terminal node with no out links and some in links. + // + // Our plot will associate token with actual nodes. For all nodes except + // the Term node, this will work fine since in the tree, each node is + // referenced only once as the head of an edge. + // + // The Term node however needs to be duplicated for each edge ending at it + // so that each instance can have a unique token associated with it. + + // Step 1) Find Root and Term node indices so they can be refered to later. 
+ + var rootIndex = -1; + var nodes = this.data.node; + for (var i = 0; i < nodes.length && rootIndex == -1; ++i) { + var node = nodes[i]; + if (node.in_edge_index.length == 0 && node.out_edge_index.length != 0) { + rootIndex = i; + } + } + + // Step 2) Create the root node in the tree. The tree structure will have + // the following components: + // name: The display name of the node. This will be some token. + // localProbability: The per time step probability of this node. + // cumulativeProbability: The total probability of this path in the beam + // search. + // score: A final score for this path in the beam search. This is + // typically the cumulativeProbability with zero or more penalties. + // attention: The attention vector associated with this node transition. + // children: The list of children in the tree, which are themselves trees. + this.rootTree_ = { + name: '', + localProbability: 0, + cumalitiveProbability: 0, + score: 0, + attention: [], + children: [], + }; + + // Step3) Add each child and it's children recursively starting from the + // root node. + var rootNode = nodes[rootIndex]; + var edges = this.data.edge; + for (var i = 0; i < rootNode.out_edge_index.length; ++i) { + // Get the edge. + var outEdge = edges[rootNode.out_edge_index[i]]; + this.addChildToTree_(this.rootTree_, outEdge, nodes, edges); + } + this.propagateLabel_(this.rootTree_); + + this.createSVG_(); + this.plotTree_(this.rootTree_); + } + + /** + * Forwards path labels from a node's child to the current node. + * @param {!TreeNode} node The node to annotate. + * @private + */ + propagateLabel_(node) { + var hasNBest = false; + var hasBeam = false; + var hasAlternative = false; + for (var i = 0; i < node.children.length; ++i) { + hasNBest = hasNBest || node.children[i].pathType == 'nbest'; + hasBeam = hasBeam || node.children[i].pathType == 'beam'; + hasAlternative = hasAlternative || + node.children[i].pathType == 'alternative'; + } + + if (hasNBest) { + node.pathType = 'nbest'; + } else if (hasBeam) { + node.pathType = 'beam'; + } else if (hasAlternative) { + node.pathType = 'beam'; + } else { + node.pathType = 'unknown'; + } + } + + /** + * Iterates through all the children in tree and adds them as children to the + * top level tree. + * @param {!TreeNode} tree The current node in the tree to update with + * children. + * @param {!BeamSearchEdge} currentEdge The edge going into tree. + * @param {!Array<!BeamSearchNode>} nodes The list of all node objects. + * @param {!Array<!BeamSearchEdge>} edges The list of all edges between nodes. + * @private + */ + addChildToTree_(tree, currentEdge, nodes, edges) { + // The real edge information is nested in wonderfully named proto + // extensions. Extract the extension information appropriately. + var candidate = currentEdge.data; + + // When the label for the new child is empty, we're at a terminal sink. So + // we ignore that node and instead label the parent. 
+    if (candidate.label == '') {
+      tree.pathType = 'alternative';
+      return;
+    }
+
+    var node = nodes[currentEdge.target_index];
+    /**
+     * @type {TreeNode}
+     */
+    var childTree = {
+      name: candidate.label,
+      attention: [],
+      localProbability: Math.pow(Math.E, candidate.log_probability),
+      cumalitiveProbability: Math.pow(Math.E, candidate.total_log_probability),
+      score: Math.pow(Math.E, candidate.score),
+      finished: currentEdge.completed || false,
+      children: [],
+      node: node,
+      edge: currentEdge,
+      pathType: 'unknown',
+    };
+    tree.children.push(childTree);
+
+    if (node.out_edge_index.length == 0) {
+      if (childTree.name == '</s>') {
+        childTree.pathType = 'nbest';
+      } else if (childTree.name == '' || candidate.finished) {
+        childTree.pathType = 'alternative';
+      } else {
+        childTree.pathType = 'beam';
+      }
+    } else {
+      for (var i = 0; i < node.out_edge_index.length; ++i) {
+        // Get the edge.
+        var outEdge = edges[node.out_edge_index[i]];
+        this.addChildToTree_(childTree, outEdge, nodes, edges);
+        this.propagateLabel_(childTree);
+      }
+    }
+  }
+
+  /**
+   * Creates the initial SVG canvas and associated structures. This will remove
+   * all previous svg elements.
+   * @private
+   */
+  createSVG_() {
+    // Create the margins, width, and height.
+    var maxWidth = 1600;
+    var maxHeight = 1600;
+    var margins = [20, 120, 20, 20];
+    var width = maxWidth - margins[1] - margins[3];
+    var height = maxHeight - margins[0] - margins[2];
+
+    // Use a d3 partition which will place each node based on its number of
+    // descendants with the highest ranked path along the top.
+    this.partition_ = d3.partition().size([height, width]).padding(1);
+
+    // Set the initial position of the root of the tree to be half the height
+    // and on the left.
+    this.rootTree_.x0 = height / 2;
+    this.rootTree_.y0 = 0;
+
+    this.zoom_ = d3.zoom()
+        .scaleExtent([1, 10])
+        .on("zoom", zoomed.bind(this));
+
+    d3.select(this.$.chart).selectAll('.svg-container').remove();
+
+    // Embed the SVG to host the tree and rotate it so that horizontal matches
+    // the height of the canvas.
+    this.svg_ = d3.select(this.$.chart)
+        .append("div")
+        .classed("svg-container", true)
+        .append("svg")
+        .attr("height", "100%")
+        .attr("width", "100%")
+        .classed("svg-content-responsive", true)
+        .call(this.zoom_)
+        .on('dblclick.zoom', null)
+        .on('wheel.zoom', null);
+
+    /**
+     * Note: For reasons not understood, the javascript compiler can't figure
+     * out the type of zoomDepth_ at this line, so we need to coerce it into
+     * being a number.
+     * @type {number}
+     */
+    let zoomDepth = parseInt(this.zoomDepth_, 10);
+    let transform = d3.zoomTransform(this.svg_.node()).scale(zoomDepth / 20.0);
+    this.vis_ = this.svg_.append('g')
+        .attr("transform", transform);
+
+    // Ensure that the entire svg element can be used for panning.
+    this.vis_.append("rect")
+        .attr("width", maxWidth)
+        .attr("height", maxWidth)
+        .style("fill", "none")
+        .style("pointer-events", "all");
+
+    this.container_ = this.vis_.append("g");
+
+    // Apply the zoom transformation.
+    function zoomed() {
+      this.vis_.attr("transform",
+          d3.event.transform.scale(this.zoomDepth_ / 20.0));
+    }
+  }
+
+  /**
+   * Examines and plots all reachable nodes in the rootTree with respect to the
+   * given current root.
+   * @param {!TreeNode} root The current root node to focus on.
+   * @private
+   */
+  plotTree_(root) {
+    // Create the hierarchy. We accumulate node values by just counting the
+    // number of elements, rather than placing a weight on each node.
+ var treeHierachy = d3.hierarchy(this.rootTree_) + .sum(function(d) { + return 1; + }) + .sort(function(a, b) { + return a.data.score - b.data.score; + }); + + this.partition_(treeHierachy); + + // Create an enter object where we can add both nodes and links. + var enter = this.container_.selectAll(".node") + .data(treeHierachy.descendants()) + .enter(); + + // Add the nodes in four steps: + // 1) A general group element to hold all node portions. + // 2) A rectangle with no visible elements. + // 3) A circle for the node. + // 4) a text label. + var node = enter.append("g") + .attr("class", function(d) { + return "node" + (d.children ? " node--internal" : " node--leaf"); + }) + .attr("transform", function(d) { + return "translate(" + d.y0 + "," + d.x0 + ")"; + }) + .attr('id', function(d, i) { return "g-" + i; }); + + node.append("rect") + .attr("width", function(d) { return d.y1 - d.y0; }) + .attr("height", 24); + + node.append("circle") + .attr("r", 10) + .attr("transform", "translate(10, 10)"); + + node.append("text") + .attr("x", 24) + .attr("y", 13) + .text(function(d) { return d.data.name; }); + + // Add out links from each node to it's parent. We link two nodes using the + // bottom center of the circle so that the text label can be placed at + // approximately the vertical center of the circle. This gives a decent + // layout while also not hiding any text. + enter.append("path") + .attr("class", "link") + .attr("d", function(d) { + if (!d.parent) { return ""; } + // Pad the placement of the links just below the center. We have to + // use x0 and y0 for location due to partition, which doesn't create + // standard x/y fields. + var nodeX = d.x0 + 16; + var nodeY = d.y0 + 10; + var parentX = d.parent.x0 + 16; + var parentY = d.parent.y0 + 10; + return "M" + + nodeY + "," + nodeX + + "C" + (nodeY + parentY) / 2 + "," + nodeX + " " + + (nodeY + parentY) / 2 + "," + parentX + " " + + parentY + "," + parentX; + }) + .style('stroke', function(d) { + // Associate a different path color depend on the path type for the + // node. + if (d.data.pathType == 'unknown') + return '#222'; + if (d.data.pathType == 'nbest') + return '#66ff33'; + if (d.data.pathType == 'beam') + return '#ccc'; + if (d.data.pathType == 'alternative') + return '#ff3300'; + }); + + // Setup hover events on each node to place focus and highligh on the node + // being hovered over. We do this by adding opacity to all other nodes. + var nodes = this.container_.selectAll(".node"); + node.on('mouseover', function(d, i) { + nodes.classed('fade', function(d, j) { + return i != j; + }); + d3.select(this).classed('hover', true); + this.set('currentName', d.data.name); + this.set( + 'currentProbability', this.displayNumber(d.data.localProbability)); + this.set( + 'currentTotalProbability', + this.displayNumber(d.data.cumalitiveProbability)); + this.set('score', this.displayNumber(d.data.score)); + }.bind(this)) + .on('mouseout', function(d, i) { + nodes.classed("fade", false); + d3.select(this).classed("hover", false); + }); + } + + /** + * Resets the pan and zoom state. + * @private + */ + reset_() { + if (!this.svg_) { + return; + } + this.svg_.call(this.zoom_.transform, d3.zoomIdentity); + this.set('zoomDepth_', 20); + } + + /** + * Returns the number value with only 2 significant digits. + * @param {number} value The value to present. + * @return {string} value with just two significant digits. + */ + displayNumber(value) { + return value.toFixed(2); + } + + /** + * Enters step by step decoding mode. 
+ * @private + */ + startStepMode_() { + this.set('stepMode', 'edit'); + this.startTranslation_(); + } + + /** + * Exits step by step decoding mode. + * @private + */ + exitStepMode_() { + this.set('stepMode', 'view'); + this.dataUpdated_(); + } + + /** + * Begins step by step decoding with the current model and query. + * @private + */ + startTranslation_() { + this.set('startBody', JSON.stringify({ + model_id: { + language_pair: { + source_language: this.model.source_language.code, + target_language: this.model.target_language.code, + }, + name: this.model.id, + }, + input: this.query, + })); + this.$.startAjax.generateRequest(); + } + + /** + * Handles a start error. + * @private + */ + handleStartError_() { + console.log("failed"); + } + + /** + * Initializes the step by step decoding graph with the root note and makes + * the first generation step. + * @private + */ + handleStartResponse_() { + // Reset the node state and create the root of the tree. Later candidates + // that are returned from the generation call will be added. + this.nodeId_ = 0; + this.interactiveRoot_ = { + id: this.nodeId_, + stepIndex: 0, + candidate: { + label: '<s>', + label_id: 1, + log_probability: 0, + total_log_probability: 0, + score: 0, + parent_id: 0 + }, + children: [], + }; + this.nodeId_++; + + // Track which nodes are active and available as inputs to the next + // generation step. These will be updated with the candidates they + // generate. + this.selectedNodes_ = [this.interactiveRoot_]; + + // Redraw the entire plot with an interactive version. + this.createSVG_(); + this.drawInteractiveTree_(this.interactiveRoot_); + + // Make the first generation request. + this.step_(true); + } + + /** + * Handles a generate ajax error. + * @private + */ + handleGenerateError_() { + console.log("generate failed"); + } + + /** + * Processes the returned candidates and adds them to the graph. + * @private + */ + handleGenerateResponse_() { + // Add the candidates returned and tag them with unique identifiers so we + // can ensure later generation steps don't try to include candidates that + // can't be proccesed any more (we can only use candidates from the most + // recent generation step as input due to limitations in the remote + // decoder). + let stepIndex = 0; + let newlySelectedNodes = []; + this.stepNodes_ = []; + for (var i = 0; i < this.generateResponse_.candidate_list.length; ++i) { + let selectedNode = this.selectedNodes_[i]; + let candidateList = this.generateResponse_.candidate_list[i]; + for (var j = 0; j < candidateList.candidate.length && j < 5; ++j) { + let candidate = candidateList.candidate[j]; + // Tag the parent id so that the next generate call knows what network + // states to maintain. + candidate.parent_id = i; + let newNode = { + id: this.nodeId_, + stepIndex: stepIndex, + candidate: candidate, + children: [], + }; + this.nodeId_++; + stepIndex++; + this.stepNodes_.push(newNode); + selectedNode.children.push(newNode); + + // Select the first candidate. + if (j === 0) { + newNode.selected = true; + newlySelectedNodes.push(newNode); + } + } + } + this.selectedNodes_ = newlySelectedNodes; + + // Reset the graph. + this.createSVG_(); + this.drawInteractiveTree_(this.interactiveRoot_); + } + + /** + * Draws the interactive tree. + * @param {InteractiveNode} rootNode The root node to draw out. 
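Aside (not part of the patch): the step-decoding handlers above post JSON to `/api/remote_decoder_start` and `/api/remote_decoder_generate`, with the body shapes built in `startTranslation_` and `step_`. Below is a hedged Python sketch of driving the same round trip from a script; the host, the availability of a backend that implements these endpoints, and the `requests` dependency are assumptions.

```python
# Sketch of the step-decoding flow; field names mirror startTranslation_()
# and step_() in graph-visualization.js, but backend behavior is not
# guaranteed.
import requests

HOST = "http://localhost:8010"  # Placeholder.


def model_id(model):
  return {
      "language_pair": {
          "source_language": model["source_language"]["code"],
          "target_language": model["target_language"]["code"],
      },
      "name": model["id"],
  }


def start_session(model, query):
  body = {"model_id": model_id(model), "input": query}
  return requests.post(HOST + "/api/remote_decoder_start", json=body).json()


def generate_step(model, session_id, candidates, skip_next=False):
  body = {
      "model_id": model_id(model),
      "session_id": session_id,
      "candidate": candidates,
  }
  return requests.post(HOST + "/api/remote_decoder_generate",
                       params={"skip_next": skip_next}, json=body).json()
```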
+ * @private + */ + drawInteractiveTree_(rootNode) { + let treeHierachy = d3.hierarchy(rootNode) + .sum(function(d) { + return 1; + }) + .sort(function(a, b) { + return b.data.candidate.total_log_probability - + a.data.candidate.total_log_probability; + }); + + this.partition_(treeHierachy); + + // Create an enter object where we can add both nodes and links. + var enter = this.container_.selectAll(".node") + .data(treeHierachy.descendants()) + .enter(); + + // Add the nodes in four steps: + // 1) A general group element to hold all node portions. + // 2) A rectangle with no visible elements. + // 3) A circle for the node. + // 4) a text label. + var node = enter.append("g") + .attr("class", function(d) { + return "node" + + (d.children ? " node--internal" : " node--leaf") + + (d.data.selected ? " selected" : ""); + }) + .attr("transform", function(d) { + return "translate(" + d.y0 + "," + d.x0 + ")"; + }) + .attr('id', function(d, i) { return "g-" + i; }); + + node.append("rect") + .attr("width", function(d) { return d.y1 - d.y0; }) + .attr("height", 24); + + node.append("circle") + .attr("r", 10) + .attr("transform", "translate(10, 10)"); + + node.append("text") + .attr("x", 24) + .attr("y", 13) + .text(function(d) { return d.data.candidate.label; }); + + // Add out links from each node to it's parent. We link two nodes using the + // bottom center of the circle so that the text label can be placed at + // approximately the vertical center of the circle. This gives a decent + // layout while also not hiding any text. + enter.append("path") + .attr("class", "link") + .attr("d", function(d) { + if (!d.parent) { return ""; } + // Pad the placement of the links just below the center. We have to + // use x0 and y0 for location due to partition, which doesn't create + // standard x/y fields. + var nodeX = d.x0 + 16; + var nodeY = d.y0 + 10; + var parentX = d.parent.x0 + 16; + var parentY = d.parent.y0 + 10; + return "M" + + nodeY + "," + nodeX + + "C" + (nodeY + parentY) / 2 + "," + nodeX + " " + + (nodeY + parentY) / 2 + "," + parentX + " " + + parentY + "," + parentX; + }) + .style('stroke', '#ccc'); + + node.on('mouseover', function(d, i) { + this.set('currentName', d.data.candidate.label); + this.set( + 'currentProbability', + this.displayNumber(Math.exp(d.data.candidate.log_probability))); + this.set( + 'currentTotalProbability', + this.displayNumber(Math.exp(d.data.candidate.total_log_probability))); + this.set('score', this.displayNumber(Math.exp(d.data.candidate.score))); + }.bind(this)); + + // Store a local pointer to stepNodes and selectedNodes so that the click + // handler can access them without having to replace the 'this' pointer. + // The click handler needs the default 'this' handler to update the state of + // the clicked upon node. + let stepNodes = this.stepNodes_; + let selectedNodes = this.selectedNodes_; + + node.on('click', function(d, i) { + // Ignore nodes that fall out of bounds. + let stepIndex = d.data.stepIndex; + if (stepIndex >= stepNodes.length) { + return; + } + + // Ignore nodes that are from different steps. + let node = stepNodes[stepIndex]; + if (node.id != d.data.id) { + return; + } + + // Update the selected state of the node and either add it to the selected + // list or remove it. 
+ if (!node.selected) { + node.selected = true; + selectedNodes.push(node); + } else { + node.selected = false; + selectedNodes.splice(selectedNodes.indexOf(node), 1); + } + d3.select(this).classed('selected', node.selected); + }); + } + + /** + * Make one generation step with the candidates in the current selectedNodes + * list. If no nodes are selected, this silently does nothing. + * @param {boolean=} opt_skipNext If true, skips the next step during + * generation. + * @private + */ + step_(opt_skipNext) { + // Running generate without any nodes can put the decoder into a bad state + // and make the session unusable, so for now, silently skip this case. + if (this.selectedNodes_.length == 0) { + console.log("Skipping empty step."); + return; + } + + this.set('generateParams', { + skip_next: opt_skipNext || false, + }); + this.set('generateBody', JSON.stringify({ + model_id: { + language_pair: { + source_language: this.model.source_language.code, + target_language: this.model.target_language.code, + }, + name: this.model.id, + }, + session_id: this.startResponse_.session_id, + candidate: this.selectedNodes_.map(function(node) { + return node.candidate; + }), + })); + this.$.generateAjax.generateRequest(); + } + +} + +customElements.define(GraphVisualization.is, GraphVisualization); diff --git a/tensor2tensor/insights/polymer/index.html b/tensor2tensor/insights/polymer/index.html new file mode 100644 index 000000000..315dbad24 --- /dev/null +++ b/tensor2tensor/insights/polymer/index.html @@ -0,0 +1,73 @@ +<!doctype html> +<!-- +@license +Copyright 2018 The Tensor2Tensor Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +--> +<html> +<head> + <meta name="viewport" content="width=device-width, minimum-scale=1.0, initial-scale=1, user-scalable=no"> + <meta name="mobile-web-app-capable" content="yes"> + <meta name="apple-mobile-web-app-capable" content="yes"> + <meta name="apple-touch-fullscreen" content="yes"> + <meta name="apple-mobile-web-app-status-bar-style" + content="black-translucent" > + <meta name="format-detection" content="telephone=no"> + + <title>NMT Research Frontend + + + + + + + + + + + + + + + + + + + + + + diff --git a/tensor2tensor/insights/polymer/insights_app/insights-app.html b/tensor2tensor/insights/polymer/insights_app/insights-app.html new file mode 100644 index 000000000..622337c8f --- /dev/null +++ b/tensor2tensor/insights/polymer/insights_app/insights-app.html @@ -0,0 +1,138 @@ + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tensor2tensor/insights/polymer/insights_app/insights-app.js b/tensor2tensor/insights/polymer/insights_app/insights-app.js new file mode 100644 index 000000000..65e1ca252 --- /dev/null +++ b/tensor2tensor/insights/polymer/insights_app/insights-app.js @@ -0,0 +1,78 @@ +/** + * @license + * Copyright 2018 The Tensor2Tensor Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * `` Manages the views of the NMT Insights App. + * + * ### Usage + * + * + * + */ +class InsightsApp extends Polymer.Element { + /** + * @return {string} The component name. + */ + static get is() { + return 'insights-app'; + } + + /** + * @return {!Object} The component properties. + */ + static get properties() { + return { + /** + * @type {string} + */ + page: { + type: String, + reflectToAttribute: true, + }, + }; + } + + /** + * @return {!Array} The component observers. + */ + static get observers() { + return [ + 'routePageChanged_(routeData.page)', + ]; + } + + /** + * Updates the page field if page exists or uses a default value. + * @param {?string} page The current page name being viewed. + * @private + */ + routePageChanged_(page) { + if (page == this.page) { + return; + } + this.page = page || 'explore'; + this.set('routeData.page', this.page); + + // Refresh the now selected page in case it needs new data on a new view. + let currentPage = this.get('currentPage'); + if (currentPage) { + currentPage.refresh(); + } + } +} + +customElements.define(InsightsApp.is, InsightsApp); diff --git a/tensor2tensor/insights/polymer/language_selector/language-selector-content.html b/tensor2tensor/insights/polymer/language_selector/language-selector-content.html new file mode 100644 index 000000000..23bb4f370 --- /dev/null +++ b/tensor2tensor/insights/polymer/language_selector/language-selector-content.html @@ -0,0 +1,62 @@ + + + + + + + + + + + + + + + + + diff --git a/tensor2tensor/insights/polymer/language_selector/language-selector-content.js b/tensor2tensor/insights/polymer/language_selector/language-selector-content.js new file mode 100644 index 000000000..c5d783fdc --- /dev/null +++ b/tensor2tensor/insights/polymer/language_selector/language-selector-content.js @@ -0,0 +1,243 @@ +/** + * @license + * Copyright 2018 The Tensor2Tensor Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * `` provides menu content for language selection. + * + * The content provides a search bar that will filter available languages by any + * language name or code that has the query text as a substring. + * + * By default, this will auto select a provided language with language code + * 'en'. + * + * ### Usage + * + * + * + */ +class LanguageSelectorContent extends Polymer.Element { + /** + * @return {string} The component name. + */ + static get is() { + return 'language-selector-content'; + } + + /** + * @return {!Object} The component properties. 
+ */ + static get properties() { + return { + /** + * @type {?Array} + */ + languages: { + type: Array, + observer: 'languagesUpdated_', + }, + /** + * @type {!Language} + */ + value: { + type: Object, + notify: true, + }, + /** + * @type {string} + */ + defaultCode: { + type: String, + value: 'en', + } + }; + } + + /** + * @return {!Array} The component observers. + */ + static get observers() { + return [ + 'selectDefault_(languages, renderedItemCount)', + 'filterUpdated_(filter)', + ]; + } + + /** + * Selects the language in the drop down. + * @param {Language} language The language to pre-select. + * @public + */ + forceSelection(language) { + this.set('filter', ''); + for (var i = 0; i < this.languages.length; ++i) { + if (this.languages[i].code == language.code) { + this.set('value', this.languages[i]); + this.updateSelected_(Polymer.dom(this.$.items).children[i]); + return; + } + } + } + + /** + * Updates the internal languages and resets selection. + * @param {?Array} newLanguages The new language list. + * @private + */ + languagesUpdated_(newLanguages) { + if (newLanguages) { + for (var i = 0; i < newLanguages.length; ++i) { + newLanguages[i].hidden = false; + } + } + + this.set('filter', ''); + this.set('selected', undefined); + } + + /** + * Selects the default language if one can be found after all languages have + * been rendered in the menu. + * @param {?Array} languages The languages + * @param {number} renderedItemCount The number of languages rendered. + * @private + */ + selectDefault_(languages, renderedItemCount) { + if (this.get('selected') || !languages || + languages.length != renderedItemCount) { + return; + } + + this.$.languageList.render(); + if (this.value) { + for (var i = 0; i < languages.length; ++i) { + if (languages[i].code == this.value.code) { + this.updateSelected_(Polymer.dom(this.$.items).children[i]); + return; + } + } + } + + let defaultCode = this.get('defaultCode'); + for (var i = 0; i < languages.length; ++i) { + if (languages[i].code == defaultCode || languages.length == 1) { + this.set('value', languages[i]); + this.updateSelected_(Polymer.dom(this.$.items).children[i]); + return; + } + } + } + + /** + * Selects the rendered language if only one is visible given the current + * search filter. + * @private + */ + enterPressed_() { + let visibleLanguagesIndices = []; + for (var i = 0; i < this.languages.length; ++i) { + if (!this.languages[i].hidden) { + visibleLanguagesIndices.push(i); + } + } + if (visibleLanguagesIndices.length == 1) { + this.set('value', this.languages[visibleLanguagesIndices[0]]); + this.updateSelected_(Polymer.dom(this.$.items).children[0]); + } + } + + /** + * Sets the hidden state of languages given the current filter. + * @param {string} newFilter The new filter to match languages against. + * @private + */ + filterUpdated_(newFilter) { + if (!this.get('languages')) { + return; + } + + let filter = newFilter.toLowerCase(); + for (var i = 0; i < this.languages.length; ++i) { + let hidden = !this.languageMatchesQuery_(this.languages[i], filter); + this.set('languages.' + i + '.hidden', hidden); + } + } + + /** + * Returns true if the language is visible. + * @param {!Language} language The language being evaluated. + * @return {boolean} True if visible. + * @private + */ + isShown_(language) { + return !language.hidden; + } + + /** + * Returns true if the language matches the filter. + * @param {!Language} language The language being evaluated. + * @param {string} filter The filter to compare against. 
+ * @return {boolean} True if language matches filter. + * @private + */ + languageMatchesQuery_(language, filter) { + let languageName = language.name.toLowerCase(); + return filter == '' || languageName.indexOf(filter) >= 0 || + language.code.indexOf(filter) >= 0; + } + + /** + * Selects the tapped element and updates the value with the corresponding + * language value. + * @param {!EventTarget} e The tap event. + * @private + */ + select_(e) { + let language = this.$.languageList.itemForElement(e.target); + this.set('value', language); + this.updateSelected_(e.target); + } + + /** + * Updates the selection with the given element. + * @param {!Element} ele The selected dom element. + * @private + */ + updateSelected_(ele) { + let oldSelection = this.get('selected'); + if (oldSelection) { + this.dispatchEvent(new CustomEvent('iron-deselect', { + bubbles: true, + composed: true, + detail: { + item: oldSelection, + }, + })); + } + this.set('selected', ele); + this.dispatchEvent(new CustomEvent('iron-select', { + bubbles: true, + composed: true, + detail: { + item: ele, + }, + })); + } +} + +customElements.define(LanguageSelectorContent.is, LanguageSelectorContent); diff --git a/tensor2tensor/insights/polymer/language_selector/language-selector.html b/tensor2tensor/insights/polymer/language_selector/language-selector.html new file mode 100644 index 000000000..9e6dbf1be --- /dev/null +++ b/tensor2tensor/insights/polymer/language_selector/language-selector.html @@ -0,0 +1,42 @@ + + + + + + + + + + + + + + + diff --git a/tensor2tensor/insights/polymer/language_selector/language-selector.js b/tensor2tensor/insights/polymer/language_selector/language-selector.js new file mode 100644 index 000000000..919049df0 --- /dev/null +++ b/tensor2tensor/insights/polymer/language_selector/language-selector.js @@ -0,0 +1,85 @@ +/** + * @license + * Copyright 2018 The Tensor2Tensor Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * `` provides a searchable dropdown of languages. + * + * The dropdown will present the selected language's Name. When opened, the + * search bar will filter available languages by any language name or code that + * has the query text as a substring. + * + * By default, this will auto select a provided language with language code + * 'en'. + * + * ### Usage + * + * + * + */ +class LanguageSelector extends Polymer.Element { + /** + * @return {string} The component name. + */ + static get is() { + return 'language-selector'; + } + + /** + * @return {!Object} The component properties. + */ + static get properties() { + return { + /** + * @type {string} + */ + label: { + type: String, + }, + /** + * @type {?Array} + */ + languages: { + type: Array, + }, + /** + * @type {!Language} + */ + value: { + type: Object, + notify: true, + }, + /** + * @type {string} + */ + defaultCode: { + type: String, + value: 'en', + }, + }; + } + + /** + * Selects the language in the drop down. + * @param {Language} language The language to pre-select. 
+ * @public + */ + forceSelection(language) { + this.$.selector.forceSelection(language); + } +} + +customElements.define(LanguageSelector.is, LanguageSelector); diff --git a/tensor2tensor/insights/polymer/processing_visualization/processing-visualization.html b/tensor2tensor/insights/polymer/processing_visualization/processing-visualization.html new file mode 100644 index 000000000..82e2b21e4 --- /dev/null +++ b/tensor2tensor/insights/polymer/processing_visualization/processing-visualization.html @@ -0,0 +1,85 @@ + + + + + + + + + diff --git a/tensor2tensor/insights/polymer/processing_visualization/processing-visualization.js b/tensor2tensor/insights/polymer/processing_visualization/processing-visualization.js new file mode 100644 index 000000000..69379d88b --- /dev/null +++ b/tensor2tensor/insights/polymer/processing_visualization/processing-visualization.js @@ -0,0 +1,52 @@ +/** + * @license + * Copyright 2018 The Tensor2Tensor Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * `` summarises pre/post processing steps. + * + * This element presents the pre-processing segmentation steps and + * post-processing de-segmentation and rewrite steps that are applied to a + * translation query. + * + * ### Usage + * + * + */ +class ProcessingVisualization extends Polymer.Element { + /** + * @return {string} The component name. + */ + static get is() { + return 'processing-visualization'; + } + + /** + * @return {!Object} The component properties. + */ + static get properties() { + return { + /** + * @type {!QueryProcessingVisualization} + */ + data: { + type: Object, + }, + }; + } +} + +customElements.define(ProcessingVisualization.is, ProcessingVisualization); diff --git a/tensor2tensor/insights/polymer/query_card/query-card.html b/tensor2tensor/insights/polymer/query_card/query-card.html new file mode 100644 index 000000000..f670a4cf7 --- /dev/null +++ b/tensor2tensor/insights/polymer/query_card/query-card.html @@ -0,0 +1,93 @@ + + + + + + + + + + + + + + + + + + + diff --git a/tensor2tensor/insights/polymer/query_card/query-card.js b/tensor2tensor/insights/polymer/query_card/query-card.js new file mode 100644 index 000000000..c0d25e750 --- /dev/null +++ b/tensor2tensor/insights/polymer/query_card/query-card.js @@ -0,0 +1,336 @@ +/** + * @license + * Copyright 2018 The Tensor2Tensor Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * `` presents a material card for selecting a supported mdoel. 
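Aside (not part of the patch): later in this file, `modelsUpdated_` folds the model configuration list into a two-level map keyed by source and target language code. A small Python sketch of the same bookkeeping is shown below; only the `source_language`, `target_language`, and `id` fields used by that method are assumed.

```python
def build_model_maps(configurations):
  """Mirrors query-card.js modelsUpdated_: source -> target -> id -> model."""
  code_to_language = {}
  pair_to_models = {}
  for model in configurations:
    src = model["source_language"]["code"]
    tgt = model["target_language"]["code"]
    code_to_language[src] = model["source_language"]
    code_to_language[tgt] = model["target_language"]
    pair_to_models.setdefault(src, {}).setdefault(tgt, {})[model["id"]] = model
  return code_to_language, pair_to_models


_, pairs = build_model_maps([
    {"id": "prod",
     "source_language": {"code": "en", "name": "English"},
     "target_language": {"code": "de", "name": "German"}},
])
print(sorted(pairs["en"]["de"]))  # ['prod']
```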
+ * + * This will fetch a set of supported models for debugging and provide three + * selectors: + * - Source Language + * - Target Language + * - Model + * Once all three have been populated, it will emit a `Model` object through + * `model`. + * + * ### Usage + * + * + * Custom InputField + * + */ +class QueryCard extends Polymer.Element { + constructor() { + super(); + + /** + * A general mapping from language code to the language objects. + * @type {!Object} + * @private + */ + this.languageToNameMap_ = {}; + + /** + * A nested mapping of languages to a list of models. + * @type {!Object>>>} + * @private + */ + this.languagePairToModelMap_ = {}; + } + + /** + * @return {string} The component name. + */ + static get is() { + return 'query-card'; + } + + /** + * @return {!Object} The component properties. + */ + static get properties() { + return { + /** + * @type {!Object} + */ + route: { + type: String, + }, + /** + * @type {!Object} + */ + subRoute: { + type: String, + notify: true, + }, + /** + * @type {?Model} + */ + model: { + type: Object, + notify: true, + }, + /** + * @type {string} + */ + url: { + type: String, + }, + /** + * @type {?Language} + */ + sourceLanguage_: { + type: Object, + }, + /** + * @type {?Language} + */ + targetLanguage_: { + type: Object, + }, + /** + * @type {string} + */ + defaultModelId: { + type: String, + value: 'prod', + } + }; + } + + /** + * @return {!Array} The component observers. + */ + static get observers() { + return [ + 'routeActiveUpdated_(routeActive)', + + 'modelsUpdated_(modelConfigurations)', + 'sourceLanguagesUpdated_(sourceLanguages, routeData)', + 'targetLanguagesUpdated_(targetLanguages, routeData)', + + 'sourceLanguageUpdated_(sourceLanguage_)', + 'targetLanguageUpdated_(targetLanguage_)', + 'modelListUpdated_(modelList, routeData)', + 'modelUpdated_(model)', + ]; + } + + /** + * Resets the route data if the route is inactive. + * @param {boolean} routeActive The active state of the route. + * @private + */ + routeActiveUpdated_(routeActive) { + if (!routeActive) { + this.set('routeData', {}); + } + } + + /** + * Sets the sourceLanguage if a new source language matches the route + * path or marks it as undefined. + * @param {Array} sourceLanguages A list of source languages. + * @param {{sourceLanguage: string}} routeData The current route paths. + * @private + */ + sourceLanguagesUpdated_(sourceLanguages, routeData) { + if (this.routeActive && sourceLanguages) { + for (var i = 0; i < sourceLanguages.length; ++i) { + if (routeData.sourceLanguage == sourceLanguages[i].code) { + this.$.sourceSelector.forceSelection(sourceLanguages[i]); + return; + } + } + } + } + + /** + * Selects the available target language list based on the new selected source + * language. + * @param {Language} sourceLanguage The selected source language index. + * @private + */ + sourceLanguageUpdated_(sourceLanguage) { + if (sourceLanguage == undefined) { + this.set('targetLanguages', []); + return; + } + + this.set('routeData.sourceLanguage', sourceLanguage.code); + + var targetLanguages = []; + for (var key in this.languagePairToModelMap_[sourceLanguage.code]) { + targetLanguages.push(this.languageToNameMap_[key]); + } + targetLanguages.sort(sort_); + this.set('targetLanguage', undefined); + this.set('targetLanguages', targetLanguages); + } + + /** + * Sets the targetLanguage if a new target language matches the route + * path or marks it as undefined. + * @param {Array} targetLanguages A list of target languages. 
+ * @param {{targetLanguage: string}} routeData The current route paths. + * @private + */ + targetLanguagesUpdated_(targetLanguages, routeData) { + if (this.routeActive && targetLanguages) { + for (var i = 0; i < targetLanguages.length; ++i) { + if (routeData.targetLanguage == targetLanguages[i].code) { + this.$.targetSelector.forceSelection(targetLanguages[i]); + return; + } + } + } + } + + /** + * Selects the available model list based on the new selected target + * language. + * @param {Language} targetLanguage The selected target language index. + * @private + */ + targetLanguageUpdated_(targetLanguage) { + this.set('model', undefined); + if (targetLanguage == undefined) { + this.set('modelList', []); + return; + } + + let sourceLanguage = this.sourceLanguage_; + this.set('routeData.targetLanguage', targetLanguage.code); + var models = []; + var targetLanguageMap = this.languagePairToModelMap_[sourceLanguage.code]; + for (var key in targetLanguageMap[targetLanguage.code]) { + models.push(targetLanguageMap[targetLanguage.code][key]); + } + this.set('modelList', models); + } + + /** + * Sets the modelIndex if a new model matches the route path or marks it as + * undefined. + * @param {?Array} modelList A list of models. + * @param {{modelId: string}} routeData The current route paths. + * @private + */ + modelListUpdated_(modelList, routeData) { + if (this.routeActive && modelList) { + for (var i = 0; i < modelList.length; ++i) { + if (routeData.modelId == modelList[i].id) { + this.set('model', modelList[i]); + return; + } + } + } + + if (modelList && modelList.length >= 1) { + // Chose the default model if it exists, otherwise choose the first entry. + // This ensures that the ordering of models does't impact the default + // selection. + for (var i = 0; i < modelList.length; ++i) { + if (this.defaultModelId == modelList[i].id) { + this.set('model', modelList[i]); + return; + } + } + this.set('model', modelList[0]); + } + } + + /** + * Updates the selected model with the current model index. + * @param {?Model} model The current selected model index. + * @private + */ + modelUpdated_(model) { + if (!model) { + return; + } + + this.set('routeData.modelId', this.model.id); + } + + /** + * Updates the set of available language sets and models. + * @param {{configuration: !Array}} modelConfigurations A list of + * models. + * @private + */ + modelsUpdated_(modelConfigurations) { + var models = modelConfigurations.configuration; + + this.languageToNameMap_ = {}; + this.languagePairToModelMap_ = {}; + + for (var i = 0; i < models.length; ++i) { + let model = models[i]; + // Extract the language codes and store the code to language mappings. + var source_language = model.source_language.code; + this.languageToNameMap_[source_language] = model.source_language; + var target_language = model.target_language.code; + this.languageToNameMap_[target_language] = model.target_language; + + // Create the first level nested map, from source languages to target + // language maps. + var targetLanguageMap; + if (source_language in this.languagePairToModelMap_) { + targetLanguageMap = this.languagePairToModelMap_[source_language]; + } else { + targetLanguageMap = {}; + this.languagePairToModelMap_[source_language] = targetLanguageMap; + } + + // Create the second level nested map, from target languages to model + // maps. 
+ var model_map; + if (target_language in targetLanguageMap) { + model_map = targetLanguageMap[target_language]; + } else { + model_map = {}; + targetLanguageMap[target_language] = model_map; + } + + // Store the mapping from a model id to a model. + model_map[model.id] = model; + } + + // Prepare the initial set of available source languages. + var sourceLanguageList = []; + for (var key in this.languagePairToModelMap_) { + sourceLanguageList.push(this.languageToNameMap_[key]); + } + sourceLanguageList.sort(sort_); + this.set('sourceLanguages', sourceLanguageList); + } +} + +customElements.define(QueryCard.is, QueryCard); + +/** + * Returns the ordering of two language's based on their name. + * @param {!Language} a The first language to compare. + * @param {!Language} b The second language to compare. + * @return {number} Negative if a comes before b. + */ +function sort_(a, b) { + if (a.name != b.name) { + return a.name < b.name ? -1 : 1; + } + return 0; +} diff --git a/tensor2tensor/insights/polymer/tensor2tensor.html b/tensor2tensor/insights/polymer/tensor2tensor.html new file mode 100644 index 000000000..23003a35f --- /dev/null +++ b/tensor2tensor/insights/polymer/tensor2tensor.html @@ -0,0 +1,73 @@ + + + + + + + + + + + + NMT Research Frontend + + + + + + + + + + + + + + + + + + + + + + diff --git a/tensor2tensor/insights/polymer/translation_result/translation-result.html b/tensor2tensor/insights/polymer/translation_result/translation-result.html new file mode 100644 index 000000000..bf152f81d --- /dev/null +++ b/tensor2tensor/insights/polymer/translation_result/translation-result.html @@ -0,0 +1,90 @@ + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tensor2tensor/insights/polymer/translation_result/translation-result.js b/tensor2tensor/insights/polymer/translation_result/translation-result.js new file mode 100644 index 000000000..995a5a822 --- /dev/null +++ b/tensor2tensor/insights/polymer/translation_result/translation-result.js @@ -0,0 +1,114 @@ +/** + * @license + * Copyright 2018 The Tensor2Tensor Authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * `` Presents zero or more visualization of a translation. + * + * This inspects the set of visualization fields provided and triggers the + * corresponding visualization component in the set of available views in tabbed + * layout. + * + * ### Usage + * + * + * + */ +class TranslationResult extends Polymer.Element { + /** + * @return {string} The component name. + */ + static get is() { + return 'translation-result'; + } + + /** + * @return {!Object} The component properties. 
+ */ + static get properties() { + return { + /** + * @type {{ + * response: { + * visualization_name: string, + * title: string, + * name: string, + * query_processing: ?Object, + * search_graph: ?Object, + * word_heat_map: ?Object, + * }, + * model: !Model, + * query: string + * }} + */ + result: { + type: Object, + observer: 'resultUpdated_', + }, + /** + * @type {string} + */ + view: { + type: String, + value: 'processing', + }, + }; + } + + /** + * Sets internal data structures given the updated result. + * @private + */ + resultUpdated_() { + var response = this.result.response; + if (!response || !response.result || response.result.length == 0) { + return; + } + + for (var i = 0; i < response.result.length; ++i) { + let visualizationResult = response.result[i]; + + // Dynamically create the visualization element based on the name field. + // This will enable multiple versions of the same visualization to be + // created later on when the data mapping is generalized. + let analysisEle = document.createElement( + visualizationResult.visualization_name + '-visualization'); + + // Set the generic attributes. + analysisEle.name = visualizationResult.name; + analysisEle.model = this.result.model; + analysisEle.query = this.result.query; + + // Set the visualization specific data attribute. + // TODO(kstevens): Cleanup by setting visualization_name the same as the + // protobuffer field names so we don't need this mapping. + if (visualizationResult.visualization_name == 'processing') { + analysisEle.data = visualizationResult.query_processing; + } else if (visualizationResult.visualization_name == 'attention') { + analysisEle.data = visualizationResult.word_heat_map; + } else if (visualizationResult.visualization_name == 'graph') { + analysisEle.data = visualizationResult.search_graph; + } + + Polymer.dom(this.$.view).appendChild(analysisEle); + } + // Don't make assumptions about which visualizations are available. Instead + // preselect the initial view based on data. + this.set('view', response.result[0].name); + } +} + +customElements.define(TranslationResult.is, TranslationResult); diff --git a/tensor2tensor/insights/query_processor.py b/tensor2tensor/insights/query_processor.py new file mode 100644 index 000000000..7500b8467 --- /dev/null +++ b/tensor2tensor/insights/query_processor.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A base class for all query processing classes.""" + + +class QueryProcessor(object): + """Base class for any class that wants to process sequence queries. + + QueryProcessor classes are expected to convert a string query to a series of + visualization structures. + + TODO(kstevens): Define how the visualization structures should look once the + protos are in better shape. + """ + + def process(self, query): + """Returns the generated visualizations for query. 
+ + Args: + query: The string input + + Returns: + A dictionary with one key: 'result' that maps to a list of visualization + objects. + """ + del query + return {"result": []} diff --git a/tensor2tensor/insights/server.py b/tensor2tensor/insights/server.py new file mode 100644 index 000000000..e666580c4 --- /dev/null +++ b/tensor2tensor/insights/server.py @@ -0,0 +1,213 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A GUnicorn + Flask Debug Frontend for Transformer models.""" + +import json +from flask import Flask +from flask import jsonify +from flask import request +from flask import send_from_directory +from flask.json import JSONEncoder +from gunicorn.app.base import BaseApplication +from gunicorn.six import iteritems +import numpy as np +from tensor2tensor.insights import transformer_model +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("configuration", "", + "A JSON InsightConfiguration message that configures which " + "models to run in the insight frontend.") +flags.DEFINE_string("static_path", "", + "Path to static javascript and html files to serve.") + + +_NUMPY_INT_DTYPES = [ + np.int8, np.int16, np.int32, np.int64 +] +_NUMPY_FP_DTYPES = [ + np.float16, np.float32, np.float64 +] + + +class NumpySerializationFix(JSONEncoder): + """json module cannot serialize numpy datatypes, reinterpret them first""" + + def default(self, obj): + obj_type = type(obj) + if obj_type in _NUMPY_INT_DTYPES: + return int(obj) + if obj_type in _NUMPY_FP_DTYPES: + return float(obj) + return json.JSONEncoder.default(self, obj) + + +class DebugFrontendApplication(BaseApplication): + """A local custom application for GUnicorns. + + This custom application enables us to run with a custom main that parses + tensorflow ops and does some internal setup prior to processing queries. The + underlying app registered instances of this class will be forked. + """ + + def __init__(self, app, options=None): + """Creates the GUnicorn application. + + Args: + app: A Flask application that will process requests. + options: A dict of GUnicorn options. + """ + self.options = options or {} + self.application = app + super(DebugFrontendApplication, self).__init__() + + def load_config(self): + """Loads the configuration.""" + config = dict([(key, value) for key, value in iteritems(self.options) + if key in self.cfg.settings and value is not None]) + for key, value in iteritems(config): + self.cfg.set(key.lower(), value) + + def load(self): + """Loads the application. + + Returns: + The Flask application. + """ + return self.application + + +def main(_): + # Create the models we support: + with open(FLAGS.configuration) as configuration_file: + configuration = json.load(configuration_file) + + # Read in the set of query processors. 
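+  # Each processor is keyed by (source code, target code, label). An
+  # illustrative --configuration file, with field names taken from this
+  # function and from transformer_model.TransformerModel and all values
+  # invented for the example:
+  #
+  # {
+  #   "language": [
+  #     {"code": "en", "name": "English"},
+  #     {"code": "de", "name": "German"}
+  #   ],
+  #   "configuration": [{
+  #     "source_language": "en",
+  #     "target_language": "de",
+  #     "label": "en_de_base",
+  #     "transformer": {
+  #       "model": "transformer",
+  #       "model_dir": "/path/to/trained/model",
+  #       "data_dir": "/path/to/t2t/data",
+  #       "problem": "translate_ende_wmt32k",
+  #       "hparams_set": "transformer_base",
+  #       "hparams": ""
+  #     }
+  #   }]
+  # }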
+ processors = {} + for processor_configuration in configuration["configuration"]: + key = (processor_configuration["source_language"], + processor_configuration["target_language"], + processor_configuration["label"]) + + processors[key] = transformer_model.TransformerModel( + processor_configuration) + + # Read in the list of supported languages. + languages = {} + for language in configuration["language"]: + languages[language["code"]] = { + "code": language["code"], + "name": language["name"], + } + + # Create flask to serve all paths starting with '/polymer' from the static + # path. This is to served non-vulcanized components. + app = Flask( + __name__.split(".")[0], + static_url_path="/polymer", + static_folder=FLAGS.static_path) + app.json_encoder = NumpySerializationFix + + # Disable static file caching. + app.config["SEND_FILE_MAX_AGE_DEFAULT"] = 0 + + @app.route("/api/language_list/") + def language_list(): # pylint: disable=unused-variable + """Responds to /api/language_list with the supported languages. + + Returns: + JSON for the languages. + """ + return jsonify({ + "language": list(languages.values()) + }) + + @app.route("/api/list_models/") + def list_models(): # pylint: disable=unused-variable + """Responds to /api/list_models with the supported modes. + + + Returns: + JSON for the supported models. + """ + # pylint: disable=g-complex-comprehension + configuration_list = [{ + "id": label, + "source_language": languages[source_code], + "target_language": languages[target_code], + } for source_code, target_code, label in processors] + return jsonify({ + "configuration": configuration_list + }) + + @app.route("/debug", methods=["GET"]) + def query(): # pylint: disable=unused-variable + """Responds to /debug with processing results. + + Returns: + JSON for the query's result. + """ + query = request.args.get("source") + source_language = request.args.get("sl") + target_language = request.args.get("tl") + model_name = request.args.get("id") + processor = processors[(source_language, target_language, model_name)] + return jsonify(processor.process(query)) + + # Catchall for all other paths. Any other path should get the basic index + # page, the polymer side will determine what view to show and what REST calls + # to make for data. + @app.route("/", defaults={"path": ""}) + @app.route("/") + def root(path): # pylint: disable=unused-variable + """Responds to all other non-static paths with index.html. + + Args: + path: Unused path. + + Returns: + The landing page html text. + """ + if (path == "index.js" or + path == "webcomponentsjs/webcomponents-lite.js"): + # Some vulcanizing methods bundle the javascript into a index.js file + # paired with index.html but leave two important webcomponents js files + # outside of the bundle. If requesting those special files, fetch them + # directly rather than from a /static sub-directory. + return send_from_directory(FLAGS.static_path, path) + # Everything else should redirect to the main landing page. Since we + # use a single page app, any initial url requests may include random + # paths (that don't start with /api or /static) which all should be + # served by the main landing page. + return send_from_directory(FLAGS.static_path, "index.html") + + # Run the server. 
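+  # An illustrative way to launch this frontend (paths are assumptions):
+  #   python tensor2tensor/insights/server.py \
+  #     --configuration=/path/to/insight_configuration.json \
+  #     --static_path=/path/to/vulcanized/polymer/build
+  # The GUnicorn options below bind the workers to port 8010.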
+ tf.logging.info("############# READY ##################") + options = { + "bind": ":8010", + "timeout": 600, + "workers": 4, + "reload": True, + "spew": True, + "worker_class": "gevent", + } + DebugFrontendApplication(app, options).run() + + +if __name__ == "__main__": + tf.app.run() diff --git a/tensor2tensor/insights/transformer_model.py b/tensor2tensor/insights/transformer_model.py new file mode 100644 index 000000000..f0b4ac097 --- /dev/null +++ b/tensor2tensor/insights/transformer_model.py @@ -0,0 +1,306 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""A QueryProcessor using the Transformer framework.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from collections import deque + +import glob +import os +import shutil +import time + +import numpy as np + +from six.moves import range +from tensor2tensor.bin import t2t_trainer +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.insights import graph +from tensor2tensor.insights import query_processor +from tensor2tensor.utils import decoding +from tensor2tensor.utils import trainer_lib +from tensor2tensor.utils import usr_dir + +import tensorflow.compat.v1 as tf +from tensorflow.python import debug as tfdbg + +flags = tf.flags +FLAGS = flags.FLAGS + + +def topk_watch_fn(feeds, fetches): + """TFDBG watch function for transformer beam search nodes. + + Args: + feeds: Unused. Required by tfdbg. + fetches: Unused. Required by tfdbg. + + Returns: + a WatchOptions instance that will capture all beam search ops. + """ + del fetches, feeds + return tfdbg.WatchOptions( + node_name_regex_whitelist= + ".*grow_(finished|alive)_(topk_scores|topk_seq).*", + debug_ops=["DebugIdentity"]) + + +def seq_filter(datum, tensor): + """TFDBG data directory filter for capturing topk_seq operation dumps. + + Args: + datum: A datum to filter by node_name. + tensor: Unused. Required by tfdbg + + Returns: + a true when datum should be returned. + """ + del tensor + return "topk_seq" in datum.node_name + + +def scores_filter(datum, tensor): + """TFDBG data directory filter for capturing topk_scores operation dumps. + + Args: + datum: A datum to filter by node_name. + tensor: Unused. Required by tfdbg + + Returns: + a true when datum should be returned. + """ + del tensor + return "topk_scores" in datum.node_name + + +def sequence_key(sequence): + """Returns a key for mapping sequence paths to graph vertices.""" + return ":".join([str(s) for s in sequence]) + + +class TransformerModel(query_processor.QueryProcessor): + """A QueryProcessor using a trained Transformer model. + + This processor supports the following visualizations: + - processing: Basic source and target text processing + - graph: A graph of the beam search process. + """ + + def __init__(self, processor_configuration): + """Creates the Transformer estimator. 
+ + Args: + processor_configuration: A ProcessorConfiguration protobuffer with the + transformer fields populated. + """ + # Do the pre-setup tensor2tensor requires for flags and configurations. + transformer_config = processor_configuration["transformer"] + FLAGS.output_dir = transformer_config["model_dir"] + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + data_dir = os.path.expanduser(transformer_config["data_dir"]) + + # Create the basic hyper parameters. + self.hparams = trainer_lib.create_hparams( + transformer_config["hparams_set"], + transformer_config["hparams"], + data_dir=data_dir, + problem_name=transformer_config["problem"]) + + decode_hp = decoding.decode_hparams() + decode_hp.add_hparam("shards", 1) + decode_hp.add_hparam("shard_id", 0) + + # Create the estimator and final hyper parameters. + self.estimator = trainer_lib.create_estimator( + transformer_config["model"], + self.hparams, + t2t_trainer.create_run_config(self.hparams), + decode_hparams=decode_hp, use_tpu=False) + + # Fetch the vocabulary and other helpful variables for decoding. + self.source_vocab = self.hparams.problem_hparams.vocabulary["inputs"] + self.targets_vocab = self.hparams.problem_hparams.vocabulary["targets"] + self.const_array_size = 10000 + + # Prepare the Transformer's debug data directory. + run_dirs = sorted(glob.glob(os.path.join("/tmp/t2t_server_dump", "run_*"))) + for run_dir in run_dirs: + shutil.rmtree(run_dir) + + def process(self, query): + """Returns the visualizations for query. + + Args: + query: The query to process. + + Returns: + A dictionary of results with processing and graph visualizations. + """ + tf.logging.info("Processing new query [%s]" %query) + + # Create the new TFDBG hook directory. + hook_dir = "/tmp/t2t_server_dump/request_%d" %int(time.time()) + os.makedirs(hook_dir) + hooks = [tfdbg.DumpingDebugHook(hook_dir, watch_fn=topk_watch_fn)] + + # TODO(kstevens): This is extremely hacky and slow for responding to + # queries. Figure out a reasonable way to pre-load the model weights before + # forking and run queries through the estimator quickly. + def server_input_fn(): + """Generator that returns just the current query.""" + for _ in range(1): + input_ids = self.source_vocab.encode(query) + input_ids.append(text_encoder.EOS_ID) + x = [1, 100, len(input_ids)] + input_ids + x += [0] * (self.const_array_size - len(x)) + d = { + "inputs": np.array(x).astype(np.int32), + } + yield d + + def input_fn(): + """Generator that returns just the current query.""" + gen_fn = decoding.make_input_fn_from_generator(server_input_fn()) + example = gen_fn() + # TODO(kstevens): Make this method public + # pylint: disable=protected-access + return decoding._interactive_input_tensor_to_features_dict( + example, self.hparams) + + # Make the prediction for the current query. + result_iter = self.estimator.predict(input_fn, hooks=hooks) + result = None + for result in result_iter: + break + + # Extract the beam search information by reading the dumped TFDBG event + # tensors. We first read and record the per step beam sequences then record + # the beam scores. Afterwards we align the two sets of values to create the + # full graph vertices and edges. + decoding_graph = graph.Graph() + run_dirs = sorted(glob.glob(os.path.join(hook_dir, "run_*"))) + for run_dir in run_dirs: + # Record the different completed and active beam sequence ids. + alive_sequences = deque() + finished_sequences = deque() + + # Make the root vertex since it always needs to exist. 
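+      # sequence_key (defined above) joins the output ids with ':', so the
+      # root path [0] is keyed "0" and, for example, a hypothetical path
+      # [0, 5, 12] would be keyed "0:5:12".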
+ decoding_graph.get_vertex(sequence_key([0])) + + # Create the initial vertices and edges for the active and finished + # sequences. We uniquely define each vertex using it's full sequence path + # as a string to ensure there's no collisions when the same step has two + # instances of an output id. + dump_dir = tfdbg.DebugDumpDir(run_dir, validate=False) + seq_datums = dump_dir.find(predicate=seq_filter) + for seq_datum in seq_datums: + sequences = np.array(seq_datum.get_tensor()).astype(int)[0] + if "alive" in seq_datum.node_name: + alive_sequences.append(sequences) + if "finished" in seq_datum.node_name: + finished_sequences.append(sequences) + + for sequence in sequences: + pieces = self.targets_vocab.decode_list(sequence) + index = sequence[-1] + if index == 0: + continue + + parent = decoding_graph.get_vertex(sequence_key(sequence[:-1])) + current = decoding_graph.get_vertex(sequence_key(sequence)) + + edge = decoding_graph.add_edge(parent, current) + edge.data["label"] = pieces[-1] + edge.data["label_id"] = index + # Coerce the type to be a python bool. Numpy bools can't be easily + # converted to JSON. + edge.data["completed"] = bool(index == 1) + + # Examine the score results and store the scores with the associated edges + # in the graph. We fetch the vertices (and relevant edges) by looking + # into the saved beam sequences stored above. + score_datums = dump_dir.find(predicate=scores_filter) + for score_datum in score_datums: + if "alive" in score_datum.node_name: + sequences = alive_sequences.popleft() + + if "finished" in score_datum.node_name: + sequences = finished_sequences.popleft() + + scores = np.array(score_datum.get_tensor()).astype(float)[0] + for i, score in enumerate(scores): + sequence = sequences[i] + if sequence[-1] == 0: + continue + + vertex = decoding_graph.get_vertex(sequence_key(sequence)) + edge = decoding_graph.edges[vertex.in_edges[0]] + edge.data["score"] = score + edge.data["log_probability"] = score + edge.data["total_log_probability"] = score + + # Delete the hook dir to save disk space + shutil.rmtree(hook_dir) + + # Create the graph visualization data structure. + graph_vis = { + "visualization_name": "graph", + "title": "Graph", + "name": "graph", + "search_graph": decoding_graph.to_dict(), + } + + # Create the processing visualization data structure. + # TODO(kstevens): Make this method public + # pylint: disable=protected-access + output_ids = decoding._save_until_eos(result["outputs"].flatten(), False) + output_pieces = self.targets_vocab.decode_list(output_ids) + output_token = [{"text": piece} for piece in output_pieces] + output = self.targets_vocab.decode(output_ids) + + source_steps = [{ + "step_name": "Initial", + "segment": [{ + "text": query + }], + }] + + target_steps = [{ + "step_name": "Initial", + "segment": output_token, + }, { + "step_name": "Final", + "segment": [{ + "text": output + }], + }] + + processing_vis = { + "visualization_name": "processing", + "title": "Processing", + "name": "processing", + "query_processing": { + "source_processing": source_steps, + "target_processing": target_steps, + }, + } + + return { + "result": [processing_vis, graph_vis], + } diff --git a/tensor2tensor/layers/__init__.py b/tensor2tensor/layers/__init__.py new file mode 100644 index 000000000..ff174dd63 --- /dev/null +++ b/tensor2tensor/layers/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tensor2tensor/layers/area_attention.py b/tensor2tensor/layers/area_attention.py new file mode 100644 index 000000000..88ced00f2 --- /dev/null +++ b/tensor2tensor/layers/area_attention.py @@ -0,0 +1,433 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for area attention.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from six.moves import range # pylint: disable=redefined-builtin +from tensor2tensor.layers import common_layers +import tensorflow.compat.v1 as tf + + +def lengths_to_area_mask(feature_length, length, max_area_size): + """Generates a non-padding mask for areas based on lengths. + + Args: + feature_length: a tensor of [batch_size] + length: the length of the batch + max_area_size: the maximum area size considered + Returns: + mask: a tensor in shape of [batch_size, num_areas] + """ + + paddings = tf.cast(tf.expand_dims( + tf.logical_not( + tf.sequence_mask(feature_length, maxlen=length)), 2), tf.float32) + _, _, area_sum, _, _ = compute_area_features(paddings, + max_area_width=max_area_size) + mask = tf.squeeze(tf.logical_not(tf.cast(area_sum, tf.bool)), [2]) + return mask + + +def _pool_one_shape(features_2d, area_width, area_height, batch_size, + width, height, depth, fn=tf.reduce_max, name=None): + """Pools for an area in features_2d. + + Args: + features_2d: a Tensor in a shape of [batch_size, height, width, depth]. + area_width: the max width allowed for an area. + area_height: the max height allowed for an area. + batch_size: the batch size. + width: the width of the memory. + height: the height of the memory. + depth: the depth of the features. + fn: the TF function for the pooling. + name: the op name. 
+ Returns: + pool_tensor: A Tensor of shape [batch_size, num_areas, depth] + """ + with tf.name_scope(name, default_name="pool_one_shape"): + images = [] + for y_shift in range(area_height): + image_height = tf.maximum(height - area_height + 1 + y_shift, 0) + for x_shift in range(area_width): + image_width = tf.maximum(width - area_width + 1 + x_shift, 0) + area = features_2d[:, y_shift:image_height, x_shift:image_width, :] + flatten_area = tf.reshape(area, [batch_size, -1, depth, 1]) + images.append(flatten_area) + image_tensor = tf.concat(images, axis=3) + max_tensor = fn(image_tensor, axis=3) + return max_tensor + + +def basic_pool(features, max_area_width, max_area_height=1, height=1, + fn=tf.reduce_max, name=None): + """Pools for each area based on a given pooling function (fn). + + Args: + features: a Tensor in a shape of [batch_size, height * width, depth]. + max_area_width: the max width allowed for an area. + max_area_height: the max height allowed for an area. + height: the height of the image. + fn: the TF function for the pooling. + name: the namescope. + Returns: + pool_results: A Tensor of shape [batch_size, num_areas, depth] + area_heights: A Tensor of shape [batch_size, num_areas, 1] + area_widths: A Tensor of shape [batch_size, num_areas, 1] + """ + with tf.name_scope(name, default_name="basic_pool"): + feature_shape = common_layers.shape_list(features) + batch_size = feature_shape[0] + length = feature_shape[-2] + depth = feature_shape[-1] + width = length // height + features_2d = tf.reshape(features, [batch_size, height, width, depth]) + height_list = [] + width_list = [] + pool_list = [] + size_tensor = tf.ones_like(features_2d[:, :, :, 0], dtype=tf.int32) + for area_height in range(max_area_height): + for area_width in range(max_area_width): + pool_tensor = _pool_one_shape(features_2d, + area_width=area_width + 1, + area_height=area_height + 1, + batch_size=batch_size, + width=width, + height=height, + depth=depth, + fn=fn) + pool_list.append( + tf.reshape(pool_tensor, [batch_size, -1, depth])) + height_list.append( + tf.reshape( + size_tensor[:, area_height:, area_width:] *\ + (area_height + 1), [batch_size, -1])) + width_list.append( + tf.reshape( + size_tensor[:, area_height:, area_width:] *\ + (area_width + 1), [batch_size, -1])) + pool_results = tf.concat(pool_list, axis=1) + area_heights = tf.expand_dims(tf.concat(height_list, axis=1), 2) + area_widths = tf.expand_dims(tf.concat(width_list, axis=1), 2) + return pool_results, area_heights, area_widths + + +def _compute_sum_image(features, max_area_width, max_area_height=1, height=1, + name=None): + """Computes area sums for features. + + Args: + features: a Tensor in a shape of [batch_size, height * width, depth]. + max_area_width: the max width allowed for an area. + max_area_height: the max height allowed for an area. + height: the height of the image. + name: the namescope. 
+ Returns: + sum_image: A Tensor of shape [batch_size, num_areas, depth] + area_heights: A Tensor of shape [batch_size, num_areas, 1] + area_widths: A Tensor of shape [batch_size, num_areas, 1] + """ + with tf.name_scope(name, default_name="compute_sum_image"): + feature_shape = common_layers.shape_list(features) + batch_size = feature_shape[0] + length = feature_shape[-2] + depth = feature_shape[-1] + width = length // height + features_2d = tf.reshape(features, [batch_size, height, width, depth]) + width_cum = tf.cumsum(features_2d, axis=-2, name="compute_integral_h") + integral_image = tf.cumsum(width_cum, axis=-3, name="compute_integral_v") + padded_image = tf.pad( + integral_image, [[0, 0], [1, 0], [1, 0], [0, 0]], constant_values=0) + height_list = [] + width_list = [] + dst_images = [] + src_images_diag = [] + src_images_h = [] + src_images_v = [] + size_tensor = tf.ones_like(padded_image[:, :, :, 0], + dtype=tf.int32) + for area_height in range(max_area_height): + for area_width in range(max_area_width): + dst_images.append( + tf.reshape( + padded_image[:, area_height + 1:, area_width + 1:, :], + [batch_size, -1, depth])) + src_images_diag.append( + tf.reshape( + padded_image[:, :-area_height - 1, :-area_width - 1, :], + [batch_size, -1, depth])) + src_images_h.append( + tf.reshape( + padded_image[:, area_height + 1:, :-area_width - 1, :], + [batch_size, -1, depth])) + src_images_v.append( + tf.reshape( + padded_image[:, :-area_height - 1, area_width + 1:, :], + [batch_size, -1, depth])) + height_list.append( + tf.reshape( + size_tensor[:, area_height + 1:, area_width + 1:] *\ + (area_height + 1), [batch_size, -1])) + width_list.append( + tf.reshape( + size_tensor[:, area_height + 1:, area_width + 1:] *\ + (area_width + 1), [batch_size, -1])) + sum_image = tf.subtract( + tf.concat(dst_images, axis=1) + tf.concat(src_images_diag, axis=1), + tf.concat(src_images_v, axis=1) + tf.concat(src_images_h, axis=1)) + area_heights = tf.expand_dims(tf.concat(height_list, axis=1), 2) + area_widths = tf.expand_dims(tf.concat(width_list, axis=1), 2) + return sum_image, area_heights, area_widths + + +def compute_area_features(features, max_area_width, max_area_height=1, height=1, + epsilon=1e-6): + """Computes features for each area. + + Args: + features: a Tensor in a shape of [batch_size, height * width, depth]. + max_area_width: the max width allowed for an area. + max_area_height: the max height allowed for an area. + height: the height of the image. + epsilon: the epsilon added to the variance for computing standard deviation. 
+ Returns: + area_mean: A Tensor of shape [batch_size, num_areas, depth] + area_std: A Tensor of shape [batch_size, num_areas, depth] + area_sum: A Tensor of shape [batch_size, num_areas, depth] + area_heights: A Tensor of shape [batch_size, num_areas, 1] + area_widths: A Tensor of shape [batch_size, num_areas, 1] + """ + with tf.name_scope("compute_area_features"): + tf.logging.info("area_attention compute_area_features: %d x %d", + max_area_height, max_area_width) + area_sum, area_heights, area_widths = _compute_sum_image( + features, max_area_width=max_area_width, + max_area_height=max_area_height, height=height) + area_squared_sum, _, _ = _compute_sum_image( + tf.pow(features, 2), max_area_width=max_area_width, + max_area_height=max_area_height, height=height) + sizes = tf.multiply(area_heights, area_widths) + float_area_sizes = tf.to_float(sizes) + area_mean = tf.div(area_sum, float_area_sizes) + s2_n = tf.div(area_squared_sum, float_area_sizes) + area_variance = tf.subtract(s2_n, tf.pow(area_mean, 2)) + area_std = tf.sqrt(tf.abs(area_variance) + epsilon) + return area_mean, area_std, area_sum, area_heights, area_widths + + +def compute_area_key(features, max_area_width, max_area_height=1, height=1, + mode="mean", training=True, name=None): + """Computes the key for each area. + + Args: + features: a Tensor in a shape of [batch_size, height * width, depth]. + max_area_width: the max width allowed for an area. + max_area_height: the max height allowed for an area. + height: the height of the image. + mode: whether to combine different area features or only use + the vector mean of each area, which can be "mean", "concat", "sum", + "sample_concat", and "sample_sum". + training: indicating if it is in the training mode. + name: the name for setting the variable scope. 
+ Returns: + area_key: a Tensor in the shape of [batch_size, num_areas, depth] + """ + + tf.logging.info("area_attention mode=%s", mode) + area_mean, area_std, _, area_heights, area_widths =\ + compute_area_features(features, max_area_width=max_area_width, + max_area_height=max_area_height, height=height) + if mode == "mean": + return area_mean + elif mode == "max": + area_max, _, _ = basic_pool(features, max_area_width=max_area_width, + max_area_height=max_area_height, height=height) + return area_max + elif mode == "sample": + if training: + area_mean += (area_std * tf.random_normal(tf.shape(area_std))) + return area_mean + with tf.variable_scope( + name, default_name="combine_area_features", + values=[area_mean, area_std, area_heights, area_widths]): + depth = common_layers.shape_list(area_mean)[-1] + height_embed = tf.nn.embedding_lookup( + params=tf.get_variable("area_height_emb", + [max_area_height, depth // 2]), + ids=area_heights[:, :, 0] - 1) + width_embed = tf.nn.embedding_lookup( + params=tf.get_variable("area_width_emb", + [max_area_width, depth // 2]), + ids=area_widths[:, :, 0] - 1) + size_embed = tf.concat([height_embed, width_embed], -1) + if mode == "concat": + feature_concat = tf.concat([area_mean, area_std, size_embed], -1) + elif mode == "max_concat": + area_max, _, _ = basic_pool(features, max_area_width=max_area_width, + max_area_height=max_area_height, + height=height) + feature_concat = tf.concat([area_max, size_embed], -1) + elif mode == "sum": + feature_concat = size_embed + area_mean + area_std + elif mode == "sample_concat": + if training: + area_mean += (area_std * tf.random_normal(tf.shape(area_std))) + feature_concat = tf.concat([area_mean, size_embed], -1) + elif mode == "sample_sum": + if training: + area_mean += (area_std * tf.random_normal(tf.shape(area_std))) + feature_concat = area_mean + size_embed + else: + raise ValueError("Unsupported area key mode=%s" % mode) + feature_hidden = tf.layers.dense(inputs=feature_concat, + units=depth, + activation=tf.nn.relu) + area_key = tf.layers.dense(feature_hidden, units=depth) + return area_key + + +def dot_product_area_attention(q, + k, + v, + bias, + dropout_rate=0.0, + image_shapes=None, + name=None, + attention_image_summary=None, + save_weights_to=None, + dropout_broadcast_dims=None, + max_area_width=1, + max_area_height=1, + memory_height=1, + area_key_mode="mean", + area_value_mode="sum", + top_k_areas=0, + area_temperature=1.0, + training=True): + """Dot-product area attention. + + Args: + q: Tensor with shape [..., length_q, depth_k]. + k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must + match with q. + v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must + match with q. + bias: bias Tensor (see attention_bias()) + dropout_rate: a float. + image_shapes: optional tuple of integer scalars. + see comments for attention_image_summary() + name: an optional string + attention_image_summary: the callback for making image summary of attention. + save_weights_to: an optional dictionary to capture attention weights + for visualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + dropout_broadcast_dims: an optional list of integers less than rank of q. + Specifies in which dimensions to broadcast the dropout decisions. + max_area_width: the max width allowed for an area. + max_area_height: the max height allowed for an area. + memory_height: the height of the memory. 
+ area_key_mode: the mode for computing area keys, which can be "mean", + "concat", "sum", "sample_concat", and "sample_sum". + area_value_mode: the mode for computing area values, which can be either + "mean", or "sum". + top_k_areas: Use the top key areas for attention. + area_temperature: the temperature for attention softmax. + training: indicating if it is in the training mode. + Returns: + Tensor with shape [..., length_q, depth_v]. + """ + + tf.logging.info("dot_product_area_attention: " + "area_h=%d, area_w=%d, mem_h=%d, " + "area_key_mode=%s, area_value_mode=%s, " + "area_temperature=%f", + max_area_height, max_area_width, memory_height, + area_key_mode, area_value_mode, + area_temperature) + with tf.variable_scope( + name, default_name="dot_product_area_attention", + values=[q, k, v]) as scope: + mem_shape = common_layers.shape_list(k) + batch_size = mem_shape[0] + head_size = mem_shape[1] + length = mem_shape[2] + depth = mem_shape[3] + k_area = compute_area_key( + tf.reshape(k, [-1, length, depth]), + max_area_width=max_area_width, + max_area_height=max_area_height, + height=memory_height, + mode=area_key_mode, + training=training) + if area_value_mode == "mean": + v_area, _, _, _, _ = compute_area_features( + tf.reshape(v, [-1, length, depth]), max_area_width=max_area_width, + max_area_height=max_area_height, height=memory_height) + elif area_value_mode == "max": + v_area, _, _ = basic_pool(tf.reshape(v, [-1, length, depth]), + max_area_width=max_area_width, + max_area_height=max_area_height, + height=memory_height, + fn=tf.reduce_max) + elif area_value_mode == "sum": + _, _, v_area, _, _ = compute_area_features( + tf.reshape(v, [-1, length, depth]), max_area_width=max_area_width, + max_area_height=max_area_height, height=memory_height) + else: + raise ValueError("Unsupported area value mode=%s" % area_value_mode) + k = tf.reshape(k_area, [batch_size, head_size, -1, depth]) + v = tf.reshape(v_area, [batch_size, head_size, -1, depth]) + logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv] + if bias is not None: + bias = common_layers.cast_like(bias, logits) + with tf.name_scope("compute_area_att_bias", values=[bias]): + bias_shape = common_layers.shape_list(bias) + mem_length = bias_shape[-1] + bias_values = tf.reshape( + tf.to_float(tf.less(bias, -1)), [-1, mem_length, 1]) + _, _, padding_sum, _, _ = compute_area_features( + bias_values, max_area_width=max_area_width, + max_area_height=max_area_height, height=memory_height) + bias = tf.where( + tf.cast(tf.to_int32(padding_sum), tf.bool), + tf.fill(tf.shape(padding_sum), -np.inf), + tf.zeros_like(padding_sum, dtype=tf.float32)) + bias = tf.reshape(bias, + [bias_shape[0], bias_shape[1], + bias_shape[2], -1]) + logits += bias + logits = logits / area_temperature + weights = tf.nn.softmax(logits, name="attention_weights") + if top_k_areas > 0: + tf.logging.info("area_attention top_k_areas=%d", top_k_areas) + top_k = tf.minimum(common_layers.shape_list(weights)[-1], top_k_areas) + top_weights, _ = tf.nn.top_k(weights, k=top_k) + min_values = tf.reduce_min(top_weights, -1, keepdims=True) + weights = tf.where(tf.greater_equal(weights, min_values), + weights, tf.zeros_like(weights)) + weights = tf.div(weights, tf.reduce_sum(weights, -1, keepdims=True)) + if save_weights_to is not None: + save_weights_to[scope.name] = weights + save_weights_to[scope.name + "/logits"] = logits + # Drop out attention links for each head. 
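+    # (dropout_with_broadcast_dims applies dropout with keep probability
+    # 1.0 - dropout_rate, sharing one random mask across the axes listed in
+    # dropout_broadcast_dims instead of sampling a mask per element there.)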
+ weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + if common_layers.should_generate_summaries() and attention_image_summary: + attention_image_summary(weights, image_shapes) + return tf.matmul(weights, v) diff --git a/tensor2tensor/layers/area_attention_test.py b/tensor2tensor/layers/area_attention_test.py new file mode 100644 index 000000000..dfc13eb73 --- /dev/null +++ b/tensor2tensor/layers/area_attention_test.py @@ -0,0 +1,276 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for area attention.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np +from tensor2tensor.layers import area_attention +import tensorflow.compat.v1 as tf + + +class AreaAttentionTest(parameterized.TestCase, tf.test.TestCase): + + def testComputeAreaFeatures1D(self): + features = tf.constant([[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]], + [[1.1, 2.1], [3.1, 4.1], [5.1, 6.1], [7.1, 8.1], + [9.1, 10.1]]], + dtype=tf.float32) + area_mean, area_std, area_sum, area_height, area_widths = ( + area_attention.compute_area_features(features, max_area_width=3, + epsilon=0.)) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + res1, res2, res3, res4, res5 = session.run([area_mean, area_std, area_sum, + area_height, area_widths]) + self.assertAllClose(((((1, 2), (3, 4), (5, 6), (7, 8), (9, 10), + (2, 3), (4, 5), (6, 7), (8, 9), + (3, 4), (5, 6), (7, 8)), + ((1.1, 2.1), (3.1, 4.1), (5.1, 6.1), (7.1, 8.1), + (9.1, 10.1), + (2.1, 3.1), (4.1, 5.1), (6.1, 7.1), (8.1, 9.1), + (3.1, 4.1), (5.1, 6.1), (7.1, 8.1)))), + res1, + msg="mean_1d") + expected_std = np.array([[[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], + [1, 1], [1, 1], [1, 1], [1, 1], + [1.63299, 1.63299], [1.63299, 1.63299], + [1.63299, 1.63299]], + [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], + [1, 1], [1, 1], [1, 1], [1, 1], + [1.63299, 1.63299], [1.63299, 1.63299], + [1.63299, 1.63299]]]) + self.assertAllClose(expected_std, res2, atol=1e-2, msg="std_1d") + self.assertAllClose([[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], + [4, 6], [8, 10], [12, 14], [16, 18], + [9, 12], [15, 18], [21, 24]], + [[1.1, 2.1], [3.1, 4.1], [5.1, 6.1], [7.1, 8.1], + [9.1, 10.1], + [4.2, 6.2], [8.2, 10.2], [12.2, 14.2], [16.2, 18.2], + [9.3, 12.3], [15.3, 18.3], [21.3, 24.3]]], + res3, + msg="sum_1d") + self.assertAllEqual([[[1], [1], [1], [1], [1], + [1], [1], [1], [1], + [1], [1], [1]], + [[1], [1], [1], [1], [1], + [1], [1], [1], [1], + [1], [1], [1]]], + res4, + msg="height_1d") + self.assertAllEqual([[[1], [1], [1], [1], [1], + [2], [2], [2], [2], + [3], [3], [3]], + [[1], [1], [1], [1], [1], + [2], [2], [2], [2], + [3], [3], [3]]], + res5, + msg="width_1d") + + def testComputeAreaFeatures2D(self): + features = tf.constant([[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], + [[1.1, 2.1], [3.1, 
4.1], [5.1, 6.1], [7.1, 8.1], + [9.1, 10.1], [11.1, 12.1]]], + dtype=tf.float32) + area_mean, area_std, area_sum, area_height, area_widths = ( + area_attention.compute_area_features(features, max_area_width=3, + max_area_height=2, + height=2, epsilon=0.)) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + res1, _, res3, res4, res5 = session.run([area_mean, area_std, area_sum, + area_height, area_widths]) + expected_means = [[[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], + [2, 3], [4, 5], [8, 9], [10, 11], + [3, 4], [9, 10], + [4, 5], [6, 7], [8, 9], + [5, 6], [7, 8], + [6, 7]], + [[1.1, 2.1], [3.1, 4.1], [5.1, 6.1], [7.1, 8.1], + [9.1, 10.1], [11.1, 12.1], + [2.1, 3.1], [4.1, 5.1], [8.1, 9.1], [10.1, 11.1], + [3.1, 4.1], [9.1, 10.1], + [4.1, 5.1], [6.1, 7.1], [8.1, 9.1], + [5.1, 6.1], [7.1, 8.1], + [6.1, 7.1]]] + self.assertAllClose(expected_means, res1, msg="mean_1d") + expected_heights = [[[1], [1], [1], [1], [1], [1], + # 1x2 + [1], [1], [1], [1], + # 1x3 + [1], [1], + # 2x1 + [2], [2], [2], + # 2x2 + [2], [2], + # 2x3 + [2]], + [[1], [1], [1], [1], [1], [1], + # 1x2 + [1], [1], [1], [1], + # 1x3 + [1], [1], + # 2x1 + [2], [2], [2], + # 2x2 + [2], [2], + # 2x3 + [2]]] + self.assertAllEqual(expected_heights, res4, msg="height_1d") + expected_widths = [[[1], [1], [1], [1], [1], [1], + # 1x2 + [2], [2], [2], [2], + # 1x3 + [3], [3], + # 2x1 + [1], [1], [1], + # 2x2 + [2], [2], + # 2x3 + [3]], + [[1], [1], [1], [1], [1], [1], + # 1x2 + [2], [2], [2], [2], + # 1x3 + [3], [3], + # 2x1 + [1], [1], [1], + # 2x2 + [2], [2], + # 2x3 + [3]]] + self.assertAllEqual(expected_widths, res5, msg="width_1d") + sizes = np.multiply(np.array(expected_heights), np.array(expected_widths)) + expected_sums = np.multiply(np.array(expected_means), sizes) + self.assertAllClose(expected_sums, res3, msg="sum_1d") + + def testAreaMean(self): + batch_size = 256 + feature_len = 100 + memory_height = 10 + heads = 2 + key_len = 2 + depth = 128 + max_area_height = 3 + max_area_width = 3 + queries = tf.random_uniform([batch_size, heads, key_len, depth], + minval=-10.0, maxval=10.0) + features = tf.random_uniform([batch_size, heads, feature_len, depth], + minval=-10.0, maxval=10.0) + target_values = tf.random_uniform([batch_size, heads, key_len, depth], + minval=-0.2, maxval=0.2) + keys = tf.layers.dense(features, units=depth) + values = tf.layers.dense(features, units=depth) + mean_attention = area_attention.dot_product_area_attention( + queries, keys, values, + bias=None, + area_key_mode="mean", + name="mean_key", + max_area_width=max_area_width, + max_area_height=max_area_height, + memory_height=memory_height) + mean_gradients = tf.gradients( + tf.reduce_mean( + tf.pow(target_values - mean_attention, 2)), features) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + result = session.run([mean_gradients]) + self.assertFalse(np.any(np.logical_not(np.isfinite(result)))) + + def test2DAreaMax(self): + batch_size = 256 + feature_len = 100 + memory_height = 10 + heads = 2 + key_len = 6 + depth = 128 + max_area_height = 3 + max_area_width = 3 + queries = tf.random_uniform([batch_size, heads, key_len, depth], + minval=-10.0, maxval=10.0) + features = tf.random_uniform([batch_size, heads, feature_len, depth], + minval=-10.0, maxval=10.0) + target_values = tf.random_uniform([batch_size, heads, key_len, depth], + minval=-0.2, maxval=0.2) + keys = tf.layers.dense(features, units=depth) + values = tf.layers.dense(features, units=depth) + max_attention = 
area_attention.dot_product_area_attention( + queries, keys, values, + bias=None, + area_key_mode="max", + area_value_mode="max", + name="max_key", + max_area_width=max_area_width, + max_area_height=max_area_height, + memory_height=memory_height) + max_gradients = tf.gradients(tf.reduce_mean( + tf.pow(target_values - max_attention, 2)), features) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + result1, result2 = session.run([max_gradients, max_attention]) + self.assertFalse(np.any(np.logical_not(np.isfinite(result1)))) + self.assertFalse(np.any(np.logical_not(np.isfinite(result2)))) + + def test1DAreaMax(self): + batch_size = 256 + feature_len = 100 + heads = 2 + key_len = 15 + depth = 128 + max_area_width = 3 + queries = tf.random_uniform([batch_size, heads, key_len, depth], + minval=-10.0, maxval=10.0) + features = tf.random_uniform([batch_size, heads, feature_len, depth], + minval=-10.0, maxval=10.0) + feature_length = tf.constant( + np.concatenate( + (np.random.randint(max_area_width, feature_len, [batch_size - 1]), + np.array([feature_len])), axis=0), tf.int32) + base_mask = tf.expand_dims(tf.sequence_mask(feature_length), 1) + mask = tf.expand_dims(base_mask, 3) + mask = tf.tile(mask, [1, heads, 1, depth]) + features = tf.where(mask, features, tf.zeros_like(features)) + # [batch, 1, 1, memory_length] + bias_mask = tf.expand_dims(base_mask, 1) + bias = tf.where( + bias_mask, + tf.zeros_like(bias_mask, tf.float32), + tf.ones_like(bias_mask, tf.float32) * -1e9) + target_values = tf.random_uniform([batch_size, heads, key_len, depth], + minval=-0.2, maxval=0.2) + keys = tf.layers.dense(features, units=depth) + values = tf.layers.dense(features, units=depth) + max_attention = area_attention.dot_product_area_attention( + queries, keys, values, + bias=bias, + area_key_mode="max", + area_value_mode="max", + name="max_key", + max_area_width=max_area_width) + max_gradients = tf.gradients( + tf.reduce_mean( + tf.pow(target_values - max_attention, 2)), features) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + result1, result2 = session.run([max_gradients, max_attention]) + self.assertFalse(np.any(np.logical_not(np.isfinite(result1)))) + self.assertFalse(np.any(np.logical_not(np.isfinite(result2)))) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/layers/common_attention.py b/tensor2tensor/layers/common_attention.py new file mode 100644 index 000000000..11ce57fcf --- /dev/null +++ b/tensor2tensor/layers/common_attention.py @@ -0,0 +1,6233 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Utilities for attention.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import functools +import itertools +import math +import operator + +import numpy as np + +from six.moves import range # pylint: disable=redefined-builtin +from six.moves import zip # pylint: disable=redefined-builtin + +from tensor2tensor.layers import area_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import contrib +from tensor2tensor.utils import expert_utils + +import tensorflow.compat.v1 as tf +import tensorflow_probability as tfp + +# pylint: disable=g-direct-tensorflow-import +from tensorflow.python.framework import function +from tensorflow.python.ops import inplace_ops +# pylint: enable=g-direct-tensorflow-import + + +# TODO(lukaszkaiser): remove this function when not needed any more. +def layers(): + return common_layers.layers() + + +def large_compatible_negative(tensor_type): + """Large negative number as Tensor. + + This function is necessary because the standard value for epsilon + in this module (-1e9) cannot be represented using tf.float16 + + Args: + tensor_type: a dtype to determine the type. + + Returns: + a large negative number. + """ + if tensor_type == tf.float16: + return tf.float16.min + return -1e9 + + +def mixed_precision_is_enabled( + activation_dtype=None, weight_dtype=None, hparams=None): + assert not (hparams and (activation_dtype or weight_dtype)), ( + "Provide only hparams or activation_dtype and weight_dtype") + if (hparams and hasattr(hparams, "activation_dtype") and + hasattr(hparams, "weight_dtype")): + activation_dtype = hparams.activation_dtype + weight_dtype = hparams.weight_dtype + return activation_dtype == tf.float16 and weight_dtype == tf.float32 + + +def maybe_upcast(logits, + activation_dtype=None, weight_dtype=None, hparams=None): + if mixed_precision_is_enabled(activation_dtype, weight_dtype, hparams): + return tf.cast(logits, tf.float32) + return logits + + +# Struct containing the sequences ids and order on a batch (are send to the +# expert to allow them to compute the bias mask) +BatchInfo = collections.namedtuple("BatchInfo", "coordinates, order") + +_expert_count = 0 + + +def get_standardized_layers(hparams, dp=None): + """Get the common attention and feed-forward layers. + + The returned layer functions will have the following signature: + + y, extra_loss = fct(x) + + extra_loss is set to 0.0 if the layer doesn't have extra loss. + If dp is provided, the layers will be distributed within the devices. + If moe wants to be used, both dp and model need to be set. + + Args: + hparams (tf.HParams): the model hparameters + dp (expert_utils.Parallelism): A data parallelism object. If not given, + the dp calls are simply ignored. + + Returns: + dict[str:fct]: A dictionary containing the standardized functions + """ + + def partial(fct, *args, **kwargs): + """Same as functools.partial but with functools.wraps.""" + return functools.wraps(fct)(functools.partial(fct, *args, **kwargs)) + + def register_layer( + fct_in, + default_args=None, + default_kwargs=None, + use_dp=True, + recompute_grad=False, + ): + """Turn a function into its standardized version. + + Args: + fct_in (fct): The function to register + default_args (list): The default parameters to add to the function. + default_kwargs (dict): The default parameters to add to the function. + Those arguments can be overwritten when calling the function. 
+ use_dp (bool): Wrap the function call within a dataparallelism object if + dp is available. Some layers (like MOE) must be called without dp. + recompute_grad (bool): If True, recompute the function during the + backward pass to save memory + + Returns: + fct: the standardized layer function. + """ + # The kwargs given when calling the function overwrite the default ones + fct_in = partial(fct_in, *(default_args or []), **(default_kwargs or {})) + + @functools.wraps(fct_in) + def decorator(x, *args, **kwargs): + """Call the layer function.""" + fct = fct_in # For closure. Could use nonlocal with Python 3 + # Eventually create the memory optimized version of the function + if recompute_grad: + fct = partial(fct, **kwargs) # recompute_grad only accept args + fct = common_layers.recompute_grad(fct) + kwargs = {} + + # Eventually use dp (if given and not MoE) + if use_dp and dp is not None: + y = dp(fct, x, *args, **kwargs) + else: + y = fct(x, *args, **kwargs) + + # Eventually capture the extra loss + extra_loss = 0.0 + if isinstance(y, tuple): + y, extra_loss = y + + return y, extra_loss + + return decorator + + total_key_depth = hparams.attention_key_channels or hparams.hidden_size + total_value_depth = hparams.attention_value_channels or hparams.hidden_size + + # Attention layers: + + # === Multi-head full attention layer === + multihead_attention_fn = register_layer( + multihead_attention, + default_kwargs=dict( + memory_antecedent=None, # Self-attention by default + bias=None, + total_key_depth=total_key_depth, + total_value_depth=total_value_depth, + output_depth=hparams.hidden_size, + num_heads=hparams.num_heads, + dropout_rate=hparams.attention_dropout, + )) + + # === Memory efficient full-attention layer === + # Save memory by not storing the activations and + # recomputing them during the backward pass + memeff_attention_base_fn = register_layer( + multihead_attention, + default_kwargs=dict( + total_key_depth=total_key_depth, + total_value_depth=total_value_depth, + output_depth=hparams.hidden_size, + num_heads=hparams.num_heads, + dropout_rate=hparams.attention_dropout, + ), + recompute_grad=True, + ) + + def memeff_attention_fn(*args, **kwargs): + """Modify args/kwargs for compatibility with recompute_grad.""" + kwargs = kwargs.copy() + assert len(args) == 1 + x = args[0] + memory_antecedent = kwargs.pop("memory_antecedent", x) # Same as x if None + if kwargs.get("bias", None) is not None: # Case where bias has been set + args = (x, memory_antecedent, kwargs.pop("bias")) + else: + # Otherwise, only 2 args. This is necessary as recompute_grad does not + # support None values. + args = (x, memory_antecedent) + return memeff_attention_base_fn(*args, **kwargs) + + # === Local attention (unmasked) layer === + # Reuse same parameters as multihead_attention + # Don't mask the future + local_attention_fn = partial( + multihead_attention_fn, + block_length=hparams.attention_loc_block_length, + block_width=hparams.attention_loc_block_width, + attention_type="local_unmasked", + ) + + # === Local attention (masked) layer === + # Reuse same parameters as multihead_attention + # Only works for self attention. Always mask the future. + local_attention_masked_fn = partial( + multihead_attention_fn, + block_length=hparams.attention_loc_block_length, + attention_type="local_mask_right", + ) + + # === Masked memory-compressed multihead self attention layer === + # Only works for self attention. Always mask the future. 
+ compressed_attention_masked_fn = register_layer( + multihead_self_attention_reduced, + default_kwargs=dict( + factor=hparams.attention_red_factor, + nonlinearity=hparams.attention_red_nonlinearity, + reduction_type=hparams.attention_red_type, + multihead_params=dict( + total_key_depth=total_key_depth, + total_value_depth=total_value_depth, + num_heads=hparams.num_heads, + dropout_rate=hparams.attention_dropout, + ), + ), + ) + + # === Unmasked memory-compressed multihead self attention layer === + # Only works for self attention. Never mask the future. Bias never added + compressed_attention_fn = partial( + compressed_attention_masked_fn, + add_mask=False, + ) + + # Feed-forwards layers: + + # === FC layer === + conv_hidden_relu = register_layer( + common_layers.conv_hidden_relu, + default_kwargs=dict( + hidden_size=hparams.filter_size, + output_size=hparams.hidden_size, + dropout=hparams.relu_dropout, + ), + ) + + # === Separable convolution layer === + # No mask applied + sep_conv_relu = partial( + conv_hidden_relu, + padding="SAME", + # Parameters copied from the transformer model, could add hparams + kernel_size=(3, 1), + second_kernel_size=(31, 1), + ) + + # === Separable convolution layer (masked version) === + # Mask the future + sep_conv_relu_masked = partial( + sep_conv_relu, + padding="LEFT", # Mask future for decoder + ) + + # Define all available layers + + cur_layers = dict( + # Attention layers: + a=multihead_attention_fn, # Multihead full attention + loc=local_attention_fn, # Local attention + locm=local_attention_masked_fn, # Local attention (masked) + red=compressed_attention_fn, # Memory-compressed attention + redm=compressed_attention_masked_fn, # Memory-compressed att (masked) + mem=memeff_attention_fn, # Memory efficient + # Feed-forward layers: + fc=conv_hidden_relu, # Fully connected + sep=sep_conv_relu, # Separable convolution (unmasked) + sepm=sep_conv_relu_masked, # Separable convolution (masked) + ) + return cur_layers + + +def add_standard_attention_hparams(hparams): + """Adds the hparams used by get_standardized_layers.""" + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. + + # hparams used and which should have been defined outside (in + # common_hparams): + # Global flags + # hparams.mode + # hparams.hidden_size + # Pre-post processing flags + # hparams.layer_preprocess_sequence + # hparams.layer_postprocess_sequence + # hparams.layer_prepostprocess_dropout + # hparams.norm_type + # hparams.norm_epsilon + # Mixture-of-Expert flags + # hparams.moe_hidden_sizes + # hparams.moe_num_experts + # hparams.moe_k + # hparams.moe_loss_coef + + # Attention layers flags + hparams.add_hparam("num_heads", 8) + hparams.add_hparam("attention_key_channels", 0) + hparams.add_hparam("attention_value_channels", 0) + hparams.add_hparam("attention_dropout", 0.0) + # Attention: Local + hparams.add_hparam("attention_loc_block_length", 256) + # Attention: Local (unmasked only): How much to look left. + hparams.add_hparam("attention_loc_block_width", 128) + # Attention: Memory-compressed + hparams.add_hparam("attention_red_factor", 3) + hparams.add_hparam("attention_red_type", "conv") + hparams.add_hparam("attention_red_nonlinearity", "none") + + # Fully connected layers flags + # To be more consistent, should use filter_size to also control the MOE + # size if moe_hidden_sizes not set. 
+ hparams.add_hparam("filter_size", 2048) + hparams.add_hparam("relu_dropout", 0.0) + + return hparams + + +def encoder_decoder_attention_loss(expected_attention_logits, + actual_attentions, + loss_type="kl_divergence", + loss_multiplier=1.0): + """Computes encdec attention loss between expected and actual attentions. + + Args: + expected_attention_logits: Tensor storing the expected encoder-decoder + attention logits with shape [batch_size, target_length, input_length]. + actual_attentions: Dictionary with actual attention logits for different + attention types and hidden layers. + loss_type: type of the loss function. + loss_multiplier: multiplier for the attention loss. + + Returns: + KL_divergence loss between the actual and expected attention logits. + """ + + def combine_attentions(attention_list): + """Combine different layer attentions and then average over layers/heads.""" + # Stack all hidden layer attention tensors to get a tensor with shape + # [num_hidden_layers, batch_size, num_heads, target_length, input_length]. + attentions = tf.stack(attention_list) + # Reduce mean across all layers (axis=0) and all heads (axis=2) to get a + # tensor with shape [batch_size, target_length, input_length]. + return tf.reduce_mean(attentions, [0, 2]) + + def kl_divergence_loss(expected_logits, actual_logits): + p = tfp.distributions.Categorical(logits=expected_logits) + q = tfp.distributions.Categorical(logits=actual_logits) + return tfp.distributions.kl_divergence(p, q) + + def mse_loss(expected_logits, actual_weights): + expected_weights = tf.nn.softmax(expected_logits) + return tf.losses.mean_squared_error(expected_weights, actual_weights) + + # For each hidden layer, we have attention-logit and attention-weight tensors + # with shape [batch_size, num_heads, target_length, input_length]. + loss = 0.0 + if loss_type == "mse": + actual_encdec_attention_weights = [ + t for layer_key, t in actual_attentions.items() + if "encdec_attention" in layer_key and not layer_key.endswith("/logits") + ] + actual_attention_weights = combine_attentions( + actual_encdec_attention_weights) + loss = mse_loss(expected_attention_logits, actual_attention_weights) + else: + actual_encdec_attention_logits = [ + t for layer_key, t in actual_attentions.items() + if "encdec_attention" in layer_key and layer_key.endswith("/logits") + ] + actual_attention_logits = combine_attentions(actual_encdec_attention_logits) + loss = kl_divergence_loss(expected_attention_logits, + actual_attention_logits) + return loss * loss_multiplier + + +@expert_utils.add_name_scope() +def get_timing_signal_1d(length, + channels, + min_timescale=1.0, + max_timescale=1.0e4, + start_index=0): + """Gets a bunch of sinusoids of different frequencies. + + Each channel of the input Tensor is incremented by a sinusoid of a different + frequency and phase. + + This allows attention to learn to use absolute and relative positions. + Timing signals should be added to some precursors of both the query and the + memory inputs to attention. + + The use of relative position is possible because sin(x+y) and cos(x+y) can be + expressed in terms of y, sin(x) and cos(x). + + In particular, we use a geometric sequence of timescales starting with + min_timescale and ending with max_timescale. The number of different + timescales is equal to channels / 2. For each timescale, we + generate the two sinusoidal signals sin(timestep/timescale) and + cos(timestep/timescale). All of these sinusoids are concatenated in + the channels dimension. 
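As a worked illustration of the geometric-timescale construction described above, here is a small NumPy sketch (illustration only, not the library implementation; it follows the docstring's shapes):

```python
import numpy as np

def timing_signal_1d(length, channels, min_timescale=1.0, max_timescale=1.0e4):
    # Geometric sequence of channels // 2 timescales; sin then cos, padded to
    # `channels` and reshaped to [1, length, channels] as described above.
    position = np.arange(length, dtype=np.float32)
    num_timescales = channels // 2
    log_increment = (np.log(max_timescale / min_timescale)
                     / max(num_timescales - 1, 1))
    inv_timescales = min_timescale * np.exp(
        -log_increment * np.arange(num_timescales, dtype=np.float32))
    scaled_time = position[:, None] * inv_timescales[None, :]
    signal = np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)
    signal = np.pad(signal, [(0, 0), (0, channels % 2)])
    return signal.reshape(1, length, channels)

print(timing_signal_1d(length=5, channels=8).shape)  # (1, 5, 8)
```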
+ + Args: + length: scalar, length of timing signal sequence. + channels: scalar, size of timing embeddings to create. The number of + different timescales is equal to channels / 2. + min_timescale: a float + max_timescale: a float + start_index: index of first position + + Returns: + a Tensor of timing signals [1, length, channels] + """ + position = tf.to_float(tf.range(length) + start_index) + num_timescales = channels // 2 + log_timescale_increment = ( + math.log(float(max_timescale) / float(min_timescale)) / + tf.maximum(tf.to_float(num_timescales) - 1, 1)) + inv_timescales = min_timescale * tf.exp( + tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) + scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0) + # Please note that this slightly differs from the published paper. + # See a discussion here: https://github.com/tensorflow/tensor2tensor/pull/177 + signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) + signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]]) + signal = tf.reshape(signal, [1, length, channels]) + return signal + + +@expert_utils.add_name_scope() +def add_timing_signal_1d(x, + min_timescale=1.0, + max_timescale=1.0e4, + start_index=0): + """Adds a bunch of sinusoids of different frequencies to a Tensor. + + Each channel of the input Tensor is incremented by a sinusoid of a different + frequency and phase. + + This allows attention to learn to use absolute and relative positions. + Timing signals should be added to some precursors of both the query and the + memory inputs to attention. + + The use of relative position is possible because sin(x+y) and cos(x+y) can be + expressed in terms of y, sin(x) and cos(x). + + In particular, we use a geometric sequence of timescales starting with + min_timescale and ending with max_timescale. The number of different + timescales is equal to channels / 2. For each timescale, we + generate the two sinusoidal signals sin(timestep/timescale) and + cos(timestep/timescale). All of these sinusoids are concatenated in + the channels dimension. + + Args: + x: a Tensor with shape [batch, length, channels] + min_timescale: a float + max_timescale: a float + start_index: index of first position + + Returns: + a Tensor the same shape as x. + """ + length = common_layers.shape_list(x)[1] + channels = common_layers.shape_list(x)[2] + signal = get_timing_signal_1d(length, channels, min_timescale, max_timescale, + start_index) + return x + common_layers.cast_like(signal, x) + + +@expert_utils.add_name_scope() +def get_layer_timing_signal_learned_1d(channels, layer, num_layers): + """get n-dimensional embedding as the layer (vertical) timing signal. + + Adds embeddings to represent the position of the layer in the tower. + + Args: + channels: dimension of the timing signal + layer: layer num + num_layers: total number of layers + + Returns: + a Tensor of timing signals [1, 1, channels]. + """ + shape = [num_layers, 1, 1, channels] + layer_embedding = ( + tf.get_variable( + "layer_embedding", + shape, + initializer=tf.random_normal_initializer(0, channels**-0.5)) * + (channels**0.5)) + return layer_embedding[layer, :, :, :] + + +@expert_utils.add_name_scope() +def add_layer_timing_signal_learned_1d(x, layer, num_layers): + """Add n-dimensional embedding as the layer (vertical) timing signal. + + Adds embeddings to represent the position of the layer in the tower. 
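The docstrings above note that relative offsets are usable because sin(x+y) and cos(x+y) decompose in terms of y, sin(x) and cos(x). A quick NumPy check of that property for one timescale:

```python
import numpy as np

# For a single timescale w, the signal at position p + k is a rotation of the
# signal at position p, and the rotation depends only on the offset k.
w, p, k = 0.01, 37.0, 5.0
rotation = np.array([[np.cos(k * w), np.sin(k * w)],
                     [-np.sin(k * w), np.cos(k * w)]])
shifted = np.array([np.sin((p + k) * w), np.cos((p + k) * w)])
rotated = rotation @ np.array([np.sin(p * w), np.cos(p * w)])
print(np.allclose(shifted, rotated))  # True
```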
+ + Args: + x: a tensor with shape [batch, length, depth] + layer: layer num + num_layers: total number of layers + + Returns: + a Tensor the same shape as x. + """ + channels = common_layers.shape_list(x)[-1] + signal = get_layer_timing_signal_learned_1d(channels, layer, num_layers) + x += signal + return x + + +@expert_utils.add_name_scope() +def get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers): + """Add sinusoids of different frequencies as layer (vertical) timing signal. + + Args: + channels: dimension of the timing signal + layer: layer num + num_layers: total number of layers + + Returns: + a Tensor of timing signals [1, 1, channels]. + """ + + signal = get_timing_signal_1d(num_layers, channels) + layer_signal = tf.expand_dims(signal[:, layer, :], axis=1) + + return layer_signal + + +@expert_utils.add_name_scope() +def add_layer_timing_signal_sinusoid_1d(x, layer, num_layers): + """Add sinusoids of different frequencies as layer (vertical) timing signal. + + Args: + x: a Tensor with shape [batch, length, channels] + layer: layer num + num_layers: total number of layers + + Returns: + a Tensor the same shape as x. + """ + + channels = common_layers.shape_list(x)[-1] + signal = get_layer_timing_signal_sinusoid_1d(channels, layer, num_layers) + + return x + signal + + +@expert_utils.add_name_scope() +def add_timing_signals_given_positions(x, + positions, + min_timescale=1.0, + max_timescale=1.0e4): + """Adds sinusoids of diff frequencies to a Tensor, with timing positions given. + + Args: + x: a Tensor with shape [batch, length, channels] + positions: a list of positions, each of which can either be a Tensor of + shape [batch, length] or None for a default of (0..length] + min_timescale: a float + max_timescale: a float + + Returns: + a Tensor the same shape as x. + """ + shape = common_layers.shape_list(x) + batch = shape[0] + length = shape[1] + channels = shape[2] + num_dims = len(positions) + num_timescales = channels // (num_dims * 2) + log_timescale_increment = ( + math.log(float(max_timescale) / float(min_timescale)) / + (tf.to_float(num_timescales) - 1)) + inv_timescales = min_timescale * tf.exp( + tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) + for dim, position in enumerate(positions): + if position is None: + # Create a [batch, length] Tensor of incrementing positions 0..length-1. + position = tf.tile( + tf.transpose(tf.expand_dims(tf.range(0, length), axis=1)), [batch, 1]) + scaled_time = ( + tf.expand_dims(tf.to_float(position), 2) * + tf.expand_dims(tf.expand_dims(inv_timescales, 0), 0)) + signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2) + prepad = dim * 2 * num_timescales + postpad = channels - (dim + 1) * 2 * num_timescales + signal = tf.pad(signal, [[0, 0], [0, 0], [prepad, postpad]]) + signal = common_layers.cast_like(signal, x) + x += signal + return x + + +@expert_utils.add_name_scope() +def add_timing_signals_from_features(x, + features, + position_features, + min_timescale=1.0, + max_timescale=1.0e4): + """Adds timing signals from features named in `position_features`. + + Args: + x: a Tensor with shape [batch, length, channels] + features: a features dictionary + position_features: a comma-delimited string where each item is either a + feature key or the empty string (which denotes a default position tensor + of [0..length]) + min_timescale: a float + max_timescale: a float + + Returns: + a Tensor the same shape as x. 
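`add_timing_signals_given_positions` above splits the channel budget evenly across the position dimensions; a small arithmetic illustration of the `prepad`/`postpad` slices, with hypothetical sizes:

```python
channels, num_dims = 16, 2
num_timescales = channels // (num_dims * 2)  # 4 timescales per dimension
for dim in range(num_dims):
    prepad = dim * 2 * num_timescales
    postpad = channels - (dim + 1) * 2 * num_timescales
    # Each dimension's sin/cos pair occupies its own disjoint channel slice.
    print(dim, prepad, postpad)  # 0 -> (0, 8), 1 -> (8, 0)
```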
+ """ + return add_timing_signals_given_positions(x, [ + features.get(position_feature) + for position_feature in position_features.split(",") + ], min_timescale, max_timescale) + + +@expert_utils.add_name_scope() +def add_timing_signal_1d_given_position(x, + position, + min_timescale=1.0, + max_timescale=1.0e4): + """Adds sinusoids of diff frequencies to a Tensor, with timing position given. + + Args: + x: a Tensor with shape [batch, length, channels] + position: a Tensor with shape [batch, length] + min_timescale: a float + max_timescale: a float + + Returns: + a Tensor the same shape as x. + """ + channels = common_layers.shape_list(x)[2] + num_timescales = channels // 2 + log_timescale_increment = ( + math.log(float(max_timescale) / float(min_timescale)) / + (tf.to_float(num_timescales) - 1)) + inv_timescales = min_timescale * tf.exp( + tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) + scaled_time = ( + tf.expand_dims(tf.to_float(position), 2) * tf.expand_dims( + tf.expand_dims(inv_timescales, 0), 0)) + signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=2) + signal = tf.pad(signal, [[0, 0], [0, 0], [0, tf.mod(channels, 2)]]) + signal = common_layers.cast_like(signal, x) + return x + signal + + +@expert_utils.add_name_scope() +def add_timing_signal_nd(x, min_timescale=1.0, max_timescale=1.0e4): + """Adds a bunch of sinusoids of different frequencies to a Tensor. + + Each channel of the input Tensor is incremented by a sinusoid of a different + frequency and phase in one of the positional dimensions. + + This allows attention to learn to use absolute and relative positions. + Timing signals should be added to some precursors of both the query and the + memory inputs to attention. + + The use of relative position is possible because sin(a+b) and cos(a+b) can be + expressed in terms of b, sin(a) and cos(a). + + x is a Tensor with n "positional" dimensions, e.g. one dimension for a + sequence or two dimensions for an image + + We use a geometric sequence of timescales starting with + min_timescale and ending with max_timescale. The number of different + timescales is equal to channels // (n * 2). For each timescale, we + generate the two sinusoidal signals sin(timestep/timescale) and + cos(timestep/timescale). All of these sinusoids are concatenated in + the channels dimension. + + Args: + x: a Tensor with shape [batch, d1 ... dn, channels] + min_timescale: a float + max_timescale: a float + + Returns: + a Tensor the same shape as x. 
+ """ + num_dims = len(x.get_shape().as_list()) - 2 + channels = common_layers.shape_list(x)[-1] + num_timescales = channels // (num_dims * 2) + log_timescale_increment = ( + math.log(float(max_timescale) / float(min_timescale)) / + (tf.to_float(num_timescales) - 1)) + inv_timescales = min_timescale * tf.exp( + tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) + for dim in range(num_dims): + length = common_layers.shape_list(x)[dim + 1] + position = tf.to_float(tf.range(length)) + scaled_time = tf.expand_dims(position, 1) * tf.expand_dims( + inv_timescales, 0) + signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) + prepad = dim * 2 * num_timescales + postpad = channels - (dim + 1) * 2 * num_timescales + signal = tf.pad(signal, [[0, 0], [prepad, postpad]]) + for _ in range(1 + dim): + signal = tf.expand_dims(signal, 0) + for _ in range(num_dims - 1 - dim): + signal = tf.expand_dims(signal, -2) + x += signal + return x + + +def add_positional_embedding(x, max_length, name=None, positions=None): + """Adds positional embedding. + + Args: + x: Tensor with shape [batch, length, depth]. + max_length: int representing static maximum size of any dimension. + name: str representing name of the embedding tf.Variable. + positions: Tensor with shape [batch, length]. + + Returns: + Tensor of same shape as x. + """ + with tf.name_scope("add_positional_embedding"): + _, length, depth = common_layers.shape_list(x) + var = tf.cast(tf.get_variable(name, [max_length, depth]), x.dtype) + if positions is None: + pad_length = tf.maximum(0, length - max_length) + sliced = tf.cond( + tf.less(length, max_length), + lambda: tf.slice(var, [0, 0], [length, -1]), + lambda: tf.pad(var, [[0, pad_length], [0, 0]])) + return x + tf.expand_dims(sliced, 0) + else: + return x + tf.gather(var, tf.to_int32(positions)) + + +def add_positional_embedding_nd(x, max_length, name=None): + """Adds n-dimensional positional embedding. + + The embeddings add to all positional dimensions of the tensor. + + Args: + x: Tensor with shape [batch, p1 ... pn, depth]. It has n positional + dimensions, i.e., 1 for text, 2 for images, 3 for video, etc. + max_length: int representing static maximum size of any dimension. + name: str representing name of the embedding tf.Variable. + + Returns: + Tensor of same shape as x. + """ + with tf.name_scope("add_positional_embedding_nd"): + x_shape = common_layers.shape_list(x) + num_dims = len(x_shape) - 2 + depth = x_shape[-1] + base_shape = [1] * (num_dims + 1) + [depth] + base_start = [0] * (num_dims + 2) + base_size = [-1] + [1] * num_dims + [depth] + for i in range(num_dims): + shape = base_shape[:] + start = base_start[:] + size = base_size[:] + shape[i + 1] = max_length + size[i + 1] = x_shape[i + 1] + var = tf.get_variable( + name + "_%d" % i, + shape, + initializer=tf.random_normal_initializer(0, depth**-0.5)) + var = var * depth**0.5 + x += tf.slice(var, start, size) + return x + + +def make_edge_vectors(adjacency_matrix, num_edge_types, depth, name=None): + """Gets edge vectors for the edge types in the adjacency matrix. + + Args: + adjacency_matrix: A [batch, num_nodes, num_nodes] tensor of ints. 
+ num_edge_types: Number of different edge types + depth: Number of channels + name: a string + Returns: + A [batch, num_nodes, num_nodes, depth] vector of tensors + """ + with tf.variable_scope(name, default_name="edge_vectors"): + att_adj_vectors_shape = [num_edge_types, depth] + adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix) + adj_vectors = ( + tf.get_variable( + "adj_vectors", + att_adj_vectors_shape, + initializer=tf.random_normal_initializer(0, depth**-0.5)) * + (depth**0.5)) + # Avoiding gathers so that it works on TPUs + # adjacency_matrix_one_hot has shape + # [batch, num_nodes, num_nodes, num_edge_types] + + adjacency_matrix_one_hot = tf.one_hot(adjacency_matrix, num_edge_types) + + att_adj_vectors = tf.matmul( + tf.reshape(tf.to_float(adjacency_matrix_one_hot), [-1, num_edge_types]), + adj_vectors) + return tf.reshape(att_adj_vectors, + [adjacency_matrix_shape[0], adjacency_matrix_shape[1], + adjacency_matrix_shape[2], depth]) + + +class LshGating(object): + """Class to split key/queries into separate buckets.""" + + def __init__(self, depth, nb_hyperplanes, nb_replicat=1, trainable=False): + """Construct the gating function parameters. + + Compute the gates for a single head. + + Args: + depth (int): Dimension of the key/queries to dispatch + nb_hyperplanes (int): Nb of vectors use to split the space. Will determine + the number of buckets (2^nb_hyperplanes - 1). + nb_replicat (int): Redundancy to avoid the edge cases (to be in one bucket + the input should be in a majority) + trainable (bool): If True, a balance loss is added to force the hyperplane + to divide the key/query space evenly + """ + self.depth = depth + self.nb_hyperplanes = nb_hyperplanes + self.nb_buckets = 2**nb_hyperplanes + self.nb_replicat = nb_replicat # Unused for now + self.trainable = trainable # Unused for now + + self.dispatchers = {} + + assert self.nb_replicat == 1 # For now + + with tf.variable_scope("lsh_gating"): + # Vectors defining the hyperplanes + self.t_vectors = tf.get_variable( + "vector", + shape=(self.depth, self.nb_hyperplanes * self.nb_replicat), + dtype=tf.float32, + trainable=self.trainable, + ) + # Projection vector from the bit space to similarity score space + self.t_group = tf.constant( + [self._idx_to_bits(i) for i in range(self.nb_buckets)], + dtype=tf.float32, + name="group") + + def _idx_to_bits(self, i): + """Convert an group index to its bit representation.""" + bits = bin(i)[2:].zfill(self.nb_hyperplanes) # Pad the bits str with 0 + return [-1.0 if b == "0" else 1.0 for b in bits] + + @expert_utils.add_name_scope("lsh_gating") + def get_gates(self, x): + """Return the bucket id of the given tensor. + + Args: + x (tf.Tensor): float32 of shape [length, depth] + + Returns: + tf.Tensor: One-hot vector int64 of shape [heads, length, nb_buckets] + containing the id of the bucket + """ + + # The balance loss don't propagate to the rest of the network + x = tf.stop_gradient(x) + # [length, depth] * [depth, nb_vectors * replicat] + x = tf.matmul(x, self.t_vectors) + # [length, nb_vector * replicat] + x = tf.sign(x) # Get on which side of the hyperplane the keys are. 
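`make_edge_vectors` above replaces a gather with a one-hot matmul so the lookup stays TPU-friendly; a NumPy check that the two are equivalent:

```python
import numpy as np

num_edge_types, depth = 5, 3
table = np.random.randn(num_edge_types, depth).astype(np.float32)
edge_ids = np.array([[0, 2], [4, 1]])  # [batch, num_nodes]-style integer ids
one_hot = np.eye(num_edge_types, dtype=np.float32)[edge_ids]
via_matmul = (one_hot.reshape(-1, num_edge_types) @ table).reshape(
    edge_ids.shape + (depth,))
print(np.allclose(via_matmul, table[edge_ids]))  # True
```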
+ + # x = tf.reshape(x, [-1, nb_replicat, nb_vector]) + # [length, replicat, nb_vector] * [nb_vector, 2^nb_vector - 1] + + x = tf.matmul(x, self.t_group, transpose_b=True) / self.nb_hyperplanes + # We get a similarity score for each of the group between [-1, 1] + # [length, (replicat,) 2^nb_vector - 1] + # Do an argmax to get the most likely group for each replicat + x = tf.argmax(x, axis=-1) + # [length(, replicat)] + # One-hot for compatibility with the sparse dispatcher + x = tf.one_hot(x, self.nb_buckets) + # TODO(epot): Use a loss to force an even distribution + return x + + +@expert_utils.add_name_scope() +def embedding_to_padding(emb): + """Calculates the padding mask based on which embeddings are all zero. + + We have hacked symbol_modality to return all-zero embeddings for padding. + + Args: + emb: a Tensor with shape [..., depth]. + + Returns: + a float Tensor with shape [...]. Each element is 1 if its corresponding + embedding vector is all zero, and is 0 otherwise. + """ + emb_sum = tf.reduce_sum(tf.abs(emb), axis=-1) + return tf.to_float(tf.equal(emb_sum, 0.0)) + + +@expert_utils.add_name_scope() +def padding_to_length(padding): + """Calculate the length of mask based on padding. + + Args: + padding: a Tensor with shape [..., length]. + Returns: + a Tensor with shape [...]. + """ + non_padding = 1.0 - padding + return tf.to_int32(tf.reduce_sum(non_padding, axis=-1)) + + +@expert_utils.add_name_scope() +def attention_bias_local(length, max_backward, max_forward): + """Create an bias tensor to be added to attention logits. + + A position may attend to positions at most max_distance from it, + forward and backwards. + + This does not actually save any computation. + + Args: + length: int + max_backward: int, maximum distance backward to attend. Negative values + indicate unlimited. + max_forward: int, maximum distance forward to attend. Negative values + indicate unlimited. + + Returns: + a `Tensor` with shape [1, 1, length, length]. + """ + band = common_layers.ones_matrix_band_part( + length, + length, + max_backward, + max_forward, + out_shape=[1, 1, length, length]) + return -1e9 * (1.0 - band) + + +@expert_utils.add_name_scope() +def attention_bias_lower_triangle(length): + """Create an bias tensor to be added to attention logits. + + Allows a query to attend to all positions up to and including its own. + + Args: + length: a Scalar. + + Returns: + a `Tensor` with shape [1, 1, length, length]. + """ + return attention_bias_local(length, -1, 0) + + +@expert_utils.add_name_scope() +def attention_bias_same_segment(query_segment_id, memory_segment_id): + """Create an bias tensor to be added to attention logits. + + Positions with the same segment_ids can see each other. + + Args: + query_segment_id: a float `Tensor` with shape [batch, query_length]. + memory_segment_id: a float `Tensor` with shape [batch, memory_length]. + + Returns: + a `Tensor` with shape [batch, 1, query_length, memory_length]. + """ + ret = (tf.to_float( + tf.not_equal( + tf.expand_dims(query_segment_id, 2), + tf.expand_dims(memory_segment_id, 1))) * + large_compatible_negative(memory_segment_id.dtype)) + return tf.expand_dims(ret, axis=1) + + +@expert_utils.add_name_scope() +def attention_bias_ignore_padding(memory_padding): + """Create an bias tensor to be added to attention logits. + + Args: + memory_padding: a float `Tensor` with shape [batch, memory_length]. + + Returns: + a `Tensor` with shape [batch, 1, 1, memory_length]. 
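A NumPy sketch of the masked (lower-triangular) bias that `attention_bias_lower_triangle` above builds via `attention_bias_local(length, -1, 0)`:

```python
import numpy as np

def lower_triangle_bias(length, neg=-1e9):
    # 0 where a query may attend (its own and earlier positions),
    # a large negative value where attention should be blocked.
    band = np.tril(np.ones((length, length), dtype=np.float32))
    return (neg * (1.0 - band)).reshape(1, 1, length, length)

bias = lower_triangle_bias(3)[0, 0]
print((bias < 0).astype(int))
# [[0 1 1]
#  [0 0 1]
#  [0 0 0]]
```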
+ """ + ret = memory_padding * large_compatible_negative(memory_padding.dtype) + return tf.expand_dims(tf.expand_dims(ret, axis=1), axis=1) + + +@expert_utils.add_name_scope() +def attention_bias_to_padding(attention_bias, + cast_fn=(lambda x: tf.cast(x, tf.float32))): + """Inverse of attention_bias_ignore_padding(). + + Args: + attention_bias: a `Tensor` with shape [batch, 1, 1, memory_length], as + returned by attention_bias_ignore_padding(). + cast_fn: function used to cast to output type. + + Returns: + a Tensor with shape [batch, memory_length] with 1.0 in padding positions + and 0.0 in non-padding positions. Type is determined by cast_fn. + """ + # `attention_bias` is a large negative number in padding positions and 0.0 + # elsewhere. + return tf.squeeze(cast_fn(tf.less(attention_bias, -1)), axis=[1, 2]) + + +@expert_utils.add_name_scope() +def attention_bias_prepend_inputs_full_attention(padding): + """Create a bias tensor for prepend_mode="prepend_inputs_full_attention". + + See prepend_inputs in common_hparams.py. + + Produces a bias tensor to be used in self-attention. + + This bias tensor allows for full connectivity in the "inputs" part of + the sequence and masked connectivity in the targets part. + + Args: + padding: a float `Tensor` with shape [batch, length] with + ones in positions corresponding to padding. In each row, a single + padding position separates the input part from the target part. + + Returns: + a `Tensor` with shape [batch, 1, length, length]. + """ + # Everything past the first padding position is part of the target. + # This Tensor has zeros for the source portion and separator, + # and ones for the target portion. + in_target = tf.cumsum(padding, axis=1, exclusive=True) + # The position within the target, or 0 if part of the source. + target_pos = tf.cumsum(in_target, axis=1) + # A position with a lesser target_pos cannot see a position with greater + # target_pos. + illegal_connections = tf.greater( + tf.expand_dims(target_pos, 1), tf.expand_dims(target_pos, 2)) + bias = tf.to_float(illegal_connections) * -1e9 + bias = tf.expand_dims(bias, 1) + return bias + + +@expert_utils.add_name_scope() +def attention_bias_proximal(length): + """Bias for self-attention to encourage attention to close positions. + + Args: + length: an integer scalar. + + Returns: + a Tensor with shape [1, 1, length, length] + """ + r = tf.to_float(tf.range(length)) + diff = tf.expand_dims(r, 0) - tf.expand_dims(r, 1) + return tf.expand_dims(tf.expand_dims(-tf.log1p(tf.abs(diff)), 0), 0) + + +@expert_utils.add_name_scope() +def attention_bias_batch(batch_coordinates_q, + batch_coordinates_k=None, + condition_fn=None): + """Generate a mask to prevent the batch to attend to each others. + + Args: + batch_coordinates_q: Int-like Tensor of shape [length_q, 1] containing the + coordinates of the batches + batch_coordinates_k: Int-like Tensor of shape [length_k, 1] containing the + coordinates of the batches. If None, do self-attention. + condition_fn: Callable defining the attention mask. + + Returns: + Float-like Tensor of shape [length_q, length_k] containing either 0 or + -infinity (-1e9). + """ + if batch_coordinates_k is None: + batch_coordinates_k = batch_coordinates_q + + # Convert to float first because of b/25387198. + def to_float(bc): + bc = tf.squeeze(bc, 1) + bc = tf.to_float(bc) + return bc + + # Broadcast to create [length_q, length_k] mask. 
+ bc_v = tf.expand_dims(to_float(batch_coordinates_q), 1) + bc_h = tf.expand_dims(to_float(batch_coordinates_k), 0) + bias_batch = bc_h - bc_v + bias_batch = condition_fn(bias_batch) + bias_batch *= -1e9 + return bias_batch + + +# Mask to prevent individual sequences of the same batch to attend to each other +attention_bias_coordinates = functools.partial( + attention_bias_batch, + condition_fn=lambda bias: tf.minimum(1.0, tf.abs(bias)), +) + +# Mask similar to upper triangular mask, but allow dispatching +attention_bias_future = functools.partial( + attention_bias_batch, + # Elems can attend to themselves (otherwise would use bias_batch + 1.0). + # No tf.abs to consider the order, + # tf.maximum and tf.minimum to threshold the values. + condition_fn=lambda bias: tf.maximum(0.0, tf.minimum(1.0, bias)), +) + + +@expert_utils.add_name_scope() +def split_last_dimension(x, n): + """Reshape x so that the last dimension becomes two dimensions. + + The first of these two dimensions is n. + + Args: + x: a Tensor with shape [..., m] + n: an integer. + + Returns: + a Tensor with shape [..., n, m/n] + """ + x_shape = common_layers.shape_list(x) + m = x_shape[-1] + if isinstance(m, int) and isinstance(n, int): + assert m % n == 0 + return tf.reshape(x, x_shape[:-1] + [n, m // n]) + + +@expert_utils.add_name_scope() +def combine_last_two_dimensions(x): + """Reshape x so that the last two dimension become one. + + Args: + x: a Tensor with shape [..., a, b] + + Returns: + a Tensor with shape [..., ab] + """ + x_shape = common_layers.shape_list(x) + a, b = x_shape[-2:] + return tf.reshape(x, x_shape[:-2] + [a * b]) + + +@expert_utils.add_name_scope() +def combine_first_two_dimensions(x): + """Reshape x so that the first two dimension become one. + + Args: + x: a Tensor with shape [a, b, ...] + + Returns: + a Tensor with shape [ab, ...] + """ + ret = tf.reshape(x, tf.concat([[-1], common_layers.shape_list(x)[2:]], 0)) + old_shape = x.get_shape().dims + a, b = old_shape[:2] + new_shape = [a * b if a and b else None] + old_shape[2:] + ret.set_shape(new_shape) + return ret + + +@expert_utils.add_name_scope() +def split_heads(x, num_heads): + """Split channels (dimension 2) into multiple heads (becomes dimension 1). + + Args: + x: a Tensor with shape [batch, length, channels] + num_heads: an integer + + Returns: + a Tensor with shape [batch, num_heads, length, channels / num_heads] + """ + return tf.transpose(split_last_dimension(x, num_heads), [0, 2, 1, 3]) + + +@expert_utils.add_name_scope() +def split_heads_2d(x, num_heads): + """Split channels (dimension 3) into multiple heads (becomes dimension 1). + + Args: + x: a Tensor with shape [batch, height, width, channels] + num_heads: an integer + + Returns: + a Tensor with shape [batch, num_heads, height, width, channels / num_heads] + """ + return tf.transpose(split_last_dimension(x, num_heads), [0, 3, 1, 2, 4]) + + +def split_heads_nd(x, num_heads): + """Split the depth dimension (last dimension) into multiple heads. + + Args: + x: a [batch, d1, ..., dn, depth] tensor + num_heads: an integer + + Returns: + a [batch, num_heads, d1, ..., dn, depth // num_heads] + """ + num_dimensions = len(common_layers.shape_list(x)) - 2 + return tf.transpose( + split_last_dimension(x, num_heads), [0, num_dimensions + 1] + + list(range(1, num_dimensions + 1)) + [num_dimensions + 2]) + + +@expert_utils.add_name_scope() +def combine_heads(x): + """Inverse of split_heads. 
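The head splitting above is just a reshape plus transpose; a NumPy sketch of `split_heads` together with its inverse, showing the round trip:

```python
import numpy as np

def split_heads_np(x, num_heads):
    # [batch, length, channels] -> [batch, num_heads, length, channels / heads]
    batch, length, channels = x.shape
    return x.reshape(batch, length, num_heads,
                     channels // num_heads).transpose(0, 2, 1, 3)

def combine_heads_np(x):
    # Inverse of split_heads_np.
    batch, heads, length, depth = x.shape
    return x.transpose(0, 2, 1, 3).reshape(batch, length, heads * depth)

x = np.arange(2 * 3 * 8, dtype=np.float32).reshape(2, 3, 8)
print(np.array_equal(combine_heads_np(split_heads_np(x, 4)), x))  # True
```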
+ + Args: + x: a Tensor with shape [batch, num_heads, length, channels / num_heads] + + Returns: + a Tensor with shape [batch, length, channels] + """ + return combine_last_two_dimensions(tf.transpose(x, [0, 2, 1, 3])) + + +@expert_utils.add_name_scope() +def combine_heads_2d(x): + """Inverse of split_heads_2d. + + Args: + x: a Tensor with shape + [batch, num_heads, height, width, channels / num_heads] + + Returns: + a Tensor with shape [batch, height, width, channels] + """ + return combine_last_two_dimensions(tf.transpose(x, [0, 2, 3, 1, 4])) + + +def combine_heads_nd(x): + """Inverse of split_heads_nd. + + Args: + x: a [batch, num_heads, d1, ..., dn, depth // num_heads] tensor + + Returns: + a [batch, d1, ...., dn, depth] tensor + """ + num_dimensions = len(common_layers.shape_list(x)) - 3 + return combine_last_two_dimensions( + tf.transpose(x, [0] + list(range(2, num_dimensions + 2)) + + [1, num_dimensions + 2])) + + +def attention_image_summary(attn, image_shapes=None): + """Compute color image summary. + + Args: + attn: a Tensor with shape [batch, num_heads, query_length, memory_length] + image_shapes: optional tuple of integer scalars. + If the query positions and memory positions represent the + pixels of flattened images, then pass in their dimensions: + (query_rows, query_cols, memory_rows, memory_cols). + If the query positions and memory positions represent the + pixels x channels of flattened images, then pass in their dimensions: + (query_rows, query_cols, query_channels, + memory_rows, memory_cols, memory_channels). + """ + attn = tf.cast(attn, tf.float32) + num_heads = common_layers.shape_list(attn)[1] + # [batch, query_length, memory_length, num_heads] + image = tf.transpose(attn, [0, 2, 3, 1]) + image = tf.pow(image, 0.2) # for high-dynamic-range + # Each head will correspond to one of RGB. + # pad the heads to be a multiple of 3 + image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, tf.mod(-num_heads, 3)]]) + image = split_last_dimension(image, 3) + image = tf.reduce_max(image, 4) + if image_shapes is not None: + if len(image_shapes) == 4: + q_rows, q_cols, m_rows, m_cols = list(image_shapes) + image = tf.reshape(image, [-1, q_rows, q_cols, m_rows, m_cols, 3]) + image = tf.transpose(image, [0, 1, 3, 2, 4, 5]) + image = tf.reshape(image, [-1, q_rows * m_rows, q_cols * m_cols, 3]) + else: + assert len(image_shapes) == 6 + q_rows, q_cols, q_channnels, m_rows, m_cols, m_channels = list( + image_shapes) + image = tf.reshape( + image, + [-1, q_rows, q_cols, q_channnels, m_rows, m_cols, m_channels, 3]) + image = tf.transpose(image, [0, 1, 4, 3, 2, 5, 6, 7]) + image = tf.reshape( + image, + [-1, q_rows * m_rows * q_channnels, q_cols * m_cols * m_channels, 3]) + tf.summary.image("attention", image, max_outputs=1) + + +def grouped_attention_multihead(query_antecedent, + memory_antecedent, + total_key_depth, + total_value_depth, + output_depth, + num_heads, + num_groups, + memory_target_density=2.0, + multiplicative_overhead=1.25, + additive_overhead=8.0, + mask_right=False, + make_image_summary=True, + name=None): + """Multi-head dot-product attention with sparsity. + + For each attention head, the queries are partitioned into groups. + For each group, only a subset of the key-value pairs are considered. + + The choices of groups are selected based on trained predictors of + the total attention given the group inclusion. + + memory_target_density indicates the average how many groups in which + a key-value pair should participate. 
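`attention_image_summary` above maps heads onto RGB channels, padding the head dimension up to a multiple of 3 with `tf.mod(-num_heads, 3)`; the same arithmetic in plain Python:

```python
for num_heads in (4, 6, 8):
    pad = (-num_heads) % 3  # same value as tf.mod(-num_heads, 3)
    print(num_heads, pad, num_heads + pad)  # 4 -> 6, 6 -> 6, 8 -> 9 channels
```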
+ + We use auxiliary losses to ensure that each group contains roughly + the same number of queries and the same number of key-value pairs. + If for a given sequence, the actual number of queries/pairs sent to + an expert exceeds this target by a factor of more than + multiplicative_overhead, then the last ones are dropped. We use + this drop-last policy to avoid bleeding information backwards, which + is necessary when using this function with autoregressive + prediction. + + Args: + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: a Tensor with shape [batch, length_m, channels] + total_key_depth: an integer + total_value_depth: an integer + output_depth: an integer + num_heads: an integer dividing total_key_depth and total_value_depth + num_groups: an integer + memory_target_density: a floating point scalar + multiplicative_overhead: a floating point scalar + additive_overhead: a floating point scalar + mask_right: a boolean + make_image_summary: a boolean + name: an optional string + + Returns: + A Tensor with shape [batch, length_q, output_depth] + + Raises: + ValueError: if the key depth or value depth are not divisible by the + number of attention heads. + """ + batch = common_layers.shape_list(query_antecedent)[0] + length_q = common_layers.shape_list(query_antecedent)[1] + length_kv = common_layers.shape_list(memory_antecedent)[1] + + if total_key_depth % num_heads != 0: + raise ValueError("Key depth (%d) must be divisible by the number of " + "attention heads (%d)." % (total_key_depth, num_heads)) + depth_qk = total_key_depth // num_heads + if total_value_depth % num_heads != 0: + raise ValueError("Value depth (%d) must be divisible by the number of " + "attention heads (%d)." % (total_value_depth, num_heads)) + depth_v = total_value_depth // num_heads + with tf.variable_scope( + name, default_name="multihead_attention_sparse", + values=[query_antecedent, memory_antecedent]): + q = common_layers.dense( + query_antecedent, total_key_depth, use_bias=False, name="q_transform") + kv = common_layers.dense( + memory_antecedent, + total_key_depth + total_value_depth, + use_bias=False, + name="kv_transform") + q = split_heads(q, num_heads) + kv = split_heads(kv, num_heads) + # Make predictions about q_total and m_total. + # These are used to determine group inclusion. + # We will train these by auxiliary losses. We use stop_gradient here + # to keep these losses from back-propagating to the rest of the model. + # We add biases that help balance the usage of the experts. + q_pred = common_layers.dense( + tf.stop_gradient(query_antecedent), + num_heads * num_groups, + use_bias=False, + name="q_pred") + q_pred = split_heads(q_pred, num_heads) + q_bias = tf.get_variable("q_bias", [1, num_heads, 1, num_groups]) + q_pred_biased = q_pred + q_bias + m_pred = common_layers.dense( + tf.stop_gradient(memory_antecedent), + num_heads * num_groups, + use_bias=False, + name="m_pred") + m_pred = split_heads(m_pred, num_heads) + m_bias = tf.get_variable("m_bias", [1, num_heads, 1, num_groups]) + m_pred_biased = m_pred + m_bias + q *= depth_qk**-0.5 + # q, kv, q_pred, m_pred are all [batch, heads, length_[q/m], ?] + # now reshape them all to [batch * heads, length, ?] 
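The docstring above describes a drop-last policy in which each group budgets for its target load times `multiplicative_overhead` plus `additive_overhead`; a quick arithmetic illustration with hypothetical sizes:

```python
length_q, num_groups = 1024, 8
multiplicative_overhead, additive_overhead = 1.25, 8.0
q_group_target_size = length_q / num_groups               # 128.0 queries/group
capacity_q = min(length_q,
                 int(q_group_target_size * multiplicative_overhead
                     + additive_overhead))                 # 168 slots per group
print(q_group_target_size, capacity_q)
# Queries routed to a group beyond its capacity are dropped for that step.
```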
+ q = combine_first_two_dimensions(q) + kv = combine_first_two_dimensions(kv) + q_pred = combine_first_two_dimensions(q_pred) + m_pred = combine_first_two_dimensions(m_pred) + q_pred_biased = combine_first_two_dimensions(q_pred_biased) + m_pred_biased = combine_first_two_dimensions(m_pred_biased) + q_group = tf.argmax(q_pred_biased, axis=2) + q_requests = tf.one_hot(q_group, num_groups, axis=-1) + m_requests = tf.to_float(tf.greater(m_pred_biased, 0.0)) + # include first memory position in all groups, to avoid division by zero. + m_requests = tf.maximum( + m_requests, tf.reshape(tf.one_hot([0], length_kv), [1, length_kv, 1])) + q_group_size = tf.reduce_sum(q_requests, 1) + m_group_size = tf.reduce_sum(m_requests, 1) + q_group_target_size = tf.to_float(length_q) / tf.to_float(num_groups) + m_group_target_size = ( + tf.to_float(length_kv) * memory_target_density / + tf.to_float(num_groups)) + capacity_q = tf.minimum( + length_q, + tf.to_int32(q_group_target_size * multiplicative_overhead + + additive_overhead)) + capacity_m = tf.minimum( + length_kv, + tf.to_int32(m_group_target_size * multiplicative_overhead + + additive_overhead)) + q_dispatcher = expert_utils.TruncatingDispatcher(q_requests, capacity_q) + m_dispatcher = expert_utils.TruncatingDispatcher(m_requests, capacity_m) + q_gates = q_dispatcher.gates() + m_gates = m_dispatcher.gates() + dispatched_q = q_dispatcher.dispatch(q) + dispatched_kv = m_dispatcher.dispatch(kv) + # dispatched_q: [batch * num_heads, num_groups, capacity_q, depth_qk] + # dispatched_kv: + # [batch * num_heads, num_groups, capacity_m, depth_qk + depth_v] + k, v = tf.split(dispatched_kv, [depth_qk, depth_v], axis=3) + logits = tf.matmul(dispatched_q, k, transpose_b=True) + bias = tf.expand_dims((m_dispatcher.nonpadding() - 1.0) * 1e9, 2) + if mask_right: + q_coordinate = tf.to_float( + tf.expand_dims(q_dispatcher.length_coordinate(), 3)) + m_coordinate = tf.to_float( + tf.expand_dims(m_dispatcher.length_coordinate(), 2)) + bias += tf.to_float(tf.greater(m_coordinate, q_coordinate)) * -1e9 + logits += bias + log_weights = tf.nn.log_softmax(logits) + weights = tf.exp(log_weights) + # For each query, this is the log of the sum of the unnormalized weights. + q_total = tf.stop_gradient(logits[:, :, :, :1] - log_weights[:, :, :, :1]) + # For each key, this is the sum of the normalized weights. + m_total = tf.expand_dims( + tf.reduce_sum(tf.stop_gradient(weights), axis=2), -1) + o = tf.matmul(weights, v) + o = q_dispatcher.combine(o) + + o = tf.reshape(o, [batch, num_heads, length_q, depth_v]) + o = combine_heads(o) + o = common_layers.dense( + o, output_depth, use_bias=False, name="output_transform") + + m_total = m_dispatcher.combine(m_total) + q_total = q_dispatcher.combine(q_total) + q_total = tf.squeeze(q_total, -1) + m_total = tf.squeeze(m_total, -1) + # Compute summed m predictions for all groups + m_pred_used = tf.reduce_sum(tf.exp(m_pred) * m_dispatcher.gates(), axis=2) + q_pred_used = tf.reduce_sum(q_pred * q_dispatcher.gates(), axis=2) + epsilon = 1e-3 + m_pred_used = tf.log(m_pred_used + epsilon) + m_total = tf.log(m_total + epsilon) + m_loss = tf.nn.l2_loss(m_total - m_pred_used) + q_loss = tf.nn.l2_loss( + (q_total - q_pred_used) * tf.reduce_sum(q_gates, axis=2)) + + q_loss /= tf.to_float(batch * length_q) + m_loss /= tf.to_float(batch * length_kv) + + # We would like the query groups to be equal sized. The group + # size is discrete, so we need some trick here. 
We add a loss + # proportional to the product of the group size and the + # predictions for that group. This encourages the predictions to + # decrease for groups that are too big. + q_group_deviation = (q_group_size / q_group_target_size) - 1.0 + q_balance_loss = tf.reduce_sum( + tf.reduce_mean(q_pred_biased, axis=1) * + q_group_deviation) / tf.to_float(batch) + m_group_deviation = (m_group_size / m_group_target_size) - 1.0 + m_balance_loss = tf.reduce_sum( + tf.reduce_mean(m_pred_biased, axis=1) * + m_group_deviation) / tf.to_float(batch) + + # The losses in this function only propagate back to variables + # defined in this function, and the losses outside of this + # function only propagate back to variables outside of this + # function. Assuming some kind of adaptive learning algorithm, + # it should not matter how much we scale the losses in this function. + # Still we scale them down a lot so that they should not show up + # much in the overall loss for the model. + extra_loss_multiplier = 1e-3 + extra_loss = q_loss + m_loss + q_balance_loss + m_balance_loss + extra_loss *= extra_loss_multiplier + + # Show a bunch of summaries. + if common_layers.should_generate_summaries() and make_image_summary: + tf.summary.histogram("q_group_size", q_group_size) + tf.summary.histogram("m_group_size", m_group_size) + tf.summary.scalar("q_loss", q_loss) + tf.summary.scalar("m_loss", m_loss) + tf.summary.scalar("q_balance_loss", q_balance_loss) + tf.summary.scalar("m_balance_loss", m_balance_loss) + tf.summary.histogram("m_pred_used", m_pred_used) + tf.summary.histogram("m_total", m_total) + tf.summary.histogram("q_pred_used", q_pred_used) + tf.summary.histogram("q_total", q_total) + if make_image_summary: + # image summaries are expensive. + # So we restrict them to head_num<4, query_position<512, batch_index=0. + trunc_heads = min(4, num_heads) + trunc_length_q = tf.minimum(length_q, 512) + # We recompute the attention for the first example, in an inefficient + # way - masking. This lets us show pretty pictures. + # [trunc_heads, length_q, group] + q_gates_trunc = q_gates[:trunc_heads, :trunc_length_q, :] + # [trunc_heads, length_kv, group] + m_gates_trunc = m_gates[:trunc_heads, :, :] + grouping_mask = tf.matmul( + q_gates_trunc, m_gates_trunc, transpose_b=True) + q_trunc = q[:trunc_heads, :trunc_length_q, :] + k_trunc = kv[:trunc_heads, :, :depth_qk] + logits_trunc = tf.matmul(q_trunc, k_trunc, transpose_b=True) + if mask_right: + band = common_layers.ones_matrix_band_part(trunc_length_q, length_kv, + -1, 0) + trunc_bias = tf.expand_dims((1.0 - band) * -1e9, 0) + logits_trunc += trunc_bias + att_trunc = tf.nn.softmax(logits_trunc) + mask_coverage = tf.reduce_sum(grouping_mask * att_trunc) / ( + tf.to_float(trunc_length_q) * trunc_heads) + tf.summary.scalar("coverage", mask_coverage) + att_trunc_hdr = tf.pow(att_trunc, 0.2) # for high-dynamic-range + mask_channel = grouping_mask * tf.maximum(att_trunc_hdr, 0.3) + image = tf.stack([att_trunc_hdr, mask_channel, mask_channel], axis=3) + tf.summary.image("att", image, max_outputs=trunc_heads) + # show one group for each head. 
+ att_per_group = tf.expand_dims(weights[:trunc_heads, 0, :, :], -1) + tf.summary.image( + "att_per_group_%d", + tf.pow(att_per_group, 0.2), + max_outputs=trunc_heads) + return o, extra_loss + + +def harden_attention_weights(weights, k, gumbel_noise_weight): + """Make attention weights non-0 only on the top k ones.""" + if gumbel_noise_weight > 0.: + gumbel_noise = -tf.log(-tf.log(tf.random_uniform(tf.shape(weights), + minval=1e-5, + maxval=1 - 1e-5))) + weights += gumbel_noise * gumbel_noise_weight + + # Subtract the top-kth weight and zero-out all lower ones. + # Note that currently in case of numerical ties it will retain more + # than k elements. In the future, we may want to avoid this. + weights -= common_layers.top_kth_iterative(weights, k) + weights = tf.nn.relu(weights) + # Re-normalize the weights. + weights_sum = tf.reduce_sum(weights, axis=-1, keep_dims=True) + weights_sum = tf.maximum(weights_sum, 1e-6) # Avoid division by 0. + weights /= weights_sum + return weights + + +def dot_product_attention(q, + k, + v, + bias, + dropout_rate=0.0, + image_shapes=None, + name=None, + make_image_summary=True, + save_weights_to=None, + dropout_broadcast_dims=None, + activation_dtype=None, + weight_dtype=None, + hard_attention_k=0, + gumbel_noise_weight=0.0): + """Dot-product attention. + + Args: + q: Tensor with shape [..., length_q, depth_k]. + k: Tensor with shape [..., length_kv, depth_k]. Leading dimensions must + match with q. + v: Tensor with shape [..., length_kv, depth_v] Leading dimensions must + match with q. + bias: bias Tensor (see attention_bias()) + dropout_rate: a float. + image_shapes: optional tuple of integer scalars. + see comments for attention_image_summary() + name: an optional string + make_image_summary: True if you want an image summary. + save_weights_to: an optional dictionary to capture attention weights + for visualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + dropout_broadcast_dims: an optional list of integers less than rank of q. + Specifies in which dimensions to broadcast the dropout decisions. + activation_dtype: Used to define function activation dtype when using + mixed precision. + weight_dtype: The dtype weights are stored in when using mixed precision + hard_attention_k: integer, if > 0 triggers hard attention (picking top-k) + gumbel_noise_weight: if > 0, apply Gumbel noise with weight + `gumbel_noise_weight` before picking top-k. This is a no op if + hard_attention_k <= 0. + + Returns: + Tensor with shape [..., length_q, depth_v]. + """ + with tf.variable_scope( + name, default_name="dot_product_attention", values=[q, k, v]) as scope: + logits = tf.matmul(q, k, transpose_b=True) # [..., length_q, length_kv] + if bias is not None: + bias = common_layers.cast_like(bias, logits) + logits += bias + # If logits are fp16, upcast before softmax + logits = maybe_upcast(logits, activation_dtype, weight_dtype) + weights = tf.nn.softmax(logits, name="attention_weights") + if hard_attention_k > 0: + weights = harden_attention_weights(weights, hard_attention_k, + gumbel_noise_weight) + weights = common_layers.cast_like(weights, q) + if save_weights_to is not None: + save_weights_to[scope.name] = weights + save_weights_to[scope.name + "/logits"] = logits + # Drop out attention links for each head. 
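For reference, the core computation of `dot_product_attention` (before dropout and summaries) is just `softmax(q k^T + bias) v`; a NumPy sketch with hypothetical shapes:

```python
import numpy as np

def dot_product_attention_np(q, k, v, bias=None):
    # softmax(q k^T + bias) v over the last two axes, as in the layer above.
    logits = q @ np.swapaxes(k, -1, -2)                    # [..., length_q, length_kv]
    if bias is not None:
        logits = logits + bias
    logits = logits - logits.max(axis=-1, keepdims=True)   # stable softmax
    weights = np.exp(logits)
    weights = weights / weights.sum(axis=-1, keepdims=True)
    return weights @ v                                     # [..., length_q, depth_v]

q = np.random.randn(1, 2, 4, 8)    # [batch, heads, length_q, depth_k]
k = np.random.randn(1, 2, 6, 8)    # [batch, heads, length_kv, depth_k]
v = np.random.randn(1, 2, 6, 16)   # [batch, heads, length_kv, depth_v]
print(dot_product_attention_np(q, k, v).shape)  # (1, 2, 4, 16)
```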
+ weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + if common_layers.should_generate_summaries() and make_image_summary: + attention_image_summary(weights, image_shapes) + return tf.matmul(weights, v) + + +def _generate_relative_positions_matrix(length_q, length_k, + max_relative_position, + cache=False): + """Generates matrix of relative positions between inputs.""" + if not cache: + if length_q == length_k: + range_vec_q = range_vec_k = tf.range(length_q) + else: + range_vec_k = tf.range(length_k) + range_vec_q = range_vec_k[-length_q:] + distance_mat = range_vec_k[None, :] - range_vec_q[:, None] + else: + distance_mat = tf.expand_dims(tf.range(-length_k+1, 1, 1), 0) + distance_mat_clipped = tf.clip_by_value(distance_mat, -max_relative_position, + max_relative_position) + # Shift values to be >= 0. Each integer still uniquely identifies a relative + # position difference. + final_mat = distance_mat_clipped + max_relative_position + return final_mat + + +def _generate_relative_positions_embeddings(length_q, length_k, depth, + max_relative_position, name, + cache=False): + """Generates tensor of size [1 if cache else length_q, length_k, depth].""" + with tf.variable_scope(name): + relative_positions_matrix = _generate_relative_positions_matrix( + length_q, length_k, max_relative_position, cache=cache) + vocab_size = max_relative_position * 2 + 1 + # Generates embedding for each relative position of dimension depth. + embeddings_table = tf.get_variable("embeddings", [vocab_size, depth]) + embeddings = tf.gather(embeddings_table, relative_positions_matrix) + return embeddings + + +def _relative_attention_inner(x, y, z, transpose): + """Relative position-aware dot-product attention inner calculation. + + This batches matrix multiply calculations to avoid unnecessary broadcasting. + + Args: + x: Tensor with shape [batch_size, heads, length or 1, length or depth]. + y: Tensor with shape [batch_size, heads, length or 1, depth]. + z: Tensor with shape [length or 1, length, depth]. + transpose: Whether to transpose inner matrices of y and z. Should be true if + last dimension of x is depth, not length. + + Returns: + A Tensor with shape [batch_size, heads, length, length or depth]. + """ + batch_size = tf.shape(x)[0] + heads = x.get_shape().as_list()[1] + length = tf.shape(x)[2] + + # xy_matmul is [batch_size, heads, length or 1, length or depth] + xy_matmul = tf.matmul(x, y, transpose_b=transpose) + # x_t is [length or 1, batch_size, heads, length or depth] + x_t = tf.transpose(x, [2, 0, 1, 3]) + # x_t_r is [length or 1, batch_size * heads, length or depth] + x_t_r = tf.reshape(x_t, [length, heads * batch_size, -1]) + # x_tz_matmul is [length or 1, batch_size * heads, length or depth] + x_tz_matmul = tf.matmul(x_t_r, z, transpose_b=transpose) + # x_tz_matmul_r is [length or 1, batch_size, heads, length or depth] + x_tz_matmul_r = tf.reshape(x_tz_matmul, [length, batch_size, heads, -1]) + # x_tz_matmul_r_t is [batch_size, heads, length or 1, length or depth] + x_tz_matmul_r_t = tf.transpose(x_tz_matmul_r, [1, 2, 0, 3]) + return xy_matmul + x_tz_matmul_r_t + + +def dot_product_attention_relative(q, + k, + v, + bias, + max_relative_position, + dropout_rate=0.0, + image_shapes=None, + save_weights_to=None, + name=None, + make_image_summary=True, + cache=False, + allow_memory=False, + hard_attention_k=0, + gumbel_noise_weight=0.0): + """Calculate relative position-aware dot-product self-attention. 
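A NumPy sketch of `_generate_relative_positions_matrix` above for the self-attention (non-cache) case, showing the clip-and-shift into the range [0, 2 * max_relative_position]:

```python
import numpy as np

def relative_positions_matrix(length, max_relative_position):
    # Clipped pairwise distances, shifted so every entry is a non-negative
    # index into the relative-embedding table.
    r = np.arange(length)
    distance = r[None, :] - r[:, None]
    clipped = np.clip(distance, -max_relative_position, max_relative_position)
    return clipped + max_relative_position

print(relative_positions_matrix(4, 2))
# [[2 3 4 4]
#  [1 2 3 4]
#  [0 1 2 3]
#  [0 0 1 2]]
```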
+ + The attention calculation is augmented with learned representations for the + relative position between each element in q and each element in k and v. + + Args: + q: a Tensor with shape [batch, heads, length, depth]. + k: a Tensor with shape [batch, heads, length, depth]. + v: a Tensor with shape [batch, heads, length, depth]. + bias: bias Tensor. + max_relative_position: an integer specifying the maximum distance between + inputs that unique position embeddings should be learned for. + dropout_rate: a floating point number. + image_shapes: optional tuple of integer scalars. + save_weights_to: an optional dictionary to capture attention weights + for visualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + name: an optional string. + make_image_summary: Whether to make an attention image summary. + cache: whether use cache mode + allow_memory: whether to assume that recurrent memory is in use. If True, + the length dimension of k/v/bias may be longer than the queries, and it is + assumed that the extra memory entries precede the non-memory entries. + hard_attention_k: integer, if > 0 triggers hard attention (picking top-k) + gumbel_noise_weight: if > 0, apply Gumbel noise with weight + `gumbel_noise_weight` before picking top-k. This is a no op if + hard_attention_k <= 0. + + Returns: + A Tensor. + + Raises: + ValueError: if max_relative_position is not > 0. + """ + if not max_relative_position: + raise ValueError("Max relative position (%s) should be > 0 when using " + "relative self attention." % (max_relative_position)) + with tf.variable_scope( + name, default_name="dot_product_attention_relative", + values=[q, k, v]) as scope: + + # This calculation only works for self attention. + # q, k and v must therefore have the same shape, unless memory is enabled. + if not cache and not allow_memory: + q.get_shape().assert_is_compatible_with(k.get_shape()) + q.get_shape().assert_is_compatible_with(v.get_shape()) + + # Use separate embeddings suitable for keys and values. + depth = k.get_shape().as_list()[3] + length_k = common_layers.shape_list(k)[2] + length_q = common_layers.shape_list(q)[2] if allow_memory else length_k + relations_keys = _generate_relative_positions_embeddings( + length_q, length_k, depth, max_relative_position, + "relative_positions_keys", cache=cache) + relations_values = _generate_relative_positions_embeddings( + length_q, length_k, depth, max_relative_position, + "relative_positions_values", cache=cache) + + # Compute self attention considering the relative position embeddings. + logits = _relative_attention_inner(q, k, relations_keys, True) + if bias is not None: + logits += bias + weights = tf.nn.softmax(logits, name="attention_weights") + if hard_attention_k > 0: + weights = harden_attention_weights(weights, hard_attention_k, + gumbel_noise_weight) + if save_weights_to is not None: + save_weights_to[scope.name] = weights + save_weights_to[scope.name + "/logits"] = logits + weights = tf.nn.dropout(weights, 1.0 - dropout_rate) + if (not tf.get_variable_scope().reuse and + common_layers.should_generate_summaries() and + make_image_summary): + attention_image_summary(weights, image_shapes) + return _relative_attention_inner(weights, v, relations_values, False) + + +def _relative_position_to_absolute_position_masked(x): + """Helper to dot_product_self_attention_relative_v2. + + Rearrange an attention logits or weights Tensor. 
+ + The dimensions of the input represent: + [batch, heads, query_position, memory_position - query_position + length - 1] + + The dimensions of the output represent: + [batch, heads, query_position, memory_position] + + Only works with masked_attention. Undefined behavior for regions of the + input where memory_position > query_position. + + Args: + x: a Tensor with shape [batch, heads, length, length] + + Returns: + a Tensor with shape [batch, heads, length, length] + """ + batch, heads, length, _ = common_layers.shape_list(x) + x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0]]) + x = tf.reshape(x, [batch, heads, 1 + length, length]) + x = tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1]) + return x + + +def _absolute_position_to_relative_position_masked(x): + """Helper to dot_product_self_attention_relative_v2. + + Rearrange an attention logits or weights Tensor. + + The dimensions of the input represent: + [batch, heads, query_position, memory_position] + + The dimensions of the output represent: + [batch, heads, query_position, memory_position - query_position + length - 1] + + Only works with masked_attention. Undefined behavior for regions of the + input where memory_position > query_position. + + Args: + x: a Tensor with shape [batch, heads, length, length] + + Returns: + a Tensor with shape [batch, heads, length, length] + """ + batch, heads, length, _ = common_layers.shape_list(x) + x = tf.pad(x, [[0, 0], [0, 0], [1, 0], [0, 0]]) + x = tf.reshape(x, [batch, heads, length, length + 1]) + x = tf.slice(x, [0, 0, 0, 1], [batch, heads, length, length]) + return x + + +def get_relative_embeddings_left(max_relative_position, length, depth, + num_heads, heads_share_relative_embedding, + name): + """Instantiate or retrieve relative embeddings, sliced according to length. + + Use for masked case where the relative attention is only looking left. + + Args: + max_relative_position: an Integer for the number of entries in the relative + embedding, which corresponds to the max relative distance that is + considered. + length: an Integer, specifies the length of the input sequence for which + this relative embedding is retrieved for. + depth: an Integer, specifies the depth for relative embeddings. + num_heads: an Integer, specifies the number of heads. + heads_share_relative_embedding: a Boolean specifying if the relative + embedding is shared across heads. + name: a string giving the name of the embedding variables. + + Returns: + a Tensor with shape [length, depth] + """ + initializer_stddev = depth**-0.5 + if heads_share_relative_embedding: + embedding_shape = (max_relative_position, depth) + else: + embedding_shape = (num_heads, max_relative_position, depth) + relative_embeddings = tf.get_variable( + name=name, shape=embedding_shape, + initializer=tf.random_normal_initializer(stddev=initializer_stddev)) + # Pad first before slice to avoid using tf.cond. 
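The masked relative-to-absolute rearrangement above uses a pad/reshape/slice trick rather than a gather; a NumPy sketch of the same index shuffle:

```python
import numpy as np

def rel_to_abs_masked(x):
    # Pad one column on the left of the last axis, fold it into the row axis,
    # then drop the first row, as in the helper above.
    batch, heads, length, _ = x.shape
    x = np.pad(x, [(0, 0), (0, 0), (0, 0), (1, 0)])
    x = x.reshape(batch, heads, length + 1, length)
    return x[:, :, 1:, :]

x = np.arange(2 * 1 * 3 * 3, dtype=np.float32).reshape(2, 1, 3, 3)
print(rel_to_abs_masked(x).shape)  # (2, 1, 3, 3)
```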
+ pad_length = tf.maximum(length - max_relative_position, 0) + start_slice_position = tf.maximum(max_relative_position - length, 0) + if heads_share_relative_embedding: + padded_relative_embeddings = tf.pad( + relative_embeddings, + [[pad_length, 0], [0, 0]]) + used_relative_embeddings = tf.slice( + padded_relative_embeddings, + [start_slice_position, 0], [length, -1]) + else: + padded_relative_embeddings = tf.pad( + relative_embeddings, + [[0, 0], [pad_length, 0], [0, 0]]) + used_relative_embeddings = tf.slice( + padded_relative_embeddings, + [0, start_slice_position, 0], [-1, length, -1]) + return used_relative_embeddings + + +def dot_product_self_attention_relative_v2(q, + k, + v, + bias, + max_relative_position=None, + dropout_rate=0.0, + image_shapes=None, + save_weights_to=None, + name=None, + make_image_summary=True, + dropout_broadcast_dims=None, + heads_share_relative_embedding=False, + add_relative_to_values=False): + """Calculate relative position-aware dot-product self-attention. + + Only works for masked self-attention (no looking forward). + + The attention calculation is augmented with learned representations for the + relative position between each element in q and each element in k and v. + + Args: + q: a Tensor with shape [batch, heads, length, depth]. + k: a Tensor with shape [batch, heads, length, depth]. + v: a Tensor with shape [batch, heads, length, depth]. + bias: bias Tensor. + max_relative_position: an integer indicating the maximum relative distance + to look back - changing this invalidates checkpoints + dropout_rate: a floating point number. + image_shapes: optional tuple of integer scalars. + save_weights_to: an optional dictionary to capture attention weights + for visualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + name: an optional string. + make_image_summary: Whether to make an attention image summary. + dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + heads_share_relative_embedding: a boolean indicating wheather to share + relative embeddings between attention heads. + add_relative_to_values: a boolean for whether to add relative component to + values. + + Returns: + A Tensor. + + Raises: + ValueError: if max_relative_position is not > 0. + """ + if not max_relative_position: + raise ValueError("Max relative position (%s) should be > 0 when using " + "relative self attention." % (max_relative_position)) + with tf.variable_scope( + name, + default_name="dot_product_self_attention_relative_v2", + values=[q, k, v]) as scope: + + # This calculation only works for self attention. + # q, k and v must therefore have the same shape. + # (Except v can have different depth.) + q.get_shape().assert_is_compatible_with(k.get_shape()) + q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1]) + + # Use separate embeddings suitable for keys and values. 
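`get_relative_embeddings_left` above pads first and then slices so it can avoid `tf.cond`; the arithmetic for both regimes (sequence shorter or longer than the embedding table), with hypothetical sizes:

```python
max_relative_position = 4
for length in (2, 6):
    pad_length = max(length - max_relative_position, 0)
    start_slice_position = max(max_relative_position - length, 0)
    # Short sequences slice the closest offsets from the table; long sequences
    # first pad the table with zero rows for the out-of-range offsets.
    print(length, pad_length, start_slice_position)  # (2, 0, 2) and (6, 2, 0)
```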
+ _, num_heads, length, depth_k = common_layers.shape_list(k) + + # [batch, num_heads, query_length, memory_length] + logits = tf.matmul(q, k, transpose_b=True) + key_relative_embeddings = get_relative_embeddings_left( + max_relative_position, length, depth_k, num_heads, + heads_share_relative_embedding, "key_relative_embeddings") + + rel_logits = matmul_with_relative_keys(q, key_relative_embeddings, + heads_share_relative_embedding) + rel_logits = _relative_position_to_absolute_position_masked(rel_logits) + logits += rel_logits + if bias is not None: + logits += bias + + weights = tf.nn.softmax(logits, name="attention_weights") + if save_weights_to is not None: + save_weights_to[scope.name] = weights + save_weights_to[scope.name + "/logits"] = logits + # Dropping out the attention links for each of the heads. + weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + if common_layers.should_generate_summaries() and make_image_summary: + attention_image_summary(weights, image_shapes) + output = tf.matmul(weights, v) + if add_relative_to_values: + # [batch, num_heads, query_length, memory_length] + relative_weights = _absolute_position_to_relative_position_masked(weights) + depth_v = common_layers.shape_list(v)[3] + value_relative_embeddings = get_relative_embeddings_left( + max_relative_position, length, depth_v, num_heads, + heads_share_relative_embedding, "value_relative_embeddings") + output += matmul_with_relative_values( + relative_weights, value_relative_embeddings, + heads_share_relative_embedding) + return output + + +def _absolute_position_to_relative_position_unmasked(x): + """Helper function for dot_product_unmasked_self_attention_relative_v2. + + Rearrange an attention logits or weights Tensor. + + The dimensions of the input represent: + [batch, heads, query_position, memory_position] + + The dimensions of the output represent: + [batch, heads, query_position, memory_position - query_position + length - 1] + + Only works with unmasked_attention. + + Args: + x: a Tensor with shape [batch, heads, length, length] + + Returns: + a Tensor with shape [batch, heads, length, 2*length-1] + """ + batch, heads, length, _ = common_layers.shape_list(x) + # padd along column + x = tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, length-1]]) + x_flat = tf.reshape(x, [batch, heads, length**2 + length*(length -1)]) + # add 0's in the beginning that will skew the elements after reshape + x_flat = tf.pad(x_flat, [[0, 0], [0, 0], [length, 0]]) + x = tf.reshape(x_flat, [batch, heads, length, 2*length]) + x = tf.slice(x, [0, 0, 0, 1], [batch, heads, length, + 2*length -1]) + return x + + +def get_relative_embeddings_left_right(max_relative_position, length, depth, + num_heads, + heads_share_relative_embedding, + name): + """Instantiate or retrieve relative embeddings, sliced according to length. + + Use for unmasked case where the relative attention looks both left and right. + + Args: + max_relative_position: an Integer for the number of entries in the relative + embedding, which corresponds to the max relative distance that is + considered. + length: an Integer, specifies the length of the input sequence for which + this relative embedding is retrieved for. + depth: an Integer, specifies the depth for relative embeddings. + num_heads: an Integer, specifies the number of heads. + heads_share_relative_embedding: a Boolean specifying if the relative + embedding is shared across heads. 
+ name: a string giving the name of the embedding variables. + + Returns: + a Tensor with shape [length, depth] + """ + initializer_stddev = depth**-0.5 + max_relative_position_unmasked = 2 * max_relative_position - 1 + if heads_share_relative_embedding: + embedding_shape = (max_relative_position_unmasked, depth) + else: + embedding_shape = (num_heads, max_relative_position_unmasked, depth) + relative_embeddings = tf.get_variable( + name=name, shape=embedding_shape, + initializer=tf.random_normal_initializer(stddev=initializer_stddev)) + # Pad first before slice to avoid using tf.cond. + pad_length = tf.maximum(length - max_relative_position, 0) + slice_start_position = tf.maximum(max_relative_position-length, 0) + if heads_share_relative_embedding: + padded_relative_embeddings = tf.pad( + relative_embeddings, + [[pad_length, pad_length], [0, 0]]) + used_relative_embeddings = tf.slice( + padded_relative_embeddings, + [slice_start_position, 0], [2 * length - 1, -1]) + else: + padded_relative_embeddings = tf.pad( + relative_embeddings, + [[0, 0], [pad_length, pad_length], [0, 0]]) + used_relative_embeddings = tf.slice( + padded_relative_embeddings, + [0, slice_start_position, 0], [-1, 2 * length - 1, -1]) + return used_relative_embeddings + + +def dot_product_unmasked_self_attention_relative_v2( + q, k, v, bias, max_relative_position=None, dropout_rate=0.0, + image_shapes=None, save_weights_to=None, name=None, make_image_summary=True, + dropout_broadcast_dims=None, heads_share_relative_embedding=False, + add_relative_to_values=False): + """Calculate relative position-aware dot-product self-attention. + + The attention calculation is augmented with learned representations for the + relative position between each element in q and each element in k and v. + + Args: + q: a Tensor with shape [batch, heads, length, depth]. + k: a Tensor with shape [batch, heads, length, depth]. + v: a Tensor with shape [batch, heads, length, depth]. + bias: bias Tensor. + max_relative_position: an integer the max relative embedding considered. + Changing this invalidates checkpoints. + dropout_rate: a floating point number. + image_shapes: optional tuple of integer scalars. + save_weights_to: an optional dictionary to capture attention weights + for visualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + name: an optional string. + make_image_summary: Whether to make an attention image summary. + dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + heads_share_relative_embedding: a boolean indicating wheather to share + relative embeddings between attention heads. + add_relative_to_values: a boolean for whether to add relative component to + values. + + Returns: + A Tensor. + + Raises: + ValueError: if max_relative_position is not > 0. + """ + if not max_relative_position: + raise ValueError("Max relative position (%s) should be > 0 when using " + "relative self attention." % (max_relative_position)) + + with tf.variable_scope( + name, + default_name="dot_product_unmasked_self_attention_relative_v2", + values=[q, k, v]) as scope: + + # This calculation only works for self attention. + # q, k and v must therefore have the same shape. 
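+    # (Here v must match q and k exactly, including depth; the masked variant
+    # above only requires the leading [batch, heads, length] dims to match.)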
+ q.get_shape().assert_is_compatible_with(k.get_shape()) + q.get_shape().assert_is_compatible_with(v.get_shape()) + + # [batch, num_heads, query_length, memory_length] + logits = tf.matmul(q, k, transpose_b=True) + + length = common_layers.shape_list(q)[2] + k_shape = common_layers.shape_list(k) + num_heads = k_shape[1] + depth_k = k_shape[-1] + + key_relative_embeddings = get_relative_embeddings_left_right( + max_relative_position, length, depth_k, num_heads, + heads_share_relative_embedding, + "key_relative_embeddings") + unmasked_rel_logits = matmul_with_relative_keys( + q, key_relative_embeddings, heads_share_relative_embedding) + unmasked_rel_logits = _relative_position_to_absolute_position_unmasked( + unmasked_rel_logits) + logits += unmasked_rel_logits + + if bias is not None: + logits += bias + weights = tf.nn.softmax(logits, name="attention_weights") + if save_weights_to is not None: + save_weights_to[scope.name] = weights + save_weights_to[scope.name + "/logits"] = logits + # dropping out the attention links for each of the heads + weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + # relative_weights.set_shape([None, None, None, max_length]) + if common_layers.should_generate_summaries() and make_image_summary: + attention_image_summary(weights, image_shapes) + ret = tf.matmul(weights, v) + if add_relative_to_values: + # Adds the contribution of the weighted relative embeddings to the values. + # [batch, num_heads, query_length, 2*memory_length-1] + relative_weights = _absolute_position_to_relative_position_unmasked( + weights) + depth_v = common_layers.shape_list(v)[3] + value_relative_embeddings = get_relative_embeddings_left_right( + max_relative_position, length, depth_v, num_heads, + heads_share_relative_embedding, "value_relative_embeddings") + ret += matmul_with_relative_values( + relative_weights, value_relative_embeddings, + heads_share_relative_embedding) + return ret + + +def _matmul_with_relative_keys_2d(x, y, heads_share_relative_embedding): + """Helper function for dot_product_unmasked_self_attention_relative_2d.""" + if heads_share_relative_embedding: + ret = tf.einsum("bhxyd,md->bhxym", x, y) + else: + ret = tf.einsum("bhxyd,hmd->bhxym", x, y) + return ret + + +def dot_product_unmasked_self_attention_relative_2d( + q, k, v, bias, max_relative_position=None, dropout_rate=0.0, + image_shapes=None, name=None, make_image_summary=True, + dropout_broadcast_dims=None, heads_share_relative_embedding=False, + add_relative_to_values=False): + """Calculate relative position unmasked dot-product self-attention 2d. + + + The attention calculation is augmented with learned representations for the + relative position between each element in q and each element in k and v in + height and width dimensions. for query index (i,j) and key index (l, m), + the logit is q_i k_j^T + q_i rh_{l-i}^T + q_i rw_{m-j}^T, where rh and ry are + the set of relative embeddings in height and width spatial dimensions, + respectively. + + Args: + q: a Tensor with shape [batch, heads, height, width, depth]. + k: a Tensor with shape [batch, heads, height, width, depth]. + v: a Tensor with shape [batch, heads, height, width, depth]. + bias: bias Tensor. + max_relative_position: an integer the max relative embedding considered. + Changing this invalidates checkpoints. + dropout_rate: a floating point number. + image_shapes: optional tuple of integer scalars. + name: an optional string. 
+ make_image_summary: Whether to make an attention image summary. + dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + heads_share_relative_embedding: a boolean indicating wheather to share + relative embeddings between attention heads. + add_relative_to_values: a boolean for adding relative embeddings to values. + + Returns: + [batch, heads, height, width, depth] tensor, the output of attention. + height_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing + settings, which are the relative embeddings for height. + width_key_relative_embeddings: a 3d or 2d tensor, depending on head sharing + settings, which are the relative embeddings for width. + + Raises: + ValueError: if max_relative_position is not > 0. + """ + if not max_relative_position: + raise ValueError("Max relative position (%s) should be > 0 when using " + "relative self attention." % (max_relative_position)) + + if add_relative_to_values: + raise ValueError("Adding relative embeddings to values is not implemented") + + with tf.variable_scope( + name, + default_name="dot_product_self_attention_relative_v2", + values=[q, k, v]): + + # This calculation only works for self attention. + # q, k and v must therefore have the same shape. + q.get_shape().assert_is_compatible_with(k.get_shape()) + q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1]) + + (height, width) = (common_layers.shape_list(q)[2], + common_layers.shape_list(q)[3]) + k_shape = common_layers.shape_list(k) + num_heads = k_shape[1] + depth_k = k_shape[-1] + depth_v = common_layers.shape_list(v)[-1] + # flatten height width + flatten_hw = lambda x, d: tf.reshape(x, [-1, num_heads, height*width, d]) + # [batch, num_heads, query_length, memory_length] + logits = tf.matmul(flatten_hw(q, depth_k), flatten_hw(k, depth_k), + transpose_b=True) + + def _compute_2d_relative_logits( + query, key_relative_embeddings, height, width, + heads_share_relative_embedding, transpose_mask): + """compute relative logits.""" + unmasked_rel_logits = _matmul_with_relative_keys_2d( + query, key_relative_embeddings, heads_share_relative_embedding) + # collapse height and heads + unmasked_rel_logits = tf.reshape(unmasked_rel_logits, + [-1, num_heads*height, width, + 2*width-1]) + unmasked_rel_logits = ( + _relative_position_to_absolute_position_unmasked( + unmasked_rel_logits)) + # shape it back for tiling + unmasked_rel_logits = tf.reshape( + unmasked_rel_logits, [-1, num_heads, height, width, width]) + # tiling it height times + unmasked_rel_logits = tf.expand_dims( + unmasked_rel_logits, axis=3) + unmasked_rel_logits = tf.tile(unmasked_rel_logits, + [1, 1, 1, height, 1, 1]) + # bringing it to the right shape for adding to the logits. + unmasked_rel_logits = tf.transpose(unmasked_rel_logits, transpose_mask) + unmasked_rel_logits = tf.reshape(unmasked_rel_logits, + [-1, num_heads, height*width, + height*width]) + return unmasked_rel_logits + + # Relative logits in width dimension first. + width_key_relative_embeddings = get_relative_embeddings_left_right( + max_relative_position, width, depth_k, num_heads, + heads_share_relative_embedding, + "width_key_relative_embeddings") + # [batch, heads, height, 2*width-1, 2*width-1] + width_unmasked_rel_logits = _compute_2d_relative_logits( + q, width_key_relative_embeddings, height, width, + heads_share_relative_embedding, [0, 1, 2, 4, 3, 5]) + logits += width_unmasked_rel_logits + # Relative logits in height dimension next. 
For ease, we transpose + # height and width and repeat the above steps, and transpose to eventually + # put the logits in their right positions. + # [batch, heads, height, 2*height-1, 2*width-1] + height_key_relative_embeddings = get_relative_embeddings_left_right( + max_relative_position, height, depth_k, num_heads, + heads_share_relative_embedding, + "height_key_relative_embeddings") + + height_unmasked_rel_logits = _compute_2d_relative_logits( + tf.transpose(q, [0, 1, 3, 2, 4]), + height_key_relative_embeddings, + width, + height, + heads_share_relative_embedding, [0, 1, 4, 2, 5, 3]) + logits += height_unmasked_rel_logits + if bias is not None: + logits += bias + weights = tf.nn.softmax(logits, name="attention_weights") + # dropping out the attention links for each of the heads + weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + if common_layers.should_generate_summaries() and make_image_summary: + attention_image_summary(weights, image_shapes) + ret = tf.matmul(weights, flatten_hw(v, depth_v)) + # reshape back the same spatial dimensions as q + return ( + tf.reshape(ret, [-1, num_heads, height, width, depth_v]), + height_key_relative_embeddings, + width_key_relative_embeddings) + + +def _split_along_width(x_left_right_blocks): + """Helper function for local 2d attention. + + Takes a tensor of [batch, heads, num_h_blocks, num_w_blocks, + height, width, depth] and returns two tensors which contain every alternate + position along the width + + + Args: + x_left_right_blocks: A [batch, num_h_blocks, num_w_blocks, + height, width, depth] tensor + + Returns: + x_left_blocks, x_right_blocks: two [batch, num_h_blocks, + (num_w_blocks-2)/2, height, width, + depth] tensors + + """ + (_, x_num_h_blocks, x_num_outer_w_blocks, x_memory_flange_h, + x_memory_flange_w, depth) = common_layers.shape_list(x_left_right_blocks) + x_num_w_blocks = (x_num_outer_w_blocks-1)//2 + # get it ready for splitting the left and right memory blocks + x_left_right_blocks = tf.reshape(x_left_right_blocks, + [-1, + x_num_h_blocks, + x_num_outer_w_blocks//2, 2, + x_memory_flange_h, + x_memory_flange_w, depth]) + + x_left_blocks, x_right_blocks = tf.split(x_left_right_blocks, + num_or_size_splits=2, axis=3) + x_left_blocks = tf.squeeze(x_left_blocks, axis=3) + x_right_blocks = tf.squeeze(x_right_blocks, axis=3) + x_left_blocks = tf.slice(x_left_blocks, [0, 0, 0, 0, 0, 0], + [-1, -1, x_num_w_blocks, -1, -1, -1]) + x_right_blocks = tf.slice(x_right_blocks, [0, 0, 1, 0, 0, 0], + [-1, -1, x_num_w_blocks, -1, -1, -1]) + return x_left_blocks, x_right_blocks + + +def _get_left_right_blocks(x): + """Helper function. Assumes that memory_flange is half of query sizes. + + This function splits the tensor of width 'n' into two halves, where the + first half gets the width indices 0, 2, 4.. and the second half gets the + width indices 3, 5, ... We also fuse two blocks along the h dimension. + + Args: + x: a 6-d tensor. 
+
+  Returns:
+    x_left_blocks, x_right_blocks: Two 6-d tensors
+  """
+  (_, x_num_outer_h_blocks, x_num_outer_w_blocks, x_memory_flange_h,
+   x_memory_flange_w, depth) = common_layers.shape_list(x)
+  x_left_right_blocks = tf.slice(x,
+                                 [0, 1, 0, 0, 0, 0],
+                                 [-1, x_num_outer_h_blocks-2, -1, -1,
+                                  -1, -1])
+  num_blocks_h = (x_num_outer_h_blocks-2)//2
+  x_left_right_blocks = tf.reshape(x_left_right_blocks,
+                                   [-1,
+                                    num_blocks_h,
+                                    2, x_num_outer_w_blocks,
+                                    x_memory_flange_h,
+                                    x_memory_flange_w, depth])
+  x_left_right_blocks = tf.transpose(x_left_right_blocks,
+                                     [0, 1, 3, 2, 4, 5, 6])
+  x_left_right_blocks = tf.reshape(x_left_right_blocks,
+                                   [-1, num_blocks_h,
+                                    x_num_outer_w_blocks, 2*x_memory_flange_h,
+                                    x_memory_flange_w, depth])
+  # get it ready for splitting the left and right memory blocks
+  x_left_blocks, x_right_blocks = _split_along_width(x_left_right_blocks)
+
+  return x_left_blocks, x_right_blocks
+
+
+def _extract_blocks(x, block_h, block_w):
+  """Helper function for local 2d attention.
+
+  Args:
+    x: a [batch, height, width, depth] tensor
+    block_h: An integer. Block height.
+    block_w: An integer. Block width.
+
+  Returns:
+    a [batch, height/block_h, width/block_w, block_h, block_w, depth] tensor
+  """
+  (_, height, width, depth) = common_layers.shape_list(x)
+  assert height % block_h == 0
+  assert width % block_w == 0
+  x = tf.reshape(x, [-1, height//block_h, block_h,
+                     width//block_w, block_w, depth])
+  return tf.transpose(x, [0, 1, 3, 2, 4, 5])
+
+
+def get_2d_local_memory(x, query_shape, memory_flange):
+  """Stitches together the local 2d memory blocks.
+
+  Args:
+    x: a [batch, height, width, depth] tensor
+    query_shape: 2-d integer list of query shape
+    memory_flange: 2-d integer list of memory flanges
+
+  Returns:
+    x: A [batch, num_h_blocks, num_w_blocks,
+      query_shape[0]+2*memory_flange[0], query_shape[1]+2*memory_flange[1],
+      depth] tensor.
+ """ + (_, height, width, depth_x) = common_layers.shape_list(x) + x_center_blocks = _extract_blocks(x, query_shape[0], query_shape[1]) + # add extra padding to x so that we can extract the memory region + # around the center + paddings = [[0, 0], [memory_flange[0], memory_flange[0]], + [memory_flange[1], memory_flange[1]], [0, 0]] + padded_x = tf.pad(x, paddings) + padded_x.set_shape([None, height+2*memory_flange[0], + width+2*memory_flange[1], depth_x]) + x_outer_memory_blocks = _extract_blocks(padded_x, + memory_flange[0], memory_flange[1]) + # We'll extract left and right memory blocks, top and bottom memory blocks, + # and then the corner memory blocks + + # Each of these after will have shape + # [batch, num_h_blocks, num_w_blocks, query_shape[0], + # memory_flange[1], depth] + x_left_blocks, x_right_blocks = _get_left_right_blocks( + x_outer_memory_blocks) + t_hw_block = lambda x: tf.transpose(x, [0, 2, 1, 4, 3, 5]) + # now to get top and bottom blocks, we should just transpose the outer + # blocks, call the same function and transpose back to get shape + # [batch, num_h_blocks, num_w_blocks, memory_flange[0], + # query_shape[1], depth] + x_top_center_blocks, x_bottom_center_blocks = ( + map(t_hw_block, _get_left_right_blocks( + t_hw_block(x_outer_memory_blocks)))) + + # now to get the corner blocks + x_left_corner_blocks, x_right_corner_blocks = _split_along_width( + x_outer_memory_blocks) + # now to extract top and bottom for both k and v + # we need to transpose because _split_along_width separates along + # the width + # each of these should have shape [batch, num_h_blocks, + # num_w_blocks, memory_flange[0], memory_flange[1], depth] + + t_hw = lambda x: tf.transpose(x, [0, 2, 1, 3, 4, 5]) + x_top_left_corner_blocks, x_bottom_left_corner_blocks = ( + map(t_hw, _split_along_width(t_hw(x_left_corner_blocks)))) + x_top_right_corner_blocks, x_bottom_right_corner_blocks = ( + map(t_hw, _split_along_width(t_hw(x_right_corner_blocks)))) + + # The memory is top_left top_center top_right + # left_center middle right_center + # bottom_left bottom_center bottom_right + # Assembling the above row by row + # first [x_top_left, x_top, x_top_right] + # to get [batch, num_h_blocks, num_w_blocks, memory_flange[0], + # query_shape[1]+2*memory_flange[1], depth] + # then [x_left, x_center, x_right] + # then [x_bottom_left, x_bottom, x_bottom_right] + x_top_memory = tf.concat( + [x_top_left_corner_blocks, + x_top_center_blocks, + x_top_right_corner_blocks], axis=4) + x_middle_memory = tf.concat( + [x_left_blocks, x_center_blocks, x_right_blocks], axis=4) + x_bottom_memory = tf.concat( + [x_bottom_left_corner_blocks, + x_bottom_center_blocks, + x_bottom_right_corner_blocks], axis=4) + + # concat along height + x = tf.concat([x_top_memory, x_middle_memory, x_bottom_memory], axis=3) + return x + + +def get_2d_local_memory_v2(x, query_shape, memory_flange): + """Gathering memory blocks around query blocks. flange is half of query . + + Only works if memory flanges are half of query sizes. + + Args: + x: a [batch, height, width, depth tensor] + query_shape: 2-d integer list of query shape + memory_flange: 2-d integer list of memory flanges + + Returns: + x: A [batch, num_h_blocks, num_w_blocks, + query_shape[0]+2*memory_flange[0],query_shape[1]+2*memory_flange[1]] + tensor. 
+ """ + (_, height, width, depth_x) = common_layers.shape_list(x) + # add extra padding to x so that we can extract the memory region + # around the center + paddings = [[0, 0], [memory_flange[0], memory_flange[0]], + [memory_flange[1], memory_flange[1]], [0, 0]] + padded_x = tf.pad(x, paddings) + padded_x.set_shape([None, height+2*memory_flange[0], + width+2*memory_flange[1], depth_x]) + num_h_memory_blocks = height//query_shape[0] + 1 + num_w_memory_blocks = width//query_shape[1] + 1 + x_memory_blocks = _extract_blocks(padded_x, + query_shape[0], query_shape[1]) + x_width_blocks = tf.split(x_memory_blocks, num_w_memory_blocks, + 2) + x_left_width = tf.concat(x_width_blocks[:num_w_memory_blocks - 1], axis=2) + x_right_width = tf.concat(x_width_blocks[1:], axis=2) + x_memory_blocks = tf.concat([x_left_width, x_right_width], axis=4) + + x_height_blocks = tf.split(x_memory_blocks, num_h_memory_blocks, 1) + x_top_height = tf.concat(x_height_blocks[:num_h_memory_blocks - 1], axis=1) + x_bottom_height = tf.concat(x_height_blocks[1:], axis=1) + x = tf.concat([x_top_height, x_bottom_height], axis=3) + + return x + + +def dot_product_unmasked_attention_local_2d_tpu( + q, k, v, bias, max_relative_position=None, query_shape=(8, 8), + dropout_rate=0.0, image_shapes=None, name=None, make_image_summary=False, + dropout_broadcast_dims=None): + """Calculate unmasked dot-product local self-attention 2d on tpu. + + Args: + q: a Tensor with shape [batch, heads, height, width, depth]. + k: a Tensor with shape [batch, heads, height, width, depth]. + v: a Tensor with shape [batch, heads, height, width, depth]. + bias: bias Tensor. + max_relative_position: an integer the max relative embedding considered. + Changing this invalidates checkpoints. + query_shape: a two tuple indicating query shape + dropout_rate: a floating point number. + image_shapes: optional tuple of integer scalars. + name: an optional string. + make_image_summary: Whether to make an attention image summary. + dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + + Returns: + [batch, heads, height, width, depth] tensor, the output of attention. + + """ + if max_relative_position: + raise ValueError("Relative local 2d attention not implemented") + + with tf.variable_scope( + name, + default_name="dot_product_unmasked_attention_local_2d_tpu", + values=[q, k, v]): + + # This calculation only works for self attention. + # q, k and v must therefore have the same shape. + q.get_shape().assert_is_compatible_with(k.get_shape()) + q.get_shape().assert_is_compatible_with(v.get_shape()) + orig_q_shape = common_layers.shape_list(q) + # Pad query, key, value to ensure multiple of corresponding lengths. 
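+    # Illustrative sizes (hypothetical): with query_shape=(8, 8), the inputs
+    # are padded up to multiples of 8 in height and width, and the memory
+    # flange below is (4, 4), so each 8x8 query block attends to a 16x16
+    # memory region centered on it.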
+ memory_flange = [int(query_shape[0]//2), int(query_shape[1]//2)] + q = pad_to_multiple_2d(q, query_shape) + k = pad_to_multiple_2d(k, query_shape) + v = pad_to_multiple_2d(v, query_shape) + q_shape = common_layers.shape_list(q) + (height, width) = (q_shape[2], + q_shape[3]) + _, num_heads, height, width, depth_k = common_layers.shape_list(k) + depth_v = common_layers.shape_list(v)[-1] + num_h_blocks = height//query_shape[0] + num_w_blocks = width//query_shape[1] + # Extract center queries, keys, and values + q = tf.reshape(q, [-1, height, width, depth_k]) + queries = _extract_blocks( + q, query_shape[0], query_shape[1]) + k = tf.reshape(k, [-1, height, width, depth_k]) + keys = get_2d_local_memory_v2( + k, query_shape, memory_flange) + v = tf.reshape(v, [-1, height, width, depth_v]) + values = get_2d_local_memory_v2( + v, query_shape, memory_flange) + memory_h = query_shape[0] + 2*memory_flange[0] + memory_w = query_shape[1] + 2*memory_flange[1] + queries = tf.reshape(queries, [-1, num_heads, num_h_blocks, num_w_blocks, + query_shape[0]*query_shape[1], depth_k]) + keys = tf.reshape(keys, [-1, num_heads, num_h_blocks, num_w_blocks, + memory_h*memory_w, depth_k]) + values = tf.reshape(values, [-1, num_heads, num_h_blocks, num_w_blocks, + memory_h*memory_w, depth_v]) + logits = tf.matmul(queries, keys, transpose_b=True) + if bias is not None: + logits += bias + + weights = tf.nn.softmax(logits, name="attention_weights") + # Dropping out the attention links for each of the heads + weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + if common_layers.should_generate_summaries() and make_image_summary: + attention_image_summary(weights, image_shapes) + ret = tf.matmul(weights, values) + # we need to get it back to shape [batch, heads, height, width] + ret = tf.reshape(ret, [-1, num_heads, num_h_blocks, num_w_blocks, + query_shape[0], query_shape[1], depth_v]) + ret = tf.transpose(ret, [0, 1, 2, 4, 3, 5, 6]) + ret = tf.reshape(ret, [-1, num_heads, num_h_blocks*query_shape[0], + num_w_blocks*query_shape[1], depth_v]) + # slice if padding was introduced + ret = tf.slice(ret, [0, 0, 0, 0, 0], [-1, -1, orig_q_shape[2], + orig_q_shape[3], -1]) + return ret + + +def dot_product_unmasked_attention_local_2d_tpu_simple( + x, bias, total_key_depth, total_value_depth, num_heads, + query_shape=(8, 8), + dropout_rate=0.0, image_shapes=None, make_image_summary=False, + dropout_broadcast_dims=None): + + """Calculate simple unmasked dot-product local self-attention 2d on tpu. + + The query, key, and value blocks are the same. We do not do a second linear + transformation after computing the values + + Args: + x: a Tensor with shape [batch, height, width, depth]. + bias: bias Tensor. + total_key_depth: the dimensions of the keys + total_value_depth: the dimensions of the values + num_heads: number of heads + query_shape: a two tuple indicating query shape + dropout_rate: a floating point number. + image_shapes: optional tuple of integer scalars. + make_image_summary: Whether to make an attention image summary. + dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + + Returns: + ret: [batch, height, width, total_value_depth] tensor, + the output of attention. 
+ q: [batch, height, width, total_key_depth] query tensor + k: [batch, height, width, total_key_depth] key tensor + v: [batch, height, width, total_value_depth] value tensor + + """ + # This calculation only works for self attention. + # q, k and v must therefore have the same shape. + orig_x_shape = common_layers.shape_list(x) + # Pad query, key, value to ensure multiple of corresponding lengths if + # necessary + is_padded = False + if (orig_x_shape[1]%query_shape[0]) != 0 or ( + orig_x_shape[2]%query_shape[1]) != 0: + x = pad_to_multiple_2d(x, query_shape) + is_padded = True + _, height, width, depth = common_layers.shape_list(x) + assert depth%num_heads == 0 + num_h_blocks = height//query_shape[0] + num_w_blocks = width//query_shape[1] + # Extract center queries, keys, and values + x_blocks = _extract_blocks(x, query_shape[0], query_shape[1]) + x_blocks = tf.reshape(x_blocks, [-1, query_shape[0]*query_shape[1], depth]) + q, k, v = compute_qkv(x_blocks, None, total_key_depth, total_value_depth) + hsplit = lambda x: split_heads(x, num_heads) + q, k, v = map(hsplit, [q, k, v]) + logits = tf.matmul(q, k, transpose_b=True) + if bias is not None: + logits += bias + weights = tf.nn.softmax(logits, name="attention_weights") + # Dropping out the attention links for each of the heads + weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + if common_layers.should_generate_summaries() and make_image_summary: + attention_image_summary(weights, image_shapes) + output = tf.matmul(weights, v) + output = combine_heads(output) + # we need to get it back to shape [batch, height, width] + ret = tf.reshape(output, [-1, num_h_blocks, num_w_blocks, + query_shape[0], query_shape[1], total_value_depth]) + + ret = tf.transpose(ret, [0, 1, 3, 2, 4, 5]) + ret = tf.reshape(ret, [-1, num_h_blocks*query_shape[0], + num_w_blocks*query_shape[1], total_value_depth]) + # slice if padding was introduced + if is_padded: + ret = tf.slice(ret, [0, 0, 0, 0], [-1, orig_x_shape[1], + orig_x_shape[2], -1]) + return ret, q, k, v + + +def masked_within_block_local_attention_1d(q, k, v, block_length=64, name=None): + """Attention to the source and a neighborhood to the left within a block. + + The sequence is divided into blocks of length block_length. Attention for a + given query position can only see memory positions less than or equal to the + query position in the corresponding block. + + Args: + q: a Tensor with shape [batch, heads, length, depth_k] + k: a Tensor with shape [batch, heads, length, depth_k] + v: a Tensor with shape [batch, heads, length, depth_v] + block_length: an integer + name: an optional string + + Returns: + a Tensor of shape [batch, heads, length, depth_v] + """ + with tf.variable_scope( + name, default_name="within_local_attention_1d", values=[q, k, v]): + batch, heads, length, depth_k = common_layers.shape_list(q) + depth_v = common_layers.shape_list(v)[-1] + if isinstance(block_length, tf.Tensor): + const = contrib.util().constant_value(block_length) + if const is not None: + block_length = int(const) + + # Pad query, key, value to ensure multiple of block length. + original_length = length + padding_size = tf.mod(-length, block_length) + length += padding_size + padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]] + q = tf.pad(q, padding) + k = tf.pad(k, padding) + v = tf.pad(v, padding) + + # Compute attention for all subsequent query blocks. 
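+    # Illustrative sizes (hypothetical): with length=256 and block_length=64,
+    # num_blocks=4; each 64-position block attends only within itself under a
+    # lower-triangular (causal) mask, so memory cost is
+    # O(length * block_length) rather than O(length**2).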
+ num_blocks = tf.div(length, block_length) + q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k]) + k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k]) + v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v]) + # [batch, heads, num_blocks, block_length, block_length] + attention = tf.matmul(q, k, transpose_b=True) + attention += tf.reshape(attention_bias_lower_triangle(block_length), + [1, 1, 1, block_length, block_length]) + attention = tf.nn.softmax(attention) + # [batch, heads, num_blocks, block_length, depth_v] + output = tf.matmul(attention, v) + output = tf.reshape(output, [batch, heads, -1, depth_v]) + + # Remove the padding if introduced. + output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) + output.set_shape([None if isinstance(dim, tf.Tensor) else dim for dim in + (batch, heads, length, depth_v)]) + return output + + +def _relative_position_to_absolute_position_unmasked(x): + """Converts tensor from relative to aboslute indexing for local attention. + + Args: + x: a Tensor of shape [batch (or batch*num_blocks), heads, + length, 2 * length - 1] + + Returns: + A Tensor of shape [batch (or batch*num_blocks), heads, length, length] + """ + x_shape = common_layers.shape_list(x) + batch = x_shape[0] + heads = x_shape[1] + length = x_shape[2] + # Concat columns of pad to shift from relative to absolute indexing. + col_pad = tf.zeros((batch, heads, length, 1)) + x = tf.concat([x, col_pad], axis=3) + + # Concat extra elements so to add up to shape (len+1, 2*len-1). + flat_x = tf.reshape(x, [batch, heads, length * 2 * length]) + flat_pad = tf.zeros((batch, heads, length-1)) + flat_x_padded = tf.concat([flat_x, flat_pad], axis=2) + + # Reshape and slice out the padded elements. + final_x = tf.reshape(flat_x_padded, [batch, heads, length+1, 2*length-1]) + final_x = final_x[:, :, :, length-1:] + final_x = final_x[:, :, :length, :] + return final_x + + +def masked_local_attention_1d(q, + k, + v, + block_length=128, + make_image_summary=False, + dropout_rate=0., + name=None): + """Attention to the source position and a neighborhood to the left of it. + + The sequence is divided into blocks of length block_length. Attention for a + given query position can only see memory positions less than or equal to the + query position, in the corresponding block and the previous block. + + Args: + q: a Tensor with shape [batch, heads, length, depth_k] + k: a Tensor with shape [batch, heads, length, depth_k] + v: a Tensor with shape [batch, heads, length, depth_v] + block_length: an integer + make_image_summary: a boolean, whether to make an attention image summary. + dropout_rate: Dropout rate for attention dropout + name: an optional string + + Returns: + a Tensor of shape [batch, heads, length, depth_v] + """ + with tf.variable_scope( + name, default_name="local_attention_1d", values=[q, k, v]): + batch, heads, length, depth_k = common_layers.shape_list(q) + depth_v = common_layers.shape_list(v)[-1] + if isinstance(block_length, tf.Tensor): + const = contrib.util().constant_value(block_length) + if const is not None: + block_length = int(const) + # If (length < 2 * block_length), then we use only one block. + if isinstance(length, int) and isinstance(block_length, int): + block_length = length if length < block_length * 2 else block_length + else: + block_length = tf.where( + tf.less(length, block_length * 2), length, block_length) + + # Pad query, key, value to ensure multiple of block length. 
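+    # Illustrative sizes (hypothetical): with length=300 and block_length=128,
+    # padding_size=84, the padded length is 384 and num_blocks=3; block 0 is
+    # handled separately below, and each later block attends to itself and
+    # the previous block.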
+ original_length = length + padding_size = tf.mod(-length, block_length) + length += padding_size + padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]] + q = tf.pad(q, padding) + k = tf.pad(k, padding) + v = tf.pad(v, padding) + + if isinstance(length, int) and isinstance(block_length, int): + num_blocks = length // block_length + else: + num_blocks = tf.div(length, block_length) + + # Compute attention for the first query block. + first_q = tf.slice(q, [0, 0, 0, 0], [-1, -1, block_length, -1]) + first_k = tf.slice(k, [0, 0, 0, 0], [-1, -1, block_length, -1]) + first_v = tf.slice(v, [0, 0, 0, 0], [-1, -1, block_length, -1]) + + first_output = dot_product_attention( + first_q, + first_k, + first_v, + attention_bias_lower_triangle(block_length), + dropout_rate=dropout_rate, + make_image_summary=make_image_summary, + name="first_block") + + # Compute attention for all subsequent query blocks. + q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k]) + k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k]) + v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v]) + + local_k = _make_local_block(k, depth_k, batch, heads, num_blocks, + block_length) + local_v = _make_local_block(v, depth_v, batch, heads, num_blocks, + block_length) + tail_q = tf.slice(q, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1]) + tail_q = tf.reshape(tail_q, + [batch, heads, num_blocks - 1, block_length, depth_k]) + local_length = common_layers.shape_list(local_k)[3] + + # make sure source_pos <= target_pos + good_part = common_layers.ones_matrix_band_part( + block_length, + local_length, + -1, + block_length, + out_shape=[1, 1, 1, block_length, local_length]) + bias = (1.0 - good_part) * -1e9 + # TODO(noam): figure out how to show a summary for the remaining blocks. + # The naive way currently causes errors due to empty tensors. + # output: [batch, heads, num_blocks-1, block_length, depth_v] + tail_output = dot_product_attention( + tail_q, + local_k, + local_v, + bias, + dropout_rate=dropout_rate, + make_image_summary=False, + name="tail_block") + tail_output = tf.reshape( + tail_output, [batch, heads, (num_blocks - 1) * block_length, depth_v]) + output = tf.concat([first_output, tail_output], axis=2) + + # Remove the padding if introduced. + output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) + output = tf.reshape(output, [batch, heads, original_length, depth_v]) + return output + + +def _make_local_block(x, depth, batch, heads, num_blocks, block_length): + """Helper function to create a local version of the keys or values for 1d.""" + prev_block = tf.slice(x, [0, 0, 0, 0, 0], + [-1, -1, num_blocks - 1, -1, -1]) + cur_block = tf.slice(x, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1]) + local_block = tf.concat([prev_block, cur_block], 3) + return tf.reshape(local_block, + [batch, heads, num_blocks - 1, block_length * 2, depth]) + + +def masked_relative_local_attention_1d(q, + k, + v, + block_length=128, + make_image_summary=False, + dropout_rate=0., + heads_share_relative_embedding=False, + add_relative_to_values=False, + name=None): + """Masked local 1d attention with relative positions. + + The sequence is divided into blocks of length block_size. + Attention for a given query position can only see memory positions + less than or equal to the query position, in the corresponding block + and the previous block. + + If mask_right is True, then a target position cannot see greater source + positions. 
+ + Args: + q: a Tensor with shape [batch, heads, length, depth_k] + k: a Tensor with shape [batch, heads, length, depth_k] + v: a Tensor with shape [batch, heads, length, depth_v] + block_length: an integer + make_image_summary: a boolean, whether to make an attention image summary. + dropout_rate: Dropout rate for attention dropout + heads_share_relative_embedding: a boolean for sharing relative embeddings. + add_relative_to_values: a boolean for whether to add relative component to + values. + name: an optional string + + Returns: + a Tensor of shape [batch, heads, length, depth_v] + + Raises: + ValueError: wwhen the name for the variable scope is not passed. + """ + if not name: + raise ValueError("Name must be assigned since reuse for variable scope is " + "set to tf.AUTO_REUSE, in order to reuse relative " + "embeddings of keys and values.") + + # Reuse flag is set to auto_reuse to reuse relative embeddings of keys and + # values across blocks (first and tail blocks). + with tf.variable_scope( + name, default_name="masked_relative_local_attention_1d", + values=[q, k, v], reuse=tf.AUTO_REUSE): + + default_block_length = block_length + batch = common_layers.shape_list(q)[0] + heads = common_layers.shape_list(q)[1] + length = common_layers.shape_list(q)[2] + # If (length < 2 * block_length), then we use only one block. + if isinstance(length, int) and isinstance(block_length, int): + block_length = length if length < block_length * 2 else block_length + else: + block_length = tf.where( + tf.less(length, block_length * 2), length, block_length) + depth_k = common_layers.shape_list(k)[3] + depth_v = common_layers.shape_list(v)[3] + original_length = length + padding_size = tf.mod(-length, block_length) + length += padding_size + padding = [[0, 0], [0, 0], [0, padding_size], [0, 0]] + q = tf.pad(q, padding) + k = tf.pad(k, padding) + v = tf.pad(v, padding) + + num_blocks = length // block_length + # compute attention for the first query block. + first_q = tf.slice(q, [0, 0, 0, 0], [-1, -1, block_length, -1]) + first_k = tf.slice(k, [0, 0, 0, 0], [-1, -1, block_length, -1]) + first_v = tf.slice(v, [0, 0, 0, 0], [-1, -1, block_length, -1]) + # Relative embeddings will be used later as well. + # TODO(avaswani,annahuang): check why 2*bl was breaking for music + # Needs to be known at static shape inference time, hence cannot be + # 2 * block_length. + rel_embed_length = 4 * default_block_length + # We only multiply with the needed embeddings as we slice them out. 
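+    # Illustrative sizes (hypothetical): with default_block_length=128,
+    # rel_embed_length=512; get_relative_embeddings_left then slices out just
+    # the block_length (here) or 2*block_length (for the tail blocks below)
+    # most recent relative positions from that table.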
+ first_rel_embeddings = get_relative_embeddings_left( + rel_embed_length, block_length, depth_k, heads, + heads_share_relative_embedding, "relative_embeddings") + first_rel_logits = matmul_with_relative_keys( + first_q, first_rel_embeddings, heads_share_relative_embedding) + first_logits = tf.matmul(first_q, first_k, transpose_b=True) + first_logits += ( + _relative_position_to_absolute_position_masked(first_rel_logits)) + # adding a mask + first_logits += ( + common_layers.cast_like(attention_bias_lower_triangle(block_length), + first_logits)) + first_att = tf.nn.softmax(first_logits, + name="first_attention_weights") + # dropping out the attention links for each of the heads + first_att = common_layers.dropout_with_broadcast_dims( + first_att, 1.0 - dropout_rate, + broadcast_dims=None) + # only call image summary for the first block + if common_layers.should_generate_summaries() and make_image_summary: + attention_image_summary(first_att, None) + first_output = tf.matmul(first_att, first_v) + + # compute attention for all subsequent query blocks. + q = tf.reshape(q, [batch, heads, num_blocks, block_length, depth_k]) + k = tf.reshape(k, [batch, heads, num_blocks, block_length, depth_k]) + v = tf.reshape(v, [batch, heads, num_blocks, block_length, depth_v]) + local_k = _make_local_block(k, depth_k, batch, heads, num_blocks, + block_length) + local_v = _make_local_block(v, depth_v, batch, heads, num_blocks, + block_length) + tail_q = tf.slice(q, [0, 0, 1, 0, 0], [-1, -1, -1, -1, -1]) + tail_q = tf.reshape(tail_q, + [batch, heads, num_blocks - 1, block_length, depth_k]) + local_length = common_layers.shape_list(local_k)[3] + + # collapsing num blocks and batch size so that we can reuse + # functions + def _reshape_for_relative(x): + x_shape = common_layers.shape_list(x) + # [batch, num_blocks, heads, length, depth] + x = tf.transpose(x, [0, 2, 1, 3, 4]) + x = tf.reshape(x, [batch*x_shape[2], heads, x_shape[3], + x_shape[4]]) + return x + rel_tail_q = _reshape_for_relative(tail_q) + rel_k = _reshape_for_relative(local_k) + rel_v = _reshape_for_relative(local_v) + rel_embeddings = get_relative_embeddings_left( + rel_embed_length, 2 * block_length, depth_k, heads, + heads_share_relative_embedding, "relative_embeddings") + rel_logits = matmul_with_relative_keys( + rel_tail_q, rel_embeddings, heads_share_relative_embedding) + # Computing relative logits separately for the masked and unmasked parts + # because the reshaping logic is different for both + masked_rel_logits = tf.slice(rel_logits, [0, 0, 0, block_length], + [-1, -1, -1, -1]) + masked_rel_logits = _relative_position_to_absolute_position_masked( + masked_rel_logits) + unmasked_rel_logits = tf.slice(rel_logits, [0, 0, 0, 0], + [-1, -1, -1, 2*block_length-1]) + unmasked_rel_logits = _relative_position_to_absolute_position_unmasked( + unmasked_rel_logits) + all_rel_logits = tf.concat([unmasked_rel_logits, masked_rel_logits], + axis=3) + all_logits = ( + tf.matmul(rel_tail_q, rel_k, transpose_b=True) + all_rel_logits) + # make sure source_pos <= target_pos + good_part = common_layers.ones_matrix_band_part(block_length, + local_length, + -1, block_length) + mask = (1.0 - good_part) * -1e9 + mask = common_layers.cast_like(mask, all_logits) + all_logits += tf.reshape(mask, [1, 1, block_length, local_length]) + weights = tf.nn.softmax(all_logits, name="attention_weights") + # [batch (* num_blocks), heads, query_length (=block_length), + # key_length (=2*block_length)] + weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - 
dropout_rate, + broadcast_dims=None) + + output = tf.matmul(weights, rel_v) + if add_relative_to_values: + # Adds the contribution of the weighted relative embeddings to the values. + weights_for_unmasked, weights_for_masked = ( + tf.split(weights, 2, axis=3)) + rel_weights_unmasked = _absolute_position_to_relative_position_unmasked( + weights_for_unmasked) + rel_weights_masked = _absolute_position_to_relative_position_masked( + weights_for_masked) + + value_rel_embeddings_unmasked = get_relative_embeddings_left( + rel_embed_length, 2 * block_length, depth_v, + heads, heads_share_relative_embedding, + "value_relative_embeddings") + # The unmasked part starts with index -1 as opposed 0 has take uptil last. + if heads_share_relative_embedding: + value_rel_embeddings_unmasked = value_rel_embeddings_unmasked[:-1, :] + else: + value_rel_embeddings_unmasked = value_rel_embeddings_unmasked[:, :-1, :] + value_rel_embeddings_masked = get_relative_embeddings_left( + rel_embed_length, block_length, depth_v, + heads, heads_share_relative_embedding, + "value_relative_embeddings") + + # [batch (*num_blocks), heads, query length, key length] + rel_weights = tf.concat( + [rel_weights_unmasked, rel_weights_masked], axis=3) + if heads_share_relative_embedding: + value_rel_embeddings_concat_axis = 0 + else: + value_rel_embeddings_concat_axis = 1 + value_rel_embeddings = tf.concat( + [value_rel_embeddings_unmasked, value_rel_embeddings_masked], + axis=value_rel_embeddings_concat_axis) + output_rel = matmul_with_relative_values( + rel_weights, value_rel_embeddings, heads_share_relative_embedding) + output += output_rel + + # bring to [batch, heads, num_blocks-1, block_length, depth] + output = tf.reshape(output, + [batch, num_blocks-1, heads, block_length, depth_v]) + output = tf.transpose(output, [0, 2, 1, 3, 4]) + + output = tf.reshape( + output, [batch, heads, (num_blocks - 1) * block_length, depth_v]) + output = tf.concat([first_output, output], axis=2) + output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) + output = tf.reshape(output, [batch, heads, original_length, depth_v]) + return output + + +def matmul_with_relative_values(x, y, heads_share_relative_embedding): + if heads_share_relative_embedding: + ret = tf.einsum("bhlm,md->bhld", x, y) + else: + ret = tf.einsum("bhlm,hmd->bhld", x, y) + return ret + + +def matmul_with_relative_keys(x, y, heads_share_relative_embedding): + if heads_share_relative_embedding: + ret = tf.einsum("bhld,md->bhlm", x, y) + else: + ret = tf.einsum("bhld,hmd->bhlm", x, y) + return ret + + +def local_attention_1d(q, k, v, block_length=128, filter_width=100, name=None): + """Strided block local self-attention. + + The sequence is divided into blocks of length block_length. Attention for a + given query position can see all memory positions in the corresponding block + and filter_width many positions to the left and right of the block. + + Args: + q: a Tensor with shape [batch, heads, length, depth_k] + k: a Tensor with shape [batch, heads, length, depth_k] + v: a Tensor with shape [batch, heads, length, depth_v] + block_length: an integer + filter_width: an integer indicating how much to look left and right of the + block. + name: an optional string + + Returns: + a Tensor of shape [batch, heads, length, depth_v] + """ + with tf.variable_scope( + name, default_name="local_self_attention_1d", values=[q, k, v]): + # Check that q, k, v have the same shape except in their depth dimension. 
+ q.get_shape()[:-1].assert_is_compatible_with(k.get_shape()[:-1]) + q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1]) + + batch_size, num_heads, original_length, _ = common_layers.shape_list(q) + + # Pad query, key, value to ensure multiple of corresponding lengths. + def pad_to_multiple(x, pad_length): + x_length = common_layers.shape_list(x)[2] + return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]]) + + def pad_l_and_r(x, pad_length): + return tf.pad(x, [[0, 0], [0, 0], [pad_length, pad_length], [0, 0]]) + + # Set up query blocks. + # [batch, heads, blocks_q, block_length, depth_k] + q = pad_to_multiple(q, block_length) + q = reshape_by_blocks(q, common_layers.shape_list(q), block_length) + total_query_blocks = common_layers.shape_list(q)[2] + + # Set up key and value blocks. + # [batch, heads, blocks_k, block_length, depth_k] + blocks_per_filter_width = filter_width // block_length + remaining_items = filter_width % block_length + k = pad_to_multiple(k, block_length) + v = pad_to_multiple(v, block_length) + k = pad_l_and_r(k, filter_width + block_length - remaining_items) + v = pad_l_and_r(v, filter_width + block_length - remaining_items) + k = reshape_by_blocks(k, common_layers.shape_list(k), block_length) + v = reshape_by_blocks(v, common_layers.shape_list(v), block_length) + + total_kv_blocks = common_layers.shape_list(k)[2] + + slices = [] + # prepare the left-most and right-most partial blocks if needed + if remaining_items: + first_partial_block_k = tf.slice( + k, [0, 0, 0, block_length - remaining_items, 0], + [-1, -1, total_query_blocks, -1, -1]) + first_partial_block_v = tf.slice( + v, [0, 0, 0, block_length - remaining_items, 0], + [-1, -1, total_query_blocks, -1, -1]) + last_partial_block_k = tf.slice( + k, [0, 0, total_kv_blocks - total_query_blocks, 0, 0], + [-1, -1, -1, remaining_items, -1]) + last_partial_block_v = tf.slice( + v, [0, 0, total_kv_blocks - total_query_blocks, 0, 0], + [-1, -1, -1, remaining_items, -1]) + slices.append((first_partial_block_k, first_partial_block_v)) + slices.append((last_partial_block_k, last_partial_block_v)) + + # Prepare the rest of the blocks + first_block_index = 1 if remaining_items else 0 + attention_blocks = 2 * blocks_per_filter_width + 1 + for i in range(first_block_index, attention_blocks + first_block_index): + block_k = tf.slice(k, [0, 0, i, 0, 0], + [-1, -1, total_query_blocks, -1, -1]) + block_v = tf.slice(v, [0, 0, i, 0, 0], + [-1, -1, total_query_blocks, -1, -1]) + slices.append((block_k, block_v)) + # [batch, heads, blocks_q, block_length + 2 * filter_width, depth_k] + k = tf.concat([s[0] for s in slices], axis=3) + v = tf.concat([s[1] for s in slices], axis=3) + + attention_bias = tf.expand_dims(embedding_to_padding(k) * -1e9, axis=-2) + depth_v = common_layers.shape_list(v)[-1] + + output = dot_product_attention( + q, + k, + v, + attention_bias, + dropout_rate=0., + name="local_1d", + make_image_summary=False) + output = tf.reshape(output, [batch_size, num_heads, -1, depth_v]) + + # Remove the padding if introduced. + output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) + output.set_shape([None if isinstance(dim, tf.Tensor) else dim for dim in + (batch_size, num_heads, original_length, depth_v)]) + return output + + +def reshape_by_blocks(x, x_shape, memory_block_size): + """Reshapes input by splitting its length over blocks of memory_block_size. + + Args: + x: a Tensor with shape [batch, heads, length, depth] + x_shape: tf.TensorShape of x. 
+ memory_block_size: Integer which divides length. + + Returns: + Tensor with shape + [batch, heads, length // memory_block_size, memory_block_size, depth]. + """ + x = tf.reshape(x, [ + x_shape[0], x_shape[1], x_shape[2] // memory_block_size, + memory_block_size, x_shape[3] + ]) + return x + + +def dilated_self_attention_1d(q, + k, + v, + query_block_size=128, + memory_block_size=128, + gap_size=2, + num_memory_blocks=2, + name=None): + """Dilated self-attention. + + Args: + q: a Tensor with shape [batch, heads, length, depth] + k: a Tensor with shape [batch, heads, length, depth] + v: a Tensor with shape [batch, heads, length, depth] + query_block_size: an integer indicating size of query block + memory_block_size: an integer indicating the size of a memory block. + gap_size: an integer indicating the gap size + num_memory_blocks: how many memory blocks to look at to the left and right. + Each will be separated by gap_size. + name: an optional string + + Returns: + a Tensor of shape [batch, heads, length, depth] + """ + with tf.variable_scope( + name, default_name="dilated_self_attention_1d", values=[q, k, v]): + v_list_shape = v.get_shape().as_list() + assert v_list_shape == k.shape.as_list(), "K and V depths must be equal" + v_shape = common_layers.shape_list(v) + depth_v = v_shape[3] + batch_size = v_shape[0] + num_heads = v_shape[1] + original_length = common_layers.shape_list(q)[2] + + # Pad query, key, value to ensure multiple of corresponding lengths. + def pad_to_multiple(x, pad_length): + x_length = common_layers.shape_list(x)[2] + return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]]) + + def pad_l_and_r(x, pad_length): + return tf.pad(x, [[0, 0], [0, 0], [pad_length, pad_length], [0, 0]]) + + q = pad_to_multiple(q, query_block_size) + v = pad_to_multiple(v, query_block_size) + k = pad_to_multiple(k, query_block_size) + + # Set up query blocks. + new_q_shape = common_layers.shape_list(q) + q = reshape_by_blocks(q, new_q_shape, query_block_size) + self_k_part = reshape_by_blocks(k, new_q_shape, query_block_size) + self_v_part = reshape_by_blocks(v, new_q_shape, query_block_size) + + # Set up key and value windows. + k_v_padding = (gap_size + memory_block_size) * num_memory_blocks + k = pad_l_and_r(k, k_v_padding) + v = pad_l_and_r(v, k_v_padding) + + # Get gather indices. + index_length = (new_q_shape[2] - query_block_size + memory_block_size) + indices = tf.range(0, index_length, delta=1, name="index_range") + indices = tf.reshape(indices, [1, -1, 1]) # [1, length, 1] for convs + kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1) + gather_indices = tf.nn.conv1d( + tf.cast(indices, tf.float32), + kernel, + query_block_size, + padding="VALID", + name="gather_conv") + + gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0) + + # Get left and right memory blocks for each query. 
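+    # Illustrative sizes (hypothetical): with query_block_size=128,
+    # memory_block_size=128, gap_size=2 and num_memory_blocks=2, each query
+    # block attends to its own block plus two 128-wide blocks on each side,
+    # with a 2-position gap skipped between consecutive memory blocks.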
+ # [length, batch, heads, dim] + k_t = tf.transpose(k, [2, 0, 1, 3]) + v_t = tf.transpose(v, [2, 0, 1, 3]) + left_k = gather_dilated_memory_blocks( + k_t[:-k_v_padding, :, :, :], num_memory_blocks, gap_size, + query_block_size, memory_block_size, gather_indices) + left_v = gather_dilated_memory_blocks( + v_t[:-k_v_padding, :, :, :], num_memory_blocks, gap_size, + query_block_size, memory_block_size, gather_indices) + + right_k = gather_dilated_memory_blocks( + k_t[k_v_padding:, :, :, :], + num_memory_blocks, + gap_size, + query_block_size, + memory_block_size, + gather_indices, + direction="right") + right_v = gather_dilated_memory_blocks( + v_t[k_v_padding:, :, :, :], + num_memory_blocks, + gap_size, + query_block_size, + memory_block_size, + gather_indices, + direction="right") + + k_windows = tf.concat([left_k, self_k_part, right_k], axis=3) + v_windows = tf.concat([left_v, self_v_part, right_v], axis=3) + attention_bias = tf.expand_dims( + embedding_to_padding(k_windows) * -1e9, axis=-2) + + output = dot_product_attention( + q, + k_windows, + v_windows, + attention_bias, + dropout_rate=0., + name="dilated_1d", + make_image_summary=False) + output = tf.reshape(output, [batch_size, num_heads, -1, depth_v]) + + # Remove the padding if introduced. + output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1]) + output.set_shape(v_list_shape) + return output + + +def gather_dilated_memory_blocks(x, + num_memory_blocks, + gap_size, + query_block_size, + memory_block_size, + gather_indices, + direction="left"): + """Gathers blocks with gaps in between. + + Args: + x: Tensor of shape [length, batch, heads, depth] + num_memory_blocks: how many memory blocks to look in "direction". Each will + be separated by gap_size. + gap_size: an integer indicating the gap size + query_block_size: an integer indicating size of query block + memory_block_size: an integer indicating the size of a memory block. + gather_indices: The indices to gather from. + direction: left or right + + Returns: + Tensor of shape [batch, heads, blocks, block_length, depth] + """ + gathered_blocks = [] + # gathering memory blocks + for block_id in range(num_memory_blocks): + block_end_index = -(query_block_size + gap_size * + (block_id + 1) + memory_block_size * block_id) + block_start_index = ( + (memory_block_size + gap_size) * (num_memory_blocks - (block_id + 1))) + if direction != "left": + [block_end_index, + block_start_index] = [-block_start_index, -block_end_index] + if block_end_index == 0: + x_block = x[block_start_index:] + else: + x_block = x[block_start_index:block_end_index] + + def gather_dilated_1d_blocks(x, gather_indices): + x_new = tf.gather(x, gather_indices) + # [batch, heads, blocks, block_length, dim] + return tf.transpose(x_new, [2, 3, 0, 1, 4]) + + gathered_blocks.append(gather_dilated_1d_blocks(x_block, gather_indices)) + return tf.concat(gathered_blocks, 3) + + +def masked_dilated_self_attention_1d(q, + k, + v, + query_block_size=64, + memory_block_size=64, + gap_size=2, + num_memory_blocks=2, + name=None): + """Dilated self-attention. TODO(avaswani): Try it and write a paper on it. + + Args: + q: a Tensor with shape [batch, heads, length, depth] + k: a Tensor with shape [batch, heads, length, depth] + v: a Tensor with shape [batch, heads, length, depth] + query_block_size: an integer + memory_block_size: an integer indicating how much to look left. + gap_size: an integer indicating the gap size + num_memory_blocks: how many memory blocks to look at to the left. 
Each will + be separated by gap_size. + name: an optional string + + Returns: + a Tensor of shape [batch, heads, length, depth] + """ + with tf.variable_scope( + name, default_name="masked_dilated_self_attention_1d", values=[q, k, v]): + v_list_shape = v.get_shape().as_list() + assert v_list_shape == k.shape.as_list(), "K and V depths must be equal" + v_shape = common_layers.shape_list(v) + depth_v = v_shape[3] + batch_size = v_shape[0] + num_heads = v_shape[1] + original_length = common_layers.shape_list(q)[2] + + # Pad query, key, value to ensure multiple of corresponding lengths. + def pad_to_multiple(x, pad_length): + x_length = common_layers.shape_list(x)[2] + return tf.pad(x, [[0, 0], [0, 0], [0, -x_length % pad_length], [0, 0]]) + + def pad_l(x, left_pad_length): + return tf.pad(x, [[0, 0], [0, 0], [left_pad_length, 0], [0, 0]]) + + q = pad_to_multiple(q, query_block_size) + v = pad_to_multiple(v, query_block_size) + k = pad_to_multiple(k, query_block_size) + + # Set up query blocks. + new_q_shape = common_layers.shape_list(q) + q = reshape_by_blocks(q, new_q_shape, query_block_size) + + # Set up key and value windows. + self_k_part = reshape_by_blocks(k, new_q_shape, query_block_size) + self_v_part = reshape_by_blocks(v, new_q_shape, query_block_size) + k_v_padding = (gap_size + memory_block_size) * num_memory_blocks + k = pad_l(k, k_v_padding) + v = pad_l(v, k_v_padding) + + # Get gather indices. + index_length = (new_q_shape[2] - query_block_size + memory_block_size) + + indices = tf.range(0, index_length, delta=1, name="index_range") + indices = tf.reshape(indices, [1, -1, 1]) # [1, length, 1] for convs + kernel = tf.expand_dims(tf.eye(memory_block_size), axis=1) + gather_indices = tf.nn.conv1d( + tf.cast(indices, tf.float32), + kernel, + query_block_size, + padding="VALID", + name="gather_conv") + gather_indices = tf.squeeze(tf.cast(gather_indices, tf.int32), axis=0) + + # Get left and right memory blocks for each query. + # [length, batch, heads, dim] + k_t = tf.transpose(k, [2, 0, 1, 3]) + v_t = tf.transpose(v, [2, 0, 1, 3]) + + k_unmasked_windows = gather_dilated_memory_blocks( + k_t, num_memory_blocks, gap_size, query_block_size, memory_block_size, + gather_indices) + v_unmasked_windows = gather_dilated_memory_blocks( + v_t, num_memory_blocks, gap_size, query_block_size, memory_block_size, + gather_indices) + + # Combine memory windows. + block_q_shape = common_layers.shape_list(q) + masked_attention_bias = tf.tile( + tf.expand_dims(attention_bias_lower_triangle(query_block_size), axis=0), + [block_q_shape[0], block_q_shape[1], block_q_shape[2], 1, 1]) + padding_attention_bias = tf.expand_dims( + embedding_to_padding(k_unmasked_windows) * -1e9, axis=-2) + padding_attention_bias = tf.tile(padding_attention_bias, + [1, 1, 1, query_block_size, 1]) + attention_bias = tf.concat( + [masked_attention_bias, padding_attention_bias], axis=-1) + # combine memory windows + k_windows = tf.concat([self_k_part, k_unmasked_windows], 3) + v_windows = tf.concat([self_v_part, v_unmasked_windows], 3) + output = dot_product_attention( + q, + k_windows, + v_windows, + attention_bias, + dropout_rate=0., + name="dilated_1d", + make_image_summary=False) + output = tf.reshape(output, [batch_size, num_heads, -1, depth_v]) + + # Remove the padding if introduced. 
+    output = tf.slice(output, [0, 0, 0, 0], [-1, -1, original_length, -1])
+    output.set_shape(v_list_shape)
+    return output
+
+
+def local_attention_2d(q,
+                       k,
+                       v,
+                       query_shape=(8, 16),
+                       memory_flange=(8, 16),
+                       name=None):
+  """Strided block local self-attention.
+
+  The 2-D sequence is divided into 2-D blocks of shape query_shape. Each
+  position in a query block can attend to every position in its memory block,
+  which is the corresponding query block extended by memory_flange positions
+  along the height and width. No causal masking is applied within a block.
+
+  Args:
+    q: a Tensor with shape [batch, heads, h, w, depth_k]
+    k: a Tensor with shape [batch, heads, h, w, depth_k]
+    v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current
+      implementation, depth_v must be equal to depth_k.
+    query_shape: a tuple indicating the height and width of each query block.
+    memory_flange: a tuple indicating how much to look in height and width
+      around each query block.
+    name: an optional string
+
+  Returns:
+    a Tensor of shape [batch, heads, h, w, depth_v]
+  """
+  with tf.variable_scope(
+      name, default_name="local_self_attention_2d", values=[q, k, v]):
+    v_shape = common_layers.shape_list(v)
+
+    # Pad query, key, value to ensure multiple of corresponding lengths.
+    q = pad_to_multiple_2d(q, query_shape)
+    k = pad_to_multiple_2d(k, query_shape)
+    v = pad_to_multiple_2d(v, query_shape)
+    paddings = [[0, 0], [0, 0], [memory_flange[0], memory_flange[1]],
+                [memory_flange[0], memory_flange[1]], [0, 0]]
+    k = tf.pad(k, paddings)
+    v = tf.pad(v, paddings)
+
+    # Set up query blocks.
+    q_indices = gather_indices_2d(q, query_shape, query_shape)
+    q_new = gather_blocks_2d(q, q_indices)
+
+    # Set up key and value blocks.
+    memory_shape = (query_shape[0] + 2 * memory_flange[0],
+                    query_shape[1] + 2 * memory_flange[1])
+    k_and_v_indices = gather_indices_2d(k, memory_shape, query_shape)
+    k_new = gather_blocks_2d(k, k_and_v_indices)
+    v_new = gather_blocks_2d(v, k_and_v_indices)
+
+    attention_bias = tf.expand_dims(
+        tf.to_float(embedding_to_padding(k_new)) * -1e9, axis=-2)
+    output = dot_product_attention(
+        q_new,
+        k_new,
+        v_new,
+        attention_bias,
+        dropout_rate=0.,
+        name="local_2d",
+        make_image_summary=False)
+    # Put representations back into original shapes.
+    padded_q_shape = common_layers.shape_list(q)
+    output = scatter_blocks_2d(output, q_indices, padded_q_shape)
+
+    # Remove the padding if introduced.
+    output = tf.slice(output, [0, 0, 0, 0, 0],
+                      [-1, -1, v_shape[2], v_shape[3], -1])
+    return output
+
+
+def pad_to_multiple_2d(x, block_shape):
+  """Pads x so its height and width are multiples of block_shape.
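+
+  For example, a [2, 5, 7, 16] input with block_shape=(4, 4) is padded to
+  shape [2, 8, 8, 16].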
+ + Args: + x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor + block_shape: a 2-d list of integer shapes + + Returns: + padded_x: a [batch, heads, h, w, depth] or [batch, h, w, depth] tensor + """ + old_shape = x.get_shape().dims + last = old_shape[-1] + if len(old_shape) == 4: + height_padding = -common_layers.shape_list(x)[1] % block_shape[0] + width_padding = -common_layers.shape_list(x)[2] % block_shape[1] + paddings = [[0, 0], [0, height_padding], [0, width_padding], [0, 0]] + elif len(old_shape) == 5: + height_padding = -common_layers.shape_list(x)[2] % block_shape[0] + width_padding = -common_layers.shape_list(x)[3] % block_shape[1] + paddings = [[0, 0], [0, 0], [0, height_padding], [0, width_padding], [0, 0]] + + padded_x = tf.pad(x, paddings) + padded_shape = padded_x.get_shape().as_list() + padded_shape = padded_shape[:-1] + [last] + padded_x.set_shape(padded_shape) + return padded_x + + +def reshape_range(tensor, i, j, shape): + """Reshapes a tensor between dimensions i and j.""" + t_shape = common_layers.shape_list(tensor) + target_shape = t_shape[:i] + shape + t_shape[j:] + return tf.reshape(tensor, target_shape) + + +def gather_blocks_2d(x, indices): + """Gathers flattened blocks from x.""" + x_shape = common_layers.shape_list(x) + x = reshape_range(x, 2, 4, [tf.reduce_prod(x_shape[2:4])]) + # [length, batch, heads, dim] + x_t = tf.transpose(x, [2, 0, 1, 3]) + x_new = tf.gather(x_t, indices) + # returns [batch, heads, num_blocks, block_length ** 2, dim] + return tf.transpose(x_new, [2, 3, 0, 1, 4]) + + +def scatter_blocks_2d(x, indices, shape): + """scatters blocks from x into shape with indices.""" + x_shape = common_layers.shape_list(x) + # [length, batch, heads, dim] + x_t = tf.transpose( + tf.reshape(x, [x_shape[0], x_shape[1], -1, x_shape[-1]]), [2, 0, 1, 3]) + x_t_shape = common_layers.shape_list(x_t) + indices = tf.reshape(indices, [-1, 1]) + scattered_x = tf.scatter_nd(indices, x_t, x_t_shape) + scattered_x = tf.transpose(scattered_x, [1, 2, 0, 3]) + return tf.reshape(scattered_x, shape) + + +def gather_indices_2d(x, block_shape, block_stride): + """Getting gather indices.""" + # making an identity matrix kernel + kernel = tf.eye(block_shape[0] * block_shape[1]) + kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1]) + # making indices [1, h, w, 1] to appy convs + x_shape = common_layers.shape_list(x) + indices = tf.range(x_shape[2] * x_shape[3]) + indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1]) + indices = tf.nn.conv2d( + tf.cast(indices, tf.float32), + kernel, + strides=[1, block_stride[0], block_stride[1], 1], + padding="VALID") + # making indices [num_blocks, dim] to gather + dims = common_layers.shape_list(indices)[:3] + if all([isinstance(dim, int) for dim in dims]): + num_blocks = functools.reduce(operator.mul, dims, 1) + else: + num_blocks = tf.reduce_prod(dims) + indices = tf.reshape(indices, [num_blocks, -1]) + return tf.cast(indices, tf.int32) + + +def make_2d_block_raster_mask(query_shape, memory_flange): + """Creates a mask for 2d block raster scan. + + The query mask can look to the left, top left, top, and top right, but + not to the right. Inside the query, we have the standard raster scan + masking. 
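+
+  For example, with query_shape=(2, 2) and memory_flange=(2, 2), the returned
+  mask has shape [4, 24]: each of the 4 query positions attends over a 2x6
+  strip above the query block plus the two 6-wide rows that contain the query
+  block itself.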
+ Args: + query_shape: A tuple of ints (query_height, query_width) + memory_flange: A tuple of ints + (memory_flange_height, memory_flange_width) + + Returns: + A tensor of shape query_size, memory_size + """ + # mask inside the query block + query_triangle = common_layers.ones_matrix_band_part( + np.prod(query_shape), np.prod(query_shape), -1, 0) + split_query_masks = tf.split(query_triangle, query_shape[0], axis=1) + # adding mask for left and right + mask_pieces = [ + tf.concat( # pylint: disable=g-complex-comprehension + [tf.ones([np.prod(query_shape), memory_flange[1]]), + split_query_masks[i], + tf.zeros([np.prod(query_shape), memory_flange[1]])], + axis=1) for i in range(query_shape[0]) + ] + # adding mask for top + final_mask = tf.concat( + [ + tf.ones([ + np.prod(query_shape), + (query_shape[1] + 2 * memory_flange[1]) * memory_flange[0] + ]), + tf.concat(mask_pieces, axis=1) + ], + axis=1) + # 0.0 is visible location, 1.0 is masked. + return 1. - final_mask + + +def get_memory_region(x, query_block_shape, memory_flange, q_indices): + """Get the memory regions that surround a 2d query. + + The memory regions will be the left and top right. + + Args: + x: A tensor with shape [batch, heads, height, width, depth] + query_block_shape: a 2-d tuple of integers + memory_flange: a 2-d tuple of integers + q_indices: a tensor of indices for each of the center blocks. + [num_blocks, block_length] + Returns: + x_flange: A tensor of shape [batch, heads, #blocks, block_length, depth] + """ + # Padding x to be multiple of query_shape and then + # extracting the memory blocks from the same regions as the query blocks + x_query_padded = pad_to_multiple_2d(x, query_block_shape) + x_center = gather_blocks_2d(x_query_padded, q_indices) + # Then padding the flange region + paddings = [[0, 0], [0, 0], [memory_flange[0], 0], + [memory_flange[1], memory_flange[1]], [0, 0]] + x_memory_padded = tf.pad(x_query_padded, paddings) + left_x = None + top_x = None + # Extracting the memory regions around the query block. left_x_region extends + # to the left and the top_x_region is the combination of top left, top, and + # top right of the query block + # if no left region + if memory_flange[1] > 0: + left_x_region = x_memory_padded[:, :, memory_flange[ + 0]:, :-(query_block_shape[1] + memory_flange[1]), :] + left_memory_shape = (query_block_shape[0], memory_flange[1]) + left_indices = gather_indices_2d(left_x_region, left_memory_shape, + query_block_shape) + left_x = gather_blocks_2d(left_x_region, left_indices) + # if no top region + if memory_flange[0] > 0: + top_x_region = x_memory_padded[:, :, :-query_block_shape[0], :, :] + + top_memory_shape = (memory_flange[0], + query_block_shape[1] + 2 * memory_flange[1]) + + top_indices = gather_indices_2d(top_x_region, top_memory_shape, + query_block_shape) + + top_x = gather_blocks_2d(top_x_region, top_indices) + x_flange = None + if top_x is not None and left_x is not None: + x_flange = tf.concat([top_x, left_x], axis=3) + else: + x_flange = top_x if top_x is not None else left_x + return x_flange, x_center + + +def get_shifted_center_blocks(x, indices): + """Get right shifted blocks for masked local attention 2d. + + Args: + x: A tensor with shape [batch, heads, height, width, depth] + indices: The indices to gather blocks + + Returns: + x_shifted: a tensor of extracted blocks, each block right shifted along + length. 
+ """ + center_x = gather_blocks_2d(x, indices) + + # Shift right along the length dimension + def shift_right_2d_blocks(x): + """Shift the second to last dimension of x right by one.""" + shifted_targets = ( + tf.pad(x, [[0, 0], [0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :, :-1, :]) + return shifted_targets + + x_shifted = shift_right_2d_blocks(center_x) + return x_shifted + + +def right_shift_blockwise(x, query_shape, name=None): + """Right shifts once in every block. + + Args: + x: a tensor of shape [batch, height, width, depth] + query_shape: A 2d tuple of ints + name: a string + + Returns: + output: a tensor of the same shape as x + """ + with tf.variable_scope( + name, default_name="right_shift_blockwise", values=[x]): + x_list_shape = x.get_shape().as_list() + x_shape = common_layers.shape_list(x) + # Add a dummy dimension for heads. + x = tf.expand_dims(x, axis=1) + x = pad_to_multiple_2d(x, query_shape) + padded_x_shape = common_layers.shape_list(x) + # Set up q blocks. + x_indices = gather_indices_2d(x, query_shape, query_shape) + x_new = get_shifted_center_blocks(x, x_indices) + + # Put representations back into original shapes. + output = scatter_blocks_2d(x_new, x_indices, padded_x_shape) + # Remove the dummy head dimension. + output = tf.squeeze(output, axis=1) + # Remove the padding if introduced. + output = tf.slice(output, [0, 0, 0, 0], [-1, x_shape[1], x_shape[2], -1]) + output.set_shape(x_list_shape) + return output + + +def right_shift_blockwise_nd(x, block_shape): + """Right shift once in every block. + + Args: + x: a [batch, d1, d2, ..., dn, depth] tensor + block_shape: a tuple (q1, q2, ..., qn) representing the block shape + + Returns: + a [batch, d1, d2, ..., dn, depth] tensor, right shifted. + """ + blocked_x = break_into_blocks_nd(x, block_shape) + blocked_x_shape = common_layers.shape_list(blocked_x) + blocked_x = tf.reshape(blocked_x, + [blocked_x_shape[0], -1, blocked_x_shape[-1]]) + padded_x = tf.pad(blocked_x, [[0, 0], [1, 0], [0, 0]]) + x = tf.slice(padded_x, [0, 0, 0], + [-1, np.prod(blocked_x_shape[1:-1], dtype=np.int32), -1]) + x = tf.reshape(x, blocked_x_shape) + return put_back_blocks_nd(x, block_shape) + + +def masked_local_attention_2d(q, + k, + v, + query_shape=(8, 16), + memory_flange=(8, 16), + name=None): + """Strided block local self-attention. + + Each position in a query block can attend to all the generated queries in + the query block, which are generated in raster scan, and positions that are + generated to the left and top. The shapes are specified by query shape and + memory flange. Note that if you're using this function, you do not need to + right shift. Right shifting happens inside this function separately for each + block. + + Args: + q: a Tensor with shape [batch, heads, h, w, depth_k] + k: a Tensor with shape [batch, heads, h, w, depth_k] + v: a Tensor with shape [batch, heads, h, w, depth_v]. In the current + implementation, depth_v must be equal to depth_k. + query_shape: an tuple indicating the height and width of each query block. + query_shape = block_shape + memory_flange: an integer indicating how much to look in height and width + from each query block. + memory shape = query_shape + (block_flange[0], 2*block_flange[1]) + name: an optional string + + Returns: + a Tensor of shape [batch, heads, h, w, depth_v] + """ + with tf.variable_scope( + name, default_name="local_masked_self_attention_2d", values=[q, k, v]): + v_shape = common_layers.shape_list(v) + + # Pad query to ensure multiple of corresponding lengths. 
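+    # Only q is padded here; k and v are padded to the same query-aligned
+    # grid inside get_memory_region below.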
+ q = pad_to_multiple_2d(q, query_shape) + + # Set up query blocks. + q_indices = gather_indices_2d(q, query_shape, query_shape) + q_new = gather_blocks_2d(q, q_indices) + + # Set up key and value blocks. + k_flange, k_center = get_memory_region(k, query_shape, memory_flange, + q_indices) + v_flange, v_center = get_memory_region(v, query_shape, memory_flange, + q_indices) + if k_flange is not None: + k_new = tf.concat([k_flange, k_center], axis=3) + v_new = tf.concat([v_flange, v_center], axis=3) + else: + k_new = k_center + v_new = v_center + + # Set up the masks. + query_elements = np.prod(query_shape) + padding_mask = None + if k_flange is not None: + padding_mask = tf.expand_dims( + embedding_to_padding(k_flange) * -1e9, axis=-2) + padding_mask = tf.tile(padding_mask, [1, 1, 1, query_elements, 1]) + + center_attention_bias = attention_bias_lower_triangle( + np.prod(query_elements)) + center_attention_bias = tf.reshape( + center_attention_bias, [1, 1, 1, query_elements, query_elements]) + v_center_shape = common_layers.shape_list(v_center) + center_attention_bias = tf.tile( + center_attention_bias, + [v_center_shape[0], v_center_shape[1], v_center_shape[2], 1, 1]) + if padding_mask is not None: + # Combine the mask for padding and visible region. + attention_bias = tf.concat([padding_mask, center_attention_bias], axis=4) + else: + attention_bias = center_attention_bias + + output = dot_product_attention( + q_new, + k_new, + v_new, + attention_bias, + dropout_rate=0., + name="masked_local_2d", + make_image_summary=False) + # Put representations back into original shapes. + padded_q_shape = common_layers.shape_list(q) + output = scatter_blocks_2d(output, q_indices, padded_q_shape) + + # Remove the padding if introduced. + output = tf.slice(output, [0, 0, 0, 0, 0], + [-1, -1, v_shape[2], v_shape[3], -1]) + return output + + +def masked_local_attention_nd(q, + k, + v, + query_shape, + memory_flange, + decode_step=None, + name=None): + """Masked local attention nd. + + Each position in q can attend to positions in memory that are positioned less + than or equal to query position according to raster scan ordering and are in + the same memory block. A memory block is n-dimensional and each dimension 'i' + is of size q[i] + 2 * m[i] except for the first dimension which is of size + q[0] + m[0]. NOTE: This computation assumes memory_flange is divisible by + query_shape in every dimension. + + Args: + q: a [batch, heads, d1, d2, ..., dn, depth_k] tensor or a [batch, heads, 1, + 1, ..., 1, depth_k] tensor in decoding mode. + k: a [batch, heads, d1, d2, ..., dn, depth_k] tensor + v: a [batch, heads, d1, d2, ..., dn, depth_v] tensor + query_shape: a tuple (q1, q2, ..., qn) indicating the shape of query blocks. + memory_flange: a tuple (m1, m2, ..., mn) indicating the number of extra + positions in the attention memory. memory_shape=[q1 + m1, d2 + 2 * m2, + ..., dn + 2 * mn] + decode_step: an integer in fast decoding mode. + name: an optional string + + Returns: + a [batch, head, d1, d2, ..., dn, depth_v] tensor or + [batch, head, 1, 1, ..., 1, depth_v] if decode_step is not None. + """ + assert all([m % b == 0 for m, b in zip(memory_flange, query_shape)]) + with tf.variable_scope( + name, default_name="masked_local_attention_nd", values=[q, k, v]): + # This computation only applies to self attention, so assert q, k and v have + # the same dimensions. 
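+    # In fast decoding mode q holds a single position while k and v span the
+    # full context, so only k and v are checked against each other.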
+ if decode_step is None: + q.get_shape().assert_is_compatible_with(k.get_shape()) + q.get_shape()[:-1].assert_is_compatible_with(v.get_shape()[:-1]) + else: + k.get_shape().assert_is_compatible_with(v.get_shape()) + + # move heads to batch dimension. This is needed to reduce number of + # dimensions as much as possible, since most ops support only up to 7 + # dimensions. + q_shape = common_layers.shape_list(q) + k_shape = common_layers.shape_list(k) + v_shape = common_layers.shape_list(v) + q = tf.reshape(q, [-1] + q_shape[2:]) + k = tf.reshape(k, [-1] + k_shape[2:]) + v = tf.reshape(v, [-1] + v_shape[2:]) + + # Pad query, key, value to ensure multiple of corresponding lengths. + if decode_step is None: + # don't pad query in fast decoding mode. We only need to calculate self + # attention for one position. + q = pad_to_multiple_nd(q, query_shape) + k = pad_to_multiple_nd(k, query_shape) + v = pad_to_multiple_nd(v, query_shape) + + # extract query and memory blocks + if decode_step is None: + q = break_into_blocks_nd(q, query_shape) + else: + # in fast decoding, q has 1 block with 1 item in it + # q shape will be [batch] + [1] * n + [1, depth] which is equivalent of + # [batch, b1, b2, ..., bn, items_in_block, depth] where there is 1 block + # and 1 item in that block + q = tf.reshape(q, [-1] + [1] * (len(q_shape) - 3) + [q_shape[-1]]) + k = break_into_memory_blocks_nd(k, query_shape, memory_flange, masked=True) + v = break_into_memory_blocks_nd(v, query_shape, memory_flange, masked=True) + + # extract just one block of k and v in fast decoding mode. + if decode_step is not None: + k = select_block_for_decode_step(k, decode_step, query_shape) + v = select_block_for_decode_step(v, decode_step, query_shape) + + # flatten q, k and v to [batch, num_blocks, items_in_block, depth] + q, blocks_per_dim = flatten_blocks_nd(q) + k, _ = flatten_blocks_nd(k) + v, _ = flatten_blocks_nd(v) + + # make attention bias for causal attention. + causal_attn_bias = causal_attention_bias_nd( + query_shape, memory_flange, decode_step=decode_step) + padding_attn_bias = tf.expand_dims( + embedding_to_padding(v[:1, :, :, :]) * -1e9, axis=-2) + + if decode_step is None: + num_blocks = common_layers.shape_list(v)[1] + causal_attn_bias = tf.tile(causal_attn_bias, [1, num_blocks, 1, 1]) + padding_attn_bias = tf.tile( + padding_attn_bias, + [1, 1, np.prod(query_shape, dtype=np.int32), 1]) + attn_bias = tf.minimum(causal_attn_bias, padding_attn_bias) + + # Calculate dot product attention + output = dot_product_attention( + q, + k, + v, + attn_bias, + dropout_rate=0., + name=name or "masked_local_nd", + make_image_summary=False) + + # restructure the output from blocks ordering to the original ordering + output = unflatten_blocks_nd(output, blocks_per_dim) + if decode_step is None: + # In fast decoding, output only contains one element, this is not needed. + output = put_back_blocks_nd(output, query_shape) + + # bring back the heads dimension + output_shape = common_layers.shape_list(output) + output = tf.reshape(output, q_shape[:2] + output_shape[1:]) + if decode_step is None: + # No padding is introduced in fast decoding, no need to do this. + output_shape = common_layers.shape_list(output) + output = tf.slice(output, [0] * len(output_shape), + [-1, -1] + q_shape[2:-1] + [-1]) + return output + + +def select_block_for_decode_step(blocked_x, decode_step, query_shape): + """Selects one block from `x` that contains position `decode_step`. + + NOTE: This method only works for blocked inputs. 
It selects one block around + `decode_step` position in blocked raster scan order. + + Args: + blocked_x: a [batch, blocks_per_d1, ..., blocks_per_dn, b1 * ...* bn, depth] + tensor + decode_step: an integer + query_shape: a tuple (q1, q2, ..., qn) representing query shape + + Returns: + a [batch, [1] * n, b1 * ... * bn, depth] tensor + """ + blocked_x_shape = common_layers.shape_list(blocked_x) + # calculate the shape of the normal x + x_shape = [b * q for b, q in zip(blocked_x_shape[1:-2], query_shape)] + # Get the position of `decode_step` element in the unblocked x. + index = decode_step_to_index(decode_step, query_shape, x_shape) + # Convert it to the blocked positions. + blocked_index = [i // q for i, q in zip(index, query_shape)] + # TPU needs size to be non negative for the case when begin is not + # compile-time constants. + return tf.slice(blocked_x, [0] + blocked_index + [0, 0], + [blocked_x_shape[0]] + [1] * len(blocked_index) + + blocked_x_shape[-2:]) + + +def flatten_blocks_nd(x): + """Flattens blocks of the input tensor. + + Args: + x: a [batch, b1, ..., bn, items_in_block, depth] tensor + + Returns: + a flattened tensor of shape [batch, b1 * ...* bm, items_in_block, depth] + a list of [b1, ..., bn] which is used for unflattening. + """ + x_shape = common_layers.shape_list(x) + num_blocks = np.prod(x_shape[1:-2], dtype=np.int32) + return tf.reshape(x, [-1, num_blocks] + x_shape[-2:]), x_shape[1:-2] + + +def unflatten_blocks_nd(x, blocks_per_dimension): + """Converts a flattened tensor into a normal blocked tensor. + + Args: + x: a [batch, d1 * ... dn, items_in_block, depth] tensor + blocks_per_dimension: a n-d list of integers for number of blocks in each + dimension. + + Returns: + a [batch, d1, d2, ..., dn, items_in_block, depth] tensor + """ + x_shape = common_layers.shape_list(x) + assert x_shape[1] == np.prod(blocks_per_dimension, dtype=np.int32) + return tf.reshape(x, [-1] + list(blocks_per_dimension) + x_shape[-2:]) + + +def break_into_memory_blocks_nd(x, query_shape, memory_flange, masked=False): + """Break a tensor into memory blocks around query blocks. + + This requires memory_flange to be divisible by query_shape in every dimension. + + Args: + x: a [batch, d1, d2, ..., dn, depth] tensor + query_shape: a n-d list of integers representing query shape + memory_flange: an n-d list of integers representing memory flange. + masked: a boolean for masked vs unmasked attention. + + Returns: + a [batch, blocks_per_d1, ..., blocks_per_dn, b1 * ...* bn, depth] where bi + is the memory block size in dimension i which is equal to q[i] + 2m[i] or + q[i] + m[i] if masked attention and i = 1. + """ + assert all([m % b == 0 for b, m in zip(query_shape, memory_flange)]) + + original_x_shape = common_layers.shape_list(x) + # calculate the total number of query blocks in each dimension + blocks_in_memory_flange = [m // b for b, m in zip(query_shape, memory_flange)] + num_query_blocks = [ + l // q for l, q in zip(original_x_shape[1:-1], query_shape) + ] + # pad x to have enough items on the corners to form the memory blocks. + if masked: + # Only pad the beginning of first dimension in masked mode. + x = tf.pad(x, [[0, 0], [memory_flange[0], 0]] + + [[p, p] for p in memory_flange[1:]] + [[0, 0]]) + else: + x = tf.pad(x, [[0, 0]] + [[p, p] for p in memory_flange] + [[0, 0]]) + + query_blocks = break_into_blocks_nd(x, query_shape) + # stitch query blocks together to form memory blocks of the desired size. 
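+  # For example, in 1-d with query_shape=(4,) and memory_flange=(4,), each
+  # memory block is stitched from 3 query-sized slices (2 in masked mode):
+  # the query block itself plus one flange block on each side (only the left
+  # side in masked mode).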
+ start_indices_per_dimension = [] + for dimension, blocks in enumerate(blocks_in_memory_flange): + if masked and dimension == 0: + # num blocks for first dimension in masked mode is blocks + 1 + size = blocks + 1 + else: + size = 2 * blocks + 1 + start_indices_per_dimension.append(range(size)) + + slices = [] + for start_indices in itertools.product(*start_indices_per_dimension): + start = [0] + list(start_indices) + [0, 0] + size = [-1] + num_query_blocks + [-1, -1] + s = tf.slice(query_blocks, start, size) + slices.append(s) + # concat slices in their query block dimension to form the full memory blocks + return tf.concat(slices, axis=-2) + + +def break_into_blocks_nd(x, block_shape): + """Break input tensor into blocks of `block_shape`. + + Args: + x: a [batch, d1, d2, ..., dn, depth] tensor + block_shape: a n-d list of integers representing block shape + + Returns: + a [batch, d1//block1, ..., dn//blockn, block1 *... * blockn, depth] tensor + """ + x_shape = common_layers.shape_list(x) + assert all([l % b == 0 for l, b in zip(x_shape[1:], block_shape)]) + blocks_per_dimension = [l // b for l, b in zip(x_shape[1:], block_shape)] + # reshape to [-1, d1 // block1, block1, ..., dn // blockn, blockn, depth] + reshape_to = list( + itertools.chain.from_iterable(zip(blocks_per_dimension, block_shape))) + x = tf.reshape(x, [-1] + reshape_to + x_shape[-1:]) + # transpose dimensions to bring the n-d blocks in consecutive dimensions. + block_dimensions_index = [2 * (i + 1) for i in range(len(block_shape))] + x = tf.transpose(x, [0] + [i - 1 for i in block_dimensions_index] + + block_dimensions_index + [2 * len(block_shape) + 1]) + return tf.reshape(x, [-1] + blocks_per_dimension + + [np.prod(block_shape, dtype=np.int32)] + x_shape[-1:]) + + +def put_back_blocks_nd(x, block_shape): + """Restructure input tensor from blocks to normal ordering. + + Args: + x: a [batch, b1, ..., bn, items_in_block, depth] tensor + block_shape: a n-d list of integers representing block shape. + + Returns: + a [batch, d1, ..., dn, depth] where blocks are put back to form the + original tensor. + """ + x_shape = common_layers.shape_list(x) + assert x_shape[-2] == np.prod(block_shape) + x = tf.reshape(x, x_shape[:-2] + list(block_shape) + x_shape[-1:]) + block_dimension_index = [i + 1 for i in range(len(block_shape))] + block_shape_index = [b + len(block_shape) for b in block_dimension_index] + interleaved_dimensions = list( + itertools.chain.from_iterable( + zip(block_dimension_index, block_shape_index))) + x = tf.transpose(x, [0] + interleaved_dimensions + [2 * len(block_shape) + 1]) + x_shape = common_layers.shape_list(x) + x = tf.reshape(x, [-1] + [ + x_shape[2 * i + 1] * x_shape[2 * i + 2] for i in range(len(block_shape)) + ] + x_shape[-1:]) + return x + + +def pad_to_multiple_nd(x, block_shape): + """Making sure x is a multiple of shape. + + Args: + x: a [batch, d1, d2, ..., dn, depth] tensor + block_shape: a n-d list of integers representing block shape + + Returns: + padded x where each dimension is a multiple of corresponding block length. + """ + shape = common_layers.shape_list(x) + paddings = [-l % b for l, b in zip(shape[1:-1], block_shape)] + return tf.pad(x, [[0, 0]] + [[0, p] for p in paddings] + [[0, 0]]) + + +def causal_attention_bias_nd(query_shape, memory_flange, decode_step=None): + """Creates causal attention bias for local nd attention. + + This assumes memory_flange is divisible by query_shape in every dimension. 
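+
+  For example, with query_shape=(2, 2) and memory_flange=(2, 2) the bias has
+  shape [1, 1, 4, 24]: the four previous blocks are fully visible, the center
+  block gets a lower-triangular mask, and the single future block is fully
+  masked.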
+ + Args: + query_shape: a n-d list of integers representing query shape + memory_flange: a n-d list of integers representing memory flange + decode_step: an integer + + Returns: + a [1, 1, query_items, memory_items] tensor for masked attention bias or + a [1, 1, 1, memory_items] tensor if decode_step is not None. + """ + assert all([m % q == 0 for q, m in zip(query_shape, memory_flange)]) + blocks_per_memory_flange = [ + m // q for q, m in zip(query_shape, memory_flange) + ] + # previous blocks will be half the number of all blocks if we select blocks + # to the left and right of center block in every dimension. + prev_blocks = np.prod([2 * b + 1 for b in blocks_per_memory_flange], + dtype=np.int32) // 2 + all_blocks = np.prod( + [blocks_per_memory_flange[0] + 1] + + [2 * b + 1 for b in blocks_per_memory_flange[1:]], + dtype=np.int32) + future_blocks = all_blocks - prev_blocks - 1 + # add unmasked biases for all prev blocks and a lower triangle for the center + # block and all masked for future blocks. + items_in_block = np.prod(query_shape, dtype=np.int32) + items_in_query = items_in_block if decode_step is None else 1 + prev_blocks_attn = tf.zeros( + [1, 1, items_in_query, prev_blocks * items_in_block]) + + # add mask for the center block + if decode_step is None: + center_block_attn = attention_bias_lower_triangle(items_in_block) + else: + step_in_block = decode_step % items_in_block + cond = tf.reshape( + tf.less_equal(tf.range(items_in_block, dtype=tf.int32), step_in_block), + [1, 1, items_in_query, items_in_block]) + center_block_attn = tf.where( + cond, tf.zeros([1, 1, items_in_query, items_in_block]), + -1e9 * tf.ones([1, 1, items_in_query, items_in_block])) + + # add mask for all future blocks + future_blocks_attn = -1e9 * tf.ones( + [1, 1, items_in_query, future_blocks * items_in_block]) + return tf.concat([prev_blocks_attn, center_block_attn, future_blocks_attn], + axis=3) + + +def compute_attention_component(antecedent, + total_depth, + filter_width=1, + padding="VALID", + name="c", + vars_3d_num_heads=0, + layer_collection=None): + """Computes attention component (query, key or value). + + Args: + antecedent: a Tensor with shape [batch, length, channels] + total_depth: an integer + filter_width: An integer specifying how wide you want the attention + component to be. + padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. + name: a string specifying scope name. + vars_3d_num_heads: an optional integer (if we want to use 3d variables) + layer_collection: A tensorflow_kfac.LayerCollection. Only used by the + KFAC optimizer. Default is None. 
+ + Returns: + c : [batch, length, depth] tensor + """ + if layer_collection is not None: + if filter_width != 1 or vars_3d_num_heads != 0: + raise ValueError( + "KFAC implementation only supports filter_width=1 (actual: {}) and " + "vars_3d_num_heads=0 (actual: {}).".format( + filter_width, vars_3d_num_heads)) + if vars_3d_num_heads is not None and vars_3d_num_heads > 0: + assert filter_width == 1 + input_depth = antecedent.get_shape().as_list()[-1] + depth_per_head = total_depth // vars_3d_num_heads + initializer_stddev = input_depth ** -0.5 + if "q" in name: + initializer_stddev *= depth_per_head ** -0.5 + var = tf.get_variable( + name, [input_depth, + vars_3d_num_heads, + total_depth // vars_3d_num_heads], + initializer=tf.random_normal_initializer(stddev=initializer_stddev)) + var = tf.cast(var, antecedent.dtype) + var = tf.reshape(var, [input_depth, total_depth]) + return tf.tensordot(antecedent, var, axes=1) + if filter_width == 1: + return common_layers.dense( + antecedent, total_depth, use_bias=False, name=name, + layer_collection=layer_collection) + else: + return common_layers.conv1d( + antecedent, total_depth, filter_width, padding=padding, name=name) + + +def compute_qkv(query_antecedent, + memory_antecedent, + total_key_depth, + total_value_depth, + q_filter_width=1, + kv_filter_width=1, + q_padding="VALID", + kv_padding="VALID", + vars_3d_num_heads=0, + layer_collection=None): + """Computes query, key and value. + + Args: + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: a Tensor with shape [batch, length_m, channels] + total_key_depth: an integer + total_value_depth: an integer + q_filter_width: An integer specifying how wide you want the query to be. + kv_filter_width: An integer specifying how wide you want the keys and values + to be. + q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. + kv_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. + vars_3d_num_heads: an optional (if we want to use 3d variables) + layer_collection: A tensorflow_kfac.LayerCollection. Only used by the + KFAC optimizer. Default is None. 
+ + Returns: + q, k, v : [batch, length, depth] tensors + """ + if memory_antecedent is None: + memory_antecedent = query_antecedent + q = compute_attention_component( + query_antecedent, + total_key_depth, + q_filter_width, + q_padding, + "q", + vars_3d_num_heads=vars_3d_num_heads, + layer_collection=layer_collection) + k = compute_attention_component( + memory_antecedent, + total_key_depth, + kv_filter_width, + kv_padding, + "k", + vars_3d_num_heads=vars_3d_num_heads, + layer_collection=layer_collection) + v = compute_attention_component( + memory_antecedent, + total_value_depth, + kv_filter_width, + kv_padding, + "v", + vars_3d_num_heads=vars_3d_num_heads, + layer_collection=layer_collection) + return q, k, v + + +def multihead_attention(query_antecedent, + memory_antecedent, + bias, + total_key_depth, + total_value_depth, + output_depth, + num_heads, + dropout_rate, + attention_type="dot_product", + max_relative_position=None, + heads_share_relative_embedding=False, + add_relative_to_values=False, + image_shapes=None, + block_length=128, + block_width=128, + q_filter_width=1, + kv_filter_width=1, + q_padding="VALID", + kv_padding="VALID", + cache=None, + gap_size=0, + num_memory_blocks=2, + name="multihead_attention", + save_weights_to=None, + make_image_summary=True, + dropout_broadcast_dims=None, + vars_3d=False, + layer_collection=None, + recurrent_memory=None, + chunk_number=None, + hard_attention_k=0, + gumbel_noise_weight=0.0, + max_area_width=1, + max_area_height=1, + memory_height=1, + area_key_mode="mean", + area_value_mode="sum", + training=True, + **kwargs): + """Multihead scaled-dot-product attention with input/output transformations. + + Args: + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: a Tensor with shape [batch, length_m, channels] or None + bias: bias Tensor (see attention_bias()) + total_key_depth: an integer + total_value_depth: an integer + output_depth: an integer + num_heads: an integer dividing total_key_depth and total_value_depth + dropout_rate: a floating point number + attention_type: a string, either "dot_product", "dot_product_relative", + "local_mask_right", "local_unmasked", "masked_dilated_1d", + "unmasked_dilated_1d", graph, or any attention function + with the signature (query, key, value, **kwargs) + max_relative_position: Maximum distance between inputs to generate + unique relation embeddings for. Only relevant + when using "dot_product_relative" attention. + heads_share_relative_embedding: boolean to share relative embeddings + add_relative_to_values: a boolean for whether to add relative component to + values. + image_shapes: optional tuple of integer scalars. + see comments for attention_image_summary() + block_length: an integer - relevant for "local_mask_right" + block_width: an integer - relevant for "local_unmasked" + q_filter_width: An integer specifying how wide you want the query to be. + kv_filter_width: An integer specifying how wide you want the keys and values + to be. + q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding. + kv_padding: One of "VALID", "SAME" or "LEFT". Default is "VALID": + no padding. + cache: dict containing Tensors which are the results of previous + attentions, used for fast decoding. Expects the dict to contrain two + keys ('k' and 'v'), for the initial call the values for these keys + should be empty Tensors of the appropriate shape. 
+      'k' [batch_size, 0, key_channels]
+      'v' [batch_size, 0, value_channels]
+    gap_size: Integer option for dilated attention to indicate spacing between
+      memory blocks.
+    num_memory_blocks: Integer option to indicate how many memory blocks to look
+      at.
+    name: an optional string.
+    save_weights_to: an optional dictionary to capture attention weights
+      for visualization; the weights tensor will be appended there under
+      a string key created from the variable scope (including name).
+    make_image_summary: Whether to make an attention image summary.
+    dropout_broadcast_dims: an optional list of integers less than 4
+      specifying in which dimensions to broadcast the dropout decisions.
+      Saves memory.
+    vars_3d: use 3-dimensional variables for input/output transformations
+    layer_collection: A tensorflow_kfac.LayerCollection. Only used by the
+      KFAC optimizer. Default is None.
+    recurrent_memory: An optional transformer_memory.RecurrentMemory, which
+      retains state across chunks. Default is None.
+    chunk_number: an optional integer Tensor with shape [batch] used to operate
+      the recurrent_memory.
+    hard_attention_k: integer, if > 0 triggers hard attention (picking top-k).
+    gumbel_noise_weight: if > 0, apply Gumbel noise with weight
+      `gumbel_noise_weight` before picking top-k. This is a no-op if
+      hard_attention_k <= 0.
+    max_area_width: the max width allowed for an area.
+    max_area_height: the max height allowed for an area.
+    memory_height: the height of the memory.
+    area_key_mode: the mode for computing area keys, which can be "mean",
+      "concat", "sum", "sample_concat", and "sample_sum".
+    area_value_mode: the mode for computing area values, which can be either
+      "mean", or "sum".
+    training: indicating if it is in the training mode.
+    **kwargs (dict): Parameters for the attention function.
+
+  Caching:
+    WARNING: For decoder self-attention, i.e. when memory_antecedent == None,
+    the caching assumes that the bias contains future masking.
+
+    The caching works by saving all the previous key and value values so that
+    you are able to send just the last query location to this attention
+    function. I.e. if the cache dict is provided it assumes the query is of the
+    shape [batch_size, 1, hidden_dim] rather than the full memory.
+
+  Returns:
+    The result of the attention transformation. The output shape is
+        [batch_size, length_q, hidden_dim]
+    unless the cache dict is provided in which case only the last memory
+    position is calculated and the output shape is [batch_size, 1, hidden_dim].
+    Optionally returns additional loss parameters (ex: load balance loss for
+    the experts) returned by the attention_type function.
+
+  Raises:
+    ValueError: if the key depth or value depth are not divisible by the
+      number of attention heads.
+  """
+  if total_key_depth % num_heads != 0:
+    raise ValueError("Key depth (%d) must be divisible by the number of "
+                     "attention heads (%d)." % (total_key_depth, num_heads))
+  if total_value_depth % num_heads != 0:
+    raise ValueError("Value depth (%d) must be divisible by the number of "
+                     "attention heads (%d)."
% (total_value_depth, num_heads)) + vars_3d_num_heads = num_heads if vars_3d else 0 + + if layer_collection is not None: + if cache is not None: + raise ValueError("KFAC implementation only supports cache is None.") + if vars_3d: + raise ValueError("KFAC implementation does not support 3d vars.") + + if recurrent_memory is not None: + if memory_antecedent is not None: + raise ValueError("Recurrent memory requires memory_antecedent is None.") + if cache is not None: + raise ValueError("Cache is not supported when using recurrent memory.") + if vars_3d: + raise ValueError("3d vars are not supported when using recurrent memory.") + if layer_collection is not None: + raise ValueError("KFAC is not supported when using recurrent memory.") + if chunk_number is None: + raise ValueError("chunk_number is required when using recurrent memory.") + + with tf.variable_scope(name, default_name="multihead_attention", + values=[query_antecedent, memory_antecedent]): + + if recurrent_memory is not None: + ( + recurrent_memory_transaction, + query_antecedent, memory_antecedent, bias, + ) = recurrent_memory.pre_attention( + chunk_number, + query_antecedent, memory_antecedent, bias, + ) + + if cache is None or memory_antecedent is None: + q, k, v = compute_qkv(query_antecedent, memory_antecedent, + total_key_depth, total_value_depth, q_filter_width, + kv_filter_width, q_padding, kv_padding, + vars_3d_num_heads=vars_3d_num_heads, + layer_collection=layer_collection) + if cache is not None: + if attention_type not in ["dot_product", "dot_product_relative"]: + # TODO(petershaw): Support caching when using relative position + # representations, i.e. "dot_product_relative" attention. + raise NotImplementedError( + "Caching is not guaranteed to work with attention types other than" + " dot_product.") + if bias is None: + raise ValueError("Bias required for caching. See function docstring " + "for details.") + + if memory_antecedent is not None: + # Encoder-Decoder Attention Cache + q = compute_attention_component(query_antecedent, total_key_depth, + q_filter_width, q_padding, "q", + vars_3d_num_heads=vars_3d_num_heads) + k = cache["k_encdec"] + v = cache["v_encdec"] + else: + k = split_heads(k, num_heads) + v = split_heads(v, num_heads) + decode_loop_step = kwargs.get("decode_loop_step") + if decode_loop_step is None: + k = cache["k"] = tf.concat([cache["k"], k], axis=2) + v = cache["v"] = tf.concat([cache["v"], v], axis=2) + else: + # Inplace update is required for inference on TPU. + # Inplace_ops only supports inplace_update on the first dimension. 
+ # The performance of current implementation is better than updating + # the tensor by adding the result of matmul(one_hot, + # update_in_current_step) + tmp_k = tf.transpose(cache["k"], perm=[2, 0, 1, 3]) + tmp_k = inplace_ops.alias_inplace_update( + tmp_k, decode_loop_step, tf.squeeze(k, axis=2)) + k = cache["k"] = tf.transpose(tmp_k, perm=[1, 2, 0, 3]) + tmp_v = tf.transpose(cache["v"], perm=[2, 0, 1, 3]) + tmp_v = inplace_ops.alias_inplace_update( + tmp_v, decode_loop_step, tf.squeeze(v, axis=2)) + v = cache["v"] = tf.transpose(tmp_v, perm=[1, 2, 0, 3]) + + q = split_heads(q, num_heads) + if cache is None: + k = split_heads(k, num_heads) + v = split_heads(v, num_heads) + + key_depth_per_head = total_key_depth // num_heads + if not vars_3d: + q *= key_depth_per_head**-0.5 + + additional_returned_value = None + if callable(attention_type): # Generic way to extend multihead_attention + x = attention_type(q, k, v, **kwargs) + if isinstance(x, tuple): + x, additional_returned_value = x # Unpack + elif attention_type == "dot_product": + if max_area_width > 1 or max_area_height > 1: + x = area_attention.dot_product_area_attention( + q, k, v, bias, dropout_rate, image_shapes, + save_weights_to=save_weights_to, + dropout_broadcast_dims=dropout_broadcast_dims, + max_area_width=max_area_width, + max_area_height=max_area_height, + memory_height=memory_height, + area_key_mode=area_key_mode, + area_value_mode=area_value_mode, + training=training) + else: + x = dot_product_attention( + q, k, v, bias, dropout_rate, image_shapes, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=dropout_broadcast_dims, + activation_dtype=kwargs.get("activation_dtype"), + hard_attention_k=hard_attention_k, + gumbel_noise_weight=gumbel_noise_weight) + elif attention_type == "dot_product_relative": + x = dot_product_attention_relative( + q, + k, + v, + bias, + max_relative_position, + dropout_rate, + image_shapes, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + cache=cache is not None, + allow_memory=recurrent_memory is not None, + hard_attention_k=hard_attention_k, + gumbel_noise_weight=gumbel_noise_weight) + elif attention_type == "dot_product_unmasked_relative_v2": + x = dot_product_unmasked_self_attention_relative_v2( + q, + k, + v, + bias, + max_relative_position, + dropout_rate, + image_shapes, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=dropout_broadcast_dims, + heads_share_relative_embedding=heads_share_relative_embedding, + add_relative_to_values=add_relative_to_values) + elif attention_type == "dot_product_relative_v2": + x = dot_product_self_attention_relative_v2( + q, + k, + v, + bias, + max_relative_position, + dropout_rate, + image_shapes, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=dropout_broadcast_dims, + heads_share_relative_embedding=heads_share_relative_embedding, + add_relative_to_values=add_relative_to_values) + elif attention_type == "local_within_block_mask_right": + x = masked_within_block_local_attention_1d( + q, k, v, block_length=block_length) + elif attention_type == "local_relative_mask_right": + x = masked_relative_local_attention_1d( + q, + k, + v, + block_length=block_length, + make_image_summary=make_image_summary, + dropout_rate=dropout_rate, + heads_share_relative_embedding=heads_share_relative_embedding, + add_relative_to_values=add_relative_to_values, + 
name="masked_relative_local_attention_1d") + elif attention_type == "local_mask_right": + x = masked_local_attention_1d( + q, + k, + v, + block_length=block_length, + make_image_summary=make_image_summary) + elif attention_type == "local_unmasked": + x = local_attention_1d( + q, k, v, block_length=block_length, filter_width=block_width) + elif attention_type == "masked_dilated_1d": + x = masked_dilated_self_attention_1d(q, k, v, block_length, block_width, + gap_size, num_memory_blocks) + else: + assert attention_type == "unmasked_dilated_1d" + x = dilated_self_attention_1d(q, k, v, block_length, block_width, + gap_size, num_memory_blocks) + x = combine_heads(x) + + # Set last dim specifically. + x.set_shape(x.shape.as_list()[:-1] + [total_value_depth]) + + if vars_3d: + o_var = tf.get_variable( + "o", [num_heads, total_value_depth // num_heads, output_depth]) + o_var = tf.cast(o_var, x.dtype) + o_var = tf.reshape(o_var, [total_value_depth, output_depth]) + x = tf.tensordot(x, o_var, axes=1) + else: + x = common_layers.dense( + x, output_depth, use_bias=False, name="output_transform", + layer_collection=layer_collection) + + if recurrent_memory is not None: + x = recurrent_memory.post_attention(recurrent_memory_transaction, x) + if additional_returned_value is not None: + return x, additional_returned_value + return x + + +def multihead_attention_2d(query_antecedent, + memory_antecedent, + total_key_depth, + total_value_depth, + output_depth, + num_heads, + attention_type="local_attention_2d", + query_shape=(8, 16), + memory_flange=(8, 16), + name=None): + """2d Multihead scaled-dot-product attention with inp/output transformations. + + Args: + query_antecedent: a Tensor with shape [batch, h, w, depth_k] + memory_antecedent: a Tensor with shape [batch, h, w, depth_k] + total_key_depth: an integer + total_value_depth: an integer + output_depth: an integer + num_heads: an integer dividing total_key_depth and total_value_depth + attention_type: String, type of attention function to use. + query_shape: an tuple indicating the height and width of each query block. + memory_flange: an integer indicating how much to look in height and width + name: an optional string + + Returns: + A Tensor of shape [batch, h, w, output_depth] + + Raises: + ValueError: if the key depth or value depth are not divisible by the + number of attention heads. + """ + if total_key_depth % num_heads != 0: + raise ValueError("Key depth (%d) must be divisible by the number of " + "attention heads (%d)." % (total_key_depth, num_heads)) + if total_value_depth % num_heads != 0: + raise ValueError("Value depth (%d) must be divisible by the number of " + "attention heads (%d)." 
% (total_value_depth, num_heads)) + with tf.variable_scope( + name, + default_name="multihead_attention_2d", + values=[query_antecedent, memory_antecedent]): + q, k, v = compute_qkv(query_antecedent, memory_antecedent, total_key_depth, + total_value_depth) + # after splitting, shape is [batch, heads, h, w, depth] + q = split_heads_2d(q, num_heads) + k = split_heads_2d(k, num_heads) + v = split_heads_2d(v, num_heads) + key_depth_per_head = total_key_depth // num_heads + q *= key_depth_per_head**-0.5 + if attention_type == "local_attention_2d": + x = local_attention_2d( + q, k, v, query_shape=query_shape, memory_flange=memory_flange) + elif attention_type == "masked_local_attention_2d": + assert attention_type == "masked_local_attention_2d" + x = masked_local_attention_2d( + q, k, v, query_shape=query_shape, memory_flange=memory_flange) + else: + assert attention_type == "unmasked_local_attention_2d_tpu" + x = dot_product_unmasked_attention_local_2d_tpu( + q, k, v, None, max_relative_position=None, query_shape=query_shape) + x = combine_heads_2d(x) + x = common_layers.dense( + x, output_depth, use_bias=False, name="output_transform") + return x + + +def multihead_attention_nd(query_antecedent, + memory_antecedent, + total_key_depth, + total_value_depth, + output_depth, + num_heads, + query_shape, + memory_flange, + masked=False, + cache=None, + decode_step=None, + name=None): + """n-d Multihead scaled-dot-product attention with in/output transformations. + + Args: + query_antecedent: a Tensor with shape [batch, d1, ..., dn, depth_q] or + [batch, 1, ..., 1, depth_q] if in fast decoding mode. + memory_antecedent: a Tensor with shape [batch, d1, ..., dn, depth_m] or None + for self attention. + total_key_depth: an integer + total_value_depth: an integer + output_depth: an integer + num_heads: an integer dividing total_key_depth and total_value_depth + query_shape: an tuple indicating the dimensions of each query block. + memory_flange: an integer indicating how much to look around a query block + in each dimension + masked: a boolean to specify whether to do masked or unmasked attention. + cache: a dict like: { + 'k': [batch, num_heads, d1, ..., dn, depth_k // num_heads], + 'v': [batch, num_heads, d1, ..., dn, depth_v // num_heads]} Caller should + initially pass zero tensors for `decode_step` == 0. This method will + update cache and caller should pass the same cache in consecutive calls. + This works for both GPU and TPU inference. Caller should pass the latest + query via `query_antecedent`. `memory_antecedent` should be None in this + case, since auto-regressive decoding only applies to self attention. + decode_step: integer to pass in decoding mode. `cache` and `decode_step` + should both be set in decoding mode. Caller can also pass an empty `cache` + without `decode_step`, for this method to initialize the cache for future + calls with `decode_step` > 0. + name: an optional string + + Returns: + A Tensor of shape [batch, d1, ..., dn, output_depth] or + [batch, 1, ..., 1, output_depth] if decode_step is set. + + Raises: + ValueError: if the key depth or value depth are not divisible by the + number of attention heads. + """ + if total_key_depth % num_heads != 0: + raise ValueError("Key depth (%d) must be divisible by the number of " + "attention heads (%d)." % (total_key_depth, num_heads)) + if total_value_depth % num_heads != 0: + raise ValueError("Value depth (%d) must be divisible by the number of " + "attention heads (%d)." 
% (total_value_depth, num_heads)) + # Validate decoding input params are sensible. + if decode_step is not None: + assert "k" in cache and "v" in cache + if cache is not None: + assert memory_antecedent is None + + with tf.variable_scope( + name, + default_name="multihead_attention_nd", + values=[query_antecedent, memory_antecedent]): + if decode_step is not None: + latest_antecedent = query_antecedent + q, latest_k, latest_v = compute_qkv(latest_antecedent, None, + total_key_depth, total_value_depth) + latest_k = split_heads_nd(latest_k, num_heads) + latest_v = split_heads_nd(latest_v, num_heads) + # put latest k and v into their correct position in cache. + k = cache["k"] + v = cache["v"] + k = put_item_in_decode_step(k, latest_k, decode_step, query_shape) + v = put_item_in_decode_step(v, latest_v, decode_step, query_shape) + cache["k"] = k + cache["v"] = v + + else: + q, k, v = compute_qkv(query_antecedent, memory_antecedent, + total_key_depth, total_value_depth) + k = split_heads_nd(k, num_heads) + v = split_heads_nd(v, num_heads) + if cache is not None: + cache["k"] = k + cache["v"] = v + # after splitting, shape is [batch, heads, d1, ..., dn, depth] + q = split_heads_nd(q, num_heads) + key_depth_per_head = total_key_depth // num_heads + q *= key_depth_per_head**-0.5 + if masked: + x = masked_local_attention_nd( + q, + k, + v, + query_shape=query_shape, + memory_flange=memory_flange, + decode_step=decode_step) + else: + raise NotImplementedError( + "Unmaked multihead attention nd is not implemented") + x = combine_heads_nd(x) + x = common_layers.dense( + x, output_depth, use_bias=False, name="output_transform") + return x + + +def decode_step_to_index(decode_step, query_shape, tensor_shape): + """Maps decode step to n-d index according to blocked raster scan order. + + Args: + decode_step: an integer + query_shape: a tuple (q1, q2, ..., qn) representing the query shape + tensor_shape: a tuple (d1, d2, ..., dn) representing the tensor shape, minus + the batch and depth dimensions. + + Returns: + a tuple (i1, i2, ..., in) representing the index of the element at + `decode_step` w.r.t. blocked raster scan order. + """ + assert len(query_shape) == len(tensor_shape) + blocks_per_dimension = [t // q for t, q in zip(tensor_shape, query_shape)] + items_in_block = np.prod(query_shape, dtype=np.int32) + step_block = decode_step // items_in_block + step_within_block = decode_step % items_in_block + + block_index = [] + for q in blocks_per_dimension[::-1]: + block_index.insert(0, step_block % q) + step_block //= q + + within_block_index = [] + for q in query_shape[::-1]: + within_block_index.insert(0, step_within_block % q) + step_within_block //= q + + final_index = [ + w + b * q for w, b, q in zip(within_block_index, block_index, query_shape) + ] + return tuple(final_index) + + +def get_item_at_decode_step(x, decode_step, query_shape): + """Extracts a single item from an n-d tensor at `decode_step` position. + + Args: + x: a [batch, d1, d2, ..., dn, depth] tensor + decode_step: an integer + query_shape: a tuple (q1, q2, ..., qn) representing the query shape + + Returns: + a [batch, 1, 1, ..., 1, depth] tensor that is a single element from `x` at + `decode_step` w.r.t. blocked raster scan order. + """ + x_shape = common_layers.shape_list(x) + index = decode_step_to_index(decode_step, query_shape, x_shape[1:-1]) + # TPU needs size to be non negative for the case when begins are not + # compile-time constants. 
+ return tf.slice(x, [0] + list(index) + [0], + [x_shape[0]] + [1] * len(index) + [x_shape[-1]]) + + +def put_item_in_decode_step(x, item, decode_step, query_shape): + """Puts a single item into an n-d tensor at `decode_step` position. + + Args: + x: a [batch, heads, d1, d2, ..., dn, depth] tensor + item: a [batch, heads, 1, 1, ..., 1, depth] tensor + decode_step: an integer + query_shape: a tuple (q1, q2, ..., qn) representing the query shape + + Returns: + a [batch, heads, d1, d2, ..., dn, depth] tensor with value at `decode_step` + w.r.t. blocked raster scan order is updated to be `item`. + """ + x_shape = common_layers.shape_list(x) + index = decode_step_to_index(decode_step, query_shape, x_shape[2:-1]) + # inplace_update only works on the first dimension, we need to flatten and + # move batch to be the second dimension. + flattened_x = tf.reshape( + x, [-1, x_shape[1], np.prod(x_shape[2:-1]), x_shape[-1]]) + # transpose to [positions, batch, heads, depth] + flattened_x = tf.transpose(flattened_x, [2, 0, 1, 3]) + + flattened_index = 0 + factor = 1 + for d, idx in zip(x_shape[-2:1:-1], index[::-1]): + flattened_index += idx * factor + factor *= d + + item_shape = common_layers.shape_list(item) + item = tf.reshape(item, item_shape[:2] + item_shape[-1:]) + updated_x = inplace_ops.alias_inplace_update(flattened_x, flattened_index, + item) + # unflatten the results + updated_x = tf.transpose(updated_x, [1, 2, 0, 3]) + return tf.reshape(updated_x, [-1, x_shape[1]] + x_shape[2:]) + + +def ffn_self_attention_layer(x, + filter_depth, + output_depth, + num_parts, + dropout_rate, + share_kv=False, + name=None): + """Self-attention feedforward layer. + + We use self-attention to do feedforward computations. We apply this function + positionwise where for each position, we linearly transform the output to have + depth filter_depth, and break up the result depth-wise into num_parts + contiguous parts. The parts self-attend, we concatenate the results + depth-wise, and we linearly transform to a depth of output_depth. The goal is + to get multiplicative interactions between components of a representation. + + Args: + x: a Tensor with shape [batch, length, channels] + filter_depth: an integer + output_depth: an integer + num_parts: an integer dividing filter depth + dropout_rate: a floating point number + share_kv: Share the key value transform + name: an optional string + + Returns: + A Tensor with shape [batch, length, output_depth]. 
+ """ + with tf.variable_scope( + name, default_name="feedforward_self_attention", values=[x]): + x_shape = common_layers.shape_list(x) + part_depth = filter_depth // num_parts + if not share_kv: + combined = common_layers.dense( + x, filter_depth * 3, use_bias=False, name="qkv_transform") + combined = tf.expand_dims(combined, axis=2) + q, k, v = tf.split(combined, 3, axis=3) + else: + q = tf.expand_dims( + common_layers.dense( + x, filter_depth, use_bias=False, name="q_transform"), + axis=2) + kv_combined = tf.expand_dims( + common_layers.dense( + tf.concat([x, x], axis=1), + filter_depth, + use_bias=False, + name="kv_transform"), + axis=2) + k, v = tf.split(kv_combined, [x_shape[1], x_shape[1]], axis=1) + + batch_q = tf.reshape(q, [-1, 1, num_parts, part_depth]) + batch_k = tf.reshape(k, [-1, 1, num_parts, part_depth]) + batch_v = tf.reshape(v, [-1, 1, num_parts, part_depth]) + + batch_q *= part_depth**-0.5 + # non-masked bias + bias = None + x = dot_product_attention(batch_q, batch_k, batch_v, bias, dropout_rate) + x = tf.reshape(x, [x_shape[0], x_shape[1], filter_depth]) + x = common_layers.dense( + x, output_depth, use_bias=False, name="output_transform") + return x + + +def parameter_attention(x, + total_key_depth, + total_value_depth, + output_depth, + memory_rows, + num_heads, + dropout_rate, + name=None): + """Attention over parameters. + + We use the same multi-headed attention as in the other layers, but the memory + keys and values are model parameters. There are no linear transformation on + the keys or values. + + We are also a bit more careful about memory usage, since the number of + memory positions may be very large. + + Args: + x: a Tensor with shape [batch, length_q, channels] + total_key_depth: an integer + total_value_depth: an integer + output_depth: an integer + memory_rows: an integer + num_heads: an integer dividing total_key_depth and total_value_depth + dropout_rate: a floating point number + name: an optional string + + Returns: + A Tensor with shape [batch, length_q, output_depth]. + """ + with tf.variable_scope(name, default_name="parameter_attention", values=[x]): + head_size_k = total_key_depth // num_heads + head_size_v = total_value_depth // num_heads + var_shape_k = [num_heads, memory_rows, head_size_k] + var_shape_v = [num_heads, memory_rows, head_size_v] + k = tf.get_variable( + "k", + var_shape_k, + initializer=tf.random_normal_initializer( + 0, output_depth**-0.5 * (num_heads**0.5))) + v = tf.get_variable( + "v", + var_shape_v, + initializer=tf.random_normal_initializer( + 0, output_depth**-0.5 * (output_depth**0.5))) + batch_size = common_layers.shape_list(x)[0] + length = common_layers.shape_list(x)[1] + q = common_layers.dense( + x, total_key_depth, use_bias=False, name="q_transform") + if dropout_rate: + # This is a cheaper form of attention dropout where we use to use + # the same dropout decisions across batch elements and query positions, + # but different decisions across heads and memory positions. 
+      v = tf.nn.dropout(
+          v, 1.0 - dropout_rate, noise_shape=[num_heads, memory_rows, 1])
+    # query is [batch, length, hidden_size]
+    # reshape and transpose it to [heads, batch * length, head_size]
+    q = tf.reshape(q, [batch_size, length, num_heads, head_size_k])
+    q = tf.transpose(q, [2, 0, 1, 3])
+    q = tf.reshape(q, [num_heads, batch_size * length, head_size_k])
+    weights = tf.matmul(q, k, transpose_b=True)
+    weights = tf.nn.softmax(weights)
+    y = tf.matmul(weights, v)
+    y = tf.reshape(y, [num_heads, batch_size, length, head_size_v])
+    y = tf.transpose(y, [1, 2, 0, 3])
+    y = tf.reshape(y, [batch_size, length, total_value_depth])
+    y.set_shape([None, None, total_value_depth])
+    y = common_layers.dense(
+        y, output_depth, use_bias=False, name="output_transform")
+    return y
+
+
+@expert_utils.add_name_scope()
+def coordinate_tensor(shape, axis):
+  """Return a tensor with given shape containing coordinates along given axis.
+
+  Args:
+    shape: a Tensor representing the shape of the output Tensor
+    axis: an integer
+
+  Returns:
+    A tensor of the given shape and type tf.int32, where each element is its
+    coordinate along the given axis.
+  """
+  if axis < 0:
+    axis = tf.size(shape) + axis  # Convert to positive for the one_hot index
+
+  r = tf.range(shape[axis])
+  r_shape = tf.one_hot(
+      axis, tf.size(shape), on_value=-1, off_value=1, dtype=tf.int32)
+  return tf.zeros(shape, dtype=tf.int32) + tf.reshape(r, r_shape)
+
+
+def self_attention_expert(x,
+                          batch_coordinate,
+                          mask_right=True,
+                          split_batch=False,
+                          attention_num_head=1,
+                          attention_kq_size=None,
+                          attention_v_size=None):
+  """Implements attention that runs inside each expert.
+
+  Args:
+    x: A tensor of shape [batch, depth]. Contains representations from
+      different positions, which are lexicographically ordered.
+    batch_coordinate: A tensor of shape [batch, 1] containing the batch
+      coordinate of each element in x. This is needed to make sure that
+      positions from different sequences don't attend to each other.
+    mask_right: A bool. If true, we will not attend to positions on the right,
+      just as in decoder self attention.
+    split_batch (bool): If True, each sequence of the batch is processed
+      individually in a loop. If False, the sequences are processed all at
+      once and a mask is applied to isolate the sequences from each other.
+    attention_num_head (int): number of attention heads
+    attention_kq_size (int): dimension used for the attention key and query
+    attention_v_size (int): dimension used for the attention value
+
+  Returns:
+    out: A tensor of shape [batch, depth].
+  example use:
+  expert_utils.local_moe(
+     ...
+ expert_fn=functools.partial(self_attention_expert, mask_right=) + ) + """ + + depth = x.get_shape().as_list()[-1] + length = common_layers.shape_list(batch_coordinate)[0] + + # Print a warning message if one of the expert isn't used (useful at + # inference where summaries aren't used and the gating function don't add + # noise) + global _expert_count # Hack to make each expert have a unique id + _expert_count += 1 + length = tf.cond( + tf.equal(length, 0), + lambda: tf.Print( # pylint: disable=g-long-lambda + length, [length], "Expert {} empty: ".format(_expert_count)), + lambda: length, + ) + + tf.summary.scalar("batch_size", length, family="experts_stats_batch_size") + + attention_kq_size = attention_kq_size or depth + attention_v_size = attention_v_size or depth + + def length_not_null(x, batch_coordinate): + """Branch of the graph only evaluated when length isn't null.""" + + # Mask between the sequences (not used if map_ids is used) + bias_batch = attention_bias_coordinates(batch_coordinate) + + def add_or_set_if(prev_bias, new_bias, condition): + """Add the bias together while considering the None case.""" + if not condition: + return prev_bias + if prev_bias is None: + return new_bias + return prev_bias + new_bias + + def mask_and_call_attention(x): + """Function applied once for each sequence of the batch.""" + + # Mask to prevent sequences of attending to the future + length = common_layers.shape_list(x)[1] # x has shape [1, length,...] + bias_past = tf.reshape( + attention_bias_lower_triangle(length), [length, length]) + # bias has shape [length, length] + + bias = None + bias = add_or_set_if(bias, bias_past, mask_right) + bias = add_or_set_if(bias, bias_batch, not split_batch) + bias = tf.reshape(bias, [1, 1, length, length]) + + return multihead_attention( + x, + None, + bias, + total_key_depth=attention_kq_size, + total_value_depth=attention_v_size, + output_depth=depth, + num_heads=attention_num_head, + dropout_rate=0.0) + + if split_batch: + out = expert_utils.map_ids(x, batch_coordinate, mask_and_call_attention) + else: + x = tf.reshape(x, [1, length, depth]) + out = mask_and_call_attention(x) + out = tf.squeeze(out, 0) + return out + + # If the length is empty, just forward an empty tensor (avoid having to + # evaluate multihead_attention with tensor having dim equal to zeros) + out = tf.cond( + tf.equal(length, 0), + lambda: tf.zeros(shape=[0, depth], dtype=tf.float32, name="empty_out"), + lambda: length_not_null(x, batch_coordinate), + ) + return out + + +def local_expert_attention(x, + k, + loss_coef, + attention_num_experts, + train=True, + batch_coordinate=None, + **kwargs): + """Attention using a mixture of experts. + + Positions sent to the same expert can attend to each other. + The mixture of experts is "local" in that it is replicated on each + datashard. + + local_moe flatten all batches so to avoid problems with padding (ex: all + padding going to the same expert, self attention attending to non null + padding tokens,...), the padding should be removed before. + + Args: + x: a Tensor with shape [batch, length, depth] or [1, batch*length, depth] + k: The number of experts to dispatch each example to + loss_coef: a scalar. A multiplier for the expert loss + attention_num_experts: The number of experts to use + train: a boolean for the current mode + batch_coordinate (tf.Tensor): int32 tensor of shape [1, batch*length, 1] + containing the batch ids. If None, deduced from first dim of x. 
+ **kwargs: Arguments to forward to self_attention_expert + + Returns: + y: a Tensor with shape [batch, length, depth] + loss: a Scalar + """ + if batch_coordinate is None: + batch_coordinate = tf.expand_dims( + coordinate_tensor(common_layers.shape_list(x)[:-1], axis=0), axis=-1) + with tf.variable_scope("local_expert_attention"): + additional_dispatch_params = {"batch_coordinate": batch_coordinate} + return expert_utils.local_moe( + x, + train, + functools.partial(self_attention_expert, **kwargs), + attention_num_experts, + k=k, + loss_coef=loss_coef, + pass_x=True, + pass_gates=False, + additional_dispatch_params=additional_dispatch_params, + ) + + +@expert_utils.add_name_scope() +def expert_dot_product(q, k, v, info_q, info_k): + """Perform dot product on a subset of the sequence. + + Can add a mask to the attention to prevent sequences to attend to each other + and to prevent attention to the future. + + Args: + q (tf.Tensor): Queries of shape [length_expert_q, depth_k] + k (tf.Tensor): Keys of shape [length_expert_k, depth_k] + v (tf.Tensor): Values of shape [length_expert_k, depth_v] + info_q (BatchInfo): Batch info for queries. If None, no mask is added + info_k (BatchInfo): Batch info for keys + + Returns: + tf.Tensor: dot product attention output ([length_expert_q, depth_v]) + """ + + length_q = common_layers.shape_list(q)[0] + length_k = common_layers.shape_list(k)[0] + depth_v = v.get_shape().as_list()[-1] + + # Create the mask + bias = attention_bias_coordinates(info_q.coordinates, info_k.coordinates) + if info_k.order is not None: + bias += attention_bias_future(info_q.order, info_k.order) + + # Restore batch and head dimension + q, k, v = [tf.expand_dims(tf.expand_dims(t, 0), 0) for t in (q, k, v)] + + def is_zero(): + zeros = tf.zeros(shape=[1, 1, length_q, depth_v], dtype=tf.float32) + zeros = tf.Print(zeros, [length_k, length_q], "length_k/length_q: ") + return zeros + + def is_not_zero(): + return dot_product_attention( + q, + k, + v, + bias=bias, + # No image summary to avoid "Retval[0] does not have value" (because + # inside a condition) + make_image_summary=False, + ) + + # TODO(epot): Should make sure a query gets at least one key. Because the + # different sequences of a batch are merged, it's possible that a + # query from a sequence only receive memory from another sequence, so + # with the mask, the query will perform a softmax on -infinity values. + # A hack could be to add at least one sequence of each batch on each group so + # the query can attend to at least one element. + # Softmax(Q.K)*V + v_out = tf.cond( + tf.logical_or(tf.equal(length_q, 0), tf.equal(length_k, 0)), + is_zero, + is_not_zero, + ) + + # Remove batch and head dimension + v_out = tf.squeeze(v_out, axis=0) + v_out = tf.squeeze(v_out, axis=0) + return v_out + + +@expert_utils.add_name_scope() +def dot_product_single_head(q, k, v, gates_q, gates_k, bi): + """Perform a dot product attention on a single sequence on a single head. + + This function dispatch the q, k, v and loop over the buckets to compute the + attention dot product on each subsequences. 
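+  Each bucket defined by gates_q/gates_k is dispatched through a
+  SparseDispatcher, attention is computed independently inside every bucket
+  via expert_dot_product, and the per-bucket outputs are combined back into
+  the original query order.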
+
+  Args:
+    q (tf.Tensor): [length_q, depth_q]
+    k (tf.Tensor): [length_k, depth_q]
+    v (tf.Tensor): [length_k, depth_v]
+    gates_q (tf.Tensor): One-hot vector of shape [length_q, nb_buckets]
+    gates_k (tf.Tensor): One-hot vector of shape [length_k, nb_buckets]
+    bi (BatchInfo): Contains the batch coordinates and sequence order
+
+  Returns:
+    tf.Tensor: [length_q, depth_v]
+  """
+
+  nb_buckets = gates_q.get_shape().as_list()[-1]
+
+  q_dispatcher = expert_utils.SparseDispatcher(nb_buckets, gates_q)
+  k_dispatcher = expert_utils.SparseDispatcher(nb_buckets, gates_k)
+
+  def eventually_dispatch(dispatcher, value):
+    if value is not None:
+      return dispatcher.dispatch(value)
+    return [None] * nb_buckets
+
+  # Iterate over every dispatched group
+  list_v_out = []
+  for (
+      q_i,
+      k_i,
+      v_i,
+      qbc,
+      qbo,
+      kbc,
+      kbo,
+  ) in zip(
+      # Dispatch queries, keys and values
+      q_dispatcher.dispatch(q),
+      k_dispatcher.dispatch(k),
+      k_dispatcher.dispatch(v),
+      # Also dispatch the sequence positions and batch coordinates
+      eventually_dispatch(q_dispatcher, bi.coordinates),
+      eventually_dispatch(q_dispatcher, bi.order),
+      eventually_dispatch(k_dispatcher, bi.coordinates),
+      eventually_dispatch(k_dispatcher, bi.order),
+  ):
+    list_v_out.append(
+        expert_dot_product(
+            q_i,
+            k_i,
+            v_i,
+            info_q=BatchInfo(coordinates=qbc, order=qbo),
+            info_k=BatchInfo(coordinates=kbc, order=kbo)))
+
+  # Combine all buckets together to restore the original length
+  return q_dispatcher.combine(list_v_out)
+
+
+def map_fn_switch(fn, elems, use_map_fn=True, **kwargs):
+  """Construct the graph with either tf.map_fn or a python for loop.
+
+  This function is mainly for benchmarking purposes.
+
+  tf.map_fn is dynamic but is much slower than creating a static graph with
+  a for loop. However, having a for loop makes the graph much longer to build
+  and can consume too much RAM in a distributed setting.
+
+  Args:
+    fn (fct): same as for tf.map_fn, but for now it can only return a single
+      tensor value (instead of a tuple of tensors for the general case)
+    elems (tuple): same as for tf.map_fn
+    use_map_fn (bool): If True, tf.map_fn is used; if False, a python for loop
+      is used instead
+    **kwargs: Additional tf.map_fn arguments (ignored if use_map_fn is False)
+
+  Returns:
+    tf.Tensor: the output of tf.map_fn
+  """
+  if use_map_fn:
+    return tf.map_fn(fn, elems, **kwargs)
+  elems_unpacked = (tf.unstack(e) for e in elems)
+  out_unpacked = [fn(e) for e in zip(*elems_unpacked)]
+  out = tf.stack(out_unpacked)
+  return out
+
+
+@expert_utils.add_name_scope()
+def sparse_dot_product_attention(q, k, v, bi, use_map_fn, experts_params):
+  """Sparse multihead self attention.
+
+  Performs an approximation of the full multihead attention by dispatching
+  the tokens using their keys/values. Thus the attention matrix is only
+  computed each time on a subset of the tokens.
+
+  Notes:
+    * The function doesn't perform scaling here (multihead_attention does
+      the /sqrt(depth)).
+    * The padding should have been removed (so batch size should be 1, but
+      length contains the elements from all the different batches).
+    * Right now, only self attention is supported, so length_q and length_kv
+      should be identical and the function will add a triangular mask.
+    * If bi.order is not None, the bias is added inside this function to
+      prevent attention to the future.
+ + Args: + q (tf.Tensor): Queries of shape [batch, heads, length_q, depth_k] + k (tf.Tensor): Keys of shape [batch, heads, length_q, depth_k] + v (tf.Tensor): Values of shape [batch, heads, length_kv, depth_v] + bi (BatchInfo): Contains the batch coordinates and sequence order + use_map_fn (bool): Use either tf.map_fn of python for loop to compute the + heads separately + experts_params (dict): Additional params for the local expert + + Returns: + tf.Tensor: Approximation of Softmax(Q.K) * V, of shape + [batch, heads, length_q, depth_v] + """ + batch_size, nb_heads, _, depth = common_layers.shape_list(q) + + @expert_utils.add_name_scope() + def flatten_first_dims(x): + """Reshape such that x is [num_heads, -1, depth].""" + # Case 1: Either constant batch size of size 1 or batch already flattened + if x.get_shape().as_list()[0] == 1: + return tf.squeeze(x, axis=0) + + # Case 2: Flatten batch dimension + x = tf.transpose(x, perm=[1, 0, 2, 3]) + x = tf.reshape(x, [nb_heads, -1, depth]) + return x + + def flatten_batch(x): + if x is None: + return x + return expert_utils.flatten_all_but_last(x) + + q = flatten_first_dims(q) + k = flatten_first_dims(k) + v = flatten_first_dims(v) + bi = BatchInfo( + coordinates=flatten_batch(bi.coordinates), + order=flatten_batch(bi.order), + ) + + # Unstack heads + list_q = tf.unstack(q) # list[tf.Tensor(shape=[batch * length, depth])] + list_k = tf.unstack(k) + list_v = tf.unstack(v) + + list_gates_q = [] + list_gates_k = [] + + total_loss = 0.0 + # There might be a more optimized way to compute all heads at once + for single_q, single_k, _ in zip(list_q, list_k, list_v): + # Each head get its own dispatcher + lhs_gating = LshGating( + depth=single_q.get_shape().as_list()[-1], **experts_params) + + list_gates_q.append(lhs_gating.get_gates(single_q)) + list_gates_k.append(lhs_gating.get_gates(single_k)) + + gates_q = tf.stack(list_gates_q) + gates_k = tf.stack(list_gates_k) + + # Process each head separately. + v_out = map_fn_switch( + lambda args: dot_product_single_head(bi=bi, *args), + elems=(q, k, v, gates_q, gates_k), + dtype=(tf.float32), + parallel_iterations=2, + use_map_fn=use_map_fn, + ) + + # Restore original shape as expected by multihead_attention + if isinstance(batch_size, int) and batch_size == 1: + v_out = tf.expand_dims(v_out, axis=0) # Restore batch_size = 1 + else: + v_out = tf.reshape(v_out, [nb_heads, batch_size, -1, depth]) + v_out = tf.transpose(v_out, [1, 0, 2, 3]) + return v_out, total_loss / nb_heads + + +@expert_utils.add_name_scope() +def dot_product_batched_head(q, k, v, gates_q, gates_k, mask_right=False): + """Perform a dot product attention on a single sequence on a single head. + + This function dispatch the q, k, v and loop over the buckets to compute the + attention dot product on each subsequences. 
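+  Unlike dot_product_single_head, the inputs keep a leading batch*heads
+  dimension: buckets are dispatched with a fixed capacity (a
+  TruncatingDispatcher), so all heads and buckets are processed in a single
+  batched dot_product_attention call.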
+ + Args: + q (tf.Tensor): [batch*heads, length_q, depth_q] + k (tf.Tensor): [batch*heads, length_k, depth_q] + v (tf.Tensor): [batch*heads, length_k, depth_v] + gates_q (tf.Tensor): One-hot of shape [batch*heads, length_q, nb_buckets] + gates_k (tf.Tensor): One-hot of shape [batch*heads, length_k, nb_buckets] + mask_right (bool): Add a bias to prevent attention to the future + + Returns: + tf.Tensor: [length_q, depth_v] + """ + nb_buckets = common_layers.shape_list(gates_q)[-1] + + @expert_utils.add_name_scope() + def get_dispatcher(gates): + """Construct dispatcher for gates.""" + length = common_layers.shape_list(gates)[1] + # Count the number of ones per batch (and keep the max value) + nb_elems_to_dispatch = tf.reduce_sum(gates, axis=[1, 2]) + nb_elems_to_dispatch = tf.reduce_max(nb_elems_to_dispatch) + nb_elems_to_dispatch = tf.to_int32(nb_elems_to_dispatch) + capacity = nb_elems_to_dispatch // nb_buckets * 2 # Capacity is hardcoded + capacity = tf.minimum(length, capacity) + tf.summary.scalar("dispatch_capacity", capacity, family="lsh") + return expert_utils.TruncatingDispatcher(gates, capacity) + + def add_summary_capacity(x, prefix): + # Monitor if capacity overflow + x = x[0, ...] # Take first batch/head + x = tf.reduce_sum(x, axis=0) + tf.summary.scalar(prefix + "_min", tf.reduce_min(x), family="lsh") + tf.summary.scalar(prefix + "_max", tf.reduce_max(x), family="lsh") + tf.summary.histogram(prefix + "capacity_distribution", x, family="lsh") + for i in range(3): # Show the first 3 buckets + tf.summary.scalar("{}_{}".format(prefix, i), x[i], family="lsh") + + add_summary_capacity(gates_q, "q") + add_summary_capacity(gates_k, "k") + + q_dispatcher = get_dispatcher(gates_q) + k_dispatcher = get_dispatcher(gates_k) + + q = q_dispatcher.dispatch(q) + k = k_dispatcher.dispatch(k) + v = k_dispatcher.dispatch(v) + + # Bias of shape [batch*heads, nb_buckets, 1, capacity] broadcasted to every + # queries + bias = tf.expand_dims((k_dispatcher.nonpadding() - 1.0) * 1e9, 2) + if mask_right: + q_coordinate = tf.to_float( + tf.expand_dims(q_dispatcher.length_coordinate(), 3)) + k_coordinate = tf.to_float( + tf.expand_dims(k_dispatcher.length_coordinate(), 2)) + bias += tf.to_float(tf.greater(k_coordinate, q_coordinate)) * -1e9 + # The sequence padding is not masked but is ignored on the next layers + + # q, k, v now have shape [batch*heads, nb_bucket, capacity, depth] + # The buckets can be seen as different heads + v_out = dot_product_attention(q, k, v, bias=bias) + + # Combine all buckets together to restore the original length + return q_dispatcher.combine(v_out) + + +@expert_utils.add_name_scope() +def sparse_dot_product_attention_truncated( + q, + k, + v, + bi, # Unused + experts_params, + use_map_fn=False, # Unused + mask_right=False, +): # pylint: disable=unused-argument + """Sparse multihead self attention. + + Perform an approximation of the full multihead attention by dispatching + the tokens using their keys/values. Thus the attention matrix are only + computed each times on a subset of the tokens. + + Notes: + * The function don't perform scaling here (multihead_attention does + the /sqrt(depth)). + * The padding should have been removed (so batch size should be 1 but length + contains the elements from all different batches) + * Right now, only self attention is supported so length_q and length_kv + should be identical and the function will add triangular mask. + * If bi.order is not None, The bias is added inside this function to + prevent attention to the future. 
+ + Args: + q (tf.Tensor): Queries of shape [batch, heads, length_q, depth_k] + k (tf.Tensor): Keys of shape [batch, heads, length_q, depth_k] + v (tf.Tensor): Values of shape [batch, heads, length_kv, depth_v] + bi (BatchInfo): Contains the batch coordinates and sequence order + experts_params (dict): Additional params for the local expert + use_map_fn (bool): Use either tf.map_fn of python for loop to compute the + heads separately + mask_right (bool): + Returns: + tf.Tensor: Approximation of Softmax(Q.K) * V, of shape + [batch, heads, length_q, depth_v] + """ + # Currently depth is the same for for q and v + batch_size, nb_heads, _, depth = common_layers.shape_list(q) + + total_loss = 0.0 + + # Each head get its own dispatcher + list_lsh = [LshGating(depth=depth, **experts_params) for _ in range(nb_heads)] + + @expert_utils.add_name_scope() + def get_gates_head(x, add_first=False): + """Return the gates for each heads of the current x. + + Args: + x (tf.Tensor): of shape [batch, heads, length, depth] + add_first (bool): if True, add the first element on each bucket + + Returns: + tf.Tensor: gates of shape [batch, heads, length, num_buckets] + """ + length = common_layers.shape_list(x)[2] + + # Invert heads/batch + x = tf.transpose(x, perm=[1, 0, 2, 3]) + x = tf.reshape(x, [nb_heads, batch_size * length, depth]) + + list_x = tf.unstack(x) # list[tf.Tensor(shape=[batch * length, depth])] + + # Unstack heads + list_gates = [] + # There might be a more optimized way to compute all heads at once + for lsh, single_x in zip(list_lsh, list_x): + # Each head get its own dispatcher + gates = lsh.get_gates(single_x) + nb_buckets = gates.get_shape().as_list()[-1] + # Reshape to [batch, length, depth] but should consider sequence + # padding in that case (also dispatch the padding) + gates = tf.reshape(gates, [batch_size, length, nb_buckets]) + list_gates.append(gates) + + gates = tf.stack(list_gates) + + # Restore original shape + gates = tf.reshape(gates, [nb_heads, batch_size, length, nb_buckets]) + gates = tf.transpose(gates, [1, 0, 2, 3]) + + # Dispatch the first element to every gates to avoid empty buckets + if add_first: + gates = tf.maximum(gates, + tf.reshape(tf.one_hot([0], length), [1, 1, length, 1])) + + return gates + + gates_q = get_gates_head(q) + gates_k = get_gates_head(k, add_first=True) + + # [batch, heads, length, depth] => [batch*heads, length, depth] + q, k, v, gates_q, gates_k = [ + combine_first_two_dimensions(t) for t in (q, k, v, gates_q, gates_k) + ] + + v_out = dot_product_batched_head(q, k, v, gates_q, gates_k, mask_right) + + # Restore original dimension + v_out = tf.reshape(v_out, [batch_size, nb_heads, -1, depth]) + + return v_out, total_loss / nb_heads + + +@expert_utils.add_var_scope() +def deconv_elems_1d(x, factor, out_depth=None): + """Increase the length and change the dimensionality. + + Expand/project each positions of dim depth of the input into + factor*tokens of dim out_depth + + Args: + x (tf.Tensor): shape [batch_size, length, depth] + factor (int): Multiplicative factor of each tokens. 
+ out_depth (int): Output depth (if None, keep depth constant) + + Returns: + tf.Tensor: shape [batch_size, length*factor, out_depth] + """ + out_depth = out_depth or x.get_shape().as_list()[-1] + x = tf.expand_dims(x, 1) # [batch_size, 1, length, depth] + x = layers().Conv2DTranspose( + filters=out_depth, + kernel_size=(1, factor), + strides=(1, factor), + padding="valid", + data_format="channels_last", + )(x) # [batch_size, 1, length*factor, out_depth] + x = tf.squeeze(x, 1) # [batch_size, length*factor, depth] + return x + + +@expert_utils.add_var_scope() +def conv_elems_1d(x, factor, out_depth=None): + """Decrease the length and change the dimensionality. + + Merge/restore/compress factors positions of dim depth of the input into + a single position of dim out_depth. + This is basically just a strided convolution without overlap + between each strides. The original length has to be divided by factor. + + Args: + x (tf.Tensor): shape [batch_size, length, depth] + factor (int): Length compression factor. + out_depth (int): Output depth + + Returns: + tf.Tensor: shape [batch_size, length//factor, out_depth] + """ + out_depth = out_depth or x.get_shape().as_list()[-1] + # with tf.control_dependencies( # Dynamic assertion + # [tf.assert_equal(tf.shape(x)[1] % factor, 0)]): + x = tf.expand_dims(x, 1) # [batch_size, 1, length, depth] + x = layers().Conv2D( + filters=out_depth, + kernel_size=(1, factor), + strides=(1, factor), + padding="valid", + data_format="channels_last", + )(x) # [batch_size, 1, length//factor, out_depth] + x = tf.squeeze(x, 1) # [batch_size, length//factor, depth] + return x + + +@expert_utils.add_var_scope() +def local_reduction_attention(x, block_length, multihead_params): + """Reduce the length dimension using self attention. + + Args: + x (tf.Tensor): float32 of shape [batch, length, depth] + block_length (int): Block length for local attention (Compression factor) + multihead_params (dict): parameters for multihead attention + + Returns: + tf.Tensor: Compressed tensor of shape [batch, length // factor, depth] + """ + + @expert_utils.add_name_scope() + def dot_product_self_local_attention_flattened(q, k, v): + """Strided block local self-attention. + + No overlap between the blocks. 
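+    The softmax is taken jointly over all block_length**2 query/key pairs of
+    a block and the resulting weights are summed over the query axis, so
+    every block is compressed into a single output position.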
+ + Args: + q (tf.Tensor): shape [batch, heads, length, depth_k] + k (tf.Tensor): shape [batch, heads, length, depth_k] + v (tf.Tensor): shape [batch, heads, length, depth_v] + + Returns: + tf.Tensor: shape [batch, heads, length, depth_v] + """ + _, num_head, _, depth = q.get_shape().as_list() + + # Extract the blocks + def pad_and_reshape(x): + """Split the length dim into [num_block, block_length].""" + length_x = common_layers.shape_list(x)[2] + # Add some padding, but won't matter as the last block will never be + # attended by the query (after compression) + x = tf.pad(x, [[0, 0], [0, 0], [0, -length_x % block_length], [0, 0]]) + x = tf.reshape( + x, + [ + common_layers.shape_list(x)[0], # Batch + num_head, # Head + common_layers.shape_list(x)[2] // block_length, # Num blocks + block_length, # Block length + depth, # Depth + ]) + return x + + q, k, v = [pad_and_reshape(t) for t in (q, k, v)] + + # Perform attention on the flattened dot product + logits = tf.matmul(q, k, transpose_b=True) + logits = tf.reshape( + logits, + [ + common_layers.shape_list(logits)[0], # Batch + num_head, # Head + common_layers.shape_list(logits)[2], # Num blocks + block_length**2, # Flatten last dimension + ]) + weights = tf.nn.softmax(logits) + weights = tf.reshape( + weights, + [ + common_layers.shape_list(weights)[0], # Batch + num_head, # Head + common_layers.shape_list(weights)[2], # Num blocks + block_length, + block_length, # Restore the block length dimension + ]) + weights = tf.reduce_sum(weights, axis=3, keep_dims=True) # Compress block + v_out = tf.matmul(weights, v) # [1, block_length] @ [block_length, depth] + v_out = tf.squeeze(v_out, axis=3) + return v_out + + return multihead_attention( + x, + None, + bias=None, + output_depth=x.get_shape().as_list()[-1], + attention_type=dot_product_self_local_attention_flattened, + **multihead_params) + + +@expert_utils.add_var_scope() +def multihead_self_attention_reduced( + x, + memory_antecedent=None, + bias=None, + factor=None, + multihead_params=None, + nonlinearity="none", + reduction_type="conv", + add_mask=True, +): + """Reduce the length dimension by compressing with conv. + + Args: + x (tf.Tensor): float32 of shape [batch, length, depth] + memory_antecedent (tf.Tensor): Unsupported for now + bias (tf.Tensor): Ignored + factor (int): compression factor for the memory sequence + multihead_params (dict): parameters for multihead attention + nonlinearity (str): Add some non-linearity after the memory block + reduction_type (str): type of compression + add_mask (bool): If True, add the bias to prevent attention to the future + + Returns: + (tf.Tensor): float32 of shape [batch, length, depth] + + Raises: + ValueError: If reduction_type or nonlinearity is invalid + """ + if not factor or not multihead_params: + raise ValueError("factor and multihead_params should be set") + if memory_antecedent is not None: + raise NotImplementedError( + "multihead_self_attention_reduced only works with self-attention") + + depth = x.get_shape().as_list()[-1] + + # Could try to have some overlap between the blocks but that would + # create conv artifacts, would make it difficult to not attend to the future + # within one group and the padding should be handled specially. 
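+  # For example, with factor=4 a length-256 sequence is compressed into 64
+  # memory positions, so the attention below operates on a 256 x 65 matrix
+  # (the extra column comes from the first element of x prepended to the
+  # compressed memory).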
+ + # Reduce the memory dimension + if reduction_type == "attention": + memory_x = local_reduction_attention(x, factor, multihead_params) + elif reduction_type == "conv": + # With valid padding, the last block won't be computed (not attended anyway) + memory_x = conv_elems_1d(x, factor) + else: + raise ValueError("Unknown reduction type {}".format(reduction_type)) + + if nonlinearity == "silu": + memory_x *= tf.nn.sigmoid(memory_x) + elif nonlinearity != "none": + raise ValueError("Unknown non linearity {}".format(nonlinearity)) + + memory_x = tf.concat( + # Add the first elem to make it attendable by everyone (otherwise the + # first block cannot attend to anything) + [x[:, :1, :], memory_x], + axis=1, + ) + + # Construct the bias + @expert_utils.add_name_scope() + def construct_bias_vectors(t, axis): + length = tf.to_float(common_layers.shape_list(t)[1]) + length_coordinates = tf.range(length, dtype=tf.float32) + length_coordinates = tf.expand_dims(length_coordinates, axis=axis) + # [1, length_k] or [length_q, 1] + return length_coordinates + + if add_mask: # Create mask to prevent attention to the future + bias = tf.to_float( + tf.greater( + # Because we add the first elem to the memory block and it can be + # attended by anyone,we don't need to add +1 anymore to prevent self + # attention Use * factor to make sure the last tokens of a block + # cannot attend the block + construct_bias_vectors(memory_x, 0) * factor, + # +epsilon to avoid float equality + construct_bias_vectors(x, 1) + 1e-3, + )) * -1e9 + bias = tf.expand_dims(bias, axis=0) + bias = tf.expand_dims(bias, axis=0) # [1, 1, length_k, length_q] + else: + bias = None + + return multihead_attention( + query_antecedent=x, + memory_antecedent=memory_x, + bias=bias, + output_depth=depth, + **multihead_params) + + +def scaled_dot_product_attention_simple(q, k, v, bias, name=None): + """Scaled dot-product attention. One head. One spatial dimension. + + Args: + q: a Tensor with shape [batch, length_q, depth_k] + k: a Tensor with shape [batch, length_kv, depth_k] + v: a Tensor with shape [batch, length_kv, depth_v] + bias: optional Tensor broadcastable to [batch, length_q, length_kv] + name: an optional string + + Returns: + A Tensor. + """ + with tf.variable_scope( + name, default_name="scaled_dot_product_attention_simple"): + scalar = tf.rsqrt(tf.to_float(common_layers.shape_list(q)[2])) + logits = tf.matmul(q * scalar, k, transpose_b=True) + if bias is not None: + logits += bias + weights = tf.nn.softmax(logits, name="attention_weights") + if common_layers.should_generate_summaries(): + tf.summary.image( + "attention", tf.expand_dims(tf.pow(weights, 0.2), 3), max_outputs=1) + return tf.matmul(weights, v) + + +_function_cache = {} + + +def multihead_self_attention_memory_efficient(x, + bias, + num_heads, + head_size=None, + epsilon=1e-6, + forget=True, + test_vars=None, + name=None): + """Multihead scaled-dot-product self-attention. + + Includes layer norm. + + Returns multihead-self-attention(layer_norm(x)) + + Computes one attention head at a time to avoid exhausting memory. + + If forget=True, then forget all forwards activations and recompute on + the backwards pass. 
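+  When forget=True, the layer is wrapped in a Defun with a custom gradient
+  function, so per-head activations are recomputed during backprop instead of
+  being kept alive, trading compute for memory.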
+ + Args: + x: a Tensor with shape [batch, length, input_size] + bias: an attention bias tensor broadcastable to [batch, 1, length, length] + num_heads: an integer + head_size: an optional integer - defaults to input_size/num_heads + epsilon: a float, for layer norm + forget: a boolean - forget forwards activations and recompute on backprop + test_vars: optional tuple of variables for testing purposes + name: an optional string + + Returns: + A Tensor. + """ + io_size = x.get_shape().as_list()[-1] + if head_size is None: + assert io_size % num_heads == 0 + head_size = io_size / num_heads + + def forward_internal(x, wqkv, wo, attention_bias, norm_scale, norm_bias): + """Forward function.""" + n = common_layers.layer_norm_compute(x, epsilon, norm_scale, norm_bias) + wqkv_split = tf.unstack(wqkv, num=num_heads) + wo_split = tf.unstack(wo, num=num_heads) + y = 0 + for h in range(num_heads): + with tf.control_dependencies([y] if h > 0 else []): + combined = tf.nn.conv1d(n, wqkv_split[h], 1, "SAME") + q, k, v = tf.split(combined, 3, axis=2) + o = scaled_dot_product_attention_simple(q, k, v, attention_bias) + y += tf.nn.conv1d(o, wo_split[h], 1, "SAME") + return y + + key = ( + "multihead_self_attention_memory_efficient %s %s" % (num_heads, epsilon)) + if not forget: + forward_fn = forward_internal + elif key in _function_cache: + forward_fn = _function_cache[key] + else: + + @function.Defun(compiled=True) + def grad_fn(x, wqkv, wo, attention_bias, norm_scale, norm_bias, dy): + """Custom gradient function.""" + with tf.control_dependencies([dy]): + n = common_layers.layer_norm_compute(x, epsilon, norm_scale, norm_bias) + wqkv_split = tf.unstack(wqkv, num=num_heads) + wo_split = tf.unstack(wo, num=num_heads) + deps = [] + dwqkvs = [] + dwos = [] + dn = 0 + for h in range(num_heads): + with tf.control_dependencies(deps): + combined = tf.nn.conv1d(n, wqkv_split[h], 1, "SAME") + q, k, v = tf.split(combined, 3, axis=2) + o = scaled_dot_product_attention_simple(q, k, v, attention_bias) + partial_y = tf.nn.conv1d(o, wo_split[h], 1, "SAME") + pdn, dwqkvh, dwoh = tf.gradients( + ys=[partial_y], + xs=[n, wqkv_split[h], wo_split[h]], + grad_ys=[dy]) + dn += pdn + dwqkvs.append(dwqkvh) + dwos.append(dwoh) + deps = [dn, dwqkvh, dwoh] + dwqkv = tf.stack(dwqkvs) + dwo = tf.stack(dwos) + with tf.control_dependencies(deps): + dx, dnorm_scale, dnorm_bias = tf.gradients( + ys=[n], xs=[x, norm_scale, norm_bias], grad_ys=[dn]) + return (dx, dwqkv, dwo, tf.zeros_like(attention_bias), dnorm_scale, + dnorm_bias) + + @function.Defun( + grad_func=grad_fn, compiled=True, separate_compiled_gradients=True) + def forward_fn(x, wqkv, wo, attention_bias, norm_scale, norm_bias): + return forward_internal(x, wqkv, wo, attention_bias, norm_scale, + norm_bias) + + _function_cache[key] = forward_fn + + if bias is not None: + bias = tf.squeeze(bias, 1) + with tf.variable_scope(name, default_name="multihead_attention", values=[x]): + # TODO(noam): it would be nice to save memory by casting x to float16 + # here, but this causes problems with the gradients. Figure out if there + # is a way to leave the gradients as float32. 
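+    # wqkv stacks one [1, io_size, 3 * head_size] conv1d kernel per head; in
+    # forward_internal each head's kernel is applied to the layer-normed input
+    # and the result is split into q, k, v, one head at a time to bound peak
+    # memory.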
+ if test_vars is not None: + wqkv, wo, norm_scale, norm_bias = list(test_vars) + else: + wqkv = tf.get_variable( + "wqkv", [num_heads, 1, io_size, 3 * head_size], + initializer=tf.random_normal_initializer(stddev=io_size**-0.5)) + wo = tf.get_variable( + "wo", [num_heads, 1, head_size, io_size], + initializer=tf.random_normal_initializer( + stddev=(head_size * num_heads)**-0.5)) + norm_scale, norm_bias = common_layers.layer_norm_vars(io_size) + y = forward_fn(x, wqkv, wo, bias, norm_scale, norm_bias) + y.set_shape(x.get_shape()) + return y + + +multihead_attention_sparse_dot_prod = functools.partial( + multihead_attention, attention_type=sparse_dot_product_attention) + +multihead_attention_sparse_truncated = functools.partial( + multihead_attention, attention_type=sparse_dot_product_attention_truncated) diff --git a/tensor2tensor/layers/common_attention_test.py b/tensor2tensor/layers/common_attention_test.py new file mode 100644 index 000000000..108cd754c --- /dev/null +++ b/tensor2tensor/layers/common_attention_test.py @@ -0,0 +1,1645 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for common attention.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +from absl.testing import parameterized +import kfac +import numpy as np +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import contrib +from tensor2tensor.utils import test_utils + +import tensorflow.compat.v1 as tf + + +tfe = contrib.tfe() +# from tensorflow.contrib.eager.python import tfe as tfe +tf.enable_eager_execution() + + +class CommonAttentionTest(parameterized.TestCase, tf.test.TestCase): + + @test_utils.run_in_graph_and_eager_modes() + def testAttentionBiasLocal(self): + length = 5 + bias = common_attention.attention_bias_local(length, 0, 0) + # For length = 5 + # [[[[-0.e+00 -1.e+09 -1.e+09 -1.e+09 -1.e+09] + # [-1.e+09 -0.e+00 -1.e+09 -1.e+09 -1.e+09] + # [-1.e+09 -1.e+09 -0.e+00 -1.e+09 -1.e+09] + # [-1.e+09 -1.e+09 -1.e+09 -0.e+00 -1.e+09] + # [-1.e+09 -1.e+09 -1.e+09 -1.e+09 -0.e+00]]]] + res = self.evaluate(bias) + expected_res = -1e9 * np.ones((length, length)) - -1e9 * np.identity(length) + expected_res = np.reshape(expected_res, (1, 1, length, length)) + self.assertAllClose(res, expected_res) + + @test_utils.run_in_graph_and_eager_modes() + def testAddPositionalEmbedding(self): + x = np.random.rand(5, 3, 12) + y = common_attention.add_positional_embedding( + tf.constant(x, dtype=tf.float32), + max_length=4, + name="pos_embedding") + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 3, 12)) + + @parameterized.named_parameters( + ("hard_top_k", 0.0), + ("sampled_top_k_default", 1.0), + ("sampled_top_k_2", 2.0), + ) + @test_utils.run_in_graph_and_eager_modes() + def testHardenAttentionWeights(self, gumbel_noise_weight): + x = 
np.random.rand(5, 3, 12) + y = common_attention.harden_attention_weights( + tf.nn.softmax(tf.constant(x, dtype=tf.float32)), 3, gumbel_noise_weight) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 3, 12)) + + @parameterized.named_parameters( + ("hard_top_k", -0.5), + ("sampled_top_k", 0.5), + ) + @test_utils.run_in_graph_and_eager_modes() + def testHardenAttentionAllZeros(self, gumbel_noise_weight): + """Check if the hardening code does not divide by zero for all zeros.""" + x = np.zeros((5, 3, 12), dtype=np.float32) + y = common_attention.harden_attention_weights( + tf.constant(x, dtype=tf.float32), 3, gumbel_noise_weight) + res = self.evaluate(y) + if gumbel_noise_weight <= 0.0: + self.assertAllClose(res, x) + + @parameterized.parameters( + {"input_shape": (5, 3, 12)}, + {"input_shape": (5, 5, 5, 12)}, + {"input_shape": (5, 3, 3, 3, 12)}, + ) + @test_utils.run_in_graph_and_eager_modes() + def testAddPositionalEmbeddingNd(self, input_shape): + x = np.random.rand(*input_shape) + y = common_attention.add_positional_embedding_nd( + tf.constant(x, dtype=tf.float32), + max_length=5, + name="pos_embedding") + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, input_shape) + + @test_utils.run_in_graph_and_eager_modes() + def testAddTimingSignalsGivenPositions(self): + x_positions = tf.expand_dims( + tf.constant([0, 1, 2, 3], dtype=tf.float32), axis=0) + y_positions = tf.expand_dims( + tf.constant([4, 5, 6, 7], dtype=tf.float32), axis=0) + x = tf.zeros([1, 4, 8], dtype=tf.float32) + self.assertAllClose( + common_attention.add_timing_signals_given_positions( + x, [x_positions, y_positions]), + tf.constant([[ + [ + math.sin(0), + math.sin(0 * 1e-4), + math.cos(0), + math.cos(0 * 1e-4), + math.sin(4), + math.sin(4 * 1e-4), + math.cos(4), + math.cos(4 * 1e-4) + ], + [ + math.sin(1), + math.sin(1 * 1e-4), + math.cos(1), + math.cos(1 * 1e-4), + math.sin(5), + math.sin(5 * 1e-4), + math.cos(5), + math.cos(5 * 1e-4) + ], + [ + math.sin(2), + math.sin(2 * 1e-4), + math.cos(2), + math.cos(2 * 1e-4), + math.sin(6), + math.sin(6 * 1e-4), + math.cos(6), + math.cos(6 * 1e-4) + ], + [ + math.sin(3), + math.sin(3 * 1e-4), + math.cos(3), + math.cos(3 * 1e-4), + math.sin(7), + math.sin(7 * 1e-4), + math.cos(7), + math.cos(7 * 1e-4) + ], + ]])) + + @test_utils.run_in_graph_and_eager_modes() + def testAddTimingSignalsGivenPositionsEquivalent(self): + x = tf.zeros([1, 10, 128], dtype=tf.float32) + positions = tf.expand_dims(tf.range(0, 10, dtype=tf.float32), axis=0) + # The method add_timing_signal_1d_given_position could be replaced by + # add_timing_signals_given_positions: + tf.assert_equal( + common_attention.add_timing_signal_1d_given_position(x, positions), + common_attention.add_timing_signals_given_positions(x, [positions])) + + @test_utils.run_in_graph_and_eager_modes() + def testDotProductAttention(self): + x = np.random.rand(5, 7, 12, 32) + y = np.random.rand(5, 7, 12, 32) + a = common_attention.dot_product_attention( + tf.constant(x, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), None) + res = self.evaluate(a) + self.assertEqual(res.shape, (5, 7, 12, 32)) + + @parameterized.parameters( + ([3, 10, 64], 4), + ([3, 10, 20, 64], 2), + ([3, 10, 20, 30, 64], 4), + ) + def testSplitHeadsND(self, shape, num_heads): + t = tf.zeros(shape) + h = common_attention.split_heads_nd(t, num_heads) + res = self.evaluate(h) + self.assertEqual( + res.shape, + tuple(shape[:1] + [num_heads] + shape[1:-1] + [shape[-1] // 
num_heads])) + + @parameterized.parameters( + ([3, 4, 10, 64],), + ([3, 2, 10, 20, 64],), + ([3, 4, 10, 20, 30, 64],), + ) + def testCombineHeadsND(self, shape): + t = tf.zeros(shape) + h = common_attention.combine_heads_nd(t) + res = self.evaluate(h) + self.assertEqual(res.shape, + tuple(shape[:1] + shape[2:-1] + [shape[-1] * shape[1]])) + + @parameterized.parameters( + ([3, 4, 10, 64], (5,), (10,)), + ([3, 4, 10, 10, 64], (5, 5), (5, 5)), + ([3, 4, 10, 10, 10, 64], (5, 5, 5), (5, 5, 5)), + ) + def testShapeMaskedLocalAttentionND(self, shape, query_shape, memory_flange): + q = k = v = tf.reshape(tf.range(np.prod(shape), dtype=tf.float32), shape) + val = common_attention.masked_local_attention_nd(q, k, v, query_shape, + memory_flange) + res = self.evaluate(val) + self.assertEqual(res.shape, tuple(shape)) + + @test_utils.run_in_graph_and_eager_modes() + def testRightShiftBlockwiseND(self): + tensor = tf.convert_to_tensor(np.array([[ + [[1], [2], [3], [4]], + [[5], [6], [7], [8]], + [[9], [10], [11], [12]], + [[13], [14], [15], [16]], + ]], dtype=np.float32)) + val = common_attention.right_shift_blockwise_nd(tensor, (2, 2)) + res = self.evaluate(val) + expected_val = np.array([[ + [[0], [1], [6], [3]], + [[2], [5], [4], [7]], + [[8], [9], [14], [11]], + [[10], [13], [12], [15]], + ]], dtype=np.float32) + self.assertAllClose(expected_val, res) + + @test_utils.run_in_graph_and_eager_modes() + def testContentMaskedLocalAttentionND(self): + def softmax(arr): + return np.exp(arr) / np.sum(np.exp(arr)) + + q = k = v = tf.convert_to_tensor( + np.array([[[ + [[0.1], [0.1], [0.1], [0.1]], + [[0.1], [1.0], [1.0], [0.1]], + [[0.1], [1.0], [1.0], [0.1]], + [[0.1], [0.1], [0.1], [0.1]], + ]]], dtype=np.float32)) + attn_weights = np.array([[[[softmax([-1e9, -1e9, -1e9, -1e9, 0.01]), + softmax([-1e9, -1e9, -1e9, 0.01, 0.01]), + softmax([-1e9, -1e9, -1e9, 0.01, 0.01]), + softmax([-1e9, -1e9, -1e9, 0.01, 0.01]) + ], + [softmax([-1e9, 0.01, 0.01, -1e9, 0.01]), + softmax([0.1, 0.1, 0.1, 0.1, 1.0]), + softmax([0.1, 0.1, 0.1, 1.0, 1.0]), + softmax([0.01, 0.01, -1e9, 0.1, 0.01]) + ], + [softmax([-1e9, 0.01, 0.1, -1e9, 0.01]), + softmax([0.1, 1.0, 1.0, 0.1, 1.0]), + softmax([1.0, 1.0, 0.1, 1.0, 1.0]), + softmax([0.1, 0.01, -1e9, 0.1, 0.01]) + ], + [softmax([-1e9, 0.01, 0.1, -1e9, 0.01]), + softmax([0.01, 0.1, 0.1, 0.01, 0.01]), + softmax([0.1, 0.1, 0.01, 0.01, 0.01]), + softmax([0.1, 0.01, -1e9, 0.01, 0.01]) + ]]]]) + blocked_v = np.array([[[[[0, 0, 0, 0, 0.1], + [0, 0, 0, 0.1, 0.1], + [0, 0, 0, 0.1, 0.1], + [0, 0, 0, 0.1, 0.1]], + [[0, 0.1, 0.1, 0, 0.1], + [0.1, 0.1, 0.1, 0.1, 1], + [0.1, 0.1, 0.1, 1, 1], + [0.1, 0.1, 0, 1, 0.1]], + [[0, 0.1, 1, 0, 0.1], + [0.1, 1, 1, 0.1, 1], + [1, 1, 0.1, 1, 1], + [1, 0.1, 0, 1, 0.1]], + [[0, 0.1, 1, 0, 0.1], + [0.1, 1, 1, 0.1, 0.1], + [1, 1, 0.1, 0.1, 0.1], + [1, 0.1, 0, 0.1, 0.1]]]]]) + expected_val = np.expand_dims( + np.sum(attn_weights * blocked_v, axis=4), axis=-1) + val = common_attention.masked_local_attention_nd(q, k, v, (1, 1), (1, 1)) + res = self.evaluate(val) + self.assertAllClose(expected_val, res) + + @test_utils.run_in_graph_and_eager_modes() + def testSelectBlockForDecodeStep(self): + tensor = tf.reshape( + tf.range(2 * 6 * 6 * 4, dtype=tf.float32), [2, 6, 6, 4, 1]) + block = common_attention.select_block_for_decode_step(tensor, 20, (2, 2)) + expected_tensor = tensor[:, 0:1, 5:6, :, :] + expected_value = self.evaluate(expected_tensor) + res = self.evaluate(block) + self.assertAllClose(expected_value, res) + + @parameterized.parameters( + ((2, 6, 4, 10),), + 
((2, 6, 6, 4, 10),), + ((2, 6, 6, 6, 4, 10),), + ) + def testFlattenBlocksND(self, shape): + tensor = tf.zeros(shape, dtype=tf.float32) + value, _ = common_attention.flatten_blocks_nd(tensor) + res = self.evaluate(value) + self.assertAllClose(res.shape, + (shape[0], np.prod(shape[1:-2]), shape[-2], shape[-1])) + + @parameterized.parameters( + ((5,),), + ((5, 10),), + ((5, 10, 15),), + ) + def testUnflattenBlocksND(self, blocks_per_dim): + tensor = tf.zeros([2, np.prod(blocks_per_dim), 6, 10]) + value = common_attention.unflatten_blocks_nd(tensor, blocks_per_dim) + res = self.evaluate(value) + self.assertAllClose(res.shape, (2,) + blocks_per_dim + (6, 10)) + + @test_utils.run_in_graph_and_eager_modes() + def testBreakIntoMemoryBlocksND(self): + tensor = tf.convert_to_tensor( + np.array([[ + [[1], [2], [3], [4]], + [[5], [6], [7], [8]], + [[9], [10], [11], [12]], + [[13], [14], [15], [16]], + ]])) + value = common_attention.break_into_memory_blocks_nd(tensor, + (2, 2), + (2, 2), + masked=True) + res = self.evaluate(value) + expected_value = np.array([[ + [ + [ + [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], + [0], [0], [0], [0], [1], [2], [5], [6], [3], [4], [7], [8] + ], + [ + [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], [0], + [1], [2], [5], [6], [3], [4], [7], [8], [0], [0], [0], [0] + ] + ], + [ + [ + [0], [0], [0], [0], [1], [2], [5], [6], [3], [4], [7], [8], [0], + [0], [0], [0], [9], [10], [13], [14], [11], [12], [15], [16] + ], + [ + [1], [2], [5], [6], [3], [4], [7], [8], [0], [0], [0], [0], [9], + [10], [13], [14], [11], [12], [15], [16], [0], [0], [0], [0] + ] + ]]]) + self.assertAllClose(expected_value, res) + + @test_utils.run_in_graph_and_eager_modes() + def testBreakIntoBlocksND(self): + tensor = tf.convert_to_tensor( + np.array([[ + [[1], [2], [3], [4]], + [[5], [6], [7], [8]], + [[9], [10], [11], [12]], + [[13], [14], [15], [16]], + ]])) + value = common_attention.break_into_blocks_nd(tensor, (2, 2)) + res = self.evaluate(value) + expected_value = np.array([[ + [[[1], [2], [5], [6]], [[3], [4], [7], [8]]], + [[[9], [10], [13], [14]], [[11], [12], [15], [16]]] + ]]) + self.assertAllClose(expected_value, res) + + @test_utils.run_in_graph_and_eager_modes() + def testPutBackBlocksND(self): + tensor = tf.convert_to_tensor( + np.array([[ + [[[1], [2], [5], [6]], [[3], [4], [7], [8]]], + [[[9], [10], [13], [14]], [[11], [12], [15], [16]]] + ]])) + value = common_attention.put_back_blocks_nd(tensor, (2, 2)) + res = self.evaluate(value) + expected_value = np.array([[ + [[1], [2], [3], [4]], + [[5], [6], [7], [8]], + [[9], [10], [11], [12]], + [[13], [14], [15], [16]], + ]]) + self.assertAllClose(expected_value, res) + + @parameterized.parameters( + ((2, 100, 5), (7,), (2, 105, 5)), + ((2, 100, 100, 5), (5, 7), (2, 100, 105, 5)), + ((2, 100, 100, 100, 5), (10, 20, 30), (2, 100, 100, 120, 5)) + ) + def testPadToMultipleND(self, tensor_shape, block_shape, expected_shape): + tensor = tf.zeros(tensor_shape) + value = common_attention.pad_to_multiple_nd(tensor, block_shape) + res = self.evaluate(value) + self.assertAllClose(res.shape, expected_shape) + + @test_utils.run_in_graph_and_eager_modes() + def testCausalAttentionBiasND(self): + bias = common_attention.causal_attention_bias_nd((2, 2), (2, 2)) + res = self.evaluate(bias) + expected_val = np.array([[[ + [0] * 17 + [-1e9] * 7, + [0] * 18 + [-1e9] * 6, + [0] * 19 + [-1e9] * 5, + [0] * 20 + [-1e9] * 4, + ]]]) + self.assertAllClose(expected_val, res) + + @parameterized.parameters( + ((1, 64, 10), (80,), (80,)), + 
((1, 64, 64, 10), (8, 8), (16, 16)), + ((1, 5, 64, 64, 10), (1, 8, 8), (1, 8, 8)) + ) + def testMultiheadAttentionND(self, tensor_shape, query_shape, memory_flange): + query_antecedent = tf.zeros(tensor_shape) + value = common_attention.multihead_attention_nd( + query_antecedent=query_antecedent, + memory_antecedent=None, + total_key_depth=256, + total_value_depth=256, + output_depth=256, + num_heads=4, + query_shape=query_shape, + memory_flange=memory_flange, + masked=True) + res = self.evaluate(value) + self.assertAllClose(res.shape, tensor_shape[:-1] + (256,)) + + @parameterized.parameters( + (15, (5,), (100,), (15,)), + (10, (2, 2), (4, 4), (3, 0)), + (25, (2, 2, 3), (10, 10, 12), (0, 0, 7)) + ) + def testDecodeStepToIndex(self, decode_step, query_shape, tensor_shape, + expected_index): + res = common_attention.decode_step_to_index(decode_step, query_shape, + tensor_shape) + self.assertAllClose(res, expected_index) + + @test_utils.run_in_graph_and_eager_modes() + def testGetItemAtDecodeStep(self): + tensor = tf.reshape(tf.range(25 * 25 * 4), [1, 4, 25, 25, 1]) + value = common_attention.get_item_at_decode_step(tensor, 100, (2, 5, 5)) + res = self.evaluate(value) + expected_value = np.array([[[[[10]]]]]) + self.assertAllClose(expected_value, res) + + @test_utils.run_in_graph_and_eager_modes() + def testPutItemAtDecodeStep(self): + tensor = tf.zeros([1, 1, 10, 10, 1]) + item = tf.ones([1, 1, 1, 1, 1]) + value = common_attention.put_item_in_decode_step(tensor, item, 32, (2, 2)) + res = self.evaluate(value) + expected_val = np.zeros([1, 1, 10, 10, 1]) + expected_val[0, 0, 2, 6, 0] = 1 + self.assertAllClose(expected_val, res) + + @parameterized.named_parameters( + ("", 1, 1, 8, 4, 1, 2), + ("dynamic_batch", None, 1, 8, 4, 1, 2), + ("batches", 4, 3, 8, 4, 1, 2), + ("depth_v", 1, 1, 8, 4, 3, 2), + ("block_length", 1, 1, 8, 4, 1, 4), + ) + def testMaskedWithinBlockLocalAttention1D(self, batch, heads, length, + depth_k, depth_v, block_length): + if batch is None: + batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32) + q = tf.random_normal([batch, heads, length, depth_k]) + k = tf.random_normal([batch, heads, length, depth_k]) + v = tf.random_normal([batch, heads, length, depth_v]) + output = common_attention.masked_within_block_local_attention_1d( + q, k, v, block_length=block_length) + if isinstance(batch, tf.Tensor): + batch, res = self.evaluate([batch, output]) + else: + res = self.evaluate(output) + + self.assertEqual(res.shape, (batch, heads, length, depth_v)) + + @parameterized.named_parameters( + ("", 1, 1, 8, 4, 1, 2), + ("dynamic_batch", None, 1, 8, 4, 1, 2), + ("batches", 4, 3, 8, 4, 1, 2), + ("depth_v", 1, 1, 8, 4, 3, 2), + ("block_length", 1, 1, 8, 4, 1, 4), + ) + def testMaskedLocalAttention1D(self, batch, heads, length, depth_k, depth_v, + block_length): + if batch is None: + batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32) + q = tf.random_normal([batch, heads, length, depth_k]) + k = tf.random_normal([batch, heads, length, depth_k]) + v = tf.random_normal([batch, heads, length, depth_v]) + output = common_attention.masked_local_attention_1d( + q, k, v, block_length=block_length) + if isinstance(batch, tf.Tensor): + batch, res = self.evaluate([batch, output]) + else: + res = self.evaluate(output) + + self.assertEqual(res.shape, (batch, heads, length, depth_v)) + + @parameterized.named_parameters( + ("", 1, 1, 8, 4, 4, (2, 2)), + ("dynamic_batch", None, 1, 8, 4, 4, (2, 2)), + ("batches", 3, 2, 8, 4, 4, (2, 2)), + # TODO(trandustin): Extend function to 
enable depth_k != depth_v. + # ("depth_v", 1, 1, 8, 4, 1, (2, 2)), + ("query_shape", 1, 1, 8, 4, 4, (4, 4)), + ) + def testMaskedLocalAttention2D(self, batch, heads, length, depth_k, depth_v, + query_shape): + if batch is None: + batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32) + q = tf.random_normal([batch, heads, length, length, depth_k]) + k = tf.random_normal([batch, heads, length, length, depth_k]) + v = tf.random_normal([batch, heads, length, length, depth_v]) + output = common_attention.masked_local_attention_2d( + q, + k, + v, + query_shape=query_shape, + memory_flange=(2, 2)) + if isinstance(batch, tf.Tensor): + batch, res = self.evaluate([batch, output]) + else: + res = self.evaluate(output) + + self.assertEqual(res.shape, (batch, heads, length, length, depth_v)) + + @parameterized.named_parameters( + ("matching_block_length", 3, 4, 25, 16, 16, 5), + ("unmatching_block_length", 3, 4, 25, 16, 16, 4), + ("dynamic_batch", None, 4, 25, 16, 16, 5), + ("different_depth_v", 3, 4, 25, 16, 17, 5), + ) + def testLocalUnmaskedAttention1D(self, batch, heads, length, + depth_k, depth_v, block_length): + if batch is None: + batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32) + q = tf.random_normal([batch, heads, length, depth_k]) + k = tf.random_normal([batch, heads, length, depth_k]) + v = tf.random_normal([batch, heads, length, depth_v]) + output = common_attention.local_attention_1d( + q, k, v, block_length=block_length, filter_width=3) + if isinstance(batch, tf.Tensor): + batch, res = self.evaluate([batch, output]) + else: + res = self.evaluate(output) + + self.assertEqual(res.shape, (batch, heads, length, depth_v)) + + @parameterized.named_parameters( + ("matching_block_length", 3, 4, 25, 16, 16, (4, 4)), + ("unmatching_block_length", 3, 4, 25, 16, 16, (5, 5)), + ("dynamic_batch", None, 4, 25, 16, 16, (4, 4)), + # TODO(trandustin): Extend function to enable depth_k != depth_v. 
+ # ("different_depth_v", 3, 4, 25, 16, 17, (4, 4)), + ) + def testLocalUnmaskedAttention2D(self, batch, heads, length, + depth_k, depth_v, query_shape): + if batch is None: + batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32) + q = tf.random_normal([batch, heads, length, length, depth_k]) + k = tf.random_normal([batch, heads, length, length, depth_k]) + v = tf.random_normal([batch, heads, length, length, depth_v]) + output = common_attention.local_attention_2d( + q, + k, + v, + query_shape=query_shape, + memory_flange=(3, 3)) + if isinstance(batch, tf.Tensor): + batch, res = self.evaluate([batch, output]) + else: + res = self.evaluate(output) + + self.assertEqual(res.shape, (batch, heads, length, length, depth_v)) + + @test_utils.run_in_graph_mode_only() + def testMultiheadSelfAttentionMemoryEfficient(self): + num_heads = 4 + io_size = 16 + batch = 2 + length = 7 + head_size = 5 + x = np.random.rand(batch, length, io_size) + dy = np.random.rand(batch, length, io_size) + with self.test_session() as session: + x = tf.to_float(x) + dy = tf.to_float(dy) + bias = common_attention.attention_bias_lower_triangle(length) + wqkv = tf.get_variable( + "wqkv", [num_heads, 1, io_size, 3 * head_size], + initializer=tf.random_normal_initializer(stddev=io_size**-0.5)) + wo = tf.get_variable( + "wo", [num_heads, 1, head_size, io_size], + initializer=tf.random_normal_initializer( + stddev=(head_size * num_heads)**-0.5)) + norm_scale, norm_bias = common_layers.layer_norm_vars(io_size) + y = common_attention.multihead_self_attention_memory_efficient( + x, bias, num_heads, head_size=head_size, forget=False, + test_vars=(wqkv, wo, norm_scale, norm_bias)) + y_forget = common_attention.multihead_self_attention_memory_efficient( + x, bias, num_heads, head_size=head_size, forget=True, + test_vars=(wqkv, wo, norm_scale, norm_bias)) + dx, dwqkv, dwo, dnorm_scale, dnorm_bias = tf.gradients( + ys=[y], xs=[x, wqkv, wo, norm_scale, norm_bias], grad_ys=[dy]) + dx_f, dwqkv_f, dwo_f, dnorm_scale_f, dnorm_bias_f = tf.gradients( + ys=[y_forget], xs=[x, wqkv, wo, norm_scale, norm_bias], grad_ys=[dy]) + session.run(tf.global_variables_initializer()) + (y, y_forget, + dx, dwqkv, dwo, dnorm_scale, dnorm_bias, + dx_f, dwqkv_f, dwo_f, dnorm_scale_f, dnorm_bias_f) = session.run( + [y, y_forget, + dx, dwqkv, dwo, dnorm_scale, dnorm_bias, + dx_f, dwqkv_f, dwo_f, dnorm_scale_f, dnorm_bias_f]) + self.assertAllClose(y, y_forget) + self.assertAllClose(dwo, dwo_f) + self.assertAllClose(dwqkv, dwqkv_f) + self.assertAllClose(dnorm_scale, dnorm_scale_f) + self.assertAllClose(dnorm_bias, dnorm_bias_f) + self.assertAllClose(dx, dx_f) + + @test_utils.run_in_graph_and_eager_modes() + def test2dGatherAndScatterInvertibility(self): + """2d gather and scatter invertibility test.""" + batch_size = 2 + num_heads = 2 + height = 4 + width = 6 + depth = 8 + query_shape = (2, 3) + x = np.random.rand(batch_size, num_heads, height, width, depth) + x_indices = common_attention.gather_indices_2d( + x, query_shape, query_shape) + gathered_x = common_attention.gather_blocks_2d(x, x_indices) + x_shape = tf.constant([batch_size, num_heads, height, width, depth]) + scattered_x = common_attention.scatter_blocks_2d( + gathered_x, x_indices, x_shape) + res = self.evaluate(scattered_x) + self.assertAllClose(x, res) + + @test_utils.run_in_graph_and_eager_modes() + def test2dBlockRasterScanMask(self): + """Testing the 2d block raster scan mask.""" + query_shape = (2, 3) + memory_flange = (2, 1) + mask = common_attention.make_2d_block_raster_mask( + 
query_shape, memory_flange) + res = self.evaluate(mask) + correct_mask = np.array( + [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0, + 1.0, 0.0, 1.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, + 1.0, 0.0, 1.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 1.0, 0.0, 1.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 1.0, 0.0, 0.0, 1.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 1.0, 0.0, 0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 1.0, 0.0, 0.0, 0.0, 0.0, 1.0]]) + self.assertAllClose(correct_mask, res) + + @test_utils.run_in_graph_and_eager_modes() + def test2dGather(self): + """Testing 2d index gather and block gather functions.""" + batch_size = 2 + num_heads = 2 + height = 4 + width = 6 + depth = 8 + query_shape = (2, 3) + x = np.random.rand(batch_size, num_heads, height, width, depth) + y = np.reshape(x, (batch_size, num_heads, -1, depth)) + correct_indices = [[0, 1, 2, 6, 7, 8], + [3, 4, 5, 9, 10, 11], + [12, 13, 14, 18, 19, 20], + [15, 16, 17, 21, 22, 23]] + correct_gathered_x = [[[y[0, 0, correct_indices[0]], + y[0, 0, correct_indices[1]], + y[0, 0, correct_indices[2]], + y[0, 0, correct_indices[3]]], + [y[0, 1, correct_indices[0]], + y[0, 1, correct_indices[1]], + y[0, 1, correct_indices[2]], + y[0, 1, correct_indices[3]]]], + [[y[1, 0, correct_indices[0]], + y[1, 0, correct_indices[1]], + y[1, 0, correct_indices[2]], + y[1, 0, correct_indices[3]]], + [y[1, 1, correct_indices[0]], + y[1, 1, correct_indices[1]], + y[1, 1, correct_indices[2]], + y[1, 1, correct_indices[3]]]]] + + x_indices = common_attention.gather_indices_2d( + x, query_shape, query_shape) + gathered_x = common_attention.gather_blocks_2d(x, x_indices) + x_indices, gathered_x = self.evaluate([x_indices, gathered_x]) + self.assertAllEqual(correct_indices, x_indices) + self.assertAllClose(correct_gathered_x, gathered_x) + + @test_utils.run_in_graph_and_eager_modes() + def testGetMemoryRegion(self): + """Testing the function that gathers the flanged memory region.""" + np.set_printoptions(threshold=np.inf) + batch_size = 2 + num_heads = 2 + height = 4 + width = 6 + depth = 3 + query_shape = (2, 3) + memory_flange = (1, 1) + + x = np.random.rand(batch_size, num_heads, height, width, depth) + y = np.reshape(x, (batch_size, num_heads, -1, depth)) + zeros = np.zeros((depth), dtype=np.float32) + five_zeros = np.array([zeros]*5) + seven_zeros = np.array([zeros]*7) + two_zeros = np.array([zeros]*2) + zeros = np.array([zeros]) + + correct_x_flange = [[[seven_zeros, + np.concatenate((five_zeros, y[0, 0, [2, 8]]), + axis=0), + np.concatenate((zeros, y[0, 0, [6, 7, 8, 9]], + two_zeros), axis=0), + np.concatenate((y[0, 0, [8, 9, 10, 11]], zeros, + y[0, 0, [14, 20]]), axis=0)], + [seven_zeros, + np.concatenate((five_zeros, y[0, 1, [2, 8]]), + axis=0), + np.concatenate((zeros, y[0, 1, [6, 7, 8, 9]], + two_zeros), axis=0), + np.concatenate((y[0, 1, [8, 9, 10, 11]], zeros, + y[0, 1, [14, 20]]), axis=0)]], + [[seven_zeros, + np.concatenate((five_zeros, y[1, 0, [2, 8]]), + axis=0), + np.concatenate((zeros, y[1, 0, [6, 7, 8, 9]], + two_zeros), axis=0), + np.concatenate((y[1, 0, [8, 9, 10, 11]], zeros, + y[1, 0, [14, 20]]), axis=0)], + [seven_zeros, + np.concatenate((five_zeros, y[1, 1, [2, 8]]), + axis=0), + np.concatenate((zeros, y[1, 1, [6, 7, 8, 9]], + two_zeros), axis=0), + 
np.concatenate((y[1, 1, [8, 9, 10, 11]], zeros, + y[1, 1, [14, 20]]), axis=0)]]] + correct_x_flange = np.array(correct_x_flange) + correct_x_center = [[[y[0, 0, [0, 1, 2, 6, 7, 8]], + y[0, 0, [3, 4, 5, 9, 10, 11]], + y[0, 0, [12, 13, 14, 18, 19, 20]], + y[0, 0, [15, 16, 17, 21, 22, 23]]], + [y[0, 1, [0, 1, 2, 6, 7, 8]], + y[0, 1, [3, 4, 5, 9, 10, 11]], + y[0, 1, [12, 13, 14, 18, 19, 20]], + y[0, 1, [15, 16, 17, 21, 22, 23]]]], + [[y[1, 0, [0, 1, 2, 6, 7, 8]], + y[1, 0, [3, 4, 5, 9, 10, 11]], + y[1, 0, [12, 13, 14, 18, 19, 20]], + y[1, 0, [15, 16, 17, 21, 22, 23]]], + [y[1, 1, [0, 1, 2, 6, 7, 8]], + y[1, 1, [3, 4, 5, 9, 10, 11]], + y[1, 1, [12, 13, 14, 18, 19, 20]], + y[1, 1, [15, 16, 17, 21, 22, 23]]]]] + correct_x_center = np.array(correct_x_center) + x_indices = common_attention.gather_indices_2d( + x, query_shape, query_shape) + x_flange, x_center = common_attention.get_memory_region( + tf.constant(x, dtype=tf.float32), + query_shape, + memory_flange, + x_indices) + [x_flange, x_center] = self.evaluate([x_flange, x_center]) + self.assertAllClose(correct_x_flange, x_flange) + self.assertAllClose(correct_x_center, x_center) + + @test_utils.run_in_graph_and_eager_modes() + def testGetShiftedCenterBlocks(self): + """Testing the function that gathers the flanged memory region.""" + np.set_printoptions(threshold=np.inf) + batch_size = 2 + num_heads = 2 + height = 4 + width = 6 + depth = 3 + query_shape = (2, 3) + + x = np.random.rand(batch_size, num_heads, height, width, depth) + y = np.reshape(x, (batch_size, num_heads, -1, depth)) + zeros = np.zeros((depth), dtype=np.float32) + zeros = np.array([zeros]) + + correct_gathered_x = [[[np.concatenate((zeros, y[0, 0, [0, 1, 2, 6, 7]]), + axis=0), + np.concatenate((zeros, y[0, 0, [3, 4, 5, 9, 10]]), + axis=0), + np.concatenate((zeros, + y[0, 0, [12, 13, 14, 18, 19]]), + axis=0), + np.concatenate((zeros, + y[0, 0, [15, 16, 17, 21, 22]]), + axis=0)], + [np.concatenate((zeros, y[0, 1, [0, 1, 2, 6, 7]]), + axis=0), + np.concatenate((zeros, y[0, 1, [3, 4, 5, 9, 10]]), + axis=0), + np.concatenate((zeros, + y[0, 1, [12, 13, 14, 18, 19]]), + axis=0), + np.concatenate((zeros, + y[0, 1, [15, 16, 17, 21, 22]]), + axis=0)]], + [[np.concatenate((zeros, y[1, 0, [0, 1, 2, 6, 7]]), + axis=0), + np.concatenate((zeros, y[1, 0, [3, 4, 5, 9, 10]]), + axis=0), + np.concatenate((zeros, + y[1, 0, [12, 13, 14, 18, 19]]), + axis=0), + np.concatenate((zeros, + y[1, 0, [15, 16, 17, 21, 22]]), + axis=0)], + [np.concatenate((zeros, y[1, 1, [0, 1, 2, 6, 7]]), + axis=0), + np.concatenate((zeros, y[1, 1, [3, 4, 5, 9, 10]]), + axis=0), + np.concatenate((zeros, + y[1, 1, [12, 13, 14, 18, 19]]), + axis=0), + np.concatenate((zeros, + y[1, 1, [15, 16, 17, 21, 22]]), + axis=0)]]] + correct_gathered_x = np.array(correct_gathered_x) + x_indices = common_attention.gather_indices_2d( + x, query_shape, query_shape) + gathered_x = common_attention.get_shifted_center_blocks( + tf.constant(x, dtype=tf.float32), + x_indices) + x_indices, gathered_x = self.evaluate([x_indices, gathered_x]) + self.assertAllClose(correct_gathered_x, gathered_x) + + @test_utils.run_in_graph_and_eager_modes() + def testDotProductAttentionRelative(self): + x = np.random.rand(5, 7, 12, 32) + y = np.random.rand(5, 7, 12, 32) + a = common_attention.dot_product_attention_relative( + tf.constant(x, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + None, + max_relative_position=3) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + 
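+    # dot_product_attention_relative should preserve the query's
+    # (batch, heads, length, depth) shape, checked below.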
self.assertEqual(res.shape, (5, 7, 12, 32)) + + @test_utils.run_in_graph_and_eager_modes() + def testRelativeAttentionV2(self): + # (batch, heads, length, depth) + x = np.random.rand(5, 4, 16, 7) + y = np.random.rand(5, 4, 16, 7) + max_relative_position = 3 + a = common_attention.dot_product_self_attention_relative_v2( + tf.constant(x, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + None, + max_relative_position=max_relative_position, + heads_share_relative_embedding=False) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (5, 4, 16, 7)) + + @test_utils.run_in_graph_and_eager_modes() + def testRelativeAttentionV2SharedRel(self): + # (batch, heads, length, depth) + x = np.random.rand(5, 4, 16, 7) + y = np.random.rand(5, 4, 16, 7) + max_relative_position = 3 + a = common_attention.dot_product_self_attention_relative_v2( + tf.constant(x, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + None, + max_relative_position=max_relative_position, + heads_share_relative_embedding=True) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (5, 4, 16, 7)) + + @test_utils.run_in_graph_and_eager_modes() + def testRelativeAttentionV2MaxRelativeLargerThanLength(self): + # (batch, heads, length, depth) + x = np.random.rand(5, 4, 3, 7) + y = np.random.rand(5, 4, 3, 7) + max_relative_position = 16 + a = common_attention.dot_product_self_attention_relative_v2( + tf.constant(x, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + None, + max_relative_position=max_relative_position, + heads_share_relative_embedding=False) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (5, 4, 3, 7)) + + @test_utils.run_in_graph_and_eager_modes() + def testDotProductUnMaskedAttentionRelativeV2(self): + x = np.random.rand(5, 7, 12, 32) + y = np.random.rand(5, 7, 12, 32) + a = common_attention.dot_product_unmasked_self_attention_relative_v2( + tf.constant(x, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + None, + 35) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (5, 7, 12, 32)) + + @tfe.run_test_in_graph_and_eager_modes() + def testExtractblocks(self): + + batch_size = 1 + num_heads = 3 + height = 6 + width = 10 + depth = 15 + block_h = 3 + block_w = 2 + t = np.random.rand(batch_size * num_heads, height, width, depth) + a = common_attention._extract_blocks(t, block_h, block_w) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (batch_size * num_heads, height//block_h, + width//block_w, block_h, block_w, depth)) + # also check if the content is right + out = np.zeros((batch_size*num_heads, height//block_h, + width//block_w, block_h, block_w, depth)) + for b in range(batch_size*num_heads): + for x in range(height//block_h): + for y in range(width//block_w): + for v in range(block_h): + for w in range(block_w): + out[b, x, y, v, w] = t[b, block_h*x+v, block_w*y+w] + self.assertAllClose(res, out) + + def python_get_2d_local_memory(self, t, batch_size, num_heads, height, width, + num_h_blocks, num_w_blocks, query_shape, + memory_flange, depth): + # also check if the content is right + out = np.zeros((batch_size, num_heads, height//query_shape[0], + width//query_shape[1], 
query_shape[0]+2*memory_flange[0], + query_shape[1]+2*memory_flange[1], depth)) + memory_height = query_shape[0]+2*memory_flange[0] + memory_width = query_shape[1]+2*memory_flange[1] + t_padded = np.pad(t, ((0, 0), (0, 0), (memory_flange[0], memory_flange[0]), + (memory_flange[1], memory_flange[1]), (0, 0)), + "constant", + constant_values=((0, 0), (0, 0), (0, 0), (0, 0), (0, 0))) + for b in range(batch_size): + for h in range(num_heads): + for x in range(num_h_blocks): + for y in range(num_w_blocks): + for v in range(memory_height): + for w in range(memory_width): + memory_h_start = x*query_shape[0] + memory_w_start = y*query_shape[1] + memory_h_index = memory_h_start + v + memory_w_index = memory_w_start + w + out[b, h, x, y, v, w] = t_padded[b, h, memory_h_index, + memory_w_index] + return out + + @tfe.run_test_in_graph_and_eager_modes() + def testGet2dLocalMemory(self): + batch_size = 3 + num_heads = 3 + height = 6 + width = 6 + depth = 15 + num_h_blocks = 3 + num_w_blocks = 3 + memory_flange = [1, 1] + query_shape = [2, 2] + t = np.random.rand(batch_size, num_heads, height, width, depth) + a = common_attention.get_2d_local_memory_v2( + np.reshape(t, (batch_size*num_heads, height, width, depth)), + query_shape, memory_flange) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (batch_size*num_heads, + num_h_blocks, + num_w_blocks, + query_shape[0]+2*memory_flange[0], + query_shape[1]+2*memory_flange[1], depth)) + out = self.python_get_2d_local_memory(t, batch_size, num_heads, + height, width, num_h_blocks, + num_w_blocks, query_shape, + memory_flange, depth) + out = np.reshape(out, (batch_size*num_heads, + num_h_blocks, + num_w_blocks, + query_shape[0]+2*memory_flange[0], + query_shape[1]+2*memory_flange[1], depth)) + + self.assertAllClose(res, out) + + @tfe.run_test_in_graph_and_eager_modes() + def testSplitAlongWidth(self): + batch_size = 1 + num_heads = 3 + num_outer_h_blocks = 4 + num_outer_w_blocks = 8 + memory_flange = [2, 2] + num_w_blocks = 3 + depth = 15 + t = np.random.rand(batch_size*num_heads, num_outer_h_blocks, + num_outer_w_blocks, memory_flange[0], memory_flange[1], + depth) + a = common_attention._split_along_width(t) + # self.evaluate(tf.global_variables_initializer()) + res_l, res_r = self.evaluate(a) + # res = self.evaluate(a) + self.assertEqual(res_l.shape, (batch_size*num_heads, num_outer_h_blocks, + num_w_blocks, memory_flange[0], + memory_flange[1], depth)) + self.assertEqual(res_r.shape, (batch_size*num_heads, num_outer_h_blocks, + num_w_blocks, memory_flange[0], + memory_flange[1], depth)) + # also check if the content is right + out_l = np.zeros((batch_size*num_heads, num_outer_h_blocks, num_w_blocks, + memory_flange[0], memory_flange[1], depth)) + out_r = np.zeros((batch_size*num_heads, num_outer_h_blocks, num_w_blocks, + memory_flange[0], memory_flange[1], depth)) + block_h = memory_flange[0] + block_w = memory_flange[1] + for b in range(batch_size*num_heads): + for x in range(num_outer_h_blocks): + for y in range(num_w_blocks): + for v in range(block_h): + for w in range(block_w): + # we should compute the index of the position in the + out_l[b, x, y, v, w] = ( + t[b, x, 2*y, v, w] + ) + out_r[b, x, y, v, w] = ( + t[b, x, 2*y+3, v, w] + ) + self.assertAllClose(res_l, out_l) + self.assertAllClose(res_r, out_r) + + @tfe.run_test_in_graph_and_eager_modes() + def testGetLeftRightBlocks(self): + batch_size = 1 + num_heads = 3 + num_outer_h_blocks = 6 + num_outer_w_blocks = 6 + memory_flange = [2, 2] + 
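+    # _get_left_right_blocks gathers the left and right memory blocks for each
+    # query block; the expected outputs are reconstructed below by indexing
+    # into the grid of outer blocks.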
num_h_blocks = 2 + num_w_blocks = 2 + depth = 15 + t = np.random.rand(batch_size*num_heads, num_outer_h_blocks, + num_outer_w_blocks, memory_flange[0], memory_flange[1], + depth) + a = common_attention._get_left_right_blocks(t) + self.evaluate(tf.global_variables_initializer()) + res_l, res_r = self.evaluate(a) + self.assertEqual(res_l.shape, (batch_size*num_heads, num_h_blocks, + num_w_blocks, memory_flange[0]*2, + memory_flange[1], depth)) + self.assertEqual(res_r.shape, (batch_size*num_heads, num_h_blocks, + num_w_blocks, memory_flange[0]*2, + memory_flange[1], depth)) + # also check if the content is right + block_h = memory_flange[0]*2 + block_w = memory_flange[1] + out_l = np.zeros((batch_size*num_heads, num_h_blocks, + num_w_blocks, memory_flange[0]*2, memory_flange[1], + depth)) + out_r = np.zeros((batch_size*num_heads, num_h_blocks, + num_w_blocks, memory_flange[0]*2, memory_flange[1], + depth)) + block_h = memory_flange[0]*2 + block_w = memory_flange[1] + for b in range(batch_size*num_heads): + for x in range(num_h_blocks): + for y in range(num_w_blocks): + for v in range(block_h): + for w in range(block_w): + # we should compute the index of the position in the + outer_block_h_index = ( + 1 + block_h//memory_flange[0]*x + v//2) + h_index = v%memory_flange[0] + left_outer_w_index = 2*y + right_outer_w_index = 2*y + 3 + out_l[b, x, y, v, w] = ( + t[b, outer_block_h_index, left_outer_w_index, h_index, + w] + ) + out_r[b, x, y, v, w] = ( + t[b, outer_block_h_index, right_outer_w_index, h_index, + w] + ) + self.assertAllClose(res_l, out_l) + self.assertAllClose(res_r, out_r) + + @tfe.run_test_in_graph_and_eager_modes() + def testDotProductUnmaskedAttentionLocal2dTpu(self): + batch_size = 1 + num_heads = 3 + height = 7 + width = 12 + depth = 15 + num_h_blocks = 4 + num_w_blocks = 6 + memory_flange = [1, 1] + query_shape = [2, 2] + memory_h = query_shape[0] + 2*memory_flange[0] + memory_w = query_shape[1] + 2*memory_flange[1] + + q = np.random.rand(batch_size, num_heads, height, width, depth) + k = np.random.rand(batch_size, num_heads, height, width, depth) + v = np.random.rand(batch_size, num_heads, height, width, depth) + a = common_attention.dot_product_unmasked_attention_local_2d_tpu( + tf.constant(q, dtype=tf.float32), + tf.constant(k, dtype=tf.float32), + tf.constant(v, dtype=tf.float32), None, max_relative_position=None, + query_shape=query_shape, dropout_rate=0.0, image_shapes=None, + name=None, make_image_summary=False, dropout_broadcast_dims=None) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (batch_size, num_heads, + height, width, depth)) + # now to check the content too + # first pad q, k, ad v + height_padding = -height % query_shape[0] + width_padding = -width % query_shape[1] + new_height = height + -height % query_shape[0] + new_width = width + -width % query_shape[1] + q = np.pad(q, ((0, 0), (0, 0), (0, height_padding), + (0, width_padding), (0, 0)), "constant", + constant_values=((0, 0), (0, 0), (0, 0), (0, 0), (0, 0))) + k = np.pad(k, ((0, 0), (0, 0), (0, height_padding), + (0, width_padding), (0, 0)), "constant", + constant_values=((0, 0), (0, 0), (0, 0), (0, 0), (0, 0))) + v = np.pad(v, ((0, 0), (0, 0), (0, height_padding), + (0, width_padding), (0, 0)), "constant", + constant_values=((0, 0), (0, 0), (0, 0), (0, 0), (0, 0))) + queries = self.python_get_2d_local_memory(q, batch_size, num_heads, + new_height, new_width, + num_h_blocks, num_w_blocks, + query_shape, [0, 0], + depth) + keys = 
self.python_get_2d_local_memory(k, batch_size, num_heads, + new_height, new_width, num_h_blocks, + num_w_blocks, query_shape, + memory_flange, depth) + values = self.python_get_2d_local_memory(v, batch_size, num_heads, + new_height, new_width, + num_h_blocks, num_w_blocks, + query_shape, + memory_flange, depth) + logits = np.matmul( + np.reshape(queries, (batch_size, num_heads, + num_h_blocks, num_w_blocks, + query_shape[0]*query_shape[1], depth)), + np.transpose( + np.reshape(keys, (batch_size, num_heads, num_h_blocks, num_w_blocks, + memory_h*memory_w, depth)), (0, 1, 2, 3, 5, 4))) + # now to do a softmax across the logits + att = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True) + att_output = np.matmul(att, np.reshape( + values, (batch_size, num_heads, num_h_blocks, num_w_blocks, + memory_h*memory_w, depth))) + att_output = np.reshape(att_output, + (batch_size, num_heads, num_h_blocks, num_w_blocks, + query_shape[0], query_shape[1], depth)) + # putting the attention results back into the right place + out = np.zeros((batch_size, num_heads, new_height, new_width, depth)) + for b in range(batch_size): + for h in range(num_heads): + for x in range(new_height): + for y in range(new_width): + h_block_index = x//query_shape[0] + w_block_index = y//query_shape[1] + inside_h_index = x%query_shape[0] + inside_w_index = y%query_shape[1] + out[b, h, x, y] = ( + att_output[b, h, h_block_index, w_block_index, inside_h_index, + inside_w_index]) + out = out[:, :, :height, :width, :] + self.assertAllClose(res, out) + + @tfe.run_test_in_graph_and_eager_modes() + def testDotProductUnmaskedAttentionLocal2dTpuSimple(self): + batch_size = 1 + num_heads = 3 + height = 8 + width = 12 + total_depth = 15 + num_h_blocks = 4 + num_w_blocks = 6 + depth = 5 + query_shape = [2, 2] + + x = np.random.rand(batch_size, height, width, total_depth) + a = ( + common_attention.dot_product_unmasked_attention_local_2d_tpu_simple( + tf.constant(x, dtype=tf.float32), + None, total_depth, total_depth, num_heads, + query_shape=query_shape)) + self.evaluate(tf.global_variables_initializer()) + res, q, k, v = self.evaluate(a) + self.assertEqual(res.shape, (batch_size, height, width, total_depth)) + # reshape q, k, v from batch, heads, height*width to batch, heads, + # num_h_blocks, num_w_blocks, query_shape[0], query_shape[1], depth + resh_shape = (batch_size, num_h_blocks, num_w_blocks, + num_heads, query_shape[0], query_shape[1], + depth) + resh = lambda l: np.reshape(l, resh_shape) + q, k, v = map(resh, [q, k, v]) + trans = lambda l: np.transpose(l, (0, 3, 1, 2, 4, 5, 6)) + q, k, v = map(trans, [q, k, v]) + new_height = height + -height % query_shape[0] + new_width = width + -width % query_shape[1] + (queries, keys, values) = (q, k, v) + logits = np.matmul( + np.reshape(queries, (batch_size, num_heads, + num_h_blocks, num_w_blocks, + query_shape[0]*query_shape[1], depth)), + np.transpose( + np.reshape(keys, (batch_size, num_heads, num_h_blocks, num_w_blocks, + query_shape[0]*query_shape[1], depth)), + (0, 1, 2, 3, 5, 4))) + # now to do a softmax across the logits + att = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True) + att_output = np.matmul(att, np.reshape( + values, (batch_size, num_heads, num_h_blocks, num_w_blocks, + query_shape[0]*query_shape[1], depth))) + att_output = np.reshape(att_output, + (batch_size, num_heads, num_h_blocks, num_w_blocks, + query_shape[0], query_shape[1], depth)) + # putting the attention results back into the right place + out = np.zeros((batch_size, num_heads, 
new_height, new_width, depth)) + for b in range(batch_size): + for h in range(num_heads): + for x in range(new_height): + for y in range(new_width): + h_block_index = x//query_shape[0] + w_block_index = y//query_shape[1] + inside_h_index = x%query_shape[0] + inside_w_index = y%query_shape[1] + out[b, h, x, y] = ( + att_output[b, h, h_block_index, w_block_index, inside_h_index, + inside_w_index]) + out = np.transpose(out, (0, 2, 3, 1, 4)) + out = np.reshape(out, (batch_size, new_height, new_width, total_depth)) + out = out[:, :height, :width, :] + + self.assertAllClose(res, out) + + def python_relative_att(self, q, k, v, batch, num_heads, height, width, + depth, height_key_relative_embeddings, + width_key_relative_embeddings, + heads_share_relative_embedding): + """Relative attention computation in numpy. + + For query index (i,j) and key index (l, m) the logit is + q_i k_j^T + q_i rh_{l-i}^T + q_i rw_{m-j}^T, where rh and ry are the set of + relative embeddings in height and width spatial dimensions, respectively. + + Args: + q: [batch, heads, height, width, depth] tensor + k: [batch, heads, height, width, depth] tensor + v: [batch, heads, height, width, depth] tensor + batch: int scalar + num_heads: int scalar + height: int scalar + width: int scalar + depth: int scalar + height_key_relative_embeddings: a tensor of relative embeddings + width_key_relative_embeddings: a tensor of relative embeddings + heads_share_relative_embedding: a boolean + + Returns: + att_output: A tensor + """ + + logits = np.zeros((batch, num_heads, height*width, height*width)) + for b in range(batch): + for h in range(num_heads): + for i in range(height*width): + q_col = i%width + q_row = int((i-q_col)/width) + for j in range(height*width): + k_col = j%width + k_row = int((j-k_col)/width) + logit = np.dot(q[b][h][q_row][q_col], k[b][h][k_row][k_col]) + width_rel_dist = k_col - q_col + width_rel_index = width-1 + width_rel_dist + if heads_share_relative_embedding: + width_rel_logit = ( + np.dot(q[b][h][q_row][q_col], + width_key_relative_embeddings[width_rel_index])) + else: + width_rel_logit = ( + np.dot(q[b][h][q_row][q_col], + width_key_relative_embeddings[h][width_rel_index])) + height_rel_dist = k_row - q_row + height_rel_index = height-1 + height_rel_dist + if heads_share_relative_embedding: + height_rel_logit = ( + np.dot(q[b][h][q_row][q_col], + height_key_relative_embeddings[height_rel_index])) + else: + height_rel_logit = ( + np.dot(q[b][h][q_row][q_col], + height_key_relative_embeddings[h][height_rel_index])) + logits[b, h, i, j] = logit + width_rel_logit + height_rel_logit + # now to do a softmax across the logits + att = np.exp(logits) / np.sum(np.exp(logits), axis=-1, keepdims=True) + # comparing the outputs + att_output = np.matmul(att, + np.reshape(v, ( + batch, num_heads, height*width, depth))) + att_output = np.reshape(att_output, + (batch, num_heads, height, width, depth)) + return att_output + + @test_utils.run_in_graph_and_eager_modes() + def testDotProductUnMaskedAttentionRelative2d(self): + batch = 1 + height = 3 + width = 3 + num_heads = 2 + max_relative_position = 6 + depth = 5 + heads_share_relative_embedding = False + q = np.random.rand(batch, num_heads, height, width, depth) + k = np.random.rand(batch, num_heads, height, width, depth) + v = np.random.rand(batch, num_heads, height, width, depth) + a = common_attention.dot_product_unmasked_self_attention_relative_2d( + tf.constant(q, dtype=tf.float32), + tf.constant(k, dtype=tf.float32), + tf.constant(v, dtype=tf.float32), + None, + 
max_relative_position=max_relative_position, + heads_share_relative_embedding=heads_share_relative_embedding) + + self.evaluate(tf.global_variables_initializer()) + res, height_key_relative_embeddings, width_key_relative_embeddings = ( + self.evaluate(a)) + att_output = self.python_relative_att( + q, k, v, batch, num_heads, height, width, depth, + height_key_relative_embeddings, width_key_relative_embeddings, + heads_share_relative_embedding) + self.assertEqual(res.shape, (batch, num_heads, height, width, depth)) + self.assertAllClose(res, att_output) + + @parameterized.parameters( + (1, 10, 12, 2, 6, 3), + (1, 1, 12, 2, 6, 3), + (2, 10, 1, 2, 6, 3), + (1, 10, 12, 2, 1, 1), + (1, 10, 12, 2, 2, 8), + (4, 10, 12, 2, 12, 10), + ) + @test_utils.run_in_graph_and_eager_modes() + def testDotProductUnMaskedAttentionRelative2dSharedOneRow( + self, batch, height, width, num_heads, max_relative_position, depth): + heads_share_relative_embedding = True + q = np.random.rand(batch, num_heads, height, width, depth) + k = np.random.rand(batch, num_heads, height, width, depth) + v = np.random.rand(batch, num_heads, height, width, depth) + + a = common_attention.dot_product_unmasked_self_attention_relative_2d( + tf.constant(q, dtype=tf.float32), + tf.constant(k, dtype=tf.float32), + tf.constant(v, dtype=tf.float32), + None, + max_relative_position=max_relative_position, + heads_share_relative_embedding=heads_share_relative_embedding) + + self.evaluate(tf.global_variables_initializer()) + (res, height_key_relative_embeddings, + width_key_relative_embeddings) = self.evaluate(a) + att_output = self.python_relative_att( + q, k, v, batch, num_heads, height, width, depth, + height_key_relative_embeddings, width_key_relative_embeddings, + heads_share_relative_embedding) + self.assertEqual(res.shape, + (batch, num_heads, height, width, depth)) + self.assertAllClose(res, att_output) + + @test_utils.run_in_graph_and_eager_modes() + def testRelativeAttentionV2Unmasked(self): + # (batch, heads, length, depth) + x = np.random.rand(5, 4, 16, 7) + y = np.random.rand(5, 4, 16, 7) + max_relative_position = 3 + a = common_attention.dot_product_unmasked_self_attention_relative_v2( + tf.constant(x, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + None, + max_relative_position=max_relative_position, + heads_share_relative_embedding=False) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (5, 4, 16, 7)) + + @test_utils.run_in_graph_and_eager_modes() + def testRelativeAttentionV2UnmaskedSharedRel(self): + # (batch, heads, length, depth) + x = np.random.rand(5, 4, 16, 7) + y = np.random.rand(5, 4, 16, 7) + max_relative_position = 3 + a = common_attention.dot_product_unmasked_self_attention_relative_v2( + tf.constant(x, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + None, + max_relative_position=max_relative_position, + heads_share_relative_embedding=True) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (5, 4, 16, 7)) + + @test_utils.run_in_graph_and_eager_modes() + def testRelativeAttentionV2UnmaskedRelativeLargerThanLength(self): + # (batch, heads, length, depth) + x = np.random.rand(5, 4, 3, 7) + y = np.random.rand(5, 4, 3, 7) + max_relative_position = 16 + a = common_attention.dot_product_unmasked_self_attention_relative_v2( + tf.constant(x, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + tf.constant(y, 
dtype=tf.float32), + None, + max_relative_position=max_relative_position, + heads_share_relative_embedding=False) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (5, 4, 3, 7)) + + @test_utils.run_in_graph_and_eager_modes() + def testMaskedRelativeLocalAttentionV2(self): + # (batch, heads, length, depth) + x = np.random.rand(5, 4, 16, 7) + y = np.random.rand(5, 4, 16, 7) + block_length = 3 + a = common_attention.masked_relative_local_attention_1d( + tf.constant(x, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + block_length=block_length, + heads_share_relative_embedding=True, + add_relative_to_values=False, + name="masked_relative_local_attention_1d") + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (5, 4, 16, 7)) + + @test_utils.run_in_graph_and_eager_modes() + def testMaskedRelativeLocalAttentionV2AddRelativeValues(self): + # (batch, heads, length, depth) + x = np.random.rand(5, 4, 16, 7) + y = np.random.rand(5, 4, 16, 7) + block_length = 3 + a = common_attention.masked_relative_local_attention_1d( + tf.constant(x, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + block_length=block_length, + heads_share_relative_embedding=True, + add_relative_to_values=False, + name="masked_relative_local_attention_1d") + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (5, 4, 16, 7)) + + @test_utils.run_in_graph_and_eager_modes() + def testMaskedRelativeLocalAttentionV2SeqShorterThanBlockLength(self): + # (batch, heads, length, depth) + x = np.random.rand(5, 7, 2, 7) + y = np.random.rand(5, 7, 2, 7) + block_length = 3 + a = common_attention.masked_relative_local_attention_1d( + tf.constant(x, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + block_length=block_length, + heads_share_relative_embedding=True, + name="masked_relative_local_attention_1d") + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (5, 7, 2, 7)) + + @test_utils.run_in_graph_and_eager_modes() + def testMaskedRelativeLocalAttentionV2SeqShorterThanTwiceBlockLength(self): + # (batch, heads, length, depth) + x = np.random.rand(5, 7, 5, 7) + y = np.random.rand(5, 7, 5, 7) + block_length = 3 + a = common_attention.masked_relative_local_attention_1d( + tf.constant(x, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + tf.constant(y, dtype=tf.float32), + block_length=block_length, + heads_share_relative_embedding=True, + name="masked_relative_local_attention_1d") + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(a) + self.assertEqual(res.shape, (5, 7, 5, 7)) + + def testBiasBatchCoordinates(self): + """Testing the batch coordinates mask.""" + q = tf.constant([0, 0, 1, 1, 1, 1, 2, 2, 2], dtype=tf.int32) + q = tf.expand_dims(q, axis=-1) + + k = tf.constant([0, 0, 0, 2, 2, 3, 3, 3], dtype=tf.int32) + k = tf.expand_dims(k, axis=-1) + + ground_truth = np.array([ + [0, 0, 0, 1, 1, 1, 1, 1], # 0 + [0, 0, 0, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], # 1 (just masked) + [1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 0, 0, 1, 1, 1], # 2 + [1, 1, 1, 0, 0, 1, 1, 1], + [1, 1, 1, 0, 0, 1, 1, 1], + ], np.float32) * -1e9 + + bias = common_attention.attention_bias_coordinates(q, k) + self.assertAllClose(self.evaluate(bias), 
ground_truth) + + @test_utils.run_in_graph_and_eager_modes() + def testBiasFuture(self): + """Testing the sequence order mask.""" + q = tf.constant([0, 1, 2, 3, 0, 1, 2, 0, 1], dtype=tf.int32) + q = tf.expand_dims(q, axis=-1) + + k = tf.constant([0, 1, 2, 3, 4, 0, 1, 2], dtype=tf.int32) + k = tf.expand_dims(k, axis=-1) + + ground_truth = np.array([ + [0, 1, 1, 1, 1, 0, 1, 1], # 0 + [0, 0, 1, 1, 1, 0, 0, 1], # 1 + [0, 0, 0, 1, 1, 0, 0, 0], # 2 + [0, 0, 0, 0, 1, 0, 0, 0], # 3 + [0, 1, 1, 1, 1, 0, 1, 1], # 0 + [0, 0, 1, 1, 1, 0, 0, 1], # 1 + [0, 0, 0, 1, 1, 0, 0, 0], # 2 + [0, 1, 1, 1, 1, 0, 1, 1], # 0 + [0, 0, 1, 1, 1, 0, 0, 1], # 1 + ], np.float32) * -1e9 + + bias = common_attention.attention_bias_future(q, k) + self.assertAllClose(self.evaluate(bias), ground_truth) + + @test_utils.run_in_graph_mode_only() + def testMultiheadAttentionWithLayerCollection(self): + """Testing multihead attention with layer collection for kfac.""" + x = tf.zeros([3, 4, 5], tf.float32) + layer_collection = kfac.LayerCollection() + common_attention.multihead_attention( + x, None, None, 10, 10, 10, 2, 0.2, + layer_collection=layer_collection) + self.assertLen(layer_collection.get_blocks(), 4) + + @parameterized.named_parameters( + ("", 1, 1, 8, 4, 3), + ("dynamic_batch", None, 1, 8, 4, 2), + ("batches", 4, 3, 8, 4, 2), + ("block_length", 1, 1, 8, 4, 4), + ) + def testDilatedAttention(self, batch, heads, length, depth_v, block_length): + if batch is None: + batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32) + q = tf.random_normal([batch, heads, length, depth_v]) + k = tf.random_normal([batch, heads, length, depth_v]) + v = tf.random_normal([batch, heads, length, depth_v]) + output = common_attention.dilated_self_attention_1d( + q, k, v, + query_block_size=block_length, + memory_block_size=block_length, + gap_size=2, + num_memory_blocks=2) + if isinstance(batch, tf.Tensor): + batch, res = self.evaluate([batch, output]) + else: + res = self.evaluate(output) + + self.assertEqual(res.shape, (batch, heads, length, depth_v)) + + @parameterized.named_parameters( + ("", 1, 1, 8, 4, 3), + ("dynamic_batch", None, 1, 8, 4, 2), + ("batches", 4, 3, 8, 4, 2), + ("block_length", 1, 1, 8, 4, 4), + ) + def testMaskedDilatedAttention(self, batch, heads, length, depth_v, + block_length): + if batch is None: + batch = tf.random_uniform([], minval=0, maxval=5, dtype=tf.int32) + q = tf.random_normal([batch, heads, length, depth_v]) + k = tf.random_normal([batch, heads, length, depth_v]) + v = tf.random_normal([batch, heads, length, depth_v]) + output = common_attention.masked_dilated_self_attention_1d( + q, k, v, + query_block_size=block_length, + memory_block_size=block_length, + gap_size=2, + num_memory_blocks=2) + if isinstance(batch, tf.Tensor): + batch, res = self.evaluate([batch, output]) + else: + res = self.evaluate(output) + + self.assertEqual(res.shape, (batch, heads, length, depth_v)) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/layers/common_audio.py b/tensor2tensor/layers/common_audio.py new file mode 100644 index 000000000..27614d7da --- /dev/null +++ b/tensor2tensor/layers/common_audio.py @@ -0,0 +1,138 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utils for audio.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import numpy as np +import scipy.signal +import tensorflow.compat.v1 as tf + + +def add_delta_deltas(filterbanks, name=None): + """Compute time first and second-order derivative channels. + + Args: + filterbanks: float32 tensor with shape [batch_size, len, num_bins, 1] + name: scope name + + Returns: + float32 tensor with shape [batch_size, len, num_bins, 3] + """ + delta_filter = np.array([2, 1, 0, -1, -2]) + delta_delta_filter = scipy.signal.convolve(delta_filter, delta_filter, "full") + + delta_filter_stack = np.array( + [[0] * 4 + [1] + [0] * 4, [0] * 2 + list(delta_filter) + [0] * 2, + list(delta_delta_filter)], + dtype=np.float32).T[:, None, None, :] + + delta_filter_stack /= np.sqrt( + np.sum(delta_filter_stack**2, axis=0, keepdims=True)) + + filterbanks = tf.nn.conv2d( + filterbanks, delta_filter_stack, [1, 1, 1, 1], "SAME", data_format="NHWC", + name=name) + return filterbanks + + +def compute_mel_filterbank_features( + waveforms, + sample_rate=16000, dither=1.0 / np.iinfo(np.int16).max, preemphasis=0.97, + frame_length=25, frame_step=10, fft_length=None, + window_fn=functools.partial(tf.signal.hann_window, periodic=True), + lower_edge_hertz=80.0, upper_edge_hertz=7600.0, num_mel_bins=80, + log_noise_floor=1e-3, apply_mask=True): + """Implement mel-filterbank extraction using tf ops. + + Args: + waveforms: float32 tensor with shape [batch_size, max_len] + sample_rate: sampling rate of the waveform + dither: stddev of Gaussian noise added to waveform to prevent quantization + artefacts + preemphasis: waveform high-pass filtering constant + frame_length: frame length in ms + frame_step: frame_Step in ms + fft_length: number of fft bins + window_fn: windowing function + lower_edge_hertz: lowest frequency of the filterbank + upper_edge_hertz: highest frequency of the filterbank + num_mel_bins: filterbank size + log_noise_floor: clip small values to prevent numeric overflow in log + apply_mask: When working on a batch of samples, set padding frames to zero + Returns: + filterbanks: a float32 tensor with shape [batch_size, len, num_bins, 1] + """ + # `stfts` is a complex64 Tensor representing the short-time Fourier + # Transform of each signal in `signals`. Its shape is + # [batch_size, ?, fft_unique_bins] + # where fft_unique_bins = fft_length // 2 + 1 + + # Find the wave length: the largest index for which the value is !=0 + # note that waveforms samples that are exactly 0.0 are quite common, so + # simply doing sum(waveforms != 0, axis=-1) will not work correctly. 
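+  # Instead, take the largest sample index with a nonzero value and add one,
+  # which turns that index into a length.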
+ wav_lens = tf.reduce_max( + tf.expand_dims(tf.range(tf.shape(waveforms)[1]), 0) * + tf.to_int32(tf.not_equal(waveforms, 0.0)), + axis=-1) + 1 + if dither > 0: + waveforms += tf.random_normal(tf.shape(waveforms), stddev=dither) + if preemphasis > 0: + waveforms = waveforms[:, 1:] - preemphasis * waveforms[:, :-1] + wav_lens -= 1 + frame_length = int(frame_length * sample_rate / 1e3) + frame_step = int(frame_step * sample_rate / 1e3) + if fft_length is None: + fft_length = int(2**(np.ceil(np.log2(frame_length)))) + + stfts = tf.signal.stft( + waveforms, + frame_length=frame_length, + frame_step=frame_step, + fft_length=fft_length, + window_fn=window_fn, + pad_end=True) + + stft_lens = (wav_lens + (frame_step - 1)) // frame_step + masks = tf.to_float(tf.less_equal( + tf.expand_dims(tf.range(tf.shape(stfts)[1]), 0), + tf.expand_dims(stft_lens, 1))) + + # An energy spectrogram is the magnitude of the complex-valued STFT. + # A float32 Tensor of shape [batch_size, ?, 257]. + magnitude_spectrograms = tf.abs(stfts) + + # Warp the linear-scale, magnitude spectrograms into the mel-scale. + num_spectrogram_bins = magnitude_spectrograms.shape[-1].value + linear_to_mel_weight_matrix = ( + tf.signal.linear_to_mel_weight_matrix( + num_mel_bins, num_spectrogram_bins, sample_rate, lower_edge_hertz, + upper_edge_hertz)) + mel_spectrograms = tf.tensordot( + magnitude_spectrograms, linear_to_mel_weight_matrix, 1) + # Note: Shape inference for tensordot does not currently handle this case. + mel_spectrograms.set_shape(magnitude_spectrograms.shape[:-1].concatenate( + linear_to_mel_weight_matrix.shape[-1:])) + + log_mel_sgram = tf.log(tf.maximum(log_noise_floor, mel_spectrograms)) + + if apply_mask: + log_mel_sgram *= tf.expand_dims(tf.to_float(masks), -1) + + return tf.expand_dims(log_mel_sgram, -1, name="mel_sgrams") diff --git a/tensor2tensor/layers/common_hparams.py b/tensor2tensor/layers/common_hparams.py new file mode 100644 index 000000000..65e349b24 --- /dev/null +++ b/tensor2tensor/layers/common_hparams.py @@ -0,0 +1,505 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Hyperparameters and ranges common to multiple models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from six.moves import zip # pylint: disable=redefined-builtin +from tensor2tensor.utils import hparam +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_hparams("basic_1") +def basic_params1(): + """A set of basic hyperparameters.""" + return hparam.HParams( + # If the problem consists of variable-length sequences + # (see problem.batch_size_means_tokens()), then this is the number + # of tokens per batch per GPU or per TPU core. Otherwise, this is + # the number of examples per GPU or per TPU core. 
+ batch_size=4096, + batch_shuffle_size=512, + # If True, then if the features are of variable length, the batch_size is + # used as the actual batch size (and not tokens per batch). + use_fixed_batch_size=False, + num_hidden_layers=4, + kernel_height=3, + kernel_width=1, + hidden_size=64, + compress_steps=0, + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. + dropout=0.2, + clip_grad_norm=2.0, + grad_noise_scale=0.0, + summarize_grads=False, + # Flag for whether mlperf mode is on + mlperf_mode=False, + # Whether to log the name and size of every variable + summarize_vars=False, + initializer="orthogonal", + initializer_gain=1.5, + label_smoothing=0.1, + optimizer="adam", + optimizer_adam_epsilon=1e-6, + optimizer_adam_beta1=0.85, + optimizer_adam_beta2=0.997, + optimizer_momentum_momentum=0.9, + optimizer_momentum_nesterov=False, + optimizer_adafactor_beta1=0.0, + optimizer_adafactor_beta2=0.999, + optimizer_adafactor_factored=True, + optimizer_adafactor_decay_type="pow", + optimizer_adafactor_memory_exponent=0.8, + optimizer_adafactor_clipping_threshold=1.0, + optimizer_adafactor_multiply_by_parameter_scale=True, + # Number of accumulating steps for multi step optimizers. + optimizer_multistep_accumulate_steps=0, + # Loss scaling used. + # Generally only necessary with mixed precision training. + # Mixed precision training only supports exponential scaling currently + # To disable the scaler, see to 0/False + mixed_precision_optimizer_loss_scaler="exponential", + # Determines the initial loss scaling value for mixed precision + mixed_precision_optimizer_init_loss_scale=2**15, + # Whether to zero gradients that were not computed, so that the + # appropriate slots are created. Useful for sharing checkpoints between + # models with different sets of heads. + optimizer_zero_grads=False, + weight_decay=1e-6, + weight_noise=0.0, + # Defines the learning rate as a product of named functions. + # Available functions are listed in learning_rate._LEARNING_RATE_FUNCTIONS + # e.g. "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size" + learning_rate_schedule="legacy", + learning_rate_constant=1.0, + # If learning_rate_schedule=="legacy", + # then we specify decay scheme here. Warmup is always exponential, + # except with "noam" learning rate decay scheme. + # see optimize.legacy_learning_rate_schedule() + # TODO(noam): migrate everyone away from this. + learning_rate_decay_scheme="none", + # decay_steps and decay_staircase for learning_rate_decay_scheme=="exp" + learning_rate_decay_steps=5000, + learning_rate_decay_staircase=False, + learning_rate_minimum=None, + learning_rate_decay_rate=1.0, + learning_rate_warmup_steps=100, + learning_rate_cosine_cycle_steps=250000, + learning_rate=0.1, + sampling_method="argmax", # "argmax" or "random" + sampling_temp=1.0, # temperature for sampling + sampling_keep_top_k=-1, # If >0, ignore all but the top k logits + # expand the logits a piece at a time - saves memory. + factored_logits=False, + multiply_embedding_mode="sqrt_depth", + # Parameters related to mixtures of experts. + moe_hidden_sizes="2048", # hidden layer sizes (comma-separated) + moe_num_experts=64, # number of experts per layer + moe_k=2, # how many experts to use for each batch element + moe_loss_coef=1e-2, + # Sequences of operations to perform on layer input and layer output. 
+ # Used by common_layers.layer_preprocess, common_layers.layer_postprocess + # Each character represents an operation: + # none: no preprocessing + # d: apply dropout + # n: apply normalization (see norm_type and norm_epsilon) + # a: add layer input (residual connection - only during postprocess) + # The special string "none" is used instead of the empty string + # to indicate no pre/postprocessing, since the empty string causes + # trouble for hyperparameter tuning. + # TODO(noam): The current settings ("", "dan") are the published version + # of the transformer. ("n", "da") seems better for harder-to-learn + # models, so it should probably be the default. + layer_preprocess_sequence="none", + layer_postprocess_sequence="dan", + # dropout rate to use during layer_preprocess and layer_postprocess + layer_prepostprocess_dropout=0.1, + # broadcast dimensions for layer_prepostprocess_dropout + # a comma-separated list of integers. + # see common_layers.dropout_with_broadcast_dims() + # Change this to "1" to save memory. + layer_prepostprocess_dropout_broadcast_dims="", + # dropout some symbols (set them to 0) before embedding. + symbol_dropout=0.0, + # What type of normalization to use + norm_type="layer", # "batch", layer", "noam", "none". + # epsilon parameter to normalization function + norm_epsilon=1e-6, + # pad vocabularies so that this value divides the vocabulary size. + vocab_divisor=1, + # During training, we drop sequences whose inputs and targets are shorter + # than min_length + min_length=0, + # During training, we drop sequences whose inputs or targets are longer + # than max_length. + # If max_length==0, we use hparams.batch_size instead. + max_length=0, + # Pack examples on the fly. + pack_dataset=False, + # Use custom ops not included in standard tensorflow. + use_custom_ops=True, + # Split targets on the first axis into chunks of this length. + split_targets_chunk_length=0, + split_targets_max_chunks=100, + split_targets_strided_training=False, + # Maximum length in the smallest length bucket. Setting this + # flag too high will result in wasteful padding of short + # sequences. Due to some (hopefully) temporary hacks in the + # data reading and batching code, setting this flag too low + # results in a very long batch-shuffling queue. + # TODO(noam): change this once the Datasets API changes. + min_length_bucket=8, + # This flag controls the number of length buckets in the data + # reader. The buckets have maximum lengths from + # min_bucket_length to (max_length or batch_size), increasing + # (approximately) by factors of length_bucket_step. + length_bucket_step=1.1, + # If set to True, drop sequences longer than max_length during eval. + # This affects the validity of the evaluation metrics. + eval_drop_long_sequences=False, + # If True, run the model autoregressively instead of teacher-forcing + # during eval + eval_run_autoregressive=False, + # (For features with symbol modality) If True, share all of the + # input embeddings, target embeddings, and softmax weights. + shared_embedding_and_softmax_weights=False, + # (For features with symbol modality) If True, share the input embeddings + # and target embeddings. + shared_embedding=False, + # (For features with symbol modality) Number to shard embeddings by. + symbol_modality_num_shards=1, + # Feature transformations are optional dictionaries comprising key-value + # pairs of a feature name (str) and its transformation (function). 
If not + # specified, T2TModel applies a default transformation according to the + # feature's modality. Bottom is applicable to all features; loss, top, and + # weights_fn are only applicable to target features. + # TODO(trandustin): `name` is an optional hparam for legacy reasons, + # defining variable scope names. Remove this hparam in the future. + bottom={}, + loss={}, + name={}, + top={}, + weights_fn={}, + # The maximum length of "input" sequence. + # Sequences longer than this value will be truncated. 0 or negative values + # mean there is no maximum or truncation. + # You can change this behavior by overriding preprocess_example() method + # in your problem class. + max_input_seq_length=0, + # The maximum length of "target" sequence. + # Sequences longer than this value will be truncated. 0 or negative values + # mean there is no maximum or truncation. + # You can change this behavior by overriding preprocess_example() method + # in your problem class. + max_target_seq_length=0, + # if nonzero, we split the target sequences on example read. + # This is for use with language modeling problems with fixed length + # examples. e.g. The examples may be written with length 65536, but we + # want to split each example into 64 examples of length 1024. + split_to_length=0, + # Video settings: how many frames to batch on input and targets. + video_num_input_frames=1, + video_num_target_frames=1, + # This flag allows us to optionally treat a seq-to-seq problem + # as a language model. Legal values are: + # + # "none" - Do not prepend the inputs to the targets. + # "prepend_inputs_masked_attention" + # replace "targets" in preprocessing with + # tf.concat([inputs, [0], targets], axis=1) + # i.e. we prepend the inputs to the targets with a single + # padding token in between. Use masked self-attention on the + # entire resulting sequence. During training, we compute losses on + # the combined sequence. During eval, we compute the metrics + # on only the targets portion. + # "prepend_inputs_full_attention" + # similar to the previous option except that each + # position in the inputs portion can see the + # entire inputs portion. This removes the challenge of + # autoregressively predicting the inputs portion. + prepend_mode="none", + # Scheduled sampling is interesting for auto-regressive models. + # It runs an additional step using the generated output as autoregressive + # targets, which can improve the models inference results later. The + # parameter scheduled_sampling_prob determines with what probability + # will such additional step be run. It's turned off (0.0) by default. + # This probability will exponentially warm up for the number of + # steps determined by scheduled_sampling_warmup_steps. + # The tensor used for the n-th pass will consist of outputs from + # the (n-1)-th pass mixed with gold truth, with the proportion of gold + # determined by scheduled_sampling_gold_mixin_prob. Control the number + # of passes with scheduled_sampling_num_passes. + scheduled_sampling_prob=0.0, + scheduled_sampling_method="parallel", # parallel or sequential. + scheduled_sampling_warmup_steps=50000, + scheduled_sampling_gold_mixin_prob=0.5, + scheduled_sampling_num_passes=1, + scheduled_sampling_warmup_schedule="exp", # exp, linear, or sigmoid. + + # This setting controls whether to copy variables around in a daisy chain + # (if true) or leave their placement to TensorFlow. It only affects multi + # device training and mostly should be turned on for performance. 
One + # exception are recurrent models: with dynamic loops it must be off. + daisy_chain_variables=True, + # If True in PREDICT mode, then last-position-only optimizations are not + # used. + force_full_predict=False, + # Set this for pure model parallelism. There is only one data shard. + no_data_parallelism=False, + # dtype used for activations. - "float32" or "bfloat16" + # activation_dtype="bfloat16" currently only works on TPU. + # It lowers activation-memory usage + # and does not appear to affect quality. + # You can train on TPU with activation_dtype="bfloat16" and evaluate + # on CPU/GPU with activation_dtype="float32" + activation_dtype="float32", + # dtype used for parameters: "float32" or "bfloat16" + # bfloat16 currently only works with optimizer="adafactor". + # The savings in memory allow for training larger models. + # Weights are encoded as (w*128)^8, using pseudostochastic + # roundoff. Initial experiments show that model quality is similar + # to baseline for about 3M training steps, but worse thereafter. + weight_dtype="float32", + # Directory containing a checkpoint for a pretrained model. This will only + # be used if a new run is being started. Parameters not found in the + # pretrained model will be randomly initialized. Superfluous parameters in + # the pretrained model will be ignored. + pretrained_model_dir="", + # Threshold used for two cases: the primary task probability for the + # constant mixing schedule, and the exponential schedule limit for when + # mixing should stop (eg: 0.5 means stop at 50-50 mixing, 0.8 means stop + # at 20-80 mixing for the primary-others mixing case.) + multiproblem_schedule_threshold=0.5, + # For more than 2 tasks, we may want to specify per-task thresholds here. + # In that case, this needs to be a string with as many floating point + # numbers as the number of tasks in the multi-problem. These numbers + # are later normalized to add up to 1 and taken as probabilities for + # each task. This enforces a constant mixing schedule and if this is + # empty then the threshold from above is used for the first task and + # the other tasks get the remaining probability split uniformly. + multiproblem_per_task_threshold="", + # The number of examples at which the proportion of the mixed in datasets + # is multiproblem_schedule_threshold + multiproblem_schedule_max_examples=1e7, + # When training multiproblems, we can mix the data according to different + # schedules. Example: a constant schedule mixing 20-80 between the primary + # and other tasks. + # A list of supported schedules can be found in + # `data_generators.multi_problem.py`. + multiproblem_mixing_schedule="constant", + # A boolean that decides whether input sequence losses and target label + # losses in classification problems should be reweighted. + multiproblem_reweight_label_loss=False, + # How much weight the targets in classification problems receive. Inputs + # receive 1 minus this weight. + multiproblem_label_weight=0.5, + # Hyperparameters for relative attention. + # The maximum relative positional distance to learn an embedding for. + max_relative_position=0, + # If heads share the same relative embedding. + heads_share_relative_embedding=False, + # If relative embedding terms are added to values too. + add_relative_to_values=False, + # If enable the host_call which is executed every training step. + # There could be a performance drop if host_call function is slow and + # cannot keep up with the TPU-side computation. 
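+      # Illustrative sketch (assuming these defaults are defined in
+      # basic_params1): each field above is a plain HParams attribute, so a
+      # registered hparams set can override the defaults, e.g.
+      #   @registry.register_hparams
+      #   def my_params():
+      #     hp = basic_params1()
+      #     hp.activation_dtype = "bfloat16"    # bfloat16 activations on TPU
+      #     hp.scheduled_sampling_prob = 0.25   # enable scheduled sampling
+      #     return hp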
+ tpu_enable_host_call=False, + # Pad batch dim of inputs to nearest multiple of batch multiple. + pad_batch=False, + # When true, do not evaluate on the language model data when running the + # multiproblem since it can take a while. If False, set eval_steps to + # something large like 6000 or 10000. + multiproblem_target_eval_only=False, + # Max out the vocab size to a power of 2 for efficiency and to reserve + # extra space in the vocabulary for new task ids and label classes. + multiproblem_vocab_size=-1, + # When using multiproblem with generation tasks, need to truncate the + # inputs and targets manually before concatenating them. + multiproblem_max_input_length=-1, + multiproblem_max_target_length=-1, + # If positive, makes training targets fixed-length in MultiProblem. + multiproblem_fixed_train_length=-1, + # Load weights from a second model. For instance, when using + # pre-trained weights, you might want to initialize the encoder + # and decoder by loading different models. + warm_start_from_second="", + # Area attention hyper parameters + area_value_mode="none", + area_key_mode="none", + # Using area attention for the number of layers from the bottom + num_area_layers=0, + max_area_width=1, + max_area_height=1, + memory_height=1, + # Whether to use GPU automatic mixed precision (via graph rewrite) + gpu_automatic_mixed_precision=False, + ) + + +class RangedHParams(object): + """Defines parameter ranges for tuning.""" + + # From ParameterConfig proto + LINEAR_SCALE = 1 + LOG_SCALE = 2 + REVERSE_LOG_SCALE = 3 + + SCALES_STR = { + LINEAR_SCALE: "UNIT_LINEAR_SCALE", + LOG_SCALE: "UNIT_LOG_SCALE", + REVERSE_LOG_SCALE: "UNIT_REVERSE_LOG_SCALE", + } + + def __init__(self): + self._categorical_params = {} + self._discrete_params = {} + self._float_params = {} + self._int_params = {} + + def _check_reset_and_type_change(self, name, orig_ctr): + """Check if name is in orig_ctr or in one of the other type containers.""" + # Resetting a hyperparameter + if name in orig_ctr: + tf.logging.warning("Overwriting hparam %s", name) + + ctr_names = [ + (self._categorical_params, "categorical"), + (self._discrete_params, "discrete"), + (self._float_params, "float"), + (self._int_params, "int"), + ] + ctrs, names = list(zip(*ctr_names)) + orig_name = names[ctrs.index(orig_ctr)] + + for ctr, ctr_name in ctr_names: + if ctr is orig_ctr: + continue + + # Using a different type for the same hyperparameter name + if name in ctr: + raise ValueError("Setting hyperparameter %s as type %s, but a " + "hyperparemeter of the same name was originally " + "registered as type %s" % (name, ctr_name, orig_name)) + + def set_categorical(self, name, categories, length=None): + self._check_reset_and_type_change(name, self._categorical_params) + self._categorical_params[name] = (name, categories, length) + + def set_discrete(self, name, feasible_points, scale=None, length=None): + self._check_reset_and_type_change(name, self._discrete_params) + self._discrete_params[name] = (name, feasible_points, scale, length) + + def set_float(self, name, min_val, max_val, scale=None, length=None): + self._check_reset_and_type_change(name, self._float_params) + self._float_params[name] = (name, min_val, max_val, scale, length) + + def set_int(self, name, min_val, max_val, scale=None, length=None): + self._check_reset_and_type_change(name, self._int_params) + self._int_params[name] = (name, min_val, max_val, scale, length) + + def fix_select_params(self, hp): + ctrs = [ + self._categorical_params, self._discrete_params, 
self._float_params, + self._int_params + ] + for key, val in hp.values().iteritems(): + for ctr in ctrs: + if key in ctr: + del ctr[key] + self.set_discrete(key, [val]) + + def to_parameter_specs(self, name_prefix=""): + """To list of dicts suitable for Cloud ML Engine hyperparameter tuning.""" + specs = [] + for name, categories, _ in self._categorical_params.values(): + spec = { + "parameterName": name_prefix + name, + "type": "CATEGORICAL", + "categoricalValues": categories, + } + specs.append(spec) + + for name, feasible_points, scale, _ in self._discrete_params.values(): + spec = { + "parameterName": name_prefix + name, + "type": "DISCRETE", + "discreteValues": feasible_points, + } + if scale: + spec["scaleType"] = self.SCALES_STR[scale] + specs.append(spec) + + for name, min_val, max_val, scale, _ in self._float_params.values(): + spec = { + "parameterName": name_prefix + name, + "type": "DOUBLE", + "minValue": min_val, + "maxValue": max_val, + } + if scale: + spec["scaleType"] = self.SCALES_STR[scale] + specs.append(spec) + + for name, min_val, max_val, scale, _ in self._int_params.values(): + spec = { + "parameterName": name_prefix + name, + "type": "INTEGER", + "minValue": min_val, + "maxValue": max_val, + } + if scale: + spec["scaleType"] = self.SCALES_STR[scale] + specs.append(spec) + + return specs + + +@registry.register_ranged_hparams("basic1") +def basic_range1(ranged_hparams): + """A basic range of hyperparameters.""" + rhp = ranged_hparams + rhp.set_discrete("batch_size", [1024, 2048, 4096]) + rhp.set_discrete("num_hidden_layers", [1, 2, 3, 4, 5, 6]) + rhp.set_discrete("hidden_size", [32, 64, 128, 256, 512], scale=rhp.LOG_SCALE) + rhp.set_discrete("kernel_height", [1, 3, 5, 7]) + rhp.set_discrete("kernel_width", [1, 3, 5, 7]) + rhp.set_discrete("compress_steps", [0, 1, 2]) + rhp.set_float("dropout", 0.0, 0.5) + rhp.set_float("weight_decay", 1e-4, 10.0, scale=rhp.LOG_SCALE) + rhp.set_float("label_smoothing", 0.0, 0.2) + rhp.set_float("clip_grad_norm", 0.01, 50.0, scale=rhp.LOG_SCALE) + rhp.set_float("learning_rate", 0.005, 2.0, scale=rhp.LOG_SCALE) + rhp.set_categorical("initializer", + ["uniform", "orthogonal", "uniform_unit_scaling"]) + rhp.set_float("initializer_gain", 0.5, 3.5) + rhp.set_categorical("learning_rate_decay_scheme", + ["none", "sqrt", "noam", "exp"]) + rhp.set_float("optimizer_adam_epsilon", 1e-7, 1e-2, scale=rhp.LOG_SCALE) + rhp.set_float("optimizer_adam_beta1", 0.8, 0.9) + rhp.set_float("optimizer_adam_beta2", 0.995, 0.999) + rhp.set_categorical( + "optimizer", + ["adam", "adagrad", "momentum", "rms_prop", "sgd", "yellow_fin"]) + + +@registry.register_ranged_hparams +def basic_moe_range(rhp): + """Moe range; when this parameter is unused, it allows us to see variance.""" + rhp.set_float("moe_loss_coef", 0.01, 0.02) diff --git a/tensor2tensor/layers/common_image_attention.py b/tensor2tensor/layers/common_image_attention.py new file mode 100644 index 000000000..f19be8c71 --- /dev/null +++ b/tensor2tensor/layers/common_image_attention.py @@ -0,0 +1,702 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utils for attention mechanism for images.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from six.moves import range # pylint: disable=redefined-builtin +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import expert_utils + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class AttentionType(object): + """Types of attention type used in cia.""" + LOCAL_1D = "local_1d" + LOCAL_2D = "local_2d" + GLOBAL = "global" + GLOCAL = "global_local" + DILATED = "dilated" + MOE_LOCAL_1D = "moe_local1d" + LOCAL_BLOCK = "local_block" + NON_CAUSAL_1D = "local_1d_noncausal" + RELATIVE_LOCAL_1D = "rel_local_1d" + + @staticmethod + def get_choices(): + return [ + AttentionType.GLOBAL, + AttentionType.GLOCAL, + AttentionType.MOE_LOCAL_1D, + AttentionType.LOCAL_1D, + AttentionType.LOCAL_2D, + AttentionType.LOCAL_BLOCK, + AttentionType.DILATED, + AttentionType.NON_CAUSAL_1D, + AttentionType.RELATIVE_LOCAL_1D, + ] + + +class DistributionType(object): + """Types of distributions used in cia.""" + CAT = "cat" + DMOL = "dmol" + + @staticmethod + def get_choices(): + return [ + DistributionType.CAT, + DistributionType.DMOL, + ] + + +def maybe_reshape_4d_to_3d(x): + """Reshape input from 4D to 3D if necessary.""" + x_shape = common_layers.shape_list(x) + is_4d = False + if len(x_shape) == 4: + x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], x_shape[3]]) + is_4d = True + return x, x_shape, is_4d + + +def local_attention_2d(x, hparams, attention_type="local_attention_2d"): + """Local 2d, self attention layer.""" + # self-attention + with tf.variable_scope("local_2d_self_att"): + y = common_attention.multihead_attention_2d( + x, + None, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + attention_type=attention_type, + query_shape=hparams.query_shape, + memory_flange=hparams.memory_flange, + name="self_attention") + return y + + +def local_within_block_attention(x, + self_attention_bias, + hparams, + attention_type="local_within_block_mask_right", + q_padding="VALID", + kv_padding="VALID"): + """Local within block self attention.""" + x_new, x_shape, is_4d = maybe_reshape_4d_to_3d(x) + with tf.variable_scope("local_within_block"): + y = common_attention.multihead_attention( + common_layers.layer_preprocess(x_new, hparams), + None, + self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=attention_type, + block_width=hparams.block_width, + block_length=hparams.block_length, + q_padding=q_padding, + kv_padding=kv_padding, + q_filter_width=hparams.q_filter_width, + kv_filter_width=hparams.kv_filter_width, + name="local_within_block") + if is_4d: + y = tf.reshape(y, x_shape) + return y + + +def local_attention_1d(x, + hparams, + attention_type="local_unmasked", + q_padding="VALID", + kv_padding="VALID"): + """Local 1d self attention.""" + # self-attention + x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) + with tf.variable_scope("local_1d_self_att"): + y = common_attention.multihead_attention( + x, + None, + None, + 
hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=attention_type, + shared_rel=hparams.shared_rel, + block_width=hparams.block_width, + block_length=hparams.block_length, + q_padding=q_padding, + kv_padding=kv_padding, + q_filter_width=hparams.q_filter_width, + kv_filter_width=hparams.kv_filter_width, + make_image_summary=False, + name="self_attention") + if is_4d: + y = tf.reshape(y, x_shape) + return y + + +def get_dilated_1d_attention_mask( + num_heads, block_size, + num_blocks, memory_size, gap_size, + name="dilated_mask"): + """Dilated attention with a masking strategy.""" + mask = np.ones((num_heads, block_size, 2*block_size), bool) + + # now going over every row to do the right assignment of + # memory blocks + for i in range(block_size): + visible = 2*block_size - (block_size-i) + # You always attend to yourself, set the mask for that + mask[:, i, -(block_size - i)] = 0 + # Maybe num_blocks can be automatically calculated? + for j in range(num_blocks): + for k in range(memory_size): + index = ((gap_size + memory_size)*j) + k + if index >= visible: + break + mask[:, i, -(index + block_size - i + 1)] = 0 # Verify + + # adding a num blocks dimension + mask = np.expand_dims(mask, axis=1) + return tf.constant(mask, dtype=tf.int32, name=name) + + +def dilated_attention_1d(x, + hparams, + attention_type="masked_dilated_1d", + q_padding="VALID", + kv_padding="VALID", + gap_size=2): + """Dilated 1d self attention.""" + # self-attention + x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) + with tf.variable_scope("masked_dilated_1d"): + y = common_attention.multihead_attention( + x, + None, + None, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=attention_type, + block_width=hparams.block_width, + block_length=hparams.block_length, + q_padding=q_padding, + kv_padding=kv_padding, + q_filter_width=hparams.q_filter_width, + kv_filter_width=hparams.kv_filter_width, + gap_size=gap_size, + num_memory_blocks=hparams.num_memory_blocks, + name="self_attention") + if is_4d: + y = tf.reshape(y, x_shape) + y.set_shape([None, None, None, hparams.hidden_size]) + return y + + +def local_global_attention(x, + self_attention_bias, + hparams, + q_padding="LEFT", + kv_padding="LEFT"): + """Local and global 1d self attention.""" + with tf.variable_scope("self_local_global_att"): + [x_global, x_local] = tf.split(x, 2, axis=-1) + split_hidden_size = int(hparams.hidden_size / 2) + split_heads = int(hparams.num_heads / 2) + if self_attention_bias is not None: + self_attention_bias = get_self_attention_bias(x) + y_global = common_attention.multihead_attention( + x_global, + None, + self_attention_bias, + hparams.attention_key_channels or split_hidden_size, + hparams.attention_value_channels or split_hidden_size, + split_hidden_size, + split_heads, + hparams.attention_dropout, + q_filter_width=hparams.q_filter_width, + kv_filter_width=hparams.kv_filter_width, + q_padding=q_padding, + kv_padding=kv_padding, + name="global_self_att") + y_local = common_attention.multihead_attention( + x_local, + None, + None, + hparams.attention_key_channels or split_hidden_size, + hparams.attention_value_channels or split_hidden_size, + split_hidden_size, + split_heads, + hparams.attention_dropout, + attention_type="local_masked", + 
block_length=hparams.block_length, + block_width=hparams.block_width, + q_filter_width=hparams.q_filter_width, + kv_filter_width=hparams.kv_filter_width, + q_padding=q_padding, + kv_padding=kv_padding, + name="local_self_att") + y = tf.concat([y_global, y_local], axis=-1) + return y + + +def full_self_attention(x, + self_attention_bias, + hparams, + q_padding="LEFT", + kv_padding="LEFT"): + """Full self-attention layer.""" + x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) + if self_attention_bias is not None: + self_attention_bias = get_self_attention_bias(x) + with tf.variable_scope("self_att"): + y = common_attention.multihead_attention( + x, + None, + self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + q_filter_width=hparams.q_filter_width, + kv_filter_width=hparams.kv_filter_width, + q_padding=q_padding, + kv_padding=kv_padding, + name="self_att") + if is_4d: + y = tf.reshape(y, [x_shape[0], x_shape[1], x_shape[2], x_shape[3]]) + y.set_shape([None, None, None, hparams.hidden_size]) + return y + + +def encdec_attention_1d(x, + encoder_output, + encoder_decoder_attention_bias, + hparams): + """Local 1d self attention.""" + x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) + encoder_output, _, _ = maybe_reshape_4d_to_3d(encoder_output) + with tf.variable_scope("encdec_attention"): + # Encoder Decoder attention + y = common_attention.multihead_attention( + x, + encoder_output, + encoder_decoder_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + name="encdec_attention") + if is_4d: + y = tf.reshape(y, x_shape) + y.set_shape([None, None, None, hparams.hidden_size]) + return y + + +def transformer_decoder_layers(inputs, + encoder_output, + num_layers, + hparams, + self_attention_bias=None, + encoder_decoder_attention_bias=None, + attention_type=AttentionType.LOCAL_2D, + losses=None, + name="transformer"): + """Multi layer transformer.""" + x = inputs + x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout) + if attention_type == AttentionType.DILATED: + assert len(hparams.gap_sizes) == num_layers + for layer in range(num_layers): + with tf.variable_scope("%s_layer_%d" % (name, layer)): + # self-attention + skip connections + if attention_type == AttentionType.LOCAL_2D: + y = local_attention_2d(common_layers.layer_preprocess(x, hparams), + hparams, + attention_type="masked_local_attention_2d") + elif attention_type == AttentionType.LOCAL_1D: + y = local_attention_1d(common_layers.layer_preprocess(x, hparams), + hparams, + attention_type="local_mask_right", + q_padding="LEFT", kv_padding="LEFT") + elif attention_type == AttentionType.RELATIVE_LOCAL_1D: + y = local_attention_1d( + common_layers.layer_preprocess(x, hparams), + hparams, + attention_type="local_relative_mask_right", + q_padding="LEFT", + kv_padding="LEFT") + elif attention_type == AttentionType.NON_CAUSAL_1D: + y = local_attention_1d(common_layers.layer_preprocess(x, hparams), + hparams, + attention_type="local_unmasked", + q_padding="VALID", kv_padding="VALID") + elif attention_type == AttentionType.LOCAL_BLOCK: + y = local_within_block_attention( + common_layers.layer_preprocess(x, hparams), + self_attention_bias, hparams, + attention_type="local_within_block_mask_right", + q_padding="LEFT", kv_padding="LEFT") + 
elif attention_type == AttentionType.GLOCAL: + y = local_global_attention(common_layers.layer_preprocess(x, hparams), + self_attention_bias, hparams, + q_padding="LEFT", kv_padding="LEFT") + elif attention_type == AttentionType.DILATED: + y = dilated_attention_1d(common_layers.layer_preprocess(x, hparams), + hparams, q_padding="LEFT", + kv_padding="LEFT", + gap_size=hparams.gap_sizes[layer]) + elif attention_type == AttentionType.GLOBAL: + y = full_self_attention(common_layers.layer_preprocess(x, hparams), + self_attention_bias, hparams, + q_padding="LEFT", kv_padding="LEFT") + x = common_layers.layer_postprocess(x, y, hparams) + # enc-dec attention + skip connections + if encoder_output is not None: + y = encdec_attention_1d(common_layers.layer_preprocess(x, hparams), + encoder_output, + encoder_decoder_attention_bias, + hparams) + x = common_layers.layer_postprocess(x, y, hparams) + # feed-fwd layers + skip connections + y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams, + losses=losses) + x = common_layers.layer_postprocess(x, y, hparams) + return common_layers.layer_preprocess(x, hparams) + + +def transformer_encoder_layers(inputs, + num_layers, + hparams, + attention_type=AttentionType.GLOBAL, + self_attention_bias=None, + q_padding="VALID", + kv_padding="VALID", + name="transformer"): + """Multi layer transformer encoder.""" + x = inputs + x = tf.nn.dropout(x, 1.0 - hparams.layer_prepostprocess_dropout) + + for layer in range(num_layers): + # attention layers + skip connections + with tf.variable_scope("%s_layer_%d" % (name, layer)): + if attention_type == AttentionType.LOCAL_2D: + y = local_attention_2d(common_layers.layer_preprocess(x, hparams), + hparams, + attention_type="local_attention_2d") + elif attention_type == AttentionType.LOCAL_1D: + y = local_attention_1d(common_layers.layer_preprocess(x, hparams), + hparams, + attention_type="local_unmasked", + q_padding=q_padding, kv_padding=kv_padding) + elif attention_type == AttentionType.GLOBAL: + y = full_self_attention(common_layers.layer_preprocess(x, hparams), + self_attention_bias, hparams, + q_padding=q_padding, kv_padding=kv_padding) + x = common_layers.layer_postprocess(x, y, hparams) + # feed-fwd layer + skip connections + y = ffn_layer(common_layers.layer_preprocess(x, hparams), hparams) + x = common_layers.layer_postprocess(x, y, hparams) + return common_layers.layer_preprocess(x, hparams) + + +def ffn_layer(x, hparams, losses=None): + """ffn layer transformer.""" + with tf.variable_scope("ffn"): + if hparams.ffn_layer == "none": + return x + if hparams.ffn_layer == "conv_hidden_relu": + y = common_layers.dense_relu_dense( + x, + hparams.filter_size, + hparams.hidden_size, + dropout=hparams.relu_dropout) + elif hparams.ffn_layer == "normed_conv_hidden_relu": + y = common_layers.normed_conv_hidden_relu( + x, + hparams.norm_type, + hparams.layer_norm_epsilon, + hparams.filter_size, + hparams.hidden_size, + dropout=hparams.relu_dropout, + norm_name="convnorm") + elif hparams.ffn_layer == "self_attention_ffn": + x_shape = tf.shape(x) + x = tf.reshape(x, [x_shape[0], -1, hparams.hidden_size]) + y = common_attention.ffn_self_attention_layer( + x, hparams.filter_size, hparams.hidden_size, hparams.num_parts, + hparams.attention_dropout, hparams.share_kv) + y = tf.reshape(y, x_shape) + elif hparams.ffn_layer == "local_moe_tpu": + overhead = (hparams.moe_overhead_train + if hparams.mode == tf_estimator.ModeKeys.TRAIN + else hparams.moe_overhead_eval) + x, x_shape, is_4d = maybe_reshape_4d_to_3d(x) + y, loss = 
expert_utils.local_moe_tpu( + x, hparams.filter_size // 2, + hparams.hidden_size, + hparams.moe_num_experts, overhead=overhead, + loss_coef=hparams.moe_loss_coef) + if is_4d: + y = tf.reshape(y, x_shape) + if losses is None: + raise ValueError( + "transformer_ffn_layer with type local_moe_tpu must pass in " + "a losses list") + losses.append(loss) + else: + assert hparams.ffn_layer == "glu_ffn" + y = common_layers.gated_linear_unit_layer(x) + return y + + +def get_self_attention_bias(x): + """Creates masked self attention bias. + + Args: + x: A tensor of shape [batch, length, depth] + + Returns: + self_attention_bias: A tensor of shape [length, length, 1] + """ + + x_shape = common_layers.shape_list(x) + self_attention_bias = common_attention.attention_bias_lower_triangle( + x_shape[1]) + return self_attention_bias + + +def postprocess_image(x, rows, cols, hparams): + """Postprocessing after decoding. + + Args: + x: Tensor of shape [batch, ...], where ... can be any rank such that the + number of elements in x is batch * rows * cols * hparams.hidden_size. + rows: Integer representing number of rows in a 2-D data point. + cols: Integer representing number of columns in a 2-D data point. + hparams: HParams set. + + Returns: + Tensor of shape [batch, rows, cols, depth], where depth is + hparams.num_mixtures * 10 if hparams.likelihood is DMOL, otherwise 256. In + the special case of inference and block raster scan order, it is a Tensor + of shape [batch, num_blocks_rows, num_block_cols, block_length, block_width, + depth]. + """ + batch = common_layers.shape_list(x)[0] + x = tf.reshape(x, [batch, rows, cols, hparams.hidden_size]) + likelihood = getattr(hparams, "likelihood", DistributionType.CAT) + if likelihood == DistributionType.DMOL: + depth = hparams.num_mixtures * 10 + targets = tf.layers.dense(x, + depth, + use_bias=False, + activation=None, + name="output_conv") + else: + depth = 256 + targets = tf.layers.dense(x, + depth, + use_bias=True, + activation=None, + name="output_conv") + if (hparams.mode == tf_estimator.ModeKeys.PREDICT and + hparams.block_raster_scan): + y = targets + yshape = common_layers.shape_list(y) + block_length = hparams.query_shape[0] + block_width = hparams.query_shape[1] + + # Break into block row wise. + y = tf.reshape(y, + [batch, yshape[1] // block_length, block_length, + yshape[2], depth]) + yshape = common_layers.shape_list(y) + # Break into blocks width wise. + y_blocks = tf.reshape(y, + [batch, yshape[1], yshape[2], + yshape[3] // block_width, block_width, depth]) + + # Reshape targets as [batch, num_blocks_rows, num_block_cols, block_length, + # block_width, depth]. + targets = tf.transpose(y_blocks, [0, 1, 3, 2, 4, 5]) + + return targets + + +def prepare_encoder(inputs, hparams, attention_type="local_1d"): + """Prepare encoder for images.""" + x = prepare_image(inputs, hparams, name="enc_channels") + # Add position signals. + x = add_pos_signals(x, hparams, "enc_pos") + x_shape = common_layers.shape_list(x) + if attention_type == "local_1d": + x = tf.reshape(x, [x_shape[0], x_shape[1]*x_shape[2], hparams.hidden_size]) + x.set_shape([None, None, hparams.hidden_size]) + elif attention_type == "local_2d": + x.set_shape([None, None, None, hparams.hidden_size]) + return x + + +def prepare_decoder(targets, hparams): + """Prepare decoder for images.""" + targets_shape = common_layers.shape_list(targets) + channels = hparams.num_channels + curr_infer_length = None + + # during training, images are [batch, IMG_LEN, IMG_LEN, 3]. 
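+  # (Illustrative numbers: decoding generates the image as one flat sequence
+  #  of img_len * img_len * num_channels positions, e.g. 32 * 32 * 3 = 3072.)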
+ # At inference, they are [batch, curr_infer_length, 1, 1] + if hparams.mode == tf_estimator.ModeKeys.PREDICT: + curr_infer_length = targets_shape[1] + if hparams.block_raster_scan: + assert hparams.img_len*channels % hparams.query_shape[1] == 0 + assert hparams.img_len % hparams.query_shape[0] == 0 + total_block_width = hparams.img_len*channels + # Decoding is in block raster scan order. We divide the image into + # hparams.query_shape blocks and then decode each block in raster scan. + # To make that compatible with our inference pipeline, pad the target so + # that rows is a multiple of query_shape and columns is a multiple of + # hparams.img_len*channels + curr_infer_length = targets_shape[1] + block_padding_factor = total_block_width * hparams.query_shape[0] + targets = tf.pad(targets, [ + [0, 0], [0, -curr_infer_length % block_padding_factor], + [0, 0], [0, 0]]) + + num_blocks = total_block_width // hparams.query_shape[1] + # Reshape the image to represent blocks + target_blocks = tf.reshape( + targets, [targets_shape[0], -1, num_blocks, hparams.query_shape[0], + hparams.query_shape[1]]) + # Transpose to read the image in 2D fashion. + targets = tf.transpose(target_blocks, [0, 1, 3, 2, 4]) + else: + # add padding to make sure the size of targets is a multiple of img_height + # times number of channels. This is needed for positional encodings and + # for doing the RGB lookup. + padding_factor = channels * hparams.img_len + targets = tf.pad(targets, [ + [0, 0], [0, -curr_infer_length % padding_factor], [0, 0], [0, 0]]) + targets = tf.reshape(targets, + [targets_shape[0], -1, hparams.img_len, channels]) + # Preprocess image + x = prepare_image(targets, hparams, name="dec_channels") + x_shape = common_layers.shape_list(x) + if (hparams.dec_attention_type == AttentionType.LOCAL_2D or + hparams.dec_attention_type == AttentionType.LOCAL_BLOCK): + x = common_attention.right_shift_blockwise(x, hparams.query_shape) + x = add_pos_signals(x, hparams, "dec_pos") + else: + # Add position signals + x = tf.reshape(x, [targets_shape[0], + x_shape[1]*x_shape[2], hparams.hidden_size]) + x = common_layers.shift_right_3d(x) + x = tf.reshape(x, [targets_shape[0], + x_shape[1], x_shape[2], hparams.hidden_size]) + x = add_pos_signals(x, hparams, "dec_pos") + x = common_layers.cast_like(x, targets) + return x, x_shape[1], x_shape[2] + + +def prepare_image(inputs, hparams, name=None): + """Prepare image.""" + # TODO(trandustin): This is a legacy function. Remove its usage. + del hparams, name # unused arg + return inputs + + +def create_output(decoder_output, rows, cols, targets, hparams): + """Creates output from decoder output and vars. + + Args: + decoder_output: Tensor of shape [batch, ...], where ... can be any rank such + that the number of elements is batch * rows * cols * hparams.hidden_size. + rows: Integer representing number of rows in a 2-D data point. + cols: Integer representing number of columns in a 2-D data point. + targets: Tensor of shape [batch, hparams.img_len, hparams.img_len, + hparams.num_channels]. + hparams: HParams set. + + Returns: + Tensor of shape [batch, hparams.img_len, hparams.img_len, + hparams.num_mixtures * 10] if hparams.likelihood is DMOL, otherwise + [batch, hparams.img_len, hparams.img_len, hparams.num_channels, 256]. + In the special case of predict mode, it is a Tensor of rank 5. 
+ """ + del targets # unused arg + decoded_image = postprocess_image(decoder_output, rows, cols, hparams) + batch = common_layers.shape_list(decoded_image)[0] + depth = common_layers.shape_list(decoded_image)[-1] + likelihood = getattr(hparams, "likelihood", DistributionType.CAT) + if hparams.mode == tf_estimator.ModeKeys.PREDICT: + y = tf.reshape(decoded_image, [batch, -1, 1, 1, depth]) + output = y[:, :rows, :, :, :] + elif likelihood == DistributionType.CAT: + # Unpack the cols dimension of the Categorical. + channels = hparams.num_channels + output = tf.reshape(decoded_image, + [batch, rows, cols // channels, channels, depth]) + else: + output = decoded_image + return output + + +def get_channel_embeddings(io_depth, targets, hidden_size, name="channel"): + """Get separate embedding for each of the channels.""" + targets_split = tf.split(targets, io_depth, axis=3) + rgb_embedding_var = tf.get_variable("rgb_target_emb_%s" % name, + [256 * io_depth, hidden_size]) + rgb_embedding_var = tf.identity(rgb_embedding_var) + rgb_embedding_var *= float(hidden_size)**0.5 + channel_target_embs = [] + for i in range(io_depth): + # Adding the channel offsets to get the right embedding since the + # embedding tensor has shape 256 * io_depth, hidden_size + target_ids = tf.squeeze(targets_split[i], axis=3) + i * 256 + target_embs = common_layers.gather(rgb_embedding_var, target_ids) + channel_target_embs.append(target_embs) + + return tf.concat(channel_target_embs, axis=-1) + + +def add_pos_signals(x, hparams, name="pos_emb"): + with tf.variable_scope(name, reuse=False): + if hparams.pos == "timing": + x = common_attention.add_timing_signal_nd(x) + else: + assert hparams.pos == "emb" + x = common_attention.add_positional_embedding_nd( + x, hparams.max_length, name) + return x diff --git a/tensor2tensor/layers/common_image_attention_test.py b/tensor2tensor/layers/common_image_attention_test.py new file mode 100644 index 000000000..4ece5779d --- /dev/null +++ b/tensor2tensor/layers/common_image_attention_test.py @@ -0,0 +1,158 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for common image attention utilities.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_image_attention +from tensor2tensor.utils import hparam + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class CommonImageAttentionTest(parameterized.TestCase, tf.test.TestCase): + + @parameterized.parameters( + (common_image_attention.DistributionType.DMOL, 5, 50), + (common_image_attention.DistributionType.CAT, None, 256), + ) + def testPostProcessImageTrainMode(self, likelihood, num_mixtures, depth): + batch = 1 + rows = 8 + cols = 24 + hparams = hparam.HParams( + hidden_size=2, + likelihood=likelihood, + mode=tf_estimator.ModeKeys.TRAIN, + num_mixtures=num_mixtures, + ) + inputs = tf.random_uniform([batch, rows, cols, hparams.hidden_size], + minval=-1., maxval=1.) + outputs = common_image_attention.postprocess_image( + inputs, rows, cols, hparams) + self.assertEqual(outputs.shape, (batch, rows, cols, depth)) + + @parameterized.parameters( + (common_image_attention.DistributionType.DMOL, 5, 50), + (common_image_attention.DistributionType.CAT, None, 256), + ) + def testPostProcessImageInferMode(self, likelihood, num_mixtures, depth): + batch = 1 + rows = 8 + cols = 24 + block_length = 4 + block_width = 2 + hparams = hparam.HParams( + block_raster_scan=True, + hidden_size=2, + likelihood=likelihood, + mode=tf_estimator.ModeKeys.PREDICT, + num_mixtures=num_mixtures, + query_shape=[block_length, block_width], + ) + inputs = tf.random_uniform([batch, rows, cols, hparams.hidden_size], + minval=-1., maxval=1.) + outputs = common_image_attention.postprocess_image( + inputs, rows, cols, hparams) + num_blocks_rows = rows // block_length + num_blocks_cols = cols // block_width + self.assertEqual(outputs.shape, + (batch, num_blocks_rows, num_blocks_cols, + block_length, block_width, depth)) + + @parameterized.parameters( + (common_image_attention.DistributionType.DMOL, 5, 50), + (common_image_attention.DistributionType.CAT, None, 256), + ) + def testCreateOutputTrainMode(self, likelihood, num_mixtures, depth): + batch = 1 + height = 8 + width = 8 + channels = 3 + rows = height + if likelihood == common_image_attention.DistributionType.CAT: + cols = channels * width + else: + cols = width + hparams = hparam.HParams( + hidden_size=2, + likelihood=likelihood, + num_channels=channels, + mode=tf_estimator.ModeKeys.TRAIN, + num_mixtures=num_mixtures, + ) + decoder_output = tf.random_normal([batch, rows, cols, hparams.hidden_size]) + targets = tf.random_uniform([batch, height, width, channels], + minval=-1., maxval=1.) + output = common_image_attention.create_output( + decoder_output, rows, cols, targets, hparams) + if hparams.likelihood == common_image_attention.DistributionType.CAT: + self.assertEqual(output.shape, (batch, height, width, channels, depth)) + else: + self.assertEqual(output.shape, (batch, height, width, depth)) + + def testTransformerDecoderLayersGlobal(self): + one_hot_data = tf.constant([[[0., 1.], [1., 0.]], + [[0., 1.], [1., 0.]], + [[1., 0.], [1., 0.]]]) + + hparams = common_hparams.basic_params1() + hparams.hidden_size = 4 + hparams.num_layers = 1 + hparams.layer_prepostprocess_dropout = 0. 
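+    # The attention/ffn fields below are not part of basic_params1, so the
+    # test adds them explicitly before building the decoder layers.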
+ + hparams.add_hparam("attention_key_channels", None) + hparams.add_hparam("attention_value_channels", None) + hparams.add_hparam("num_heads", 1) + hparams.add_hparam("attention_dropout", 0.) + hparams.add_hparam("shared_rel", False) + hparams.add_hparam("block_width", 1) + hparams.add_hparam("block_length", 1) + hparams.add_hparam("q_filter_width", 1) + hparams.add_hparam("kv_filter_width", 1) + hparams.add_hparam("filter_size", 16) + hparams.add_hparam("ffn_layer", "conv_hidden_relu") + hparams.add_hparam("relu_dropout", 0.) + + conv_1d = tf.keras.layers.Conv1D(filters=hparams.hidden_size, + kernel_size=1, + use_bias=False) + shifted_data = tf.pad(one_hot_data, [[0, 0], [1, 0], [0, 0]])[..., :-1, :] + net = conv_1d(shifted_data) + output = common_image_attention.transformer_decoder_layers( + inputs=net, + encoder_output=None, + num_layers=hparams.num_layers, + hparams=hparams, + self_attention_bias=common_image_attention.get_self_attention_bias(net), + attention_type=common_image_attention.AttentionType.GLOBAL) + self.evaluate(tf.global_variables_initializer()) + output_val = self.evaluate(output) + # The outputs for the padded dimension should be equal across all data. + self.assertAllEqual(output_val[0, 0], output_val[1, 0]) + self.assertAllEqual(output_val[1, 0], output_val[2, 0]) + # The first and second elements of the batch are identical, so they should + # have the same outputs for the second latent dimension as well. + self.assertAllEqual(output_val[0, 1], output_val[1, 1]) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/layers/common_layers.py b/tensor2tensor/layers/common_layers.py new file mode 100644 index 000000000..79b393c86 --- /dev/null +++ b/tensor2tensor/layers/common_layers.py @@ -0,0 +1,4089 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Layers common to multiple models.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import contextlib +import functools +import math + +from absl import logging +import numpy as np +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.utils import contrib +import tensorflow.compat.v1 as tf +import tensorflow_probability as tfp + +from tensorflow.python.framework import function +from tensorflow.python.framework import ops +from tensorflow.python.ops import control_flow_util +from tensorflow.python.ops import inplace_ops + + +# TODO(lukaszkaiser): remove this function when not needed any more. 
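+# Call sites below use e.g. layers().Conv2D(...) / layers().SeparableConv2D(...)
+# so the same code path works with tf.layers (TF1) and tf.keras.layers (TF2).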
+def layers(): + """Get the layers module good for TF 1 and TF 2 work for now.""" + layers_module = None + try: + layers_module = tf.layers + except AttributeError: + logging.info("Cannot access tf.layers, trying TF2 layers.") + try: + from tensorflow.python import tf2 # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + if tf2.enabled(): + logging.info("Running in V2 mode, using Keras layers.") + layers_module = tf.keras.layers + except ImportError: + pass + return layers_module + + +@function.Defun( + python_grad_func=lambda x, dy: tf.convert_to_tensor(dy), + shape_func=lambda op: [op.inputs[0].get_shape()]) +def convert_gradient_to_tensor(x): + """Identity operation whose gradient is converted to a `Tensor`. + + Currently, the gradient to `tf.concat` is particularly expensive to + compute if dy is an `IndexedSlices` (a lack of GPU implementation + forces the gradient operation onto CPU). This situation occurs when + the output of the `tf.concat` is eventually passed to `tf.gather`. + It is sometimes faster to convert the gradient to a `Tensor`, so as + to get the cheaper gradient for `tf.concat`. To do this, replace + `tf.concat(x)` with `convert_gradient_to_tensor(tf.concat(x))`. + + Args: + x: A `Tensor`. + + Returns: + The input `Tensor`. + """ + return x + + +def is_xla_compiled(): + """Whether we are building graph that will be compiled by XLA. + + This checks whether the code is executing within an XLA context. + + If True, model authors should ensure the graph they build is compilable by + XLA. Specifically, they should ensure that all ops have XLA implementations + and that all shapes are statically known. + + Returns: + bool, whether the current graph will be compiled for XLA. + """ + ctxt = tf.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access + return control_flow_util.GetContainingXLAContext(ctxt) is not None + + +def to_float(x): + """Cast x to float; created because tf.to_float is deprecated.""" + return tf.cast(x, tf.float32) + + +def dropout_with_broadcast_dims(x, keep_prob, broadcast_dims=None, **kwargs): + """Like tf.nn.dropout but takes broadcast_dims instead of noise_shape. + + Instead of specifying noise_shape, this function takes broadcast_dims - + a list of dimension numbers in which noise_shape should be 1. The random + keep/drop tensor has dimensionality 1 along these dimensions. + + Args: + x: a floating point tensor. + keep_prob: A scalar Tensor with the same type as x. + The probability that each element is kept. + broadcast_dims: an optional list of integers + the dimensions along which to broadcast the keep/drop flags. + **kwargs: keyword arguments to tf.nn.dropout other than "noise_shape". + + Returns: + Tensor of the same shape as x. + """ + assert "noise_shape" not in kwargs + if broadcast_dims: + shape = tf.shape(x) + ndims = len(x.get_shape()) + # Allow dimensions like "-1" as well. 
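+    # For example, with x of shape [batch, length, hidden] and
+    # broadcast_dims=[-1], noise_shape becomes [batch, length, 1], so a single
+    # keep/drop decision is shared across the hidden dimension.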
+ broadcast_dims = [dim + ndims if dim < 0 else dim for dim in broadcast_dims] + kwargs["noise_shape"] = [ + 1 if i in broadcast_dims else shape[i] for i in range(ndims) + ] + return tf.nn.dropout(x, keep_prob, **kwargs) + + +def comma_separated_string_to_integer_list(s): + return [int(i) for i in s.split(",") if i] + + +def saturating_sigmoid(x): + """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1].""" + with tf.name_scope("saturating_sigmoid", values=[x]): + y = tf.sigmoid(x) + return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1)) + + +def hard_sigmoid(x, saturation_limit=0.9): + saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit)) + x_shifted = 0.5 * x + 0.5 + return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost + + +def hard_tanh(x, saturation_limit=0.9): + saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit)) + return tf.minimum(1.0, tf.maximum(x, -1.0)), saturation_cost + + +def inverse_exp_decay(max_step, min_value=0.01, step=None): + """Inverse-decay exponentially from min_value to 1.0 reached at max_step.""" + inv_base = tf.exp(tf.log(min_value) / float(max_step)) + if step is None: + step = tf.train.get_global_step() + if step is None: + return 1.0 + step = to_float(step) + return inv_base**tf.maximum(float(max_step) - step, 0.0) + + +def inverse_lin_decay(max_step, min_value=0.01, step=None): + """Inverse-decay linearly from min_value to 1.0 reached at max_step.""" + if step is None: + step = tf.train.get_global_step() + if step is None: + return 1.0 + step = to_float(step) + progress = tf.minimum(step / float(max_step), 1.0) + return progress * (1.0 - min_value) + min_value + + +def inverse_sigmoid_decay(max_step, min_value=0.01, step=None): + """Inverse-decay linearly from min_value to 1.0 reached at max_step.""" + if step is None: + step = tf.train.get_global_step() + if step is None: + return 1.0 + step = to_float(step) + + def sigmoid(x): + return 1 / (1 + tf.exp(-x)) + + def inv_sigmoid(y): + return tf.log(y / (1 - y)) + + assert min_value > 0, ( + "sigmoid's output is always >0 and <1. min_value must respect " + "these bounds for interpolation to work.") + assert min_value < 0.5, "Must choose min_value on the left half of sigmoid." + + # Find + # x s.t. sigmoid(x ) = y_min and + # x' s.t. sigmoid(x') = y_max + # We will map [0, max_step] to [x_min, x_max]. 
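+  # For example, with the default min_value=0.01 this gives x_min ~= -4.6 and
+  # x_max ~= 4.6, so the decay sweeps the near-linear middle of the sigmoid.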
+ y_min = min_value + y_max = 1.0 - min_value + x_min = inv_sigmoid(y_min) + x_max = inv_sigmoid(y_max) + + x = tf.minimum(step / float(max_step), 1.0) # [0, 1] + x = x_min + (x_max - x_min) * x # [x_min, x_max] + y = sigmoid(x) # [y_min, y_max] + + y = (y - y_min) / (y_max - y_min) # [0, 1] + y = y * (1.0 - y_min) # [0, 1-y_min] + y += y_min # [y_min, 1] + return y + + +def shakeshake2_py(x, y, equal=False, individual=False): + """The shake-shake sum of 2 tensors, python version.""" + if equal: + alpha = 0.5 + elif individual: + alpha = tf.random_uniform(tf.get_shape(x)[:1]) + else: + alpha = tf.random_uniform([]) + + return alpha * x + (1.0 - alpha) * y + + +@function.Defun() +def shakeshake2_grad(x1, x2, dy): + """Overriding gradient for shake-shake of 2 tensors.""" + y = shakeshake2_py(x1, x2) + dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy]) + return dx + + +@function.Defun() +def shakeshake2_indiv_grad(x1, x2, dy): + """Overriding gradient for shake-shake of 2 tensors.""" + y = shakeshake2_py(x1, x2, individual=True) + dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy]) + return dx + + +@function.Defun() +def shakeshake2_equal_grad(x1, x2, dy): + """Overriding gradient for shake-shake of 2 tensors.""" + y = shakeshake2_py(x1, x2, equal=True) + dx = tf.gradients(ys=[y], xs=[x1, x2], grad_ys=[dy]) + return dx + + +@function.Defun(grad_func=shakeshake2_grad) +def shakeshake2(x1, x2): + """The shake-shake function with a different alpha for forward/backward.""" + return shakeshake2_py(x1, x2) + + +@function.Defun(grad_func=shakeshake2_indiv_grad) +def shakeshake2_indiv(x1, x2): + return shakeshake2_py(x1, x2, individual=True) + + +@function.Defun(grad_func=shakeshake2_equal_grad) +def shakeshake2_eqgrad(x1, x2): + """The shake-shake function with a different alpha for forward/backward.""" + return shakeshake2_py(x1, x2) + + +def shakeshake(xs, equal_grad=False): + """Multi-argument shake-shake, currently approximated by sums of 2.""" + if len(xs) == 1: + return xs[0] + div = (len(xs) + 1) // 2 + arg1 = shakeshake(xs[:div], equal_grad=equal_grad) + arg2 = shakeshake(xs[div:], equal_grad=equal_grad) + if equal_grad: + return shakeshake2_eqgrad(arg1, arg2) + return shakeshake2(arg1, arg2) + + +def convert_rgb_to_real(x): + """Conversion of pixel values to real numbers.""" + with tf.name_scope("rgb_to_real", values=[x]): + x = to_float(x) + x /= 255.0 + return x + + +def convert_rgb_to_symmetric_real(x): + """Conversion of pixel values to real numbers.""" + with tf.name_scope("rgb_to_real", values=[x]): + x = to_float(x) + # Convert each pixel intensity in [0, 1, 2, ..., 255] into a real number in + # the range [-1, 1]. 
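+    # e.g. 0 -> -1.0, 127.5 -> 0.0, 255 -> 1.0.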
+ x = (x / 127.5) - 1 + return x + + +def convert_real_to_rgb(x): + """Conversion of real numbers to pixel values.""" + with tf.name_scope("real_to_rgb", values=[x]): + x *= 255.0 + return x + + +def expand_squeeze_to_nd(x, n, squeeze_dim=2, expand_dim=-1): + """Make x n-d with squeeze and expand_dims.""" + if len(x.shape) > n: + while len(x.shape) != n: + x = tf.squeeze(x, [squeeze_dim]) + else: + while len(x.shape) != n: + x = tf.expand_dims(x, expand_dim) + return x + + +def standardize_images(x): + """Image standardization on batches and videos.""" + with tf.name_scope("standardize_images", values=[x]): + x_shape = shape_list(x) + x = to_float(tf.reshape(x, [-1] + x_shape[-3:])) + x_mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True) + x_variance = tf.reduce_mean( + tf.squared_difference(x, x_mean), axis=[1, 2], keepdims=True) + num_pixels = to_float(x_shape[-2] * x_shape[-3]) + x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels)) + return tf.reshape(x, x_shape) + + +def flatten4d3d(x): + """Flatten a 4d-tensor into a 3d-tensor by joining width and height.""" + xshape = shape_list(x) + result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]]) + return result + + +# TODO(noam): remove this function after TPUs do gather faster. +def gather(params, indices, dtype=tf.float32): + """Version of tf.gather that works faster on tpu.""" + if not is_xla_compiled(): + return tf.gather(params, indices) + vocab_size = params.get_shape().as_list()[0] + indices_flat = tf.reshape(indices, [-1]) + out = tf.matmul(tf.one_hot(indices_flat, vocab_size, dtype=dtype), params) + out = reshape_like(out, tf.expand_dims(indices, -1)) + return out + + +# TODO(noam): remove this function after TPUs do cumsum faster. +def cumsum(x, axis=0, exclusive=False): + """TPU hack for tf.cumsum. + + This is equivalent to tf.cumsum and is faster on TPU as of 04/2018 unless + the axis dimension is very large. + + Args: + x: a Tensor + axis: an integer + exclusive: a boolean + + Returns: + Tensor of the same shape as x. + """ + if not is_xla_compiled(): + return tf.cumsum(x, axis=axis, exclusive=exclusive) + x_shape = shape_list(x) + rank = len(x_shape) + length = x_shape[axis] + my_range = tf.range(length) + comparator = tf.less if exclusive else tf.less_equal + mask = tf.cast( + comparator(tf.expand_dims(my_range, 1), tf.expand_dims(my_range, 0)), + x.dtype) + ret = tf.tensordot(x, mask, axes=[[axis], [0]]) + if axis != rank - 1: + ret = tf.transpose( + ret, + list(range(axis)) + [rank - 1] + list(range(axis, rank - 1))) + return ret + + +def dropout_no_scaling(x, keep_prob): + """Like tf.nn.dropout, but does not scale up. Works on integers also. + + Args: + x: a Tensor + keep_prob: a floating point number + + Returns: + Tensor of the same shape as x. + """ + if keep_prob == 1.0: + return x + mask = tf.less(tf.random_uniform(tf.shape(x)), keep_prob) + return x * cast_like(mask, x) + + +def embedding(x, + vocab_size, + dense_size, + name=None, + reuse=None, + multiplier=1.0, + symbol_dropout_rate=0.0, + embedding_var=None, + dtype=tf.float32): + """Embed x of type int64 into dense vectors, reducing to max 4 dimensions.""" + with tf.variable_scope( + name, default_name="embedding", values=[x], reuse=reuse, dtype=dtype): + if embedding_var is None: + embedding_var = tf.get_variable("kernel", [vocab_size, dense_size]) + # On the backwards pass, we want to convert the gradient from + # an indexed-slices to a regular tensor before sending it back to the + # parameter server. 
This avoids excess computation on the parameter server. + if not tf.executing_eagerly(): + embedding_var = convert_gradient_to_tensor(embedding_var) + x = dropout_no_scaling(x, 1.0 - symbol_dropout_rate) + emb_x = gather(embedding_var, x, dtype) + if multiplier != 1.0: + emb_x *= multiplier + static_shape = emb_x.shape.as_list() + if len(static_shape) < 5: + return emb_x + assert len(static_shape) == 5 + # If we had an extra channel dimension, assume it's 1, i.e. shape[3] == 1. + return tf.squeeze(emb_x, 3) + + +def shift_right(x, pad_value=None): + """Shift the second dimension of x right by one.""" + if pad_value is None: + shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])[:, :-1, :, :] + else: + shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :, :] + return shifted_targets + + +def shift_right_3d(x, pad_value=None): + """Shift the second dimension of x right by one.""" + if pad_value is None: + shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0]])[:, :-1, :] + else: + shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :] + return shifted_targets + + +def shift_right_2d(x, pad_value=None): + """Shift the second dimension of x right by one.""" + if pad_value is None: + shifted_targets = tf.pad(x, [[0, 0], [1, 0]])[:, :-1] + else: + shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1] + return shifted_targets + + +def conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None): + """Use a strided convolution to downsample x by 2, `nbr_steps` times. + + We use stride and filter size 2 to avoid the checkerboard problem of deconvs. + As detailed in http://distill.pub/2016/deconv-checkerboard/. + + Args: + x: a `Tensor` with shape `[batch, spatial, depth]` or + `[batch, spatial_1, spatial_2, depth]` + nbr_steps: number of halving downsample rounds to apply + output_filters: an int specifying the filter count for the convolutions + name: a string + reuse: a boolean + + Returns: + a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or + `[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps), + output_filters]` + """ + with tf.variable_scope( + name, default_name="conv_stride2_multistep", values=[x], reuse=reuse): + if nbr_steps == 0: + out = conv(x, output_filters, (1, 1)) + return out, [out] + hidden_layers = [x] + for i in range(nbr_steps): + hidden_layers.append( + conv( + hidden_layers[-1], + output_filters, (2, 2), + strides=2, + activation=tf.nn.relu, + name="conv" + str(i))) + return hidden_layers[-1], hidden_layers + + +def deconv_stride2_multistep(x, + nbr_steps, + output_filters, + name=None, + reuse=None): + """Use a deconvolution to upsample x by 2**`nbr_steps`. + + Args: + x: a `Tensor` with shape `[batch, spatial, depth]` or + `[batch, spatial_1, spatial_2, depth]` + nbr_steps: an int specifying the number of doubling upsample rounds to + apply. 
+ output_filters: an int specifying the filter count for the deconvolutions + name: a string + reuse: a boolean + + Returns: + a `Tensor` with shape `[batch, spatial * (2**nbr_steps), output_filters]` or + `[batch, spatial_1 * (2**nbr_steps), spatial_2 * (2**nbr_steps), + output_filters]` + """ + with tf.variable_scope( + name, default_name="deconv_stride2_multistep", values=[x], reuse=reuse): + + def deconv1d(cur, i): + cur_shape = shape_list(cur) + thicker = conv( + cur, + output_filters * 2, (1, 1), + padding="SAME", + activation=tf.nn.relu, + name="deconv1d" + str(i)) + return tf.reshape(thicker, + [cur_shape[0], cur_shape[1] * 2, 1, output_filters]) + + def deconv2d(cur, i): + thicker = conv( + cur, + output_filters * 4, (1, 1), + padding="SAME", + activation=tf.nn.relu, + name="deconv2d" + str(i)) + return tf.depth_to_space(thicker, 2) + + cur = x + for i in range(nbr_steps): + if cur.get_shape()[2] == 1: + cur = deconv1d(cur, i) + else: + cur_dim = shape_list(cur)[2] + if isinstance(cur_dim, int): + if cur_dim == 1: + cur = deconv1d(cur, i) + else: + cur = deconv2d(cur, i) + else: + cur = tf.cond( + tf.equal(cur_dim, 1), + lambda idx=i: deconv1d(cur, idx), + lambda idx=i: deconv2d(cur, idx)) + return cur + + +def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs): + """Conditional conv_fn making kernel 1d or 2d depending on inputs shape.""" + static_shape = inputs.get_shape() + if not static_shape or len(static_shape) != 4: + raise ValueError("Inputs to conv must have statically known rank 4. " + "Shape: " + str(static_shape)) + # Add support for left padding. + if kwargs.get("padding") == "LEFT": + dilation_rate = (1, 1) + if "dilation_rate" in kwargs: + dilation_rate = kwargs["dilation_rate"] + assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1 + height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0] + cond_padding = tf.cond( + tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0), + lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1])) + width_padding = 0 if static_shape[2] == 1 else cond_padding + padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]] + inputs = tf.pad(inputs, padding) + # Set middle two dimensions to None to prevent convolution from complaining + inputs.set_shape([static_shape[0], None, None, static_shape[3]]) + kwargs["padding"] = "VALID" + + def conv2d_kernel(kernel_size_arg, name_suffix): + """Call conv2d but add suffix to name.""" + name = "{}_{}".format(kwargs.get("name", "conv"), name_suffix) + original_name = kwargs.pop("name", None) + original_force2d = kwargs.pop("force2d", None) + result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs) + if original_name is not None: + kwargs["name"] = original_name # Restore for other calls. 
+ if original_force2d is not None: + kwargs["force2d"] = original_force2d + return result + + return conv2d_kernel(kernel_size, "single") + + +def conv(inputs, filters, kernel_size, dilation_rate=(1, 1), **kwargs): + def _conv2d(x, *args, **kwargs): + return layers().Conv2D(*args, **kwargs)(x) + return conv_internal( + _conv2d, + inputs, + filters, + kernel_size, + dilation_rate=dilation_rate, + **kwargs) + + +def conv1d(inputs, filters, kernel_size, dilation_rate=1, **kwargs): + return tf.squeeze( + conv(tf.expand_dims(inputs, 2), filters, (kernel_size, 1), + dilation_rate=(dilation_rate, 1), **kwargs), + 2) + + +def separable_conv(inputs, filters, kernel_size, **kwargs): + def _sep_conv2d(x, *args, **kwargs): + return layers().SeparableConv2D(*args, **kwargs)(x) + return conv_internal(_sep_conv2d, inputs, filters, kernel_size, **kwargs) + + +def subseparable_conv(inputs, filters, kernel_size, **kwargs): + """Sub-separable convolution. If separability == 0 it's a separable_conv.""" + + def conv_fn(inputs, filters, kernel_size, **kwargs): + """Sub-separable convolution, splits into separability-many blocks.""" + separability = None + if "separability" in kwargs: + separability = kwargs.pop("separability") + if separability: + parts = [] + abs_sep = separability if separability > 0 else -1 * separability + for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)): + with tf.variable_scope("part_%d" % split_idx): + if separability > 0: + parts.append( + layers().Conv2D(filters // separability, kernel_size, + **kwargs)(split)) + else: + parts.append( + layers().SeparableConv2D(filters // abs_sep, + kernel_size, **kwargs)(split)) + if separability > 1: + result = layers().Conv2D(filters, (1, 1))(tf.concat(parts, axis=3)) + elif abs_sep == 1: # If we have just one block, return it. + assert len(parts) == 1 + result = parts[0] + else: + result = tf.concat(parts, axis=3) + else: + result = layers().SeparableConv2D(filters, kernel_size, + **kwargs)(inputs) + if separability is not None: + kwargs["separability"] = separability + return result + + return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs) + + +def tpu_conv1d(inputs, filters, kernel_size, padding="SAME", name="tpu_conv1d"): + """Version of conv1d that works on TPU (as of 11/2017). + + Args: + inputs: a Tensor with shape [batch, length, input_depth]. + filters: an integer. + kernel_size: an integer. + padding: a string - "SAME" or "LEFT". + name: a string. + + Returns: + a Tensor with shape [batch, length, filters]. 
+ """ + if kernel_size == 1: + return dense(inputs, filters, name=name, use_bias=True) + if padding == "SAME": + assert kernel_size % 2 == 1 + first_offset = -((kernel_size - 1) // 2) + else: + assert padding == "LEFT" + first_offset = -(kernel_size - 1) + last_offset = first_offset + kernel_size - 1 + results = [] + padded = tf.pad(inputs, [[0, 0], [-first_offset, last_offset], [0, 0]]) + for i in range(kernel_size): + shifted = tf.slice(padded, [0, i, 0], tf.shape(inputs)) if i else inputs + shifted.set_shape(inputs.get_shape()) + results.append( + dense(shifted, filters, use_bias=(i == 0), name=name + "_%d" % i)) + ret = tf.add_n(results) + ret *= kernel_size**-0.5 + return ret + + +def layer_norm_vars(filters): + """Create Variables for layer norm.""" + scale = tf.get_variable( + "layer_norm_scale", [filters], initializer=tf.ones_initializer()) + bias = tf.get_variable( + "layer_norm_bias", [filters], initializer=tf.zeros_initializer()) + return scale, bias + + +def layer_norm_compute(x, epsilon, scale, bias, layer_collection=None): + """Layer norm raw computation.""" + + # Save these before they get converted to tensors by the casting below + params = (scale, bias) + + epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]] + mean = tf.reduce_mean(x, axis=[-1], keepdims=True) + variance = tf.reduce_mean( + tf.squared_difference(x, mean), axis=[-1], keepdims=True) + norm_x = (x - mean) * tf.rsqrt(variance + epsilon) + + output = norm_x * scale + bias + + + return output + + +def layer_norm(x, + filters=None, + epsilon=1e-6, + name=None, + reuse=None, + layer_collection=None): + """Layer normalize the tensor x, averaging over the last dimension.""" + if filters is None: + filters = shape_list(x)[-1] + with tf.variable_scope( + name, default_name="layer_norm", values=[x], reuse=reuse): + scale, bias = layer_norm_vars(filters) + return layer_norm_compute(x, epsilon, scale, bias, + layer_collection=layer_collection) + + +def group_norm(x, filters=None, num_groups=8, epsilon=1e-5): + """Group normalization as in https://arxiv.org/abs/1803.08494.""" + x_shape = shape_list(x) + if filters is None: + filters = x_shape[-1] + assert len(x_shape) == 4 + assert filters % num_groups == 0 + # Prepare variables. + scale = tf.get_variable( + "group_norm_scale", [filters], initializer=tf.ones_initializer()) + bias = tf.get_variable( + "group_norm_bias", [filters], initializer=tf.zeros_initializer()) + epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]] + # Reshape and compute group norm. + x = tf.reshape(x, x_shape[:-1] + [num_groups, filters // num_groups]) + # Calculate mean and variance on heights, width, channels (not groups). 
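For reference, the arithmetic in `layer_norm_compute` above reduces to the usual per-position normalization over the last axis; a small NumPy restatement (`layer_norm_ref` is an illustrative name, not part of the library):

```
import numpy as np

def layer_norm_ref(x, scale, bias, epsilon=1e-6):
  """(x - mean) * rsqrt(variance + epsilon) * scale + bias over the last axis."""
  mean = x.mean(axis=-1, keepdims=True)
  variance = ((x - mean) ** 2).mean(axis=-1, keepdims=True)
  return (x - mean) / np.sqrt(variance + epsilon) * scale + bias

x = np.random.randn(2, 5, 8).astype(np.float32)
y = layer_norm_ref(x, scale=np.ones(8), bias=np.zeros(8))
print(y.mean(axis=-1).round(5), y.std(axis=-1).round(3))  # ~0 and ~1 per position
```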
+ mean, variance = tf.nn.moments(x, [1, 2, 4], keep_dims=True) + norm_x = (x - mean) * tf.rsqrt(variance + epsilon) + return tf.reshape(norm_x, x_shape) * scale + bias + + +def noam_norm(x, epsilon=1.0, name=None): + """One version of layer normalization.""" + with tf.name_scope(name, default_name="noam_norm", values=[x]): + shape = x.get_shape() + ndims = len(shape) + return (tf.nn.l2_normalize(x, ndims - 1, epsilon=epsilon) * tf.sqrt( + to_float(shape[-1]))) + + +def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None): + """Layer normalization with l2 norm.""" + if filters is None: + filters = shape_list(x)[-1] + with tf.variable_scope(name, default_name="l2_norm", values=[x], reuse=reuse): + scale = tf.get_variable( + "l2_norm_scale", [filters], initializer=tf.ones_initializer()) + bias = tf.get_variable( + "l2_norm_bias", [filters], initializer=tf.zeros_initializer()) + epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]] + mean = tf.reduce_mean(x, axis=[-1], keepdims=True) + l2norm = tf.reduce_sum( + tf.squared_difference(x, mean), axis=[-1], keepdims=True) + norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon) + return norm_x * scale + bias + + +def apply_spectral_norm(x): + """Normalizes x using the spectral norm. + + The implementation follows Algorithm 1 of + https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, then it is + reshaped such that the number of channels (last-dimension) is the same. + + Args: + x: Tensor with the last dimension equal to the number of filters. + + Returns: + x: Tensor with the same shape as x normalized by the spectral norm. + assign_op: Op to be run after every step to update the vector "u". + """ + weights_shape = shape_list(x) + other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1] + + # Reshape into a 2-D matrix with outer size num_filters. + weights_2d = tf.reshape(x, (other, num_filters)) + + # v = Wu / ||W u|| + with tf.variable_scope("u", reuse=tf.AUTO_REUSE): + u = tf.get_variable( + "u", [num_filters, 1], + initializer=tf.truncated_normal_initializer(), + trainable=False) + v = tf.nn.l2_normalize(tf.matmul(weights_2d, u)) + + # u_new = vW / ||v W|| + u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d)) + + # s = v*W*u + spectral_norm = tf.squeeze( + tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new)))) + + # set u equal to u_new in the next iteration. + assign_op = tf.assign(u, tf.transpose(u_new)) + return tf.divide(x, spectral_norm), assign_op + + +def apply_norm(x, norm_type, depth, epsilon, layer_collection=None): + """Apply Normalization.""" + if layer_collection is not None: + assert norm_type == "layer" + if norm_type == "layer": + return layer_norm( + x, filters=depth, epsilon=epsilon, layer_collection=layer_collection) + if norm_type == "group": + return group_norm(x, filters=depth, epsilon=epsilon) + if norm_type == "batch": + return layers().BatchNormalization(epsilon=epsilon)(x) + if norm_type == "noam": + return noam_norm(x, epsilon) + if norm_type == "l2": + return l2_norm(x, filters=depth, epsilon=epsilon) + if norm_type == "none": + return x + raise ValueError("Parameter normalizer_fn must be one of: 'layer', 'batch'," + "'noam', 'lr', 'none'.") + + +def zero_add(previous_value, x, name=None, reuse=None): + """Resnet connection with zero initialization. + + Another type of resnet connection which returns previous_value + gamma * x. + gamma is a trainable scalar and initialized with zero. 
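The power-iteration step inside `apply_spectral_norm` above can be summarized in NumPy as follows; after enough iterations `sigma` approaches the largest singular value of the reshaped weight matrix (a rough sketch with illustrative names, not library code):

```
import numpy as np

def spectral_norm_step(w_2d, u):
  """One power-iteration update, mirroring apply_spectral_norm."""
  v = w_2d @ u
  v /= np.linalg.norm(v)
  u_new = w_2d.T @ v
  u_new /= np.linalg.norm(u_new)
  sigma = (v.T @ (w_2d @ u_new)).item()  # estimate of the largest singular value
  return w_2d / sigma, u_new

w = np.random.randn(32, 16)
u = np.random.randn(16, 1)
for _ in range(5):
  w_sn, u = spectral_norm_step(w, u)
print(np.linalg.norm(w_sn, ord=2))  # ~1.0 once the estimate has converged
```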
It is useful when a + module is plugged into a trained model and we want to make sure it matches the + original model's performance. + + Args: + previous_value: A tensor. + x: A tensor. + name: name of variable scope; defaults to zero_add. + reuse: reuse scope. + + Returns: + previous_value + gamma * x. + """ + with tf.variable_scope(name, default_name="zero_add", reuse=reuse): + gamma = tf.get_variable("gamma", (), initializer=tf.zeros_initializer()) + return previous_value + gamma * x + + +def layer_prepostprocess(previous_value, + x, + sequence, + dropout_rate, + norm_type, + depth, + epsilon, + default_name, + name=None, + dropout_broadcast_dims=None, + layer_collection=None): + """Apply a sequence of functions to the input or output of a layer. + + The sequence is specified as a string which may contain the following + characters: + a: add previous_value + n: apply normalization + d: apply dropout + z: zero add + + For example, if sequence=="dna", then the output is + previous_value + normalize(dropout(x)) + + Args: + previous_value: A Tensor, to be added as a residual connection ('a') + x: A Tensor to be transformed. + sequence: a string. + dropout_rate: a float + norm_type: a string (see apply_norm()) + depth: an integer (size of last dimension of x). + epsilon: a float (parameter for normalization) + default_name: a string + name: a string + dropout_broadcast_dims: an optional list of integers less than 3 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + layer_collection: A tensorflow_kfac.LayerCollection. Only used by the + KFAC optimizer. Default is None. + + Returns: + a Tensor + """ + with tf.variable_scope(name, default_name=default_name): + if sequence == "none": + return x + for c in sequence: + if c == "a": + x += previous_value + elif c == "z": + x = zero_add(previous_value, x) + elif c == "n": + x = apply_norm( + x, norm_type, depth, epsilon, layer_collection=layer_collection) + else: + assert c == "d", ("Unknown sequence step %s" % c) + x = dropout_with_broadcast_dims( + x, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + return x + + +def layer_preprocess(layer_input, hparams, layer_collection=None): + """Apply layer preprocessing. + + See layer_prepostprocess() for details. + + A hyperparameters object is passed for convenience. The hyperparameters + that may be used are: + + layer_preprocess_sequence + layer_prepostprocess_dropout + norm_type + hidden_size + norm_epsilon + + Args: + layer_input: a Tensor + hparams: a hyperparameters object. + layer_collection: A tensorflow_kfac.LayerCollection. Only used by the + KFAC optimizer. Default is None. + + Returns: + a Tensor + """ + assert "a" not in hparams.layer_preprocess_sequence, ( + "No residual connections allowed in hparams.layer_preprocess_sequence") + assert "z" not in hparams.layer_preprocess_sequence, ( + "No residual connections allowed in hparams.layer_preprocess_sequence") + return layer_prepostprocess( + None, + layer_input, + sequence=hparams.layer_preprocess_sequence, + dropout_rate=hparams.layer_prepostprocess_dropout, + norm_type=hparams.norm_type, + depth=None, + epsilon=hparams.norm_epsilon, + dropout_broadcast_dims=comma_separated_string_to_integer_list( + getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")), + default_name="layer_prepostprocess", + layer_collection=layer_collection) + + +def layer_postprocess(layer_input, layer_output, hparams): + """Apply layer postprocessing. + + See layer_prepostprocess() for details. 
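The `sequence` string consumed by `layer_prepostprocess` above is essentially a tiny program over a few operations; a pure-Python paraphrase with the 'z' step omitted for brevity (`apply_sequence` is an illustrative name):

```
def apply_sequence(previous_value, x, sequence, norm, dropout):
  """Interpret 'a' (add residual), 'n' (normalize), 'd' (dropout) left to right."""
  for c in sequence:
    if c == "a":
      x = x + previous_value
    elif c == "n":
      x = norm(x)
    elif c == "d":
      x = dropout(x)
  return x

# With identity norm/dropout, sequence="dna" gives previous_value + norm(dropout(x)),
# matching the layer_prepostprocess docstring.
print(apply_sequence(1.0, 2.0, "dna", norm=lambda v: v, dropout=lambda v: v))  # 3.0
```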
+ + A hyperparameters object is passed for convenience. The hyperparameters + that may be used are: + + layer_postprocess_sequence + layer_prepostprocess_dropout + norm_type + hidden_size + norm_epsilon + + Args: + layer_input: a Tensor + layer_output: a Tensor + hparams: a hyperparameters object. + + Returns: + a Tensor + """ + return layer_prepostprocess( + layer_input, + layer_output, + sequence=hparams.layer_postprocess_sequence, + dropout_rate=hparams.layer_prepostprocess_dropout, + norm_type=hparams.norm_type, + depth=None, + epsilon=hparams.norm_epsilon, + dropout_broadcast_dims=comma_separated_string_to_integer_list( + getattr(hparams, "layer_prepostprocess_dropout_broadcast_dims", "")), + default_name="layer_postprocess") + + +def conv_block_internal(conv_fn, + inputs, + filters, + dilation_rates_and_kernel_sizes, + first_relu=True, + use_elu=False, + separabilities=None, + **kwargs): + """A block of convolutions. + + Args: + conv_fn: convolution function, e.g. conv or separable_conv. + inputs: a Tensor + filters: an Integer + dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h)) + first_relu: whether to do a relu at start (defaults to True) + use_elu: whether to use ELUs instead of ReLUs (defaults to False) + separabilities: list of separability factors (per-layer). + **kwargs: additional arguments (e.g., pooling) + + Returns: + a Tensor. + """ + + name = kwargs.pop("name") if "name" in kwargs else None + mask = kwargs.pop("mask") if "mask" in kwargs else None + + # Usage for normalize_fn kwarg: + # if not specified, use layer norm + # if given normalize_fn=None, don't use any normalization + # if given normalize_fn=norm, use the specified norm function + + use_layer_norm = "normalizer_fn" not in kwargs + norm = kwargs.pop("normalizer_fn", None) + use_normalizer_fn = use_layer_norm or norm + + if use_layer_norm: + norm = lambda x, name: layer_norm(x, filters, name=name) + + with tf.variable_scope(name, "conv_block", [inputs]): + cur, counter = inputs, -1 + for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes: + counter += 1 + if first_relu or counter > 0: + cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur) + if mask is not None: + cur *= mask + if separabilities: + cur = conv_fn( + cur, + filters, + kernel_size, + dilation_rate=dilation_rate, + name="conv_block_%d" % counter, + use_bias=norm is None, + separability=separabilities[counter], + **kwargs) + else: + cur = conv_fn( + cur, + filters, + kernel_size, + dilation_rate=dilation_rate, + name="conv_block_%d" % counter, + use_bias=norm is None, + **kwargs) + if use_normalizer_fn: + cur = norm(cur, name="conv_block_norm_%d" % counter) + return cur + + +def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs): + """A block of standard 2d convolutions.""" + return conv_block_internal(conv, inputs, filters, + dilation_rates_and_kernel_sizes, **kwargs) + + +def conv1d_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs): + """A block of standard 1d convolutions.""" + return conv_block_internal(conv1d, inputs, filters, + dilation_rates_and_kernel_sizes, **kwargs) + + +def separable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes, + **kwargs): + """A block of separable convolutions.""" + return conv_block_internal(separable_conv, inputs, filters, + dilation_rates_and_kernel_sizes, **kwargs) + + +def subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes, + **kwargs): + """A block of separable convolutions.""" + return 
conv_block_internal(subseparable_conv, inputs, filters, + dilation_rates_and_kernel_sizes, **kwargs) + + +def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)): + """Pooling (supports "LEFT").""" + with tf.name_scope("pool", values=[inputs]): + static_shape = inputs.get_shape() + if not static_shape or len(static_shape) != 4: + raise ValueError("Inputs to conv must have statically known rank 4.") + # Add support for left padding. + if padding == "LEFT": + assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1 + if len(static_shape) == 3: + width_padding = 2 * (window_size[1] // 2) + padding_ = [[0, 0], [width_padding, 0], [0, 0]] + else: + height_padding = 2 * (window_size[0] // 2) + cond_padding = tf.cond( + tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0), + lambda: tf.constant(2 * (window_size[1] // 2))) + width_padding = 0 if static_shape[2] == 1 else cond_padding + padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]] + inputs = tf.pad(inputs, padding_) + inputs.set_shape([static_shape[0], None, None, static_shape[3]]) + padding = "VALID" + + return tf.nn.pool(inputs, window_size, pooling_type, padding, strides=strides) + + +def conv_block_downsample(x, + kernel, + strides, + padding, + separability=0, + name=None, + reuse=None): + """Implements a downwards-striding conv block, like Xception exit flow.""" + with tf.variable_scope( + name, default_name="conv_block_downsample", values=[x], reuse=reuse): + hidden_size = int(x.get_shape()[-1]) + res = conv_block( + x, + int(1.25 * hidden_size), [((1, 1), kernel)], + padding=padding, + strides=strides, + name="res_conv") + + x = subseparable_conv_block( + x, + hidden_size, [((1, 1), kernel)], + padding=padding, + separability=separability, + name="conv0") + x = subseparable_conv_block( + x, + int(1.25 * hidden_size), [((1, 1), kernel)], + padding=padding, + separability=separability, + name="conv1") + x = pool(x, kernel, "MAX", padding, strides=strides) + + x += res + + x = subseparable_conv_block( + x, + 2 * hidden_size, [((1, 1), kernel)], + first_relu=False, + padding=padding, + separability=separability, + name="conv2") + x = subseparable_conv_block( + x, + int(2.5 * hidden_size), [((1, 1), kernel)], + padding=padding, + separability=separability, + name="conv3") + return x + + +def get_timing_signal(length, + min_timescale=1, + max_timescale=1e4, + num_timescales=16): + """Create Tensor of sinusoids of different frequencies. + + Args: + length: Length of the Tensor to create, i.e. Number of steps. + min_timescale: a float + max_timescale: a float + num_timescales: an int + + Returns: + Tensor of shape (length, 2*num_timescales) + """ + positions = to_float(tf.range(length)) + log_timescale_increment = ( + math.log(max_timescale / min_timescale) / (num_timescales - 1)) + inv_timescales = min_timescale * tf.exp( + to_float(tf.range(num_timescales)) * -log_timescale_increment) + scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0) + return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) + + +def add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16): + """Adds a bunch of sinusoids of different frequencies to a Tensor. + + This allows attention to learn to use absolute and relative positions. + The timing signal should be added to some precursor of both the source + and the target of the attention. + + The use of relative position is possible because sin(x+y) and cos(x+y) can be + expressed in terms of y, sin(x) and cos(x). 
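`get_timing_signal` above builds the familiar sinusoidal position encoding; a NumPy mirror of the same computation with the same defaults (`timing_signal_ref` is an illustrative name):

```
import math
import numpy as np

def timing_signal_ref(length, min_timescale=1.0, max_timescale=1e4, num_timescales=16):
  """Sinusoids over a geometric range of timescales; shape [length, 2 * num_timescales]."""
  positions = np.arange(length, dtype=np.float32)
  log_increment = math.log(max_timescale / min_timescale) / (num_timescales - 1)
  inv_timescales = min_timescale * np.exp(np.arange(num_timescales) * -log_increment)
  scaled_time = positions[:, None] * inv_timescales[None, :]
  return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)

print(timing_signal_ref(10).shape)  # (10, 32); add_timing_signal pads this to depth
```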
+ + In particular, we use a geometric sequence of timescales starting with + min_timescale and ending with max_timescale. For each timescale, we + generate the two sinusoidal signals sin(timestep/timescale) and + cos(timestep/timescale). All of these sinusoids are concatenated in + the depth dimension, padded with zeros to be the same depth as the input, + and added into input. + + Args: + x: a Tensor with shape [?, length, ?, depth] + min_timescale: a float + max_timescale: a float + num_timescales: an int <= depth/2 + + Returns: + a Tensor the same shape as x. + """ + length = shape_list(x)[1] + depth = shape_list(x)[3] + signal = get_timing_signal(length, min_timescale, max_timescale, + num_timescales) + padded_signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]]) + return x + tf.reshape(padded_signal, [1, length, 1, depth]) + + +def mask_from_embedding(emb): + """Input embeddings -> padding mask. + + We have hacked symbol_modality to return all-zero embeddings for padding. + Returns a mask with 0.0 in the padding positions and 1.0 elsewhere. + + Args: + emb: a Tensor with shape [batch, width, height, depth]. + Returns: + a 0.0/1.0 Tensor with shape [batch, width, height, 1]. + """ + return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keepdims=True)) + + +def length_from_embedding(emb): + """Compute the length of each sequence in the batch. + + Args: + emb: a sequence embedding Tensor with shape [batch, max_time, 1, depth]. + Returns: + a Tensor with shape [batch]. + """ + return tf.cast(tf.reduce_sum(mask_from_embedding(emb), [1, 2, 3]), tf.int32) + + +def mask_pos_gt(source_length, target_length): + """A mask with 1.0 wherever source_pos > target_pos and 0.0 elsewhere. + + Args: + source_length: an integer + target_length: an integer + Returns: + a Tensor with shape [1, target_length, source_length] + """ + return tf.expand_dims( + tf.cast(tf.greater(tf.expand_dims(tf.range(target_length), axis=0), + tf.expand_dims(tf.range(source_length), axis=1)), + dtype=tf.float32), axis=0) + + +def mask_leq(target_length, source_length): + """A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere. + + Args: + target_length: an integer + source_length: an integer + Returns: + a Tensor with shape [1, target_length, source_length] + """ + return ones_matrix_band_part( + target_length, + source_length, + -1, + 0, + out_shape=[1, target_length, source_length]) + + +def mask_pos_lt(source_length, target_length): + """A mask with 1.0 wherever source_pos < target_pos and 0.0 elsewhere. + + Args: + source_length: an integer + target_length: an integer + Returns: + a Tensor with shape [1, target_length, source_length] + """ + return tf.expand_dims( + tf.cast(tf.less(tf.expand_dims(tf.range(target_length), axis=0), + tf.expand_dims(tf.range(source_length), axis=1)), + dtype=tf.float32), axis=0) + + +def relu_density_logit(x, reduce_dims): + """logit(density(x)). + + Useful for histograms. + + Args: + x: a Tensor, typically the output of tf.relu + reduce_dims: a list of dimensions + + Returns: + a Tensor + """ + frac = tf.reduce_mean(to_float(x > 0.0), reduce_dims) + scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10)) + return scaled + + +def maybe_zero_out_padding(inputs, kernel_size, nonpadding_mask): + """If necessary, zero out inputs to a conv for padding positions. + + Args: + inputs: a Tensor with shape [batch, length, ...] 
+ kernel_size: an integer or pair of integers + nonpadding_mask: a Tensor with shape [batch, length] + + Returns: + Tensor of the same shape as inputs. + """ + if (kernel_size != 1 and kernel_size != (1, 1) and + nonpadding_mask is not None): + while nonpadding_mask.get_shape().ndims < inputs.get_shape().ndims: + nonpadding_mask = tf.expand_dims(nonpadding_mask, -1) + return inputs * nonpadding_mask + + return inputs + + +def dense_relu_dense(inputs, + filter_size, + output_size, + output_activation=None, + dropout=0.0, + dropout_broadcast_dims=None, + layer_collection=None, + name=None): + """Hidden layer with RELU activation followed by linear projection.""" + # layer_name is appended with "conv1" or "conv2" in this method only for + # historical reasons. These are in fact dense layers. + layer_name = "%s_{}" % name if name else "{}" + h = dense( + inputs, + filter_size, + use_bias=True, + activation=tf.nn.relu, + layer_collection=layer_collection, + name=layer_name.format("conv1")) + + if dropout != 0.0: + h = dropout_with_broadcast_dims( + h, 1.0 - dropout, broadcast_dims=dropout_broadcast_dims) + o = dense( + h, + output_size, + activation=output_activation, + use_bias=True, + layer_collection=layer_collection, + name=layer_name.format("conv2")) + return o + + +def dense_dropconnect(inputs, + output_size, + dropconnect_dropout=0.0, + name="dense_dropconnect", + **kwargs): + """Dense layer with dropconnect.""" + + if dropconnect_dropout != 0.0: + tf.logging.info("Applying dropconnect as the kernel regularization.") + kwargs["kernel_regularizer"] = functools.partial( + tf.nn.dropout, keep_prob=1.0 - dropconnect_dropout) + + return dense(inputs, output_size, use_bias=True, name=name, **kwargs) + + +def conv_relu_conv(inputs, + filter_size, + output_size, + first_kernel_size=3, + second_kernel_size=3, + padding="SAME", + nonpadding_mask=None, + dropout=0.0, + name=None, + cache=None, + decode_loop_step=None): + """Hidden layer with RELU activation followed by linear projection. + + Args: + inputs: A tensor. + filter_size: An integer. + output_size: An integer. + first_kernel_size: An integer. + second_kernel_size: An integer. + padding: A string. + nonpadding_mask: A tensor. + dropout: A float. + name: A string. + cache: A dict, containing Tensors which are the results of previous + attentions, used for fast decoding. + decode_loop_step: An integer, step number of the decoding loop. + Only used for inference on TPU. If it is not None, the function + will do inplace update for the cache instead of concatenating the + current result to the cache. + + Returns: + A Tensor. + """ + with tf.variable_scope(name, "conv_relu_conv", [inputs]): + inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask) + + if cache: + if decode_loop_step is None: + inputs = cache["f"] = tf.concat([cache["f"], inputs], axis=1) + else: + # Inplace update is required for inference on TPU. + # Inplace_ops only supports inplace_update on the first dimension. 
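`dense_relu_dense` above is the standard position-wise feed-forward block: expand to `filter_size`, apply ReLU, project back to `output_size`. In NumPy terms, roughly (illustrative names only):

```
import numpy as np

def dense_relu_dense_ref(x, w1, b1, w2, b2):
  """relu(x @ w1 + b1) @ w2 + b2, applied independently at every position."""
  return np.maximum(x @ w1 + b1, 0.0) @ w2 + b2

batch, length, hidden, filter_size = 2, 4, 8, 32
x = np.random.randn(batch, length, hidden)
y = dense_relu_dense_ref(
    x,
    w1=np.random.randn(hidden, filter_size), b1=np.zeros(filter_size),
    w2=np.random.randn(filter_size, hidden), b2=np.zeros(hidden))
print(y.shape)  # (2, 4, 8)
```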
+ # The performance of current implementation is better than updating + # the tensor by adding the result of matmul(one_hot, + # update_in_current_step) + tmp_f = tf.transpose(cache["f"], perm=[1, 0, 2]) + tmp_f = inplace_ops.alias_inplace_update( + tmp_f, + decode_loop_step * tf.shape(inputs)[1], + tf.transpose(inputs, perm=[1, 0, 2])) + inputs = cache["f"] = tf.transpose(tmp_f, perm=[1, 0, 2]) + inputs = cache["f"] = inputs[:, -first_kernel_size:, :] + + h = tpu_conv1d( + inputs, filter_size, first_kernel_size, padding=padding, name="conv1") + + if cache: + h = h[:, -1:, :] + + h = tf.nn.relu(h) + if dropout != 0.0: + h = tf.nn.dropout(h, 1.0 - dropout) + h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask) + return tpu_conv1d( + h, output_size, second_kernel_size, padding=padding, name="conv2") + + +def sepconv_relu_sepconv(inputs, + filter_size, + output_size, + first_kernel_size=(1, 1), + second_kernel_size=(1, 1), + padding="LEFT", + nonpadding_mask=None, + dropout=0.0, + name=None): + """Hidden layer with RELU activation followed by linear projection.""" + with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]): + inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask) + if inputs.get_shape().ndims == 3: + is_3d = True + inputs = tf.expand_dims(inputs, 2) + else: + is_3d = False + h = separable_conv( + inputs, + filter_size, + first_kernel_size, + activation=tf.nn.relu, + padding=padding, + name="conv1") + if dropout != 0.0: + h = tf.nn.dropout(h, 1.0 - dropout) + h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask) + ret = separable_conv( + h, output_size, second_kernel_size, padding=padding, name="conv2") + if is_3d: + ret = tf.squeeze(ret, 2) + return ret + + +# DEPRECATED - use dense_relu_dense, conv_relu_conv, sepconv_relu_sepconv +def conv_hidden_relu(inputs, + hidden_size, + output_size, + kernel_size=(1, 1), + second_kernel_size=(1, 1), + dropout=0.0, + **kwargs): + """Hidden layer with RELU activation followed by linear projection.""" + name = kwargs.pop("name") if "name" in kwargs else None + with tf.variable_scope(name, "conv_hidden_relu", [inputs]): + if inputs.get_shape().ndims == 3: + is_3d = True + inputs = tf.expand_dims(inputs, 2) + else: + is_3d = False + conv_f1 = conv if kernel_size == (1, 1) else separable_conv + h = conv_f1( + inputs, + hidden_size, + kernel_size, + activation=tf.nn.relu, + name="conv1", + **kwargs) + if dropout != 0.0: + h = tf.nn.dropout(h, 1.0 - dropout) + conv_f2 = conv if second_kernel_size == (1, 1) else separable_conv + ret = conv_f2(h, output_size, second_kernel_size, name="conv2", **kwargs) + if is_3d: + ret = tf.squeeze(ret, 2) + return ret + + +def conv_gru(x, + kernel_size, + filters, + padding="SAME", + dilation_rate=(1, 1), + name=None, + reuse=None): + """Convolutional GRU in 1 dimension.""" + + # Let's make a shorthand for conv call first. + def do_conv(args, name, bias_start, padding): + return conv( + args, + filters, + kernel_size, + padding=padding, + dilation_rate=dilation_rate, + bias_initializer=tf.constant_initializer(bias_start), + name=name) + + # Here comes the GRU gate. 
+ with tf.variable_scope( + name, default_name="conv_gru", values=[x], reuse=reuse): + reset = saturating_sigmoid(do_conv(x, "reset", 1.0, padding)) + gate = saturating_sigmoid(do_conv(x, "gate", 1.0, padding)) + candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0, padding)) + return gate * x + (1 - gate) * candidate + + +def gru_feedfwd(a_t, h_prev, filters, name=None): + """position-wise Feed-fwd GRU gates following the MPNN. + + Args: + a_t: Tensor of shape [batch, length, depth] of current input + h_prev: Tensor of shape [batch, length, depth] of prev input + filters: an integer specifying number of dimensions of the filters + name: A string + Returns: + h_t: [batch, length, filters] hidden state + """ + + with tf.variable_scope(name, default_name="GRU", values=[a_t, h_prev]): + # we use right matrix multiplication to handle batches + # W_z and W_r have shape 2d, d. U_z U_r have shape d,d + z_t = ( + tf.sigmoid( + tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_z") + + tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_z"))) + r_t = ( + tf.sigmoid( + tpu_conv1d(a_t, filters, 1, padding="SAME", name="W_r") + + tpu_conv1d(h_prev, filters, 1, padding="SAME", name="U_r"))) + h_tilde = ( + tf.tanh( + tpu_conv1d(a_t, filters, 1, padding="SAME", name="W") + + tpu_conv1d(r_t * h_prev, filters, 1, padding="SAME", name="U"))) + h_t = (1. - z_t) * h_prev + z_t * h_tilde + + return h_t + + +def conv_lstm(x, + kernel_size, + filters, + padding="SAME", + dilation_rate=(1, 1), + name=None, + reuse=None): + """Convolutional LSTM in 1 dimension.""" + with tf.variable_scope( + name, default_name="conv_lstm", values=[x], reuse=reuse): + gates = conv( + x, + 4 * filters, + kernel_size, + padding=padding, + dilation_rate=dilation_rate) + g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3) + new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3]) + return tf.sigmoid(g[2]) * tf.tanh(new_cell) + + +def diagonal_conv_gru(x, + kernel_size, + filters, + dropout=0.0, + name=None, + reuse=None): + """Diagonal Convolutional GRU as in https://arxiv.org/abs/1702.08727.""" + + # Let's make a shorthand for conv call first. + def do_conv(args, name, bias_start): + return conv( + args, + filters, + kernel_size, + padding="SAME", + bias_initializer=tf.constant_initializer(bias_start), + name=name) + + # Here comes the GRU gate. + with tf.variable_scope( + name, default_name="diagonal_conv_gru", values=[x], reuse=reuse): + reset, reset_cost = hard_sigmoid(do_conv(x, "reset", 0.5)) + gate, gate_cost = hard_sigmoid(do_conv(x, "gate", 0.7)) + candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0)) + + if dropout > 0.0: + candidate = tf.nn.dropout(candidate, 1.0 - dropout) + + # Diagonal shift. + shift_filters = filters // 3 + base_filter = ([[0, 1, 0]] * (filters - 2 * shift_filters) + + [[1, 0, 0]] * shift_filters + [[0, 0, 1]] * shift_filters) + shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32) + shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3) + x_shifted = tf.nn.depthwise_conv2d( + x, shift_filter, [1, 1, 1, 1], padding="SAME") + + # Return the gated result and cost. 
+ total_cost_avg = 0.5 * (reset_cost + gate_cost) + return gate * x_shifted + (1 - gate) * candidate, total_cost_avg + + +def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1): + """Pad tensors x and y on axis 1 so that they have the same length.""" + if axis not in [1, 2]: + raise ValueError("Only axis=1 and axis=2 supported for now.") + with tf.name_scope("pad_to_same_length", values=[x, y]): + x_length = shape_list(x)[axis] + y_length = shape_list(y)[axis] + if (isinstance(x_length, int) and isinstance(y_length, int) and + x_length == y_length and final_length_divisible_by == 1): + return x, y + max_length = tf.maximum(x_length, y_length) + if final_length_divisible_by > 1: + # Find the nearest larger-or-equal integer divisible by given number. + max_length += final_length_divisible_by - 1 + max_length //= final_length_divisible_by + max_length *= final_length_divisible_by + length_diff1 = max_length - x_length + length_diff2 = max_length - y_length + + def padding_list(length_diff, arg): + if axis == 1: + return [[[0, 0], [0, length_diff]], + tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)] + return [[[0, 0], [0, 0], [0, length_diff]], + tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)] + + paddings1 = tf.concat(padding_list(length_diff1, x), axis=0) + paddings2 = tf.concat(padding_list(length_diff2, y), axis=0) + res_x = tf.pad(x, paddings1) + res_y = tf.pad(y, paddings2) + # Static shapes are the same except for axis=1. + x_shape = x.shape.as_list() + x_shape[axis] = None + res_x.set_shape(x_shape) + y_shape = y.shape.as_list() + y_shape[axis] = None + res_y.set_shape(y_shape) + return res_x, res_y + + +def pad_with_zeros(logits, labels): + """Pad labels on the length dimension to match logits length.""" + with tf.name_scope("pad_with_zeros", values=[logits, labels]): + logits, labels = pad_to_same_length(logits, labels) + if len(labels.shape) == 3: # 2-d labels. + logits, labels = pad_to_same_length(logits, labels, axis=2) + return logits, labels + + +def weights_nonzero(labels): + """Assign weight 1.0 to all labels except for padding (id=0).""" + return to_float(tf.not_equal(labels, 0)) + + +def weights_prepend_inputs_to_targets(labels): + """Assign weight 1.0 to only the "targets" portion of the labels. + + Weight 1.0 is assigned to all nonzero labels past the first zero. + See prepend_mode in common_hparams.py + + Args: + labels: A Tensor of int32s. + + Returns: + A Tensor of floats. + """ + past_first_zero = tf.cumsum(to_float(tf.equal(labels, 0)), axis=1) + nonzero = to_float(labels) + return to_float(tf.not_equal(past_first_zero * nonzero, 0)) + + +def check_nonnegative(value): + """Check that the value is nonnegative.""" + if isinstance(value, tf.Tensor): + with tf.control_dependencies([tf.assert_greater_equal(value, 0)]): + value = tf.identity(value) + elif value < 0: + raise ValueError("Value must be non-negative.") + return value + + +def weights_multi_problem(labels, taskid=-1): + """Assign weight 1.0 to only the "targets" portion of the labels. + + Weight 1.0 is assigned to all labels past the taskid. + + Args: + labels: A Tensor of int32s. + taskid: an int32 representing the task id for a problem. + + Returns: + A Tensor of floats. + + Raises: + ValueError: The Task ID must be valid. 
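A worked example of the weight masks defined above: `weights_nonzero` drops padding (id 0) everywhere, while `weights_prepend_inputs_to_targets` keeps only nonzero labels after the first 0. The label values below are made up for illustration:

```
import numpy as np

labels = np.array([[5, 7, 0, 9, 3, 0]])

# weights_nonzero: 1.0 for every non-padding label.
print((labels != 0).astype(np.float32))                      # [[1. 1. 0. 1. 1. 0.]]

# weights_prepend_inputs_to_targets: only nonzero labels past the first zero.
past_first_zero = np.cumsum(labels == 0, axis=1)
print(((past_first_zero * labels) != 0).astype(np.float32))  # [[0. 0. 0. 1. 1. 0.]]
```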
+ """ + taskid = check_nonnegative(taskid) + past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1) + # Additionally zero out the task id location + past_taskid *= to_float(tf.not_equal(labels, taskid)) + non_taskid = to_float(labels) + return to_float(tf.not_equal(past_taskid * non_taskid, 0)) + + +def weights_multi_problem_all(labels, taskid=-1): + """Assign weight 1.0 to only examples from the given task.""" + taskid = check_nonnegative(taskid) + weights = to_float(tf.not_equal(labels, 0)) + past_taskid = tf.cumsum(to_float(tf.equal(labels, taskid)), axis=1) + # Additionally zero out the task id location + past_taskid *= to_float(tf.not_equal(labels, taskid)) + non_taskid = to_float(labels) + example_mask = to_float(tf.not_equal(past_taskid * non_taskid, 0)) + example_mask = tf.reduce_sum(example_mask, axis=1) + example_mask = to_float( + tf.greater(example_mask, tf.zeros_like(example_mask))) + + return weights * tf.expand_dims(example_mask, axis=-1) + + +def weights_multi_problem_input(labels, taskid=-1): + """Assign weight 1.0 to only the inputs for the given task.""" + taskid = check_nonnegative(taskid) + weights_all_tokens = weights_multi_problem_all(labels, taskid) + weights_target = weights_multi_problem(labels, taskid) + return weights_all_tokens - weights_target + + +def weights_all(labels): + """Assign weight 1.0 to all labels.""" + return tf.ones_like(labels, dtype=tf.float32) + + +def weights_concatenated(labels): + """Assign weight 1.0 to the "target" part of the concatenated labels. + + The labels look like: + source English I love you . ID1 target French Je t'aime . ID1 source + English the cat ID1 target French le chat ID1 source English ... + + We want to assign weight 1.0 to all words in the target text (including the + ID1 end symbol), but not to the source text or the boilerplate. In the + above example, the target words that get positive weight are: + Je t'aime . ID1 le chat ID1 + + Args: + labels: a Tensor + Returns: + a Tensor + """ + eos_mask = tf.to_int32(tf.equal(labels, 1)) + sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True) + in_target = tf.equal(tf.mod(sentence_num, 2), 1) + # first two tokens of each sentence are boilerplate. + sentence_num_plus_one = sentence_num + 1 + shifted = tf.pad(sentence_num_plus_one, + [[0, 0], [2, 0], [0, 0], [0, 0]])[:, :-2, :, :] + nonboilerplate = tf.equal(sentence_num_plus_one, shifted) + ret = to_float(tf.logical_and(nonboilerplate, in_target)) + return ret + + +def padded_cross_entropy(logits, + labels, + label_smoothing, + weights_fn=weights_nonzero, + reduce_sum=True, + cutoff=0.0, + gaussian=False): + """Compute cross-entropy assuming 0s are padding. + + Computes a loss numerator (the sum of losses), and loss denominator + (the number of non-padding tokens). + + Args: + logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`. + optionally a FactoredTensor. + labels: an integer `Tensor` with shape `[batch, timesteps]`. + label_smoothing: a floating point `Scalar`. + weights_fn: A function from labels to weights. + reduce_sum: a Boolean, whether to sum at the end or not. + cutoff: a float, at which point to have no loss. + gaussian: If true, use a Gaussian distribution for label smoothing + + Returns: + loss_numerator: a `Scalar`. Sum of losses. + loss_denominator: a `Scalar. The number of non-padding target tokens. + + Raises: + ValueError: in case of unsupported argument types. 
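`padded_cross_entropy` above turns `label_smoothing` into `confidence = 1 - label_smoothing` for the true class and spreads the remainder uniformly over the other classes (see `smoothing_cross_entropy` further below). A small numeric sketch of that target distribution and the resulting loss for one token, using made-up logits:

```
import numpy as np

vocab_size, label_smoothing, label = 5, 0.1, 2
confidence = 1.0 - label_smoothing                   # 0.9 on the true class
low_confidence = label_smoothing / (vocab_size - 1)  # 0.025 on each other class

soft_targets = np.full(vocab_size, low_confidence)
soft_targets[label] = confidence
print(soft_targets, soft_targets.sum())              # sums to 1.0

logits = np.array([0.1, 0.2, 2.0, -1.0, 0.3])
log_probs = logits - np.log(np.exp(logits).sum())
print(-np.sum(soft_targets * log_probs))             # smoothed cross-entropy
```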
+ """ + if isinstance(logits, FactoredTensor): + if gaussian: + raise ValueError("Factored padded cross entropy with Gaussian smoothing " + "is not implemented yet.") + return padded_cross_entropy_factored( + logits, + labels, + label_smoothing, + weights_fn=weights_fn, + reduce_sum=reduce_sum) + confidence = 1.0 - label_smoothing + logits_shape = shape_list(logits) + vocab_size = logits_shape[-1] + with tf.name_scope("padded_cross_entropy", values=[logits, labels]): + if len(logits_shape) == 2: + # Deal with the case where we did not insert extra dimensions due to + # TPU issues. No pad-to-same-length happens in this case. + # TODO(noam): remove this logic once TPU can handle extra dimensions. + labels = tf.reshape(labels, [-1]) + else: + logits, labels = pad_with_zeros(logits, labels) + logits = tf.reshape( + logits, + shape_list(labels) + [vocab_size], + name="padded_cross_entropy_size_check") + logits = tf.cast(logits, tf.float32) + xent = smoothing_cross_entropy( + logits, labels, vocab_size, confidence, gaussian=gaussian) + weights = weights_fn(labels) + if cutoff > 0.0: + xent = tf.nn.relu(xent - cutoff) + if not reduce_sum: + return xent * weights, weights + return tf.reduce_sum(xent * weights), tf.reduce_sum(weights) + + +def _weights_one_third(labels): + """Returns Tensor of shape [batch, height, width]. Each element is 1/3.""" + return tf.ones(tf.shape(labels)[:-1]) / 3. + + +def dml_loss(pred, labels, weights_fn=_weights_one_third, reduce_sum=True): + """Discretized mixture of logistics loss. + + Args: + pred: A [batch, height, width, num_mixtures*10] tensor of floats + comprising one unconstrained mixture probability, three means + (one per channel), three standard deviations (one per channel), + and three coefficients which linearly parameterize dependence across + channels. + labels: A [batch, height, width, channels] tensor of 8-bit pixel + intensities. The computation assumes channels is 3. + weights_fn: A function of labels, returning a Tensor of shape + [batch, height, width] which weights each loss term. Default is to scale + each loss term by 1/3 so that they capture the average across channels. + reduce_sum: A boolean, to return scalar loss instead of per position. + + Returns: + Tuple of loss tensors for numerator and denominator, each a scalar if + reduce_sum else of shape [batch, height, width]. The sum of their divisions + is the number of nats for each pixel in labels. + """ + real_labels = convert_rgb_to_symmetric_real(labels) + dml_loss_value = discretized_mix_logistic_loss(pred=pred, labels=real_labels) + weights = weights_fn(labels) + loss_num = weights * dml_loss_value + loss_den = weights_nonzero(weights) + if reduce_sum: + loss_num = tf.reduce_sum(loss_num) + loss_den = tf.reduce_sum(loss_den) + return loss_num, loss_den + + +def split_to_discretized_mix_logistic_params(inputs): + """Splits input tensor into parameters of discretized mixture logistic. + + Args: + inputs: A [batch, height, width, num_mixtures*10] tensor of floats + comprising one unconstrained mixture probability, three means + (one per channel), three standard deviations (one per channel), + and three coefficients which linearly parameterize dependence across + channels. + + Returns: + Tuple of unconstrained mixture probabilities, locations, scales, and + coefficient parameters of the distribution. The mixture probability has + shape [batch, height, width, num_mixtures]. Other parameters have shape + [batch, height, width, num_mixtures, 3]. 
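The 10 * `num_mixtures` channels described above decompose, per mixture, into 1 logit, 3 means, 3 log-scales, and 3 coefficients; a NumPy version of the split performed by `split_to_discretized_mix_logistic_params` (shapes are illustrative):

```
import numpy as np

batch, height, width, num_mixtures = 2, 4, 4, 5
pred = np.random.randn(batch, height, width, num_mixtures * 10)

logits = pred[..., :num_mixtures]                          # [b, h, w, nm]
locs, log_scales, coeffs = np.split(pred[..., num_mixtures:], 3, axis=-1)
shape5 = (batch, height, width, num_mixtures, 3)
locs = locs.reshape(shape5)
log_scales = np.maximum(log_scales.reshape(shape5), -7.0)  # clipped as in the library
coeffs = np.tanh(coeffs.reshape(shape5))
print(logits.shape, locs.shape)                            # (2, 4, 4, 5) (2, 4, 4, 5, 3)
```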
+ """ + batch, height, width, output_dim = shape_list(inputs) # pylint: disable=unbalanced-tuple-unpacking + num_mixtures = output_dim // 10 + logits, locs, log_scales, coeffs = tf.split( + inputs, + num_or_size_splits=[ + num_mixtures, num_mixtures * 3, num_mixtures * 3, num_mixtures * 3 + ], + axis=-1) + split_shape = [batch, height, width, num_mixtures, 3] + locs = tf.reshape(locs, split_shape) + log_scales = tf.reshape(log_scales, split_shape) + log_scales = tf.maximum(log_scales, -7.) + coeffs = tf.reshape(coeffs, split_shape) + coeffs = tf.tanh(coeffs) + return logits, locs, log_scales, coeffs + + +def discretized_mix_logistic_loss(pred, labels): + """Computes negative log probability for the discretized mixture of logistics. + + The distribution of a whole pixel is a mixture of 3-dimensional discretized + logistic distributions. The 3-D discretized logistic factorizes as 3 1-D + discretized logistic distributions, one for each channel. It defines + + ```none + P(X = x) + = sum_{k=1}^K probs[k] * P(X = x | locs[k], scales[k]) + = sum_{k=1}^K probs[k] * [ + prod_{c=1}^3 DiscretizedLogistic(X[c] = x[c] | means[k][c], scales[k]) ] + ``` + + The means tensor is a linear combination of location parameters and previous + channels. The discretized logistic distribution assigns probability mass to an + event P(X=x) via logistic CDFs: P(X <= x + 0.5) - P(X < x - 0.5) for 1 < x < + 254; P(X <= 0.5) for x = 0; and 1 - P(X < 245.5) for x = 255. Instead of + 8-bit inputs, this implementation assumes the events are rescaled to [-1, 1]. + + Args: + pred: A [batch, height, width, num_mixtures*10] tensor of floats + comprising one unconstrained mixture probability, three means + (one per channel), three standard deviations (one per channel), + and three coefficients which linearly parameterize dependence across + channels. + labels: A [batch, height, width, channels] tensor of true pixel intensities + rescaled to [-1, 1]. The computation assumes channels is 3. + + Returns: + A [batch, height, width] tensor of the negative log conditional probability + of each pixel given all previous pixels. + """ + + logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params( + pred) + + # Tile labels to broadcast compute across the mixture dimension. + batch, height, width, num_mixtures = shape_list(logits) # pylint: disable=unbalanced-tuple-unpacking + labels = tf.tile( + tf.reshape(labels, [batch, height, width, 1, 3]), + [1, 1, 1, num_mixtures, 1]) + + # p(x) = sigmoid((x - means_i + 1/255.)/scale_i) - + # sigmoid((x - means_i - 1/255.)/scale_i) + # for each channel i. The means are linearly parameterized. + means_0 = locs[..., 0] + means_1 = locs[..., 1] + coeffs[..., 0] * labels[..., 0] + means_2 = ( + locs[..., 2] + coeffs[..., 1] * labels[..., 0] + + coeffs[..., 2] * labels[..., 1]) + means = tf.stack([means_0, means_1, means_2], axis=-1) + centered_labels = labels - means + inv_stdv = tf.exp(-log_scales) + plus_in = inv_stdv * (centered_labels + 1. / 255.) + min_in = inv_stdv * (centered_labels - 1. / 255.) + cdf_plus = tf.nn.sigmoid(plus_in) + cdf_min = tf.nn.sigmoid(min_in) + + # Compute log probability for edge case of 0 (before scaling), 255 (before + # scaling), and all other cases respectively. 
+ log_prob_0 = plus_in - tf.nn.softplus(plus_in) + log_prob_255 = -tf.nn.softplus(min_in) + prob_event = tf.maximum(cdf_plus - cdf_min, 1e-12) + log_prob_event = tf.log(prob_event) + + # Robustly select log-prob based on numerical edge-cases: (a) [-1, -1+eps); + # (b) (1-eps, 1]; (c) NaNs during `tf.gradients` of `tf.select`, which may + # cause `tf.log(0.)`; (d) p(x) < 1e-5. + mid_in = inv_stdv * centered_labels + log_prob_event_approx = ( + mid_in - log_scales - 2. * tf.nn.softplus(mid_in) - np.log(127.5)) + log_probs = tf.where( + labels < -0.999, log_prob_0, + tf.where( + labels > 0.999, log_prob_255, + tf.where(prob_event > 1e-5, log_prob_event, log_prob_event_approx))) + + # Sum over channels and compute log-probability of each mixture. + log_probs = tf.reduce_sum(log_probs, -1) + tf.nn.log_softmax(logits, axis=-1) + output = -tf.reduce_logsumexp(log_probs, axis=-1) + return output + + +def sample_from_discretized_mix_logistic(pred, seed=None): + """Sampling from a discretized mixture of logistics. + + Args: + pred: A [batch, height, width, num_mixtures*10] tensor of floats + comprising one unconstrained mixture probability, three means + (one per channel), three standard deviations (one per channel), + and three coefficients which linearly parameterize dependence across + channels. + seed: Random seed. + + Returns: + A tensor of shape [batch, height, width, 3] with real intensities scaled + between -1 and 1. + """ + + logits, locs, log_scales, coeffs = split_to_discretized_mix_logistic_params( + pred) + + # Sample mixture indicator given logits using the gumbel max trick. + num_mixtures = shape_list(logits)[-1] + gumbel_noise = -tf.log(-tf.log( + tf.random_uniform( + tf.shape(logits), minval=1e-5, maxval=1. - 1e-5, seed=seed))) + sel = tf.one_hot( + tf.argmax(logits + gumbel_noise, -1), + depth=num_mixtures, + dtype=tf.float32) + + # Select mixture component's parameters. + sel = tf.expand_dims(sel, -1) + locs = tf.reduce_sum(locs * sel, 3) + log_scales = tf.reduce_sum(log_scales * sel, 3) + coeffs = tf.reduce_sum(coeffs * sel, 3) + + # Sample from 3-D logistic & clip to interval. Note we don't round to the + # nearest 8-bit value when sampling. + uniform_noise = tf.random_uniform( + tf.shape(locs), minval=1e-5, maxval=1. - 1e-5, seed=seed) + logistic_noise = tf.log(uniform_noise) - tf.log1p(-uniform_noise) + x = locs + tf.exp(log_scales) * logistic_noise + x0 = x[..., 0] + x1 = x[..., 1] + coeffs[..., 0] * x0 + x2 = x[..., 2] + coeffs[..., 1] * x0 + coeffs[..., 2] * x1 + x = tf.stack([x0, x1, x2], axis=-1) + x = tf.clip_by_value(x, -1., 1.) + return x + + +def smoothing_cross_entropy(logits, + labels, + vocab_size, + confidence, + gaussian=False): + """Cross entropy with label smoothing to limit over-confidence. + + Args: + logits: Tensor of shape [batch_size, ?, ?, ?, vocab_size]. + labels: Tensor of shape [batch_size, ?, ?, ?]. + vocab_size: Tensor representing the size of the vocabulary. + confidence: Used to determine on and off values for label smoothing. + If `gaussian` is true, `confidence` is the variance to the Gaussian + distribution. + gaussian: Uses a Gaussian distribution for label smoothing + + Returns: + Tensor of shape [batch_size, ?, ?, ?]. + """ + with tf.name_scope("smoothing_cross_entropy", values=[logits, labels]): + # Low confidence is given to all non-true labels, uniformly. + low_confidence = (1.0 - confidence) / to_float(vocab_size - 1) + # Normalizing constant is the best cross-entropy value with soft targets. 
+ # We subtract it just for readability, makes no difference on learning. + normalizing = -( + confidence * tf.log(confidence) + to_float(vocab_size - 1) * + low_confidence * tf.log(low_confidence + 1e-20)) + + if gaussian and confidence > 0.0: + labels = tf.cast(labels, tf.float32) + + normal_dist = tfp.distributions.Normal(loc=labels, scale=confidence) + # Locations to evaluate the probability distributions. + soft_targets = normal_dist.prob( + tf.cast(tf.range(vocab_size), tf.float32)[:, None, None, None, None]) + # Reordering soft_targets from [vocab_size, batch_size, ?, ?, ?] to match + # logits: [batch_size, ?, ?, ?, vocab_size] + soft_targets = tf.transpose(soft_targets, perm=[1, 2, 3, 4, 0]) + else: + soft_targets = tf.one_hot( + tf.cast(labels, tf.int32), + depth=vocab_size, + on_value=confidence, + off_value=low_confidence) + xentropy = tf.nn.softmax_cross_entropy_with_logits_v2( + logits=logits, labels=soft_targets) + return xentropy - normalizing + + +def global_pool_1d(inputs, pooling_type="MAX", mask=None): + """Pool elements across the last dimension. + + Useful to convert a list of vectors into a single vector so as + to get a representation of a set. + + Args: + inputs: A tensor of shape [batch_size, sequence_length, input_dims] + containing the sequences of input vectors. + pooling_type: the pooling type to use, MAX or AVR + mask: A tensor of shape [batch_size, sequence_length] containing a + mask for the inputs with 1's for existing elements, and 0's elsewhere. + + Returns: + A tensor of shape [batch_size, input_dims] containing the sequences of + transformed vectors. + """ + with tf.name_scope("global_pool", values=[inputs]): + if mask is not None: + mask = tf.expand_dims(mask, axis=2) + inputs = tf.multiply(inputs, mask) + + if pooling_type == "MAX": + # A tf.pool can be used here, but reduce is cleaner + output = tf.reduce_max(inputs, axis=1) + elif pooling_type == "AVR": + if mask is not None: + # Some elems are dummy elems so we can't just reduce the average. + output = tf.reduce_sum(inputs, axis=1) + num_elems = tf.reduce_sum(mask, axis=1, keepdims=True) + output = tf.div(output, tf.maximum(num_elems, 1)) + else: + output = tf.reduce_mean(inputs, axis=1) + + return output + + +def running_global_pool_1d(inputs, pooling_type="MAX"): + """Same global pool, but only for the elements up to the current element. + + Useful for outputs where the state of future elements is not known. + Takes no mask as all elements up to the current element are assumed to exist. + Currently only supports maximum. Equivalent to using a lower triangle bias. + + Args: + inputs: A tensor of shape [batch_size, sequence_length, input_dims] + containing the sequences of input vectors. + pooling_type: Pooling type to use. Currently only supports 'MAX'. + + Returns: + A tensor of shape [batch_size, sequence_length, input_dims] containing the + running 'totals'. + """ + del pooling_type + with tf.name_scope("running_global_pool", values=[inputs]): + scan_fct = tf.maximum + # Permute inputs so seq_length is first. + elems = tf.transpose(inputs, [1, 0, 2]) + # Perform scan. + cumulatives = tf.scan(scan_fct, elems, swap_memory=True) + # Permute output to get back to original order. + output = tf.transpose(cumulatives, [1, 0, 2]) + return output + + +def gated_linear_unit_layer(x, name=None): + """Gated linear unit layer. + + Paper: Language Modeling with Gated Convolutional Networks. + Link: https://arxiv.org/abs/1612.08083 + x = Wx * sigmoid(W'x). 
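`gated_linear_unit_layer` above projects to twice the input depth and gates one half with the sigmoid of the other; in NumPy, roughly (`glu_ref` is an illustrative name):

```
import numpy as np

def glu_ref(x, w, b):
  """x @ w + b -> split in two -> first half * sigmoid(second half)."""
  a, gate = np.split(x @ w + b, 2, axis=-1)
  return a * (1.0 / (1.0 + np.exp(-gate)))

depth = 4
x = np.random.randn(2, 6, depth)
y = glu_ref(x, w=np.random.randn(depth, 2 * depth), b=np.zeros(2 * depth))
print(y.shape)  # (2, 6, 4): same shape as the input, as in the layer above
```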
+ + Args: + x: A tensor + name: A string + + Returns: + A tensor of the same shape as x. + """ + with tf.variable_scope(name, default_name="glu_layer", values=[x]): + depth = shape_list(x)[-1] + x = layers().Dense(depth * 2, activation=None)(x) + x, gating_x = tf.split(x, 2, axis=-1) + return x * tf.nn.sigmoid(gating_x) + + +def sru(x, + num_layers=2, + activation=None, + initial_state=None, + name=None, + reuse=None): + """SRU cell as in https://arxiv.org/abs/1709.02755. + + This implementation uses tf.scan and can incur overhead, see the full SRU + function doc for details and an implementation that is sometimes faster. + + Args: + x: A tensor of shape [batch, ..., channels] ; ... is treated as time. + num_layers: How many SRU layers; default is 2 as results for 1 disappoint. + activation: Optional activation function, try tf.nn.tanh or tf.nn.relu. + initial_state: Optional initial c-state, set to zeros if None. + name: Optional name, "sru" by default. + reuse: Optional reuse. + + Returns: + A tensor of the same shape as x. + + Raises: + ValueError: if num_layers is not positive. + """ + if num_layers < 1: + raise ValueError("Number of layers must be positive: %d" % num_layers) + with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse): + # We assume x is [batch, ..., channels] and treat all ... as time. + x_shape = shape_list(x) + x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]]) + x = tf.transpose(x, [1, 0, 2]) # Scan assumes time on axis 0. + initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]]) + + # SRU state manipulation function. + def next_state(cur_state, args_tup): + cur_x_times_one_minus_f, cur_f = args_tup + return cur_f * cur_state + cur_x_times_one_minus_f + + # Calculate SRU on each layer. + for i in range(num_layers): + # The parallel part of the SRU. + x_orig = x + x, f, r = tf.split( + layers().Dense(3 * x_shape[-1], name="kernel_%d" % i)(x), 3, axis=-1) + f, r = tf.sigmoid(f), tf.sigmoid(r) + x_times_one_minus_f = x * (1.0 - f) # Compute in parallel for speed. + # Calculate states. + c_states = tf.scan( + next_state, (x_times_one_minus_f, f), + initializer=initial_state, + parallel_iterations=2, + name="scan_%d" % i) + # Final output. + if activation is not None: + c_states = activation(c_states) + h = c_states * r + (1.0 - r) * x_orig + x = h # Next layer. + # Transpose back to batch-major. + x = tf.transpose(x, [1, 0, 2]) + return tf.reshape(x, x_shape) + + +def linear_set_layer(layer_size, + inputs, + context=None, + activation_fn=tf.nn.relu, + dropout=0.0, + name=None): + """Basic layer type for doing funky things with sets. + + Applies a linear transformation to each element in the input set. + If a context is supplied, it is concatenated with the inputs. + e.g. One can use global_pool_1d to get a representation of the set which + can then be used as the context for the next layer. + + TODO: Add bias add (or control the biases used). + + Args: + layer_size: Dimension to transform the input vectors to. + inputs: A tensor of shape [batch_size, sequence_length, input_dims] + containing the sequences of input vectors. + context: A tensor of shape [batch_size, context_dims] containing a global + statistic about the set. + activation_fn: The activation function to use. + dropout: Dropout probability. + name: name. + + Returns: + Tensor of shape [batch_size, sequence_length, output_dims] containing the + sequences of transformed vectors. 
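The `tf.scan` in `sru` above runs the recurrence c_t = f_t * c_{t-1} + (1 - f_t) * x_t over the time axis; a time-major NumPy paraphrase of just that scan (illustrative names):

```
import numpy as np

def sru_scan_ref(x_times_one_minus_f, f, initial_state):
  """c_t = f_t * c_{t-1} + (1 - f_t) * x_t, returned for every step."""
  c, states = initial_state, []
  for t in range(f.shape[0]):
    c = f[t] * c + x_times_one_minus_f[t]
    states.append(c)
  return np.stack(states)

length, batch, channels = 5, 2, 3
x = np.random.randn(length, batch, channels)
f = 1.0 / (1.0 + np.exp(-np.random.randn(length, batch, channels)))  # sigmoid gate
print(sru_scan_ref(x * (1.0 - f), f, np.zeros((batch, channels))).shape)  # (5, 2, 3)
```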
+ """ + with tf.variable_scope( + name, default_name="linear_set_layer", values=[inputs]): + # Apply 1D convolution to apply linear filter to each element + # along the 2nd dimension. + outputs = conv1d(inputs, layer_size, 1, activation=None, name="set_conv") + + # Apply the context if it exists. + if context is not None: + # Unfortunately tf doesn't support broadcasting via concat, but we can + # simply add the transformed context to get the same effect. + if len(context.get_shape().as_list()) == 2: + context = tf.expand_dims(context, axis=1) + cont_tfm = conv1d( + context, layer_size, 1, activation=None, name="cont_conv") + outputs += cont_tfm + + if activation_fn is not None: + outputs = activation_fn(outputs) + + if dropout != 0.0: + outputs = tf.nn.dropout(outputs, 1.0 - dropout) + + return outputs + + +def ravanbakhsh_set_layer(layer_size, + inputs, + mask=None, + sequential=False, + activation_fn=tf.nn.tanh, + dropout=0.0, + name=None): + """Layer from Deep Sets paper: https://arxiv.org/abs/1611.04500 . + + More parameter-efficient version of a linear-set-layer with context. + + Args: + layer_size: Dimension to transform the input vectors to. + inputs: A tensor of shape [batch_size, sequence_length, vector] + containing the sequences of input vectors. + mask: A tensor of shape [batch_size, sequence_length] containing a + mask for the inputs with 1's for existing elements, and 0's elsewhere. + sequential: If true, will use a running global pool so each element will + only depend on those before it. Set true if this layer is being used in + an output sequence. + activation_fn: The activation function to use. + dropout: dropout. + name: name. + + Returns: + Tensor of shape [batch_size, sequence_length, vector] containing the + sequences of transformed vectors. + """ + del dropout + with tf.variable_scope(name, "ravanbakhsh_set_layer", [inputs]): + if sequential: + return linear_set_layer( + layer_size, + inputs - running_global_pool_1d(inputs), + activation_fn=activation_fn, + name=name) + return linear_set_layer( + layer_size, + inputs - tf.expand_dims(global_pool_1d(inputs, mask=mask), axis=1), + activation_fn=activation_fn, + name=name) + + +def fn_device_dependency_dict(): + """State container for fn_device_dependency.""" + default_graph = tf.get_default_graph() + if not hasattr(default_graph, "dependency_dict"): + default_graph.dependency_dict = collections.defaultdict(list) + return default_graph.dependency_dict + + +@contextlib.contextmanager +def fn_device_dependency(name, device=""): + """Add control deps for name and device.""" + key = name + "_" + device + outs = [] + + def body(): + with tf.control_dependencies(fn_device_dependency_dict()[key]): + yield outs + assert outs + + deps = outs + if isinstance(outs[0], (list, tuple)): + assert len(outs) == 1 + deps = outs[0] + fn_device_dependency_dict()[key] = deps + + if device: + with tf.device(device): + return body() + else: + return body() + + +def underlying_variable_ref(t): + """Find the underlying variable ref. + + Traverses through Identity, ReadVariableOp, and Enter ops. + Stops when op type has Variable or VarHandle in name. + + Args: + t: a Tensor + + Returns: + a Tensor that is a variable ref, or None on error. + """ + while t.op.type in ["Identity", "ReadVariableOp", "Enter"]: + t = t.op.inputs[0] + + op_type = t.op.type + if "Variable" in op_type or "VarHandle" in op_type: + return t + else: + return None + + +def underlying_variable(t): + """Find the underlying tf.Variable object. 
+ + Args: + t: a Tensor + + Returns: + tf.Variable. + """ + t = underlying_variable_ref(t) + assert t is not None + # make sure that the graph has a variable index and that it is up-to-date + if not hasattr(tf.get_default_graph(), "var_index"): + tf.get_default_graph().var_index = {} + var_index = tf.get_default_graph().var_index + for v in tf.global_variables()[len(var_index):]: + var_index[v.name] = v + return var_index[t.name] + + +def approximate_split(x, num_splits, axis=0): + """Split approximately equally into num_splits parts. + + Args: + x: a Tensor + num_splits: an integer + axis: an integer. + + Returns: + a list of num_splits Tensors. + """ + size = shape_list(x)[axis] + size_splits = [tf.div(size + i, num_splits) for i in range(num_splits)] + return tf.split(x, size_splits, axis=axis) + + +class FactoredTensor(object): + """A concise factored representation of Tensor as two tensors. + + This class represents the tensor tf.matmul(a, b, transpose_b=True) + by storing the values of Tensors a and b. + + The reason for this is that the product may be too big to fully realize at + once, so it can be realized a part at a time. + + "a" may have extra leading dimensions, in which case they are flattened out + before computing the matrix product, then re-expanded afterwards. + """ + + def __init__(self, a, b): + self._a = a + self._b = b + + @property + def a(self): + return self._a + + @property + def b(self): + return self._b + + def to_tensor(self): + """Convert to Tensor.""" + a_shape = shape_list(self.a) + b_shape = shape_list(self.b) + inner_dim = b_shape[1] + result_dim = b_shape[0] + flat_a = tf.reshape(self.a, [-1, inner_dim]) + product = tf.matmul(flat_a, self.b, transpose_b=True) + product_shape = a_shape[:-1] + [result_dim] + product = tf.reshape(product, product_shape) + product.set_shape(self.a.get_shape().as_list()[:-1] + + [self.b.get_shape()[0]]) + return product + + +def _convert_factored_tensor_to_tensor(value, *args, **kwargs): + # call ops.convert_to_tensor to handle optional arguments appropriately + return ops.convert_to_tensor(value.to_tensor(), *args, **kwargs) + + +tf.register_tensor_conversion_function(FactoredTensor, + _convert_factored_tensor_to_tensor) + + +def smoothing_cross_entropy_factored_grad(op, dy): + """Gradient function for smoothing_cross_entropy_factored.""" + a = op.inputs[0] + b = op.inputs[1] + labels = op.inputs[2] + confidence = op.inputs[3] + num_splits = 16 + vocab_size = shape_list(b)[0] + labels = approximate_split(labels, num_splits) + a = approximate_split(a, num_splits) + dy = approximate_split(dy, num_splits) + b_grad = None + a_grad_parts = [] + deps = [] + for part in range(num_splits): + with tf.control_dependencies(deps): + logits = tf.matmul(a[part], b, transpose_b=True) + output_part = smoothing_cross_entropy(logits, labels[part], vocab_size, + confidence) + a_grad_part, b_grad_part = tf.gradients( + ys=[output_part], xs=[a[part], b], grad_ys=[dy[part]]) + a_grad_parts.append(a_grad_part) + if part > 0: + b_grad += b_grad_part + else: + b_grad = b_grad_part + deps = [b_grad, a_grad_part] + a_grad = tf.concat(a_grad_parts, 0) + return a_grad, b_grad, None, None + + +@function.Defun( + noinline=True, + python_grad_func=smoothing_cross_entropy_factored_grad, + compiled=True, + separate_compiled_gradients=True) +def smoothing_cross_entropy_factored(a, b, labels, confidence): + """Memory-efficient computation of smoothing cross-entropy. + + Avoids realizing the entire logits matrix at once. 
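+  The batch is split into 16 roughly equal chunks; each chunk's logits
+  (a_chunk times b^T) are materialized, reduced to per-example losses, and
+  chained with control dependencies so the chunks run sequentially and only
+  one chunk of logits needs to be live at a time.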
+ + Args: + a: a Tensor with shape [batch, inner_dim] + b: a Tensor with shape [vocab_size, inner_dim] + labels: an integer Tensor with shape [batch] + confidence: a float + + Returns: + A Tensor with shape [batch] + """ + num_splits = 16 + vocab_size = shape_list(b)[0] + labels = approximate_split(labels, num_splits) + a = approximate_split(a, num_splits) + parts = [] + for part in range(num_splits): + with tf.control_dependencies(parts[-1:]): + logits = tf.matmul(a[part], b, transpose_b=True) + parts.append( + smoothing_cross_entropy(logits, labels[part], vocab_size, confidence)) + return tf.concat(parts, 0) + + +def padded_cross_entropy_factored(factored_logits, + labels, + label_smoothing, + weights_fn=weights_nonzero, + reduce_sum=True): + """Memory-efficient computation of smoothing cross-entropy. + + Avoids realizing the entire logits matrix at once. + + Args: + factored_logits: a `FactoredTensor` representing a Tensor + with shape `[batch, timesteps, vocab_size]`. + labels: an integer `Tensor` with shape `[batch, timesteps]`. + label_smoothing: a floating point `Scalar`. + weights_fn: A function from labels to weights. + reduce_sum: a Boolean, whether to sum at the end or not. + + Returns: + loss_numerator: a `Scalar`. Sum of losses. + loss_denominator: a `Scalar. The number of non-padding target tokens. + """ + a = factored_logits.a + b = factored_logits.b + confidence = 1.0 - label_smoothing + with tf.name_scope("padded_cross_entropy_factored", values=[a, b, labels]): + labels_flat = tf.reshape(labels, [-1]) + a_flat = tf.reshape(a, [-1, shape_list(b)[1]]) + xent = smoothing_cross_entropy_factored(a_flat, b, labels_flat, + tf.convert_to_tensor(confidence)) + xent = tf.reshape(xent, shape_list(labels)) + weights = weights_fn(labels) + if not reduce_sum: + return xent * weights, weights + return tf.reduce_sum(xent * weights), tf.reduce_sum(weights) + + +def fn_with_custom_grad(grad_fn, use_global_vars=False): + """Decorator to create a subgraph with a custom gradient function. + + The subgraph created by the decorated function is NOT put in a Defun and so + does not suffer from the limitations of the Defun (all subgraph ops on the + same device, no summaries). + + Args: + grad_fn: function with signature + (inputs, variables, outputs, output_grads) -> (grad_inputs, grad_vars), + all of which are lists of Tensors. + use_global_vars: if True, variables will be the global variables created. + If False, will be the trainable variables. + + Returns: + Decorator for function such that the gradient is defined by grad_fn. + """ + + def dec(fn): + + @functools.wraps(fn) + def wrapped(*args): + return _fn_with_custom_grad( + fn, args, grad_fn, use_global_vars=use_global_vars) + + return wrapped + + return dec + + +def _fn_with_custom_grad(fn, inputs, grad_fn, use_global_vars=False): + """Create a subgraph with a custom gradient. + + Args: + fn: function that takes inputs as arguments and produces 1 or more Tensors. + inputs: list, will be passed as fn(*inputs). + grad_fn: function with signature + (inputs, vars, outputs, output_grads) -> (grad_inputs, grad_vars), + all of which are lists of Tensors. + use_global_vars: if True, variables will be the global variables created. + If False, will be the trainable variables. 
+ + Returns: + fn(*inputs) + """ + vs = tf.get_variable_scope() + get_vars_fn = ( + vs.global_variables if use_global_vars else vs.trainable_variables) + len_before_vars = len(get_vars_fn()) + inputs = list(inputs) + outputs = fn(*inputs) + train_vars = get_vars_fn()[len_before_vars:] + + if grad_fn is None: + return outputs + + if not isinstance(outputs, (tuple, list)): + outputs = [outputs] + outputs = list(outputs) + + defun_inputs = [inputs, train_vars, outputs] + + def custom_grad_fn(op, *dys): + """Custom grad fn applying grad_fn for identity Defun.""" + fn_inputs, fn_vars, fn_outputs = contrib.framework().nest.pack_sequence_as( + defun_inputs, list(op.inputs)) + dys = list(dys) + assert len(fn_outputs) == len(outputs) + assert len(fn_outputs) == len(dys) + + grad_inputs, grad_vars = grad_fn(fn_inputs, fn_vars, fn_outputs, dys) + grad_outputs = [None] * len(fn_outputs) + return tuple(grad_inputs + grad_vars + grad_outputs) + + # The Defun takes as input the original inputs, the trainable variables + # created in fn, and the outputs. In the forward it passes through the + # outputs. In the backwards, it produces gradients for the original inputs + # and the trainable variables. + in_types = [t.dtype for t in inputs] + out_types = [t.dtype for t in outputs] + var_types = [t.dtype for t in train_vars] + + @function.Defun( + *(in_types + var_types + out_types), + func_name="identity_custom_grad%d" % ops.uid(), + python_grad_func=custom_grad_fn, + shape_func=lambda _: [t.get_shape() for t in outputs]) + def identity(*args): + _, _, outs = contrib.framework().nest.pack_sequence_as(defun_inputs, args) + return tuple([tf.identity(t) for t in outs]) + + flat_inputs = contrib.framework().nest.flatten(defun_inputs) + id_out = identity(*flat_inputs) + return id_out + + +_function_cache = {} + + +def conv_hidden_relu_memory_efficient(x, + filter_size, + epsilon=1e-6, + forget=True, + test_vars=None, + name=None): + """LayerNorm, Conv, ReLU, Conv. + + All convolutions have kernel size 1. + + returns conv(relu(conv(layer_norm(x)))) + + Args: + x: input Tensor with shape [batch, length, io_size] + filter_size: an integer - size of the hidden layer. + epsilon: a float (for layer norm) + forget: a boolean - forget forwards activations and recompute on backprop + test_vars: optional tuple of variables for testing purposes + name: an optional string + + Returns: + a Tensor with shape [batch, length, io_size] + """ + io_size = x.get_shape().as_list()[-1] + + def forward_internal(x, f1, f2, scale, bias): + """Forward function.""" + # split batch-wise to avoid exhausting memory in cast the batch is large + # and the hidden layer is large. 
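+    # The input is flattened to [batch * length, 1, io_size] and split into
+    # num_splits chunks; chaining each chunk on the previous one with
+    # tf.control_dependencies makes them run sequentially, so only one
+    # chunk's intermediate activations need to be live at a time.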
+ num_splits = 4 + x_flat = tf.reshape(x, [-1, 1, shape_list(x)[2]]) + xs = approximate_split(x_flat, num_splits) + ys = [] + for i in range(num_splits): + with tf.control_dependencies(ys[-1:]): + n = layer_norm_compute(xs[i], epsilon, scale, bias) + y = tf.nn.conv1d(n, f1, 1, "SAME") + y = tf.nn.relu(y) + y = tf.nn.conv1d(y, f2, 1, "SAME") + ys.append(y) + y = tf.concat(ys, 0) + y = tf.reshape(y, shape_list(x)) + return y + + key = ("conv_hidden_relu_memory_efficient %s" % epsilon) + if not forget: + forward_fn = forward_internal + elif key in _function_cache: + forward_fn = _function_cache[key] + else: + + @function.Defun(compiled=True) + def grad_fn(x, f1, f2, scale, bias, dy): + """Gradient for efficiency.""" + with tf.control_dependencies([dy]): + num_splits = 4 + x_shape = shape_list(x) + flat_shape = [-1, 1, x_shape[2]] + x = tf.reshape(x, flat_shape) + dy = tf.reshape(dy, flat_shape) + xs = approximate_split(x, num_splits) + dys = approximate_split(dy, num_splits) + dxs = [] + df1 = 0 + df2 = 0 + dscale = 0 + dbias = 0 + deps = [] + for i in range(num_splits): + with tf.control_dependencies(deps): + n = layer_norm_compute(xs[i], epsilon, scale, bias) + y = tf.nn.conv1d(n, f1, 1, "SAME") + y = tf.nn.relu(y) + y = tf.nn.conv1d(y, f2, 1, "SAME") + dxi, pdf1, pdf2, pdscale, pdbias = tf.gradients( + ys=[y], xs=[xs[i], f1, f2, scale, bias], grad_ys=[dys[i]]) + df1 += pdf1 + df2 += pdf2 + dscale += pdscale + dbias += pdbias + dxs.append(dxi) + deps = [dxi, df1, df2, dscale, dbias] + with tf.control_dependencies(deps): + dx = tf.concat(dxs, 0) + dx = tf.reshape(dx, x_shape) + return dx, df1, df2, dscale, dbias + + @function.Defun( + grad_func=grad_fn, compiled=True, separate_compiled_gradients=True) + def forward_fn(x, f1, f2, scale, bias): + return forward_internal(x, f1, f2, scale, bias) + + with tf.variable_scope(name, default_name="ffn2", values=[x]): + # TODO(noam): it would be nice to save memory by casting x to float16 + # here, but this causes problems with the gradients. Figure out if there + # is a way to leave the gradients as float32. + if test_vars is not None: + f1, f2, scale, bias = list(test_vars) + else: + f1 = tf.get_variable("f1", [1, io_size, filter_size]) + f2 = tf.get_variable("f2", [1, filter_size, io_size]) + scale, bias = layer_norm_vars(io_size) + if forget: + y = forward_fn(x, f1, f2, scale, bias) + else: + y = forward_internal(x, f1, f2, scale, bias) + y.set_shape(x.get_shape()) + return y + + +def shape_list(x): + """Return list of dims, statically where possible.""" + x = tf.convert_to_tensor(x) + + # If unknown rank, return dynamic shape + if x.get_shape().dims is None: + return tf.shape(x) + + static = x.get_shape().as_list() + shape = tf.shape(x) + + ret = [] + for i, dim in enumerate(static): + if dim is None: + dim = shape[i] + ret.append(dim) + return ret + + +def list_product(els): + prod = els[0] + for el in els[1:]: + prod *= el + return prod + + +def sample_with_temperature(logits, temperature, sampling_keep_top_k=-1): + """Either argmax or random sampling. + + Args: + logits: a Tensor. + temperature: a float 0.0=argmax 1.0=random + sampling_keep_top_k: If not -1, only sample from the top k logits. + Returns: + a Tensor with one fewer dimension than logits. + """ + if temperature == 0.0: + # TF argmax doesn't handle >5 dimensions, so we reshape here. 
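+    # Flatten all leading dimensions into one, take the argmax over the
+    # final (vocabulary) axis, then restore the leading shape, e.g.
+    # [batch, length, vocab] -> [batch * length, vocab] -> [batch, length].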
+ logits_shape = shape_list(logits) + argmax = tf.argmax(tf.reshape(logits, [-1, logits_shape[-1]]), axis=1) + return tf.reshape(argmax, logits_shape[:-1]) + else: + tf.debugging.assert_greater(temperature, 0.0) + + if sampling_keep_top_k != -1: + if sampling_keep_top_k <= 0: + raise ValueError("sampling_keep_top_k must either be -1 or positive.") + + vocab_size = shape_list(logits)[1] + + k_largest = contrib.nn().nth_element( + logits, n=sampling_keep_top_k, reverse=True) + k_largest = tf.tile(tf.reshape(k_largest, [-1, 1]), [1, vocab_size]) + + # Force every position that is not in the top k to have probability near + # 0 by setting the logit to be very negative. + logits = tf.where(tf.less_equal(logits, k_largest), + tf.ones_like(logits)*-1e6, logits) + + reshaped_logits = ( + tf.reshape(logits, [-1, shape_list(logits)[-1]]) / temperature) + choices = tf.multinomial(reshaped_logits, 1) + choices = tf.reshape(choices, + shape_list(logits)[:logits.get_shape().ndims - 1]) + return choices + + +def _select_top_k(logits, top_k): + """Replaces logits, expect the top k highest values, with small number (-1e6). + + If k is -1 don't replace anything. + + Args: + logits: A `Tensor` of shape [batch_size, ..., vocab_size] + top_k: vector of batch size. + + Returns: + A `Tensor` with same shape as logits. + """ + vocab_size = logits.shape[-1] + + top_k = tf.where( + tf.not_equal(top_k, -1), top_k, + tf.ones_like(top_k) * vocab_size) + + return tf.where( + tf.argsort(logits) < tf.reshape(top_k, [-1] + [1] * + (len(logits.shape) - 1)), logits, + tf.ones_like(logits) * -1e6) + + +def sample_temperature_per_example(logits, temperature, sampling_keep_top_k=-1): + """Either random sampling with different temperature per example. + + Args: + logits: a Tensor. + temperature: a float vector of same size as logits. + sampling_keep_top_k: If not -1, only sample from the top k logits. + Returns: + a Tensor with one fewer dimension than logits. + """ + logits = _select_top_k(logits, sampling_keep_top_k) + logits /= tf.reshape(temperature, [-1] + [1] * (len(logits.shape) - 1)) + reshaped_logits = tf.reshape(logits, [-1, shape_list(logits)[-1]]) + choices = tf.multinomial(reshaped_logits, 1) + choices = tf.reshape(choices, + shape_list(logits)[:logits.get_shape().ndims - 1]) + return choices + + +def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None): + """Matrix band part of ones. + + Args: + rows: int determining number of rows in output + cols: int + num_lower: int, maximum distance backward. Negative values indicate + unlimited. + num_upper: int, maximum distance forward. Negative values indicate + unlimited. + out_shape: shape to reshape output by. + + Returns: + Tensor of size rows * cols reshaped into shape out_shape. 
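+
+  Example (illustrative): a causal, lower-triangular mask:
+
+    ones_matrix_band_part(4, 4, num_lower=-1, num_upper=0)
+    # => [[1., 0., 0., 0.],
+    #     [1., 1., 0., 0.],
+    #     [1., 1., 1., 0.],
+    #     [1., 1., 1., 1.]]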
+ """ + if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]): + # Needed info is constant, so we construct in numpy + if num_lower < 0: + num_lower = rows - 1 + if num_upper < 0: + num_upper = cols - 1 + lower_mask = np.tri(cols, rows, num_lower).T + upper_mask = np.tri(rows, cols, num_upper) + band = np.ones((rows, cols)) * lower_mask * upper_mask + if out_shape: + band = band.reshape(out_shape) + band = tf.constant(band, tf.float32) + else: + band = tf.linalg.band_part( + tf.ones([rows, cols]), tf.cast(num_lower, tf.int64), + tf.cast(num_upper, tf.int64)) + if out_shape: + band = tf.reshape(band, out_shape) + + return band + + +def reshape_like_all_dims(a, b): + """Reshapes a to match the shape of b.""" + ret = tf.reshape(a, tf.shape(b)) + if not tf.executing_eagerly(): + ret.set_shape(b.get_shape()) + return ret + + +def recompute_grad(fn): + """Decorator that recomputes the function on the backwards pass. + + Args: + fn: a function that takes Tensors (all as positional arguments) and returns + a tuple of Tensors. + + Returns: + A wrapped fn that is identical to fn when called, but its activations will + be discarded and recomputed on the backwards pass (i.e. on a call to + tf.gradients). + """ + + @functools.wraps(fn) + def wrapped(*args): + return _recompute_grad(fn, args) + + return wrapped + + +def _recompute_grad(fn, args): + """See recompute_grad.""" + + cached_vs = [] + cached_arg_scope = [] + + def grad_fn(inputs, variables, outputs, output_grads): + """Recompute outputs for gradient computation.""" + del outputs + variables = [underlying_variable_ref(v) for v in variables] + # Recompute outputs + with tf.control_dependencies(output_grads): + with contrib.framework().arg_scope(cached_arg_scope[0]): + with tf.variable_scope(cached_vs[0], reuse=True): + outputs = fn(*inputs) + + if not isinstance(outputs, (list, tuple)): + outputs = [outputs] + outputs = list(outputs) + grads = tf.gradients(outputs, inputs + variables, output_grads) + grad_inputs = grads[:len(inputs)] + grad_vars = grads[len(inputs):] + # TODO(rsepassi): Make fn_with_custom_grad work with bfloat16. + # If the input gradients are bfloat16, it's assumed the variables are + # bfloat16. This is a hack to ensure that grad_vars are the right type. + if grad_inputs[0].dtype == tf.bfloat16: + grad_vars = [tf.cast(grad_var, tf.bfloat16) for grad_var in grad_vars] + return grad_inputs, grad_vars + + @fn_with_custom_grad(grad_fn) + def fn_with_recompute(*args): + cached_vs.append(tf.get_variable_scope()) + cached_arg_scope.append(contrib.framework().current_arg_scope()) + return fn(*args) + + return fn_with_recompute(*args) + + +def dense(x, units, **kwargs): + """Identical to layers.dense.""" + layer_collection = kwargs.pop("layer_collection", None) + activations = layers().Dense(units, **kwargs)(x) + if layer_collection: + # We need to find the layer parameters using scope name for the layer, so + # check that the layer is named. Otherwise parameters for different layers + # may get mixed up. + layer_name = tf.get_variable_scope().name + if (not layer_name) or ("name" not in kwargs): + raise ValueError( + "Variable scope and layer name cannot be empty. 
Actual: " + "variable_scope={}, layer name={}".format( + layer_name, kwargs.get("name", None))) + + layer_name += "/" + kwargs["name"] + layer_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, + scope=layer_name) + assert layer_params + if len(layer_params) == 1: + layer_params = layer_params[0] + + tf.logging.info( + "Registering dense layer to collection for tensor: {}".format( + layer_params)) + + x_shape = x.shape.as_list() + if len(x_shape) == 3: + # Handle [batch, time, depth] inputs by folding batch and time into + # one dimension: reshaping inputs to [batchxtime, depth]. + x_2d = tf.reshape(x, [-1, x_shape[2]]) + activations_shape = activations.shape.as_list() + activations_2d = tf.reshape(activations, [-1, activations_shape[2]]) + layer_collection.register_fully_connected_multi( + layer_params, x_2d, activations_2d, num_uses=x_shape[1]) + activations = tf.reshape(activations_2d, activations_shape) + else: + layer_collection.register_fully_connected(layer_params, x, activations) + return activations + + +def batch_dense(inputs, + units, + activation=None, + kernel_initializer=None, + reuse=None, + name=None): + """Multiply a batch of input matrices by a batch of parameter matrices. + + Each input matrix is multiplied by the corresponding parameter matrix. + + This is useful in a mixture-of-experts where the batch represents different + experts with different inputs. + + Args: + inputs: a Tensor with shape [batch, length, input_units] + units: an integer + activation: an optional activation function to apply to the output + kernel_initializer: an optional initializer + reuse: whether to reuse the varaible scope + name: an optional string + + Returns: + a Tensor with shape [batch, length, units] + + Raises: + ValueError: if the "batch" or "input_units" dimensions of inputs are not + statically known. + """ + inputs_shape = shape_list(inputs) + if len(inputs_shape) != 3: + raise ValueError("inputs must have 3 dimensions") + batch = inputs_shape[0] + input_units = inputs_shape[2] + if not isinstance(batch, int) or not isinstance(input_units, int): + raise ValueError("inputs must have static dimensions 0 and 2") + with tf.variable_scope( + name, + default_name="batch_dense", + values=[inputs], + reuse=reuse, + dtype=inputs.dtype): + if kernel_initializer is None: + kernel_initializer = tf.random_normal_initializer( + stddev=input_units**-0.5) + w = tf.get_variable( + "w", [batch, input_units, units], + initializer=kernel_initializer, + dtype=inputs.dtype) + y = tf.matmul(inputs, w) + if activation is not None: + y = activation(y) + return y + + +def mix(x1, + x2, + steps, + is_training, + min_prob=0.0, + max_prob=1.0, + mode="lin", + simple=False, + broadcast_last=False): + """Mix starting with x2, mixing mixing, going towards x1.""" + with tf.name_scope("mix"): + if not is_training: + if max_prob >= 1.0: + return x1 + alpha_shape = shape_list(x1) + if broadcast_last: + alpha_shape = alpha_shape[:-1] + [1] + alpha = tf.random_uniform(alpha_shape) + alpha = to_float(tf.less(alpha, max_prob)) + return alpha * x1 + (1.0 - alpha) * x2 + + def get_res(): + """Create the result. + + Separate function to speed it up later (see below). + + Returns: + Tensor of mixed inputs. 
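+
+      The mixing probability ramps from min_prob towards max_prob over
+      `steps` training steps, following an inverse-linear or
+      inverse-exponential schedule depending on `mode`.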
+ """ + if mode == "lin": + alpha_p = inverse_lin_decay(steps) + else: + alpha_p = inverse_exp_decay(steps) + alpha_p = alpha_p * (max_prob - min_prob) + min_prob + if simple: + return alpha_p * x1 + (1.0 - alpha_p) * x2 + alpha_shape = shape_list(x1) + if broadcast_last: + alpha_shape = alpha_shape[:-1] + [1] + alpha = tf.random_uniform(alpha_shape) + alpha = to_float(tf.less(alpha, alpha_p)) + return alpha * x1 + (1.0 - alpha) * x2 + + if max_prob < 1.0: + return get_res() + + # Prevent sampling after steps is passed to speed it up. + if is_xla_compiled(): + return get_res() + else: + cur_step = tf.train.get_global_step() + if cur_step is None: + return x1 # Step not available, probably eval mode, don't mix. + return tf.cond(tf.less(cur_step, steps), get_res, lambda: x1) + + +def brelu(x): + """Bipolar ReLU as in https://arxiv.org/abs/1709.04054.""" + x_shape = shape_list(x) + x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1) + y1 = tf.nn.relu(x1) + y2 = -tf.nn.relu(-x2) + return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape) + + +def belu(x): + """Bipolar ELU as in https://arxiv.org/abs/1709.04054.""" + x_shape = shape_list(x) + x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1) + y1 = tf.nn.elu(x1) + y2 = -tf.nn.elu(-x2) + return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape) + + +def gelu(x): + """Gaussian Error Linear Unit. + + This is a smoother version of the RELU. + Original paper: https://arxiv.org/abs/1606.08415 + + Args: + x: float Tensor to perform activation. + + Returns: + x with the GELU activation applied. + """ + cdf = 0.5 * (1.0 + tf.tanh( + (np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))) + return x * cdf + + +def nac(x, depth, name=None, reuse=None): + """NAC as in https://arxiv.org/abs/1808.00508.""" + with tf.variable_scope(name, default_name="nac", values=[x], reuse=reuse): + x_shape = shape_list(x) + w = tf.get_variable("w", [x_shape[-1], depth]) + m = tf.get_variable("m", [x_shape[-1], depth]) + w = tf.tanh(w) * tf.nn.sigmoid(m) + x_flat = tf.reshape(x, [-1, x_shape[-1]]) + res_flat = tf.matmul(x_flat, w) + return tf.reshape(res_flat, x_shape[:-1] + [depth]) + + +def nalu(x, depth, epsilon=1e-30, name=None, reuse=None): + """NALU as in https://arxiv.org/abs/1808.00508.""" + with tf.variable_scope(name, default_name="nalu", values=[x], reuse=reuse): + x_shape = shape_list(x) + x_flat = tf.reshape(x, [-1, x_shape[-1]]) + gw = tf.get_variable("w", [x_shape[-1], depth]) + g = tf.nn.sigmoid(tf.matmul(x_flat, gw)) + g = tf.reshape(g, x_shape[:-1] + [depth]) + a = nac(x, depth, name="nac_lin") + log_x = tf.log(tf.abs(x) + epsilon) + m = nac(log_x, depth, name="nac_log") + return g * a + (1 - g) * tf.exp(m) + + +def argmax_with_score(logits, axis=None): + """Argmax along with the value.""" + axis = axis or len(logits.get_shape()) - 1 + predictions = tf.argmax(logits, axis=axis) + + logits_shape = shape_list(logits) + prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1] + prefix_size = 1 + for d in prefix_shape: + prefix_size *= d + + # Flatten to extract scores + flat_logits = tf.reshape(logits, [prefix_size, vocab_size]) + flat_predictions = tf.reshape(predictions, [prefix_size]) + flat_indices = tf.stack( + [tf.range(tf.to_int64(prefix_size)), + tf.to_int64(flat_predictions)], + axis=1) + flat_scores = tf.gather_nd(flat_logits, flat_indices) + + # Unflatten + scores = tf.reshape(flat_scores, prefix_shape) + + return predictions, scores + + +def log_prob_from_logits(logits, reduce_axis=-1): + return logits - 
tf.reduce_logsumexp(logits, axis=reduce_axis, keepdims=True) + + +def top_kth_iterative(x, k): + """Compute the k-th top element of x on the last axis iteratively. + + This assumes values in x are non-negative, rescale if needed. + It is often faster than tf.nn.top_k for small k, especially if k < 30. + Note: this does not support back-propagation, it stops gradients! + + Args: + x: a Tensor of non-negative numbers of type float. + k: a python integer. + + Returns: + a float tensor of the same shape as x but with 1 on the last axis + that contains the k-th largest number in x. + """ + # The iterative computation is as follows: + # + # cur_x = x + # for _ in range(k): + # top_x = maximum of elements of cur_x on the last axis + # cur_x = cur_x where cur_x < top_x and 0 everywhere else (top elements) + # + # We encode this computation in a TF graph using tf.foldl, so the inner + # part of the above loop is called "next_x" and tf.foldl does the loop. + def next_x(cur_x, _): + top_x = tf.reduce_max(cur_x, axis=-1, keep_dims=True) + return cur_x * to_float(cur_x < top_x) + # We only do k-1 steps of the loop and compute the final max separately. + fin_x = tf.foldl(next_x, tf.range(k - 1), initializer=tf.stop_gradient(x), + parallel_iterations=2, back_prop=False) + return tf.stop_gradient(tf.reduce_max(fin_x, axis=-1, keep_dims=True)) + + +def top_1_tpu(inputs): + """find max and argmax over the last dimension. + + Works well on TPU + + Args: + inputs: A tensor with shape [..., depth] + + Returns: + values: a Tensor with shape [...] + indices: a Tensor with shape [...] + """ + inputs_max = tf.reduce_max(inputs, axis=-1, keepdims=True) + mask = tf.to_int32(tf.equal(inputs_max, inputs)) + index = tf.range(tf.shape(inputs)[-1]) * mask + return tf.squeeze(inputs_max, -1), tf.reduce_max(index, axis=-1) + + +def index_last_dim_with_indices(x, indices): + """Use indices to index into the last axis of x. + + This can be useful for recovering the actual probabilities of a sample from a + probability distribution. + + Args: + x: Tensor, n-d. + indices: Tensor, (n-1)-d, where the dimension sizes match the first (n-1) + dimensions of x. The values of indices will be used to index into the last + axis of x. + + Returns: + Tensor, (n-1)-d. + """ + assert len(x.shape) == len(indices.shape) + 1 + + x_shape = shape_list(x) + vocab_size = x_shape[-1] + + flat_x = tf.reshape(x, [list_product(x_shape[:-1]), vocab_size]) + flat_indices = tf.reshape(indices, [list_product(x_shape[:-1])]) + + idx = tf.stack( + [ + tf.range(tf.to_int64(shape_list(flat_indices)[0])), + tf.to_int64(flat_indices) + ], + axis=1) + flat_x_idx = tf.gather_nd(flat_x, idx) + + x_idx = tf.reshape(flat_x_idx, x_shape[:-1]) + + return x_idx + + +def should_generate_summaries(): + """Is this an appropriate context to generate summaries. 
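+
+  Summaries are skipped inside a tf.while_loop body, where they do not work
+  well, and inside variable scopes with reuse set, to avoid emitting a
+  duplicate summary for every data shard.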
+ + Returns: + a boolean + """ + name_scope = contrib.framework().get_name_scope() + if name_scope and "while/" in name_scope: + # Summaries don't work well within tf.while_loop() + return False + if tf.get_variable_scope().reuse: + # Avoid generating separate summaries for different data shards + return False + return True + + +def reshape_like(a, b): + """Reshapes a to match the shape of b in all but the last dimension.""" + ret = tf.reshape(a, tf.concat([tf.shape(b)[:-1], tf.shape(a)[-1:]], 0)) + if not tf.executing_eagerly(): + ret.set_shape(b.get_shape().as_list()[:-1] + a.get_shape().as_list()[-1:]) + return ret + + +def summarize_video(video, prefix, max_outputs=1): + """Summarize the video using image summaries starting with prefix.""" + video_shape = shape_list(video) + if len(video_shape) != 5: + raise ValueError("Assuming videos given as tensors in the format " + "[batch, time, height, width, channels] but got one " + "of shape: %s" % str(video_shape)) + if tf.executing_eagerly(): + return + if video.get_shape().as_list()[1] is None: + tf.summary.image( + "%s_last_frame" % prefix, + tf.cast(video[:, -1, :, :, :], tf.uint8), + max_outputs=max_outputs) + else: + for k in range(video_shape[1]): + tf.summary.image( + "%s_frame_%d" % (prefix, k), + tf.cast(video[:, k, :, :, :], tf.uint8), + max_outputs=max_outputs) + + +def cast_like(x, y): + """Cast x to y's dtype, if necessary.""" + x = tf.convert_to_tensor(x) + y = tf.convert_to_tensor(y) + + if x.dtype.base_dtype == y.dtype.base_dtype: + return x + + cast_x = tf.cast(x, y.dtype) + if cast_x.device != x.device: + x_name = "(eager Tensor)" + try: + x_name = x.name + except AttributeError: + pass + tf.logging.warning("Cast for %s may induce copy from '%s' to '%s'", x_name, + x.device, cast_x.device) + return cast_x + + +def make_even_size(x): + """Pad x to be even-sized on axis 1 and 2, but only if necessary.""" + x_shape = x.get_shape().as_list() + assert len(x_shape) > 2, "Only 3+-dimensional tensors supported." + shape = [dim if dim is not None else -1 for dim in x_shape] + new_shape = x_shape # To make sure constant shapes remain constant. + if x_shape[1] is not None: + new_shape[1] = 2 * int(math.ceil(x_shape[1] * 0.5)) + if x_shape[2] is not None: + new_shape[2] = 2 * int(math.ceil(x_shape[2] * 0.5)) + if shape[1] % 2 == 0 and shape[2] % 2 == 0: + return x + if shape[1] % 2 == 0: + x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2) + x.set_shape(new_shape) + return x + if shape[2] % 2 == 0: + x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1) + x.set_shape(new_shape) + return x + x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=1) + x, _ = pad_to_same_length(x, x, final_length_divisible_by=2, axis=2) + x.set_shape(new_shape) + return x + + +def sliced_gan_loss(input1, + input2, + discriminator, + num_vecs, + do_random_vecs=True, + do_tanh=True, + return_logits=False): + """Loss inspired by the sliced WGAN paper: https://arxiv.org/abs/1804.01947. + + Puts input1 and input2 through the provided discriminator to get logits. + Then, computes num_vecs random projections of the logits, sorts them on + the batch dimension and returns the L2 loss between the sorted vectors. + See the above-mentioned paper for the reasoning behind it. + + Args: + input1: first discriminator inputs. + input2: second discriminator inputs. + discriminator: inputs -> logits function. + num_vecs: how many random vectors to use for projections. 
+ do_random_vecs: whether to use random vectors or just tanh of the logits. + do_tanh: if true (default) we'll also just use tanh of the logits. + return_logits: Whether or not to return the logits. + + Returns: + The generator loss, i.e., the sliced approximation of the distance between + the projected distributions (warning: discriminator should maximize it). + """ + with tf.variable_scope("sliced_gan"): + with tf.variable_scope("discriminator"): + logits1 = discriminator(input1) + with tf.variable_scope("discriminator", reuse=True): + logits2 = discriminator(input2) + + if do_random_vecs: + random_vecs = tf.nn.l2_normalize( + tf.random_uniform([shape_list(logits1)[-1], num_vecs]), axis=0) + + def get_sorted_projections(x): + """Make projections of x and sort them on the batch dimension.""" + x = tf.reshape(x, [-1, shape_list(x)[-1]]) + batch_size = shape_list(x)[0] + if do_random_vecs and do_tanh: + n = tf.nn.l2_normalize(x, axis=1) + proj = tf.concat([tf.matmul(n, random_vecs), tf.tanh(n)], axis=1) + elif do_random_vecs: + n = tf.nn.l2_normalize(x, axis=1) + proj = tf.matmul(n, random_vecs) + else: + proj = tf.tanh(x) + proj = tf.transpose(proj, [1, 0]) # [num_vecs, batch] after this. + + if is_xla_compiled(): + proj_dtype = proj.dtype + proj = tf.cast(proj, tf.bfloat16) + + # Currently TPU only supports 1-D top_k calls. + map_fn = lambda x: tf.nn.top_k(x, k=batch_size, sorted=True)[0] + values = tf.map_fn(map_fn, proj) + + values = tf.cast(values, proj_dtype) + else: + values, _ = tf.nn.top_k(proj, k=batch_size, sorted=True) + + return values + + proj1 = get_sorted_projections(logits1) + proj2 = get_sorted_projections(logits2) + dist = tf.reduce_mean(tf.squared_difference(proj1, proj2)) + if return_logits: + return dist, logits1, logits2 + return dist + + +def lrelu(input_, leak=0.2, name="lrelu"): + return tf.maximum(input_, leak * input_, name=name) + + +def deep_discriminator(x, + batch_norm, + is_training, + filters=64, + filter_size=4, + stride=2, + output_size=1024): + """Discriminator architecture based on InfoGAN.""" + with tf.variable_scope( + "discriminator", initializer=tf.random_normal_initializer(stddev=0.02)): + batch_size, height, width = shape_list(x)[:3] # pylint: disable=unbalanced-tuple-unpacking + net = layers().Conv2D( + filters, filter_size, strides=stride, padding="SAME", name="conv1")(x) + net = lrelu(net) + net = layers().Conv2D( + 2 * filters, + filter_size, + strides=stride, + padding="SAME", + name="conv2")(net) + # [bs, h/4, w/4, 128] + if batch_norm: + net = layers().BatchNormalization( + training=is_training, momentum=0.999, name="d_bn2")(net) + net = lrelu(net) + size = height * width + x_shape = x.get_shape().as_list() + if x_shape[1] is None or x_shape[2] is None: + net = tf.reduce_mean(net, axis=[1, 2]) + else: + net = tf.reshape(net, [batch_size, size * 8]) + net = layers().Dense(output_size, name="d_fc3")(net) + if batch_norm: + net = layers().BatchNormalization( + training=is_training, momentum=0.999, name="d_bn3")(net) + net = lrelu(net) + return net + + +def instance_norm(x): + """Instance normalization layer.""" + with tf.variable_scope("instance_norm"): + epsilon = 1e-5 + mean, var = tf.nn.moments(x, [1, 2], keep_dims=True) + scale = tf.get_variable( + "scale", [x.get_shape()[-1]], + initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02)) + offset = tf.get_variable( + "offset", [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0)) + out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset + + return out + + +def 
general_conv(x, + num_filters=64, + filter_size=7, + stride=1, + stddev=0.02, + padding="VALID", + name="conv", + do_norm="instance", + do_relu=True, + relufactor=0): + """Generalized convolution layer.""" + with tf.variable_scope(name): + x = layers().Conv2D( + num_filters, + filter_size, + stride, + padding, + activation=None, + kernel_initializer=tf.truncated_normal_initializer(stddev=stddev), + bias_initializer=tf.constant_initializer(0.0))(x) + if do_norm == "layer": + x = layer_norm(x) + elif do_norm == "instance": + x = instance_norm(x) + + if do_relu: + if relufactor == 0: + x = tf.nn.relu(x, "relu") + else: + x = lrelu(x, leak=relufactor) + + return x + + +def patch_discriminator(x, filters=64, filter_size=5, n=4, + name="patch_discrim"): + """Patch descriminator.""" + with tf.variable_scope(name): + x_shape = shape_list(x) + spatial_dims = [x_shape[1] // 4, x_shape[2] // 4] + x = tf.random_crop(x, [x_shape[0]] + spatial_dims + [x_shape[3]]) + for i in range(n): + x = general_conv( + x=x, + num_filters=filters * 2**i, + filter_size=filter_size, + stride=2 if i != n - 1 else 1, + stddev=0.02, + padding="SAME", + name="c%d" % i, + do_norm="instance" if i != 0 else False, + do_relu=i != n - 1, + relufactor=0.2) + x = tf.reduce_mean(x, [1, 2]) + return x + + +def mean_with_attention(x, name, num_heads=4): + """Mean and attention to reduce spatial dimensions.""" + with tf.variable_scope(name): + shape = shape_list(x) + m = tf.reduce_mean(x, [1, 2]) + a = layers().Dense(num_heads, name="mean_attn")(x) + s = tf.reshape(a, [shape[0], -1, num_heads]) + s = tf.nn.softmax(s, axis=1) + s = tf.reshape(s, shape[:-1] + [1, num_heads]) + am = tf.reduce_mean(tf.expand_dims(x, axis=-1) * s, [1, 2]) + l = tf.concat([am, tf.expand_dims(m, axis=-1)], axis=-1) + return layers().Dense(2 * shape[-1], name="mean_attn_final")( + tf.reshape(l, [shape[0], (num_heads+1) * shape[-1]])) + + +def single_discriminator(x, filters=128, kernel_size=8, + strides=4, pure_mean=False): + """A simple single-layer convolutional discriminator.""" + with tf.variable_scope("discriminator"): + net = layers().Conv2D( + filters, kernel_size, strides=strides, padding="SAME", name="conv1")(x) + if pure_mean: + net = tf.reduce_mean(net, [1, 2]) + else: + net = mean_with_attention(net, "mean_with_attention") + return net + + +def double_discriminator(x, filters1=128, filters2=None, + kernel_size=8, strides=4, pure_mean=False): + """A convolutional discriminator with 2 layers and concatenated output.""" + if filters2 is None: + filters2 = 4 * filters1 + with tf.variable_scope("discriminator"): + batch_size = shape_list(x)[0] + net = layers().Conv2D( + filters1, kernel_size, strides=strides, padding="SAME", name="conv1")(x) + if pure_mean: + net1 = tf.reduce_mean(net, [1, 2]) + else: + net1 = mean_with_attention(net, "mean_with_attention1") + tf.reshape(net, [batch_size, -1]) + net = tf.nn.relu(net) + net = layers().Conv2D( + filters2, kernel_size, strides=strides, padding="SAME", + name="conv2")(net) + if pure_mean: + net2 = tf.reduce_mean(net, [1, 2]) + else: + net2 = mean_with_attention(net, "mean_with_attention2") + return tf.concat([net1, net2], axis=-1) + + +def upscale(inputs, f, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR): + """Upscaling the image by a factor of f.""" + height, width = shape_list(inputs)[1:3] # pylint: disable=unbalanced-tuple-unpacking + return tf.image.resize_images(inputs, (height * f, width * f), method) + + +def tpu_safe_image_summary(image): + if is_xla_compiled(): + # We only support float32 images 
at the moment due to casting complications. + if image.dtype != tf.float32: + image = to_float(image) + else: + image = tf.cast(image, tf.uint8) + return image + + +# This has been (shamefully) copied from +# GitHub tensorflow/models/blob/master/research/slim/nets/cyclegan.py +# +# tensorflow/models cannot be pip installed, and even if it were we don't want +# to depend on all the models in it. +# +# Therefore copying and forgoing any more bugfixes into it is the most +# expedient way to use this function. +def cyclegan_upsample(net, num_outputs, stride, method="conv2d_transpose"): + """Upsamples the given inputs. + + Args: + net: A Tensor of size [batch_size, height, width, filters]. + num_outputs: The number of output filters. + stride: A list of 2 scalars or a 1x2 Tensor indicating the scale, + relative to the inputs, of the output dimensions. For example, if kernel + size is [2, 3], then the output height and width will be twice and three + times the input size. + method: The upsampling method: 'nn_upsample_conv', + 'bilinear_upsample_conv', or 'conv2d_transpose'. + + Returns: + A Tensor which was upsampled using the specified method. + + Raises: + ValueError: if `method` is not recognized. + """ + + with tf.variable_scope("upconv"): + net_shape = tf.shape(net) + height = net_shape[1] + width = net_shape[2] + + # Reflection pad by 1 in spatial dimensions (axes 1, 2 = h, w) to make a + # 3x3 "valid" convolution produce an output with the same dimension as the + # input. + spatial_pad_1 = np.array([[0, 0], [1, 1], [1, 1], [0, 0]]) + + if method == "nn_upsample_conv": + net = tf.image.resize_nearest_neighbor( + net, [stride[0] * height, stride[1] * width]) + net = tf.pad(net, spatial_pad_1, "REFLECT") + net = layers().Conv2D( + num_outputs, (3, 3), activation=tf.nn.relu)(net) + elif method == "bilinear_upsample_conv": + net = tf.image.resize_bilinear(net, + [stride[0] * height, stride[1] * width]) + net = tf.pad(net, spatial_pad_1, "REFLECT") + net = layers().Conv2D( + num_outputs, (3, 3), activation=tf.nn.relu)(net) + elif method == "conv2d_transpose": + # This corrects 1 pixel offset for images with even width and height. + # conv2d is left aligned and conv2d_transpose is right aligned for even + # sized images (while doing "SAME" padding). + # Note: This doesn"t reflect actual model in paper. 
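+      # The [:, 1:, 1:, :] slice below drops the first output row and column
+      # to undo that one-pixel misalignment.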
+ net = layers().Conv2DTranspose( + num_outputs, (3, 3), strides=stride, activation=tf.nn.relu)(net) + net = net[:, 1:, 1:, :] + else: + raise ValueError("Unknown method: [%s]" % method) + + return net + + +def weight_targeting(w, k): + """Weight-level magnitude pruning.""" + k = tf.to_int32(k) + w_shape = shape_list(w) + size = tf.to_int32(tf.reduce_prod(w_shape[:-1])) + w = tf.reshape(w, [size, w_shape[-1]]) + + transpose_w = tf.transpose(w) + thres = contrib.framework().sort(tf.abs(transpose_w), axis=1)[:, k] + mask = to_float(thres[None, :] >= tf.abs(w)) + + return tf.reshape(mask, w_shape) + + +def unit_targeting(w, k): + """Unit-level magnitude pruning.""" + k = tf.to_int32(k) + w_shape = shape_list(w) + size = tf.to_int32(tf.reduce_prod(w_shape[:-1])) + w = tf.reshape(w, [size, w_shape[-1]]) + + norm = tf.norm(w, axis=0) + thres = contrib.framework().sort(norm, axis=0)[k] + mask = to_float(thres >= norm)[None, :] + mask = tf.tile(mask, [size, 1]) + + return tf.reshape(mask, w_shape) + + +def td_conv(inputs, + filters, + kernel_size, + targeting_count, + targeting_fn, + keep_prob, + is_training, + do_prune=True, + strides=(1, 1), + padding="valid", + data_format="channels_last", + dilation_rate=(1, 1), + activation=None, + use_bias=True, + kernel_initializer=None, + bias_initializer=tf.zeros_initializer(), + name=None, + reuse=None): + """Apply targeted dropout to the weights of a convolution.""" + with tf.variable_scope(name, default_name="td_conv", reuse=reuse): + nhwc = data_format == "channels_last" + in_dim = shape_list(inputs)[-1] if nhwc else shape_list(inputs)[1] + + kernel_shape = [kernel_size, kernel_size, in_dim, filters] + w = tf.get_variable( + "DW", shape=kernel_shape, initializer=kernel_initializer) + if use_bias: + b = tf.get_variable("b", shape=[filters], initializer=bias_initializer) + + if keep_prob < 1.0: + w = targeted_dropout( + w, + targeting_count, + keep_prob, + targeting_fn, + is_training, + do_prune=do_prune) + + if isinstance(strides, int): + strides = [strides, strides] + if isinstance(dilation_rate, int): + dilation_rate = [dilation_rate, dilation_rate] + + if nhwc: + strides = [1, strides[0], strides[1], 1] + dilation_rate = [1, dilation_rate[0], dilation_rate[1], 1] + else: + strides = [1, 1, strides[0], strides[1]] + dilation_rate = [1, 1, dilation_rate[0], dilation_rate[1]] + + y = tf.nn.conv2d( + inputs, + w, + strides, + padding, + data_format="NHWC" if nhwc else "NCHW", + dilations=dilation_rate, + name=None) + + if use_bias: + y += b + + if activation: + y = activation(y) + + return y + + +def targeted_dropout(inputs, + k, + keep_prob, + targeting_fn, + is_training, + do_prune=False): + """Applies targeted dropout. + + Applies dropout at a rate of `1 - keep_prob` to only those elements of + `inputs` marked by `targeting_fn`. See below and paper for more detail: + + "Targeted Dropout for Posthoc Pruning" Aidan N. Gomez, Ivan Zhang, + Kevin Swersky, Yarin Gal, and Geoffrey E. Hinton. + + Args: + inputs: Tensor, inputs to apply targeted dropout to. + k: Scalar Tensor or python scalar, sets the number of elements to target in + `inputs`. Must be within `[0, tf.shape(x)[-1]]` and compatible with + second argument of `targeting_fn`. + keep_prob: Scalar Tensor, passed as `tf.nn.dropout`'s `keep_prob` argument. + targeting_fn: callable `fn(inputs, k) -> Boolean Tensor`, produces a + boolean mask the same shape as `inputs` where True indicates an element + will be dropped, and False not. + is_training: bool, indicates whether currently training. 
+ do_prune: bool, indicates whether to prune the `k * (1 - keep_prob)` + elements of `inputs` expected to be dropped each forwards pass. + + Returns: + Tensor, same shape and dtype as `inputs`. + """ + if not is_training and do_prune: + k = tf.round(to_float(k) * to_float(1. - keep_prob)) + + mask = targeting_fn(inputs, k) + mask = tf.cast(mask, inputs.dtype) + + if is_training: + return inputs * (1 - mask) + tf.nn.dropout(inputs, keep_prob) * mask + elif do_prune: + return inputs * (1 - mask) + else: + return inputs + + +def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0): + """KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1). + + Args: + mu: mu parameter of the distribution. + log_var: log(var) parameter of the distribution. + mu_p: optional mu from a learned prior distribution + log_var_p: optional log(var) from a learned prior distribution + Returns: + the KL loss. + """ + + batch_size = shape_list(mu)[0] + prior_distribution = tfp.distributions.Normal( + mu_p, tf.exp(tf.multiply(0.5, log_var_p))) + posterior_distribution = tfp.distributions.Normal( + mu, tf.exp(tf.multiply(0.5, log_var))) + kld = tfp.distributions.kl_divergence(posterior_distribution, + prior_distribution) + return tf.reduce_sum(kld) / to_float(batch_size) + + +def sparse_equals_constant(constant, tensor): + return tf.SparseTensor( + indices=tensor.indices, + dense_shape=tensor.dense_shape, + values=tf.equal(tensor.values, constant)) + + +def sparse_expand_dims(tensor, current_num_dims, axis=0): + if axis == -1: + axis = current_num_dims + + new_col = tf.zeros([tf.shape(tensor.indices)[0]], dtype=tf.int64) + cols = tf.unstack(tensor.indices, axis=1, num=current_num_dims) + shape = tf.unstack(tensor.dense_shape, num=current_num_dims) + new_indices = tf.stack(cols[:axis] + [new_col] + cols[axis:], axis=1) + return tf.SparseTensor( + indices=new_indices, + values=tensor.values, + dense_shape=tf.stack(shape[:axis] + [1] + shape[axis:])) + + +def sparse_add_constant(constant, tensor): + return tf.SparseTensor( + indices=tensor.indices, + values=constant + tensor.values, + dense_shape=tensor.dense_shape) + + +def sparse_eye(size): + indices = tf.cast(tf.stack([tf.range(size), tf.range(size)]), tf.int64) + values = tf.ones(size) + dense_shape = [tf.cast(size, tf.int64), tf.cast(size, tf.int64)] + + return tf.SparseTensor( + indices=indices, values=values, dense_shape=dense_shape) + + +# modification from https://github.com/tensorflow/tensorflow/pull/21276 +# without special initialization for g +class WeightNorm(tf.keras.layers.Wrapper): + """Decouple weight magnitude and direction. + + This wrapper reparameterizes a layer by decoupling the weight's + magnitude and direction. This speeds up convergence by improving the + conditioning of the optimization problem. + + Weight Normalization: A Simple Reparameterization to Accelerate + Training of Deep Neural Networks: https://arxiv.org/abs/1602.07868 + Tim Salimans, Diederik P. Kingma (2016) + + WeightNorm wrapper works for keras and tf layers. + + ```python + net = WeightNorm(tf.keras.layers.Conv2D(2, 2, activation='relu'), + input_shape=(32, 32, 3), data_init=True)(x) + net = WeightNorm(tf.keras.layers.Conv2D(16, 5, activation='relu'), + data_init=True) + net = WeightNorm(tf.keras.layers.Dense(120, activation='relu'), + data_init=True)(net) + net = WeightNorm(tf.keras.layers.Dense(n_classes), + data_init=True)(net) + ``` + + Arguments: + layer: a layer instance. 
+ data_init: If `True` use data dependent variable initialization + + Raises: + ValueError: If not initialized with a `Layer` instance. + ValueError: If `Layer` does not contain a `kernel` of weights + NotImplementedError: If `data_init` is True and running graph execution + """ + + def __init__(self, layer, data_init=False, **kwargs): + if not isinstance(layer, tf.keras.layers.Layer): + raise ValueError( + "Please initialize `WeightNorm` layer with a " + "`Layer` instance. You passed: {input}".format(input=layer)) + + super(WeightNorm, self).__init__(layer, **kwargs) + self._track_trackable(layer, name="layer") + + def _compute_weights(self): + """Generate weights with normalization.""" + with tf.variable_scope("compute_weights"): + self.layer.kernel = tf.nn.l2_normalize( + self.layer.v, axis=self.norm_axes) * self.layer.g + + def _init_norm(self, weights): + """Set the norm of the weight vector.""" + with tf.variable_scope("init_norm"): + flat = tf.reshape(weights, [-1, self.layer_depth]) + return tf.reshape(tf.norm(flat, axis=0), (self.layer_depth,)) + + def _data_dep_init(self, inputs): + """Data dependent initialization for eager execution.""" + + with tf.variable_scope("data_dep_init"): + # Generate data dependent init values + activation = self.layer.activation + self.layer.activation = None + x_init = self.layer.call(inputs) + m_init, v_init = tf.moments(x_init, self.norm_axes) + scale_init = 1. / tf.sqrt(v_init + 1e-10) + + # Assign data dependent init values + self.layer.g = self.layer.g * scale_init + self.layer.bias = (-m_init * scale_init) + self.layer.activation = activation + self.initialized = True + + def build(self, input_shape=None): + """Build `Layer`.""" + if not self.layer.built: + self.layer.build(input_shape) + self.layer.built = False + + if not hasattr(self.layer, "kernel"): + raise ValueError("`WeightNorm` must wrap a layer that" + " contains a `kernel` for weights") + + # The kernel's filter or unit dimension is -1 + self.layer_depth = int(self.layer.kernel.shape[-1]) + self.norm_axes = list(range(self.layer.kernel.shape.ndims - 1)) + + self.layer.v = self.layer.kernel + self.layer.g = self.layer.add_variable( + name="g", + shape=(self.layer_depth,), + initializer=tf.ones_initializer, + dtype=self.layer.kernel.dtype, + trainable=True) + + # with ops.control_dependencies([self.layer.g.assign( + # self._init_norm(self.layer.v))]): + # self._compute_weights() + self._compute_weights() + + self.layer.built = True + self.input_spec = self.layer.input_spec + + super(WeightNorm, self).build() + self.built = True + + def call(self, inputs): + """Call `Layer`.""" + # if context.executing_eagerly(): + # if not self.initialized: + # self._data_dep_init(inputs) + self._compute_weights() # Recompute weights for each forward pass + + output = self.layer.call(inputs) + return output + + def compute_output_shape(self, input_shape): + return tf.TensorShape( + self.layer.compute_output_shape(input_shape).as_list()) diff --git a/tensor2tensor/layers/common_layers_test.py b/tensor2tensor/layers/common_layers_test.py new file mode 100644 index 000000000..897aae2db --- /dev/null +++ b/tensor2tensor/layers/common_layers_test.py @@ -0,0 +1,984 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for common layers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import kfac +import numpy as np + +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import test_utils + +import tensorflow.compat.v1 as tf + +tf.enable_eager_execution() + + +class CommonLayersTest(parameterized.TestCase, tf.test.TestCase): + + @test_utils.run_in_graph_and_eager_modes() + def testIndexLastDimWithIndices(self): + x = np.array([[2., 3., 4., 5.], + [6., 7., 8., 9.]]) + indices = np.array([2, 0]) + x_idx = common_layers.index_last_dim_with_indices(x, indices) + + expected = np.array([4., 6.]) + self.assertAllEqual(expected, self.evaluate(x_idx)) + + @test_utils.run_in_graph_and_eager_modes() + def testSaturatingSigmoid(self): + x = np.array([-120.0, -100.0, 0.0, 100.0, 120.0], dtype=np.float32) + y = common_layers.saturating_sigmoid(tf.constant(x)) + res = self.evaluate(y) + self.assertAllClose(res, [0.0, 0.0, 0.5, 1.0, 1.0]) + + @test_utils.run_in_graph_and_eager_modes() + def testFlatten4D3D(self): + x = np.random.randint(1, high=9, size=(3, 5, 2)) + y = common_layers.flatten4d3d(common_layers.embedding(x, 10, 7)) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (3, 5 * 2, 7)) + + @test_utils.run_in_graph_and_eager_modes() + def testEmbedding(self): + x = np.random.randint(1, high=9, size=(3, 5)) + y = common_layers.embedding(x, 10, 16) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (3, 5, 16)) + + @test_utils.run_in_graph_mode_only() + def testShakeShake(self): + x = np.random.rand(5, 7) + with self.test_session() as session: + x = tf.constant(x, dtype=tf.float32) + y = common_layers.shakeshake([x, x, x, x, x]) + inp, res = session.run([x, y]) + self.assertAllClose(res, inp) + + @test_utils.run_in_graph_and_eager_modes() + def testConv(self): + x = np.random.rand(5, 7, 1, 11) + y = common_layers.conv(tf.constant(x, dtype=tf.float32), 13, (3, 1)) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 5, 1, 13)) + + @test_utils.run_in_graph_and_eager_modes() + def testConv1d(self): + x = np.random.rand(5, 7, 11) + y = common_layers.conv1d(tf.constant(x, dtype=tf.float32), 13, 1) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 7, 13)) + + @test_utils.run_in_graph_and_eager_modes() + def testSeparableConv(self): + x = np.random.rand(5, 7, 1, 11) + y = common_layers.separable_conv( + tf.constant(x, dtype=tf.float32), 13, (3, 1)) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 5, 1, 13)) + + @test_utils.run_in_graph_and_eager_modes() + def testSubSeparableConv(self): + for sep in [0, 1, 2, 4]: + x = np.random.rand(5, 7, 1, 12) + with tf.variable_scope("sep_%d" % sep): + y = common_layers.subseparable_conv( + tf.constant(x, dtype=tf.float32), 16, (3, 1), separability=sep) 
+ self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 5, 1, 16)) + + @test_utils.run_in_graph_and_eager_modes() + def testConvBlock(self): + x = np.random.rand(5, 7, 1, 11) + y = common_layers.conv_block( + tf.constant(x, dtype=tf.float32), + 13, [(1, (3, 3)), (1, (3, 3))], + padding="SAME", + normalizer_fn=common_layers.noam_norm) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 7, 1, 13)) + + @test_utils.run_in_graph_and_eager_modes() + def testSeparableConvBlock(self): + x = np.random.rand(5, 7, 1, 11) + y = common_layers.separable_conv_block( + tf.constant(x, dtype=tf.float32), + 13, [(1, (3, 3)), (1, (3, 3))], + padding="SAME") + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 7, 1, 13)) + + @test_utils.run_in_graph_and_eager_modes() + def testSubSeparableConvBlock(self): + for sep in [0, 1, 2, 4]: + x = np.random.rand(5, 7, 1, 12) + with tf.variable_scope("sep_%d" % sep): + y = common_layers.subseparable_conv_block( + tf.constant(x, dtype=tf.float32), + 16, [(1, (3, 3)), (1, (3, 3))], + padding="SAME", + separability=sep) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 7, 1, 16)) + + @test_utils.run_in_graph_and_eager_modes() + def testPool(self): + x = np.random.rand(5, 8, 1, 11) + y = common_layers.pool( + tf.constant(x, dtype=tf.float32), (2, 2), "AVG", "SAME") + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 8, 1, 11)) + + @test_utils.run_in_graph_and_eager_modes() + def testConvBlockDownsample(self): + x = np.random.rand(5, 7, 1, 11) + y = common_layers.conv_block_downsample( + tf.constant(x, dtype=tf.float32), (3, 1), (2, 1), "SAME") + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 4, 1, 27)) + + @test_utils.run_in_graph_and_eager_modes() + def testGetTimingSignal(self): + length = 7 + num_timescales = 10 + a = common_layers.get_timing_signal(length, num_timescales=num_timescales) + res = self.evaluate(a) + self.assertEqual(res.shape, (length, 2 * num_timescales)) + + @test_utils.run_in_graph_and_eager_modes() + def testAddTimingSignal(self): + batch = 5 + length = 7 + height = 3 + depth = 35 + x = np.random.rand(batch, length, height, depth) + a = common_layers.add_timing_signal(tf.constant(x, dtype=tf.float32)) + res = self.evaluate(a) + self.assertEqual(res.shape, (batch, length, height, depth)) + + @test_utils.run_in_graph_and_eager_modes() + def testConvGRU(self): + x = np.random.rand(5, 7, 3, 11) + y = common_layers.conv_gru(tf.constant(x, dtype=tf.float32), (1, 3), 11) + z = common_layers.conv_gru( + tf.constant(x, dtype=tf.float32), (1, 3), 11, padding="LEFT") + self.evaluate(tf.global_variables_initializer()) + res1 = self.evaluate(y) + res2 = self.evaluate(z) + self.assertEqual(res1.shape, (5, 7, 3, 11)) + self.assertEqual(res2.shape, (5, 7, 3, 11)) + + @test_utils.run_in_graph_mode_only + def testSRU(self): + x = np.random.rand(5, 7, 3, 11) + with self.test_session() as session: + y = common_layers.sru(tf.constant(x, dtype=tf.float32)) + session.run(tf.global_variables_initializer()) + res = session.run(y) + self.assertEqual(res.shape, (5, 7, 3, 11)) + + @test_utils.run_in_graph_and_eager_modes() + def testLayerNorm(self): + x = np.random.rand(5, 7, 11) + y = common_layers.layer_norm(tf.constant(x, 
dtype=tf.float32), 11) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 7, 11)) + + + @test_utils.run_in_graph_and_eager_modes() + def testGroupNorm(self): + x = np.random.rand(5, 7, 3, 16) + y = common_layers.group_norm(tf.constant(x, dtype=tf.float32)) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 7, 3, 16)) + + @test_utils.run_in_graph_and_eager_modes() + def testConvLSTM(self): + x = np.random.rand(5, 7, 11, 13) + y = common_layers.conv_lstm(tf.constant(x, dtype=tf.float32), (1, 3), 13) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(y) + self.assertEqual(res.shape, (5, 7, 11, 13)) + + @test_utils.run_in_graph_and_eager_modes() + def testPadToSameLength(self): + x1 = np.random.rand(5, 7, 11) + x2 = np.random.rand(5, 9, 11) + a, b = common_layers.pad_to_same_length( + tf.constant(x1, dtype=tf.float32), tf.constant(x2, dtype=tf.float32)) + c, d = common_layers.pad_to_same_length( + tf.constant(x1, dtype=tf.float32), + tf.constant(x2, dtype=tf.float32), + final_length_divisible_by=4) + res1, res2 = self.evaluate([a, b]) + res1a, res2a = self.evaluate([c, d]) + self.assertEqual(res1.shape, (5, 9, 11)) + self.assertEqual(res2.shape, (5, 9, 11)) + self.assertEqual(res1a.shape, (5, 12, 11)) + self.assertEqual(res2a.shape, (5, 12, 11)) + + @test_utils.run_in_graph_and_eager_modes() + def testShiftLeft(self): + x1 = np.zeros((5, 7, 1, 11)) + x1[:, 0, :] = np.ones_like(x1[:, 0, :]) + expected = np.zeros((5, 7, 1, 11)) + expected[:, 1, :] = np.ones_like(expected[:, 1, :]) + a = common_layers.shift_right(tf.constant(x1, dtype=tf.float32)) + actual = self.evaluate(a) + self.assertAllEqual(actual, expected) + + @test_utils.run_in_graph_and_eager_modes() + def testConvStride2MultiStep(self): + x1 = np.random.rand(5, 32, 16, 11) + a = common_layers.conv_stride2_multistep( + tf.constant(x1, dtype=tf.float32), 4, 16) + self.evaluate(tf.global_variables_initializer()) + actual = self.evaluate(a[0]) + self.assertEqual(actual.shape, (5, 2, 1, 16)) + + @test_utils.run_in_graph_and_eager_modes() + def testDeconvStride2MultiStep(self): + x1 = np.random.rand(5, 2, 1, 11) + a = common_layers.deconv_stride2_multistep( + tf.constant(x1, dtype=tf.float32), 4, 16) + self.evaluate(tf.global_variables_initializer()) + actual = self.evaluate(a) + self.assertEqual(actual.shape, (5, 32, 1, 16)) + + @test_utils.run_in_graph_and_eager_modes() + def testApplyNormLayer(self): + x1 = np.random.rand(5, 2, 1, 11) + x2 = common_layers.apply_norm( + tf.constant(x1, dtype=tf.float32), "layer", depth=11, epsilon=1e-6) + self.evaluate(tf.global_variables_initializer()) + actual = self.evaluate(x2) + self.assertEqual(actual.shape, (5, 2, 1, 11)) + + @test_utils.run_in_graph_and_eager_modes() + def testApplyNormNoam(self): + x1 = np.random.rand(5, 2, 1, 11) + x2 = common_layers.apply_norm( + tf.constant(x1, dtype=tf.float32), "noam", depth=11, epsilon=1e-6) + self.evaluate(tf.global_variables_initializer()) + actual = self.evaluate(x2) + self.assertEqual(actual.shape, (5, 2, 1, 11)) + + @test_utils.run_in_graph_and_eager_modes() + def testApplyNormBatch(self): + x1 = np.random.rand(5, 2, 1, 11) + x2 = common_layers.apply_norm( + tf.constant(x1, dtype=tf.float32), "batch", depth=11, epsilon=1e-6) + self.evaluate(tf.global_variables_initializer()) + actual = self.evaluate(x2) + self.assertEqual(actual.shape, (5, 2, 1, 11)) + + @test_utils.run_in_graph_and_eager_modes() + def 
testApplyNormNone(self): + x1 = np.random.rand(5, 2, 1, 11) + x2 = common_layers.apply_norm( + tf.constant(x1, dtype=tf.float32), "none", depth=11, epsilon=1e-6) + self.evaluate(tf.global_variables_initializer()) + actual = self.evaluate(x2) + self.assertEqual(actual.shape, (5, 2, 1, 11)) + self.assertAllClose(actual, x1, atol=1e-03) + + + @test_utils.run_in_graph_mode_only() + def testDenseWithLayerCollection(self): + with tf.variable_scope("test_layer_collection"): + x1 = tf.zeros([3, 4], tf.float32) + layer_collection = kfac.LayerCollection() + common_layers.dense( + x1, units=10, layer_collection=layer_collection, name="y1") + self.assertLen(layer_collection.get_blocks(), 1) + + # 3D inputs. + x2 = tf.zeros([3, 4, 5], tf.float32) + common_layers.dense( + x2, units=10, layer_collection=layer_collection, name="y2") + self.assertLen(layer_collection.get_blocks(), 2) + + def testGlobalPool1d(self): + x1 = np.random.rand(5, 4, 11) + no_mask = np.ones((5, 4)) + full_mask = np.zeros((5, 4)) + + x1_ = tf.Variable(x1, dtype=tf.float32) + no_mask_ = tf.Variable(no_mask, dtype=tf.float32) + full_mask_ = tf.Variable(full_mask, dtype=tf.float32) + + none_mask_max = common_layers.global_pool_1d(x1_) + no_mask_max = common_layers.global_pool_1d(x1_, mask=no_mask_) + result1 = tf.reduce_sum(none_mask_max - no_mask_max) + + full_mask_max = common_layers.global_pool_1d(x1_, mask=full_mask_) + result2 = tf.reduce_sum(full_mask_max) + + none_mask_avr = common_layers.global_pool_1d(x1_, "AVR") + no_mask_avr = common_layers.global_pool_1d(x1_, "AVR", no_mask_) + result3 = tf.reduce_sum(none_mask_avr - no_mask_avr) + + full_mask_avr = common_layers.global_pool_1d(x1_, "AVR", full_mask_) + result4 = tf.reduce_sum(full_mask_avr) + + self.evaluate(tf.global_variables_initializer()) + actual = self.evaluate([result1, result2, result3, result4]) + self.assertAllEqual(actual[:3], [0.0, 0.0, 0.0]) + + def testLinearSetLayer(self): + x1 = np.random.rand(5, 4, 11) + cont = np.random.rand(5, 13) + x1_ = tf.Variable(x1, dtype=tf.float32) + cont_ = tf.Variable(cont, dtype=tf.float32) + + simple_ff = common_layers.linear_set_layer(32, x1_) + cont_ff = common_layers.linear_set_layer(32, x1_, context=cont_) + + self.evaluate(tf.global_variables_initializer()) + actual = self.evaluate([simple_ff, cont_ff]) + self.assertEqual(actual[0].shape, (5, 4, 32)) + self.assertEqual(actual[1].shape, (5, 4, 32)) + + def testRavanbakhshSetLayer(self): + x1 = np.random.rand(5, 4, 11) + x1_ = tf.Variable(x1, dtype=tf.float32) + layer = common_layers.ravanbakhsh_set_layer(32, x1_) + self.evaluate(tf.global_variables_initializer()) + actual = self.evaluate(layer) + self.assertEqual(actual.shape, (5, 4, 32)) + + @test_utils.run_in_graph_and_eager_modes() + def testTopKthIterativeShape(self): + x = np.random.rand(5, 2, 1, 12) + y = common_layers.top_kth_iterative(tf.constant(x, dtype=tf.float32), 3) + actual = self.evaluate(y) + self.assertEqual(actual.shape, (5, 2, 1, 1)) + + @test_utils.run_in_graph_and_eager_modes() + def testTopKthIterativeValue(self): + x = [1.0, 2.0, 3.0, 4.0] + y = common_layers.top_kth_iterative(tf.constant(x, dtype=tf.float32), 3) + actual = self.evaluate(y) + self.assertEqual(int(actual[0]), 2.0) + + @test_utils.run_in_graph_and_eager_modes() + def testBReLU(self): + x = np.random.rand(5, 2, 1, 12) + y = common_layers.brelu(tf.constant(x, dtype=tf.float32)) + actual = self.evaluate(y) + self.assertEqual(actual.shape, (5, 2, 1, 12)) + + @test_utils.run_in_graph_and_eager_modes() + def testBELU(self): + x = 
np.random.rand(5, 2, 1, 12) + y = common_layers.belu(tf.constant(x, dtype=tf.float32)) + actual = self.evaluate(y) + self.assertEqual(actual.shape, (5, 2, 1, 12)) + + @test_utils.run_in_graph_and_eager_modes() + def testNAC(self): + x = np.random.rand(5, 2, 1, 12) + y = common_layers.nac(tf.constant(x, dtype=tf.float32), 14) + self.evaluate(tf.global_variables_initializer()) + actual = self.evaluate(y) + self.assertEqual(actual.shape, (5, 2, 1, 14)) + + @test_utils.run_in_graph_and_eager_modes() + def testNALU(self): + x = np.random.rand(5, 2, 1, 12) + y = common_layers.nalu(tf.constant(x, dtype=tf.float32), 14) + self.evaluate(tf.global_variables_initializer()) + actual = self.evaluate(y) + self.assertEqual(actual.shape, (5, 2, 1, 14)) + + @test_utils.run_in_graph_and_eager_modes() + def testNALUzeros(self): + x = np.random.rand(5, 2, 1, 12) + y = common_layers.nalu(tf.zeros_like(x, dtype=tf.float32), 14) + self.evaluate(tf.global_variables_initializer()) + actual = self.evaluate(y) + self.assertTrue(np.all(np.isfinite(actual))) + self.assertEqual(actual.shape, (5, 2, 1, 14)) + + @test_utils.run_in_graph_mode_only + def testPaddingCrossEntropyFactored(self): + vocab_size = 19 + rows = 5 + cols = 4 + depth = 11 + label_smoothing = 0.1 + features = np.random.rand(rows, cols, depth) + weights = np.random.rand(vocab_size, depth) + labels = np.random.randint(0, vocab_size - 1, size=(rows, cols)) + with self.test_session() as session: + features = tf.to_float(features) + weights = tf.to_float(weights) + labels = tf.to_int32(labels) + logits = tf.matmul( + tf.reshape(features, [rows * cols, depth]), weights, transpose_b=True) + logits = tf.reshape(logits, [rows, cols, vocab_size]) + loss_num, loss_den = common_layers.padded_cross_entropy( + logits, labels, label_smoothing=label_smoothing, reduce_sum=False) + factored_logits = common_layers.FactoredTensor(features, weights) + loss_num_f, loss_den_f = common_layers.padded_cross_entropy_factored( + factored_logits, + labels=labels, + label_smoothing=label_smoothing, + reduce_sum=False) + num, den, num_f, den_f = session.run( + [loss_num, loss_den, loss_num_f, loss_den_f]) + self.assertEqual(num.shape, (rows, cols)) + self.assertEqual(den.shape, (rows, cols)) + self.assertEqual(num_f.shape, (rows, cols)) + self.assertEqual(den_f.shape, (rows, cols)) + self.assertAllClose(num, num_f) + self.assertAllClose(den, den_f) + + @test_utils.run_in_graph_mode_only + def testPaddingCrossEntropyFactoredGrad(self): + vocab_size = 19 + rows = 5 + cols = 4 + depth = 11 + label_smoothing = 0.1 + features = np.random.rand(rows, cols, depth) + weights = np.random.rand(vocab_size, depth) + labels = np.random.randint(0, vocab_size - 1, size=(rows, cols)) + with self.test_session() as session: + features = tf.to_float(features) + weights = tf.to_float(weights) + labels = tf.to_int32(labels) + logits = tf.matmul( + tf.reshape(features, [rows * cols, depth]), weights, transpose_b=True) + logits = tf.reshape(logits, [rows, cols, vocab_size]) + loss_num, loss_den = common_layers.padded_cross_entropy( + logits, labels, label_smoothing=label_smoothing, reduce_sum=False) + factored_logits = common_layers.FactoredTensor(features, weights) + loss_num_factored, loss_den_factored = ( + common_layers.padded_cross_entropy_factored( + factored_logits, + labels=labels, + label_smoothing=label_smoothing, + reduce_sum=False)) + df, dw = tf.gradients(ys=[loss_num, loss_den], xs=[features, weights]) + df_factored, dw_factored = tf.gradients( + ys=[loss_num_factored, loss_den_factored], 
xs=[features, weights]) + actual_df, actual_dw, actual_df_factored, actual_dw_factored = ( + session.run([df, dw, df_factored, dw_factored])) + self.assertEqual(actual_df.shape, (rows, cols, depth)) + self.assertEqual(actual_dw.shape, (vocab_size, depth)) + self.assertEqual(actual_df_factored.shape, (rows, cols, depth)) + self.assertEqual(actual_dw_factored.shape, (vocab_size, depth)) + self.assertAllClose(actual_df, actual_df_factored) + self.assertAllClose(actual_dw, actual_dw_factored) + + @parameterized.parameters( + (2, 4, 4, 5, True), + (2, 4, 4, 5, False), + (1, 16, 16, 1, True), + (1, 16, 16, 1, False), + ) + def testDmlLoss(self, batch, height, width, num_mixtures, reduce_sum): + channels = 3 + pred = tf.random_normal([batch, height, width, num_mixtures * 10]) + labels = tf.random_uniform([batch, height, width, channels], + minval=0, maxval=256, dtype=tf.int32) + actual_loss_num, actual_loss_den = common_layers.dml_loss( + pred=pred, labels=labels, reduce_sum=reduce_sum) + actual_loss = actual_loss_num / actual_loss_den + + real_labels = common_layers.convert_rgb_to_symmetric_real(labels) + expected_loss = common_layers.discretized_mix_logistic_loss( + pred=pred, labels=real_labels) / channels + if reduce_sum: + expected_loss = tf.reduce_mean(expected_loss) + + actual_loss_val, expected_loss_val = self.evaluate( + [actual_loss, expected_loss]) + self.assertAllClose(actual_loss_val, expected_loss_val) + + @test_utils.run_in_graph_and_eager_modes() + def testWeightsMultiProblemAll(self): + labels = tf.constant(np.array([[12, 15, 1, 20, 100], + [67, 1, 34, 45, 124], + [78, 2, 34, 18, 29], + [78, 123, 55, 1, 33], + [1, 18, 22, 36, 59]]), dtype=tf.int32) + taskid = 1 + expected_mask = np.array([[1, 1, 1, 1, 1], + [1, 1, 1, 1, 1], + [0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [1, 1, 1, 1, 1]]) + actual_mask = common_layers.weights_multi_problem_all(labels, taskid) + actual_mask_eval = self.evaluate(actual_mask) + self.assertAllClose(expected_mask, actual_mask_eval) + + @test_utils.run_in_graph_and_eager_modes() + def testWeightsMultiProblem(self): + labels = tf.constant(np.array([[12, 15, 1, 20, 100], + [67, 1, 34, 45, 124], + [78, 2, 34, 18, 29], + [78, 123, 55, 1, 33], + [1, 18, 22, 36, 59]]), dtype=tf.int32) + taskid = 1 + expected_mask = np.array([[0, 0, 0, 1, 1], + [0, 0, 1, 1, 1], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 1], + [0, 1, 1, 1, 1]]) + actual_mask = common_layers.weights_multi_problem(labels, taskid) + actual_mask_eval = self.evaluate(actual_mask) + self.assertAllClose(expected_mask, actual_mask_eval) + + @test_utils.run_in_graph_and_eager_modes() + def testDiscretizedMixLogisticLoss(self): + batch = 2 + height = 4 + width = 4 + channels = 3 + num_mixtures = 5 + logits = tf.concat( # assign all probability mass to first component + [tf.ones([batch, height, width, 1]) * 1e8, + tf.zeros([batch, height, width, num_mixtures - 1])], + axis=-1) + locs = tf.random_uniform([batch, height, width, num_mixtures * 3], + minval=-.9, maxval=.9) + log_scales = tf.random_uniform([batch, height, width, num_mixtures * 3], + minval=-1., maxval=1.) + coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3])) + pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1) + + # Test labels that don't satisfy edge cases where 8-bit value is 0 or 255. 
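+    # (Labels are drawn strictly inside (-1, 1), so the 0/255 edge-case
+    #  branches of the loss are never taken and the plain
+    #  log(cdf_plus - cdf_min) term computed below matches the full loss.)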
+ labels = tf.random_uniform([batch, height, width, channels], + minval=-.9, maxval=.9) + locs_0 = locs[..., :3] + log_scales_0 = log_scales[..., :3] + centered_labels = labels - locs_0 + inv_stdv = tf.exp(-log_scales_0) + plus_in = inv_stdv * (centered_labels + 1. / 255.) + min_in = inv_stdv * (centered_labels - 1. / 255.) + cdf_plus = tf.nn.sigmoid(plus_in) + cdf_min = tf.nn.sigmoid(min_in) + expected_loss = -tf.reduce_sum(tf.log(cdf_plus - cdf_min), axis=-1) + + actual_loss = common_layers.discretized_mix_logistic_loss( + pred=pred, labels=labels) + actual_loss_val, expected_loss_val = self.evaluate( + [actual_loss, expected_loss]) + self.assertAllClose(actual_loss_val, expected_loss_val, rtol=1e-5) + + @test_utils.run_in_graph_and_eager_modes() + def testSampleFromDiscretizedMixLogistic(self): + batch = 2 + height = 4 + width = 4 + num_mixtures = 5 + seed = 42 + logits = tf.concat( # assign all probability mass to first component + [tf.ones([batch, height, width, 1]) * 1e8, + tf.zeros([batch, height, width, num_mixtures - 1])], + axis=-1) + locs = tf.random_uniform([batch, height, width, num_mixtures * 3], + minval=-.9, maxval=.9) + log_scales = tf.ones([batch, height, width, num_mixtures * 3]) * -1e8 + coeffs = tf.atanh(tf.zeros([batch, height, width, num_mixtures * 3])) + pred = tf.concat([logits, locs, log_scales, coeffs], axis=-1) + + locs_0 = locs[..., :3] + expected_sample = tf.clip_by_value(locs_0, -1., 1.) + + actual_sample = common_layers.sample_from_discretized_mix_logistic( + pred, seed=seed) + actual_sample_val, expected_sample_val = self.evaluate( + [actual_sample, expected_sample]) + # Use a low tolerance: samples numerically differ, as the actual + # implementation clips log-scales so they always contribute to sampling. + self.assertAllClose(actual_sample_val, expected_sample_val, atol=1e-2) + + @test_utils.run_in_graph_and_eager_modes() + def testFactoredTensorImplicitConversion(self): + a = np.random.rand(3, 4, 5) + b = np.random.rand(6, 5) + c = np.random.rand(3, 4, 6) + # a factored representation of a Tensor of shape (3, 4, 6) + factored = common_layers.FactoredTensor(tf.to_float(a), tf.to_float(b)) + # implicitly converts factored to a Tensor (performing the matmul) + d = factored + tf.to_float(c) + out = self.evaluate(d) + self.assertEqual(out.shape, (3, 4, 6)) + + @test_utils.run_in_graph_mode_only() + def testConvHiddenReluMemoryEfficient(self): + batch = 3 + length = 23 + io_size = 16 + filter_size = 7 + x = np.random.rand(batch, length, io_size) + dy = np.random.rand(batch, length, io_size) + with self.test_session() as session: + x = tf.to_float(x) + dy = tf.to_float(dy) + f1 = tf.get_variable("f1", [1, io_size, filter_size]) + f2 = tf.get_variable("f2", [1, filter_size, io_size]) + norm_scale, norm_bias = common_layers.layer_norm_vars(io_size) + y = common_layers.conv_hidden_relu_memory_efficient( + x, filter_size, forget=False, + test_vars=(f1, f2, norm_scale, norm_bias)) + y_forget = common_layers.conv_hidden_relu_memory_efficient( + x, filter_size, forget=True, + test_vars=(f1, f2, norm_scale, norm_bias)) + dx, df1, df2, dnorm_scale, dnorm_bias = tf.gradients( + ys=[y], xs=[x, f1, f2, norm_scale, norm_bias], grad_ys=[dy]) + dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f = tf.gradients( + ys=[y_forget], xs=[x, f1, f2, norm_scale, norm_bias], grad_ys=[dy]) + session.run(tf.global_variables_initializer()) + (y, y_forget, + dx, df1, df2, dnorm_scale, dnorm_bias, + dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f) = session.run( + [y, y_forget, + dx, df1, df2, 
dnorm_scale, dnorm_bias, + dx_f, df1_f, df2_f, dnorm_scale_f, dnorm_bias_f]) + self.assertAllClose(y, y_forget) + self.assertAllClose(df2, df2_f, rtol=2e-6, atol=2e-6) + self.assertAllClose(df1, df1_f, rtol=2e-6, atol=2e-6) + self.assertAllClose(dnorm_scale, dnorm_scale_f) + self.assertAllClose(dnorm_bias, dnorm_bias_f) + self.assertAllClose(dx, dx_f) + + @test_utils.run_in_graph_and_eager_modes() + def testTopk(self): + batch_size = 3 + seq_len = 5 + vocab_size = 7 + + top_k = [3, 2, -1] + logits = np.random.rand(batch_size, seq_len, 1, 1, vocab_size) + 0.001 + topk_logits = common_layers._select_top_k(logits, top_k) + + self.evaluate(tf.global_variables_initializer()) + topk_logits = self.evaluate(topk_logits) + + for i, k in enumerate(top_k): + for j in range(seq_len): + self.assertEqual((topk_logits[i, j, 0, 0, :] > -1e6).sum(), + k if k != -1 else vocab_size) + + @test_utils.run_in_graph_and_eager_modes() + def testSampleTemperaturePerExample(self): + batch_size = 3 + seq_len = 5 + vocab_size = 7 + + logits = np.random.randn(batch_size, seq_len, 1, 1, vocab_size) + temperature = np.random.rand(batch_size) + + out = common_layers.sample_temperature_per_example(logits, temperature, -1) + + self.assertAllEqual( + self.evaluate(tf.shape(out)), [batch_size, seq_len, 1, 1]) + + @test_utils.run_in_graph_and_eager_modes() + def testSampleTemperaturePerExampleWithTopK(self): + batch_size = 3 + seq_len = 5 + vocab_size = 7 + + logits = np.random.randn(batch_size, seq_len, 1, 1, vocab_size) + temperature = np.random.rand(batch_size) + top_k = np.array([3, -1, 4], dtype=np.int32) + + out = common_layers.sample_temperature_per_example(logits, temperature, + top_k) + + self.assertAllEqual( + self.evaluate(tf.shape(out)), [batch_size, seq_len, 1, 1]) + + @test_utils.run_in_graph_and_eager_modes() + def testSampleTemperaturePerExampleWithTopK2(self): + batch_size = 3 + vocab_size = 7 + + logits = np.random.randn(batch_size, vocab_size) + temperature = np.random.rand(batch_size) + top_k = np.array([3, -1, 4], dtype=np.int32) + + out = common_layers.sample_temperature_per_example(logits, temperature, + top_k) + + self.assertAllEqual(self.evaluate(tf.shape(out)), [batch_size]) + + @test_utils.run_in_graph_mode_only() + def testSampleTemperaturePerExampleDynamicBatchSize(self): + batch_size = None + vocab_size = 7 + + logits = tf.placeholder(tf.float32, shape=(batch_size, vocab_size)) + temperature = tf.placeholder(tf.float32, shape=(batch_size, 1)) + sampling_keep_top_k = tf.placeholder(tf.int32, shape=(batch_size, 1)) + + out = common_layers.sample_temperature_per_example(logits, temperature, + sampling_keep_top_k) + + self.assertAllEqual(out.shape.as_list(), [batch_size]) + + @test_utils.run_in_graph_and_eager_modes() + def testCycleGANUpsampleNnUpsampleConv(self): + batch = 8 + height = 32 + width = 32 + num_channels = 3 + output_filters = 10 + stride = [2, 3] # we want height to be x2 and width to be x3 + random_input = np.random.rand(batch, height, width, num_channels).astype( + np.float32) + + # nn_upsample_conv gives exactly the shapes we'd expect. 
+ upsampled_output = common_layers.cyclegan_upsample( + random_input, output_filters, stride, "nn_upsample_conv") + upsampled_output_shape = tf.shape(upsampled_output) + self.evaluate(tf.global_variables_initializer()) + self.assertAllEqual( + [batch, height * stride[0], width * stride[1], output_filters], + self.evaluate(upsampled_output_shape)) + + @test_utils.run_in_graph_and_eager_modes() + def testCycleGANUpsampleBilinearUpsampleConv(self): + batch = 8 + height = 32 + width = 32 + num_channels = 3 + output_filters = 10 + stride = [2, 3] # we want height to be x2 and width to be x3 + random_input = np.random.rand(batch, height, width, num_channels).astype( + np.float32) + + # bilinear_upsample_conv gives exactly the shapes we'd expect. + upsampled_output = common_layers.cyclegan_upsample( + random_input, output_filters, stride, "bilinear_upsample_conv") + upsampled_output_shape = tf.shape(upsampled_output) + self.evaluate(tf.global_variables_initializer()) + self.assertAllEqual( + [batch, height * stride[0], width * stride[1], output_filters], + self.evaluate(upsampled_output_shape)) + + @test_utils.run_in_graph_and_eager_modes() + def testCycleGANUpsampleConv2dTranspose(self): + batch = 8 + height = 32 + width = 32 + num_channels = 3 + output_filters = 10 + stride = [2, 3] # we want height to be x2 and width to be x3 + random_input = tf.convert_to_tensor( + np.random.rand(batch, height, width, num_channels), dtype=tf.float32) + + # conv2d_transpose is a little tricky. + # height_new = (height_old - 1) * stride + kernel - 2*padding - correction + # here kernel = 3, padding = 0, correction = 1 + upsampled_height = (height - 1) * stride[0] + 3 - 2*0 - 1 + upsampled_width = (width - 1) * stride[1] + 3 - 2*0 - 1 + upsampled_output = common_layers.cyclegan_upsample(random_input, + output_filters, stride, + "conv2d_transpose") + upsampled_output_shape = tf.shape(upsampled_output) + self.evaluate(tf.global_variables_initializer()) + self.assertAllEqual( + [batch, upsampled_height, upsampled_width, output_filters], + self.evaluate(upsampled_output_shape)) + + def testSpectralNorm(self): + # Test that after 20 calls to apply_spectral_norm, the spectral + # norm of the normalized matrix is close to 1.0 + with tf.Graph().as_default(): + weights = tf.get_variable("w", dtype=tf.float32, shape=[2, 3, 50, 100]) + weights = tf.multiply(weights, 10.0) + normed_weight, assign_op = common_layers.apply_spectral_norm(weights) + + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + + for _ in range(20): + sess.run(assign_op) + normed_weight, assign_op = common_layers.apply_spectral_norm( + weights) + normed_weight = sess.run(normed_weight).reshape(-1, 100) + _, s, _ = np.linalg.svd(normed_weight) + self.assertTrue(np.allclose(s[0], 1.0, rtol=0.1)) + + +class FnWithCustomGradTest(tf.test.TestCase): + + @test_utils.run_in_graph_mode_only() + def testCorrectness(self): + + w = tf.random_uniform([6, 10]) + + def fn(a, b, c): + return tf.layers.dense( + a, + 10, + use_bias=False, + kernel_initializer=lambda shape, dtype, partition_info: w + ) + tf.matmul(b, c) + + def grad_fn(inputs, variables, outputs, grad_outputs): + outputs = outputs[0] + grad_outputs = grad_outputs[0] + grad_inputs = tf.gradients(outputs, inputs, grad_ys=grad_outputs) + grad_vars = tf.gradients(outputs, variables, grad_ys=grad_outputs) + return grad_inputs, grad_vars + + custom_fn = common_layers.fn_with_custom_grad(grad_fn)(fn) + + a = tf.random_uniform([11, 6]) + b = tf.random_uniform([11, 7]) + c = 
tf.random_uniform([7, 10]) + + out = fn(a, b, c) + custom_out = custom_fn(a, b, c) + self.assertEqual(out.get_shape().as_list(), + custom_out.get_shape().as_list()) + + loss = tf.reduce_mean(out) + custom_loss = tf.reduce_mean(custom_out) + + grads = tf.gradients(loss, [a, b, c] + [tf.trainable_variables()[0]]) + custom_grads = tf.gradients(custom_loss, + [a, b, c] + [tf.trainable_variables()[1]]) + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + out_val, custom_out_val, grads_val, custom_grads_val = sess.run( + [out, custom_out, grads, custom_grads]) + self.assertAllClose(out_val, custom_out_val) + for g1, g2 in zip(grads_val, custom_grads_val): + self.assertAllClose(g1, g2) + + @test_utils.run_in_graph_mode_only() + def testCustomGrad(self): + + def fn(a, b, c): + return tf.layers.dense(a, 10, use_bias=False) + tf.matmul(b, c) + + def grad_fn(inputs, variables, unused_outputs, unused_grad_outputs): + grad_inputs = [tf.ones_like(t) * (i + 1.) for i, t in enumerate(inputs)] + grad_vars = [ + tf.ones_like(t) * (i + len(inputs) + 1.) + for i, t in enumerate(variables) + ] + return grad_inputs, grad_vars + + a = tf.random_uniform([11, 6]) + b = tf.random_uniform([11, 7]) + c = tf.random_uniform([7, 10]) + w = tf.random_uniform([6, 10]) + out = common_layers.fn_with_custom_grad(grad_fn)(fn)(a, b, c) + loss = tf.reduce_mean(out) + grads = tf.gradients(loss, [a, b, c, tf.trainable_variables()[0]]) + expected_grads = [ + tf.ones_like(t) * (i + 1.) for i, t in enumerate([a, b, c, w]) + ] + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + g_val, eg_val = sess.run([grads, expected_grads]) + for g1, g2 in zip(g_val, eg_val): + self.assertAllClose(g1, g2) + + +class RecomputeTest(tf.test.TestCase): + + @test_utils.run_in_graph_mode_only() + def testRecompute(self): + + def layer(x, name=None): + with tf.variable_scope(name, default_name="layer"): + x = common_layers.layer_norm(x) + x = tf.layers.conv1d( + x, + 10, + 1, + use_bias=False, + kernel_initializer=tf.constant_initializer(42.42)) + x = tf.nn.relu(x) + return x + + def fn(x): + out = x + for _ in range(3): + out = layer(out) + return out + + @common_layers.recompute_grad + def fn_recompute(x): + return fn(x) + + x = tf.random_uniform((3, 1, 3)) + recompute_vars = None + with tf.variable_scope("recompute") as vs: + out1 = tf.reduce_sum(fn_recompute(x)) + recompute_vars = vs.trainable_variables() + reg_vars = None + with tf.variable_scope("regular") as vs: + out2 = tf.reduce_sum(fn(x)) + reg_vars = vs.trainable_variables() + + grad1 = tf.gradients(out1, recompute_vars) + grad2 = tf.gradients(out2, reg_vars) + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + outs = sess.run([out1, out2, grad1, grad2]) + self.assertAllClose(outs[0], outs[1]) + for g1, g2 in zip(outs[2], outs[3]): + self.assertAllClose(g1, g2) + + +class WeightNormTest(tf.test.TestCase): + + def testInputSpec(self): + """Test that WeighNorm does not overspecify the input_spec.""" + conv = common_layers.WeightNorm( + tf.keras.layers.Conv1D(filters=8, kernel_size=3)) + # Call with one batch size: + conv(tf.zeros([1, 16, 2])) + # Should allow call with another batch size. + conv(tf.zeros([2, 16, 2])) + # Input spec does detect incorrect input feature dim. 
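+    # (The wrapped Conv1D was built on inputs with 2 channels, so only the
+    #  batch dimension is left flexible; a final dimension of 3 must raise.)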
+    with self.assertRaises(ValueError):
+      conv(tf.zeros([2, 16, 3]))
+
+
+if __name__ == "__main__":
+  tf.test.main()
diff --git a/tensor2tensor/layers/common_video.py b/tensor2tensor/layers/common_video.py
new file mode 100644
index 000000000..103fb08b3
--- /dev/null
+++ b/tensor2tensor/layers/common_video.py
@@ -0,0 +1,854 @@
+# coding=utf-8
+# Copyright 2023 The Tensor2Tensor Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Utilities for video."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import numpy as np
+
+from tensor2tensor.layers import common_layers
+from tensor2tensor.utils import contrib
+import tensorflow.compat.v1 as tf
+
+from tensorflow.python.ops import summary_op_util  # pylint: disable=g-direct-tensorflow-import
+
+# After tf-nightly 1.14.1.dev20190314 summary_op_util.skip_summary was extracted
+# out to the distribute module.
+try:
+  from tensorflow.python.distribute import summary_op_util as distribute_summary_op_util  # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
+except ImportError:
+  distribute_summary_op_util = summary_op_util
+
+tfl = common_layers.layers()
+
+
+def swap_time_and_batch_axes(inputs):
+  """Swaps the time and batch axes (the first two axes)."""
+  transposed_axes = tf.concat([[1, 0], tf.range(2, tf.rank(inputs))], axis=0)
+  return tf.transpose(inputs, transposed_axes)
+
+
+def encode_to_shape(inputs, shape, scope):
+  """Encode the given tensor to the given image shape."""
+  with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
+    w, h = shape[1], shape[2]
+    x = inputs
+    x = tfl.flatten(x)
+    x = tfl.dense(x, w * h, activation=None, name="enc_dense")
+    x = tf.reshape(x, (-1, w, h, 1))
+    return x
+
+
+def decode_to_shape(inputs, shape, scope):
+  """Decode the given tensor to the given image shape."""
+  with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
+    x = inputs
+    x = tfl.flatten(x)
+    x = tfl.dense(x, shape[2], activation=None, name="dec_dense")
+    x = tf.expand_dims(x, axis=1)
+    return x
+
+
+def basic_lstm(inputs, state, num_units, name=None):
+  """Basic LSTM."""
+  input_shape = common_layers.shape_list(inputs)
+  # Reuse parameters across time-steps.
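+  # (reuse=tf.AUTO_REUSE makes repeated calls with the same `name` share one
+  #  set of LSTM weights, so unrolling the cell over many steps adds no new
+  #  variables.)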
+ cell = tf.nn.rnn_cell.BasicLSTMCell( + num_units, name=name, reuse=tf.AUTO_REUSE) + if state is None: + state = cell.zero_state(input_shape[0], tf.float32) + outputs, new_state = cell(inputs, state) + return outputs, new_state + + +def lstm_cell(inputs, + state, + num_units, + use_peepholes=False, + cell_clip=0.0, + initializer=None, + num_proj=None, + num_unit_shards=None, + num_proj_shards=None, + reuse=None, + name=None): + """Full LSTM cell.""" + input_shape = common_layers.shape_list(inputs) + cell = tf.nn.rnn_cell.LSTMCell(num_units, + use_peepholes=use_peepholes, + cell_clip=cell_clip, + initializer=initializer, + num_proj=num_proj, + num_unit_shards=num_unit_shards, + num_proj_shards=num_proj_shards, + reuse=reuse, + name=name, + state_is_tuple=False) + if state is None: + state = cell.zero_state(input_shape[0], tf.float32) + outputs, new_state = cell(inputs, state) + return outputs, new_state + + +def conv_lstm_2d(inputs, state, output_channels, + kernel_size=5, name=None, spatial_dims=None): + """2D Convolutional LSTM.""" + input_shape = common_layers.shape_list(inputs) + batch_size, input_channels = input_shape[0], input_shape[-1] + if spatial_dims is None: + input_shape = input_shape[1:] + else: + input_shape = spatial_dims + [input_channels] + + cell = contrib.rnn().ConvLSTMCell( + 2, input_shape, output_channels, [kernel_size, kernel_size], name=name) + if state is None: + state = cell.zero_state(batch_size, tf.float32) + outputs, new_state = cell(inputs, state) + return outputs, new_state + + +def scheduled_sample_count(ground_truth_x, + generated_x, + batch_size, + scheduled_sample_var): + """Sample batch with specified mix of groundtruth and generated data points. + + Args: + ground_truth_x: tensor of ground-truth data points. + generated_x: tensor of generated data points. + batch_size: batch size + scheduled_sample_var: number of ground-truth examples to include in batch. + Returns: + New batch with num_ground_truth sampled from ground_truth_x and the rest + from generated_x. + """ + num_ground_truth = scheduled_sample_var + idx = tf.random_shuffle(tf.range(batch_size)) + ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth)) + generated_idx = tf.gather(idx, tf.range(num_ground_truth, batch_size)) + + ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx) + generated_examps = tf.gather(generated_x, generated_idx) + + output = tf.dynamic_stitch([ground_truth_idx, generated_idx], + [ground_truth_examps, generated_examps]) + # if batch size is known set it. + if isinstance(batch_size, int): + output.set_shape([batch_size] + common_layers.shape_list(output)[1:]) + return output + + +def inject_additional_input(layer, inputs, name, mode="concat"): + """Injects the additional input into the layer. + + Args: + layer: layer that the input should be injected to. + inputs: inputs to be injected. + name: TF scope name. + mode: how the infor should be added to the layer: + "concat" concats as additional channels. + "multiplicative" broadcasts inputs and multiply them to the channels. + "multi_additive" broadcasts inputs and multiply and add to the channels. + + Returns: + updated layer. + + Raises: + ValueError: in case of unknown mode. 
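+
+  Example (illustrative only; `frame` and `action` are arbitrary placeholders):
+    # frame: [batch, height, width, channels], action: [batch, action_dim]
+    frame = inject_additional_input(frame, action, "action_emb",
+                                    mode="multi_additive")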
+  """
+  layer_shape = common_layers.shape_list(layer)
+  input_shape = common_layers.shape_list(inputs)
+  zeros_mask = tf.zeros(layer_shape, dtype=tf.float32)
+  if mode == "concat":
+    emb = encode_to_shape(inputs, layer_shape, name)
+    layer = tf.concat(values=[layer, emb], axis=-1)
+  elif mode == "multiplicative":
+    filters = layer_shape[-1]
+    input_reshaped = tf.reshape(inputs, [-1, 1, 1, input_shape[-1]])
+    input_mask = tf.layers.dense(input_reshaped, filters, name=name)
+    input_broad = input_mask + zeros_mask
+    layer *= input_broad
+  elif mode == "multi_additive":
+    filters = layer_shape[-1]
+    input_reshaped = tf.reshape(inputs, [-1, 1, 1, input_shape[-1]])
+    input_mul = tf.layers.dense(input_reshaped, filters, name=name + "_mul")
+    layer *= tf.nn.sigmoid(input_mul)
+    input_add = tf.layers.dense(input_reshaped, filters, name=name + "_add")
+    layer += input_add
+  else:
+    raise ValueError("Unknown injection mode: %s" % mode)
+
+  return layer
+
+
+def scheduled_sample_prob(ground_truth_x,
+                          generated_x,
+                          batch_size,
+                          scheduled_sample_var):
+  """Probability based scheduled sampling.
+
+  Args:
+    ground_truth_x: tensor of ground-truth data points.
+    generated_x: tensor of generated data points.
+    batch_size: batch size
+    scheduled_sample_var: probability of choosing from ground_truth.
+  Returns:
+    New batch with randomly selected data points.
+  """
+  probability_threshold = scheduled_sample_var
+  probability_of_generated = tf.random_uniform([batch_size])
+  return tf.where(probability_of_generated > probability_threshold,
+                  generated_x, ground_truth_x)
+
+
+def dna_transformation(prev_image, dna_input, dna_kernel_size, relu_shift):
+  """Apply dynamic neural advection to previous image.
+
+  Args:
+    prev_image: previous image to be transformed.
+    dna_input: hidden layer to be used for computing DNA transformation.
+    dna_kernel_size: dna kernel size.
+    relu_shift: shift for ReLU function.
+  Returns:
+    Image transformed by the predicted DNA kernels.
+  """
+  # Construct translated images.
+  prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
+  image_height = int(prev_image.get_shape()[1])
+  image_width = int(prev_image.get_shape()[2])
+
+  inputs = []
+  for xkern in range(dna_kernel_size):
+    for ykern in range(dna_kernel_size):
+      inputs.append(
+          tf.expand_dims(
+              tf.slice(prev_image_pad, [0, xkern, ykern, 0],
+                       [-1, image_height, image_width, -1]), [3]))
+  inputs = tf.concat(axis=3, values=inputs)
+
+  # Normalize channels to 1.
+  kernel = tf.nn.relu(dna_input - relu_shift) + relu_shift
+  kernel = tf.expand_dims(
+      kernel / tf.reduce_sum(kernel, [3], keep_dims=True), [4])
+  return tf.reduce_sum(kernel * inputs, [3], keep_dims=False)
+
+
+def cdna_transformation(prev_image, cdna_input, num_masks, color_channels,
+                        dna_kernel_size, relu_shift):
+  """Apply convolutional dynamic neural advection to previous image.
+
+  Args:
+    prev_image: previous image to be transformed.
+    cdna_input: hidden layer to be used for computing CDNA kernels.
+    num_masks: number of masks and hence the number of CDNA transformations.
+    color_channels: the number of color channels in the images.
+    dna_kernel_size: dna kernel size.
+    relu_shift: shift for ReLU function.
+  Returns:
+    List of images transformed by the predicted CDNA kernels.
+  """
+  batch_size = tf.shape(cdna_input)[0]
+  height = int(prev_image.get_shape()[1])
+  width = int(prev_image.get_shape()[2])
+
+  # Predict kernels using linear function of last hidden layer.
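+  # (cdna_input has shape [batch, hidden]; the dense layer below emits
+  #  dna_kernel_size * dna_kernel_size values per mask, which are then
+  #  reshaped into one k x k kernel for each of the num_masks transformations.)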
+ cdna_kerns = tfl.dense( + cdna_input, dna_kernel_size * dna_kernel_size * num_masks, + name="cdna_params", + activation=None) + + # Reshape and normalize. + cdna_kerns = tf.reshape( + cdna_kerns, [batch_size, dna_kernel_size, dna_kernel_size, 1, num_masks]) + cdna_kerns = (tf.nn.relu(cdna_kerns - relu_shift) + relu_shift) + norm_factor = tf.reduce_sum(cdna_kerns, [1, 2, 3], keep_dims=True) + cdna_kerns /= norm_factor + + # Treat the color channel dimension as the batch dimension since the same + # transformation is applied to each color channel. + # Treat the batch dimension as the channel dimension so that + # depthwise_conv2d can apply a different transformation to each sample. + cdna_kerns = tf.transpose(cdna_kerns, [1, 2, 0, 4, 3]) + cdna_kerns = tf.reshape( + cdna_kerns, [dna_kernel_size, dna_kernel_size, batch_size, num_masks]) + # Swap the batch and channel dimensions. + prev_image = tf.transpose(prev_image, [3, 1, 2, 0]) + + # Transform image. + transformed = tf.nn.depthwise_conv2d( + prev_image, cdna_kerns, [1, 1, 1, 1], "SAME") + + # Transpose the dimensions to where they belong. + transformed = tf.reshape( + transformed, [color_channels, height, width, batch_size, num_masks]) + transformed = tf.transpose(transformed, [3, 1, 2, 0, 4]) + transformed = tf.unstack(transformed, axis=-1) + return transformed + + +def vgg_layer(inputs, + nout, + kernel_size=3, + activation=tf.nn.leaky_relu, + padding="SAME", + is_training=True, + has_batchnorm=False, + scope=None): + """A layer of VGG network with batch norm. + + Args: + inputs: image tensor + nout: number of output channels + kernel_size: size of the kernel + activation: activation function + padding: padding of the image + is_training: whether it is training mode or not + has_batchnorm: whether batchnorm is applied or not + scope: variable scope of the op + Returns: + net: output of layer + """ + with tf.variable_scope(scope): + net = tfl.conv2d(inputs, nout, kernel_size=kernel_size, padding=padding, + activation=None, name="conv") + if has_batchnorm: + net = tfl.batch_normalization(net, training=is_training, name="bn") + net = activation(net) + return net + + +def tile_and_concat(image, latent, concat_latent=True): + """Tile latent and concatenate to image across depth. + + Args: + image: 4-D Tensor, (batch_size X height X width X channels) + latent: 2-D Tensor, (batch_size X latent_dims) + concat_latent: If set to False, the image is returned as is. + + Returns: + concat_latent: 4-D Tensor, (batch_size X height X width X channels+1) + latent tiled and concatenated to the image across the channels. + """ + if not concat_latent: + return image + image_shape = common_layers.shape_list(image) + latent_shape = common_layers.shape_list(latent) + height, width = image_shape[1], image_shape[2] + latent_dims = latent_shape[1] + height_multiples = height // latent_dims + pad = height - (height_multiples * latent_dims) + latent = tf.reshape(latent, (-1, latent_dims, 1, 1)) + latent = tf.tile(latent, (1, height_multiples, width, 1)) + latent = tf.pad(latent, [[0, 0], [pad // 2, pad // 2], [0, 0], [0, 0]]) + return tf.concat([image, latent], axis=-1) + + +def _encode_gif(images, fps): + """Encodes numpy images into gif string. + + Args: + images: A 4-D `uint8` `np.array` (or a list of 3-D images) of shape + `[time, height, width, channels]` where `channels` is 1 or 3. + fps: frames per second of the animation + + Returns: + The encoded gif string. + + Raises: + IOError: If the ffmpeg command returns an error. 
+ """ + writer = WholeVideoWriter(fps) + writer.write_multi(images) + return writer.finish() + + +def ffmpeg_works(): + """Tries to encode images with ffmpeg to check if it works.""" + images = np.zeros((2, 32, 32, 3), dtype=np.uint8) + try: + _encode_gif(images, 2) + return True + except (IOError, OSError): + return False + + +def py_gif_summary(tag, images, max_outputs, fps, return_summary_value=False): + """Outputs a `Summary` protocol buffer with gif animations. + + Args: + tag: Name of the summary. + images: A 5-D `uint8` `np.array` of shape `[batch_size, time, height, width, + channels]` where `channels` is 1 or 3. + max_outputs: Max number of batch elements to generate gifs for. + fps: frames per second of the animation. + return_summary_value: If set to True, return a list of tf.Summary.Value + objects in addition to the protocol buffer. + + Returns: + The serialized `Summary` protocol buffer. + + Raises: + ValueError: If `images` is not a 5-D `uint8` array with 1 or 3 channels. + """ + images = np.asarray(images) + if images.dtype != np.uint8: + raise ValueError("Tensor must have dtype uint8 for gif summary.") + if images.ndim != 5: + raise ValueError("Tensor must be 5-D for gif summary.") + batch_size, _, height, width, channels = images.shape + if channels not in (1, 3): + raise ValueError("Tensors must have 1 or 3 channels for gif summary.") + + summ = tf.Summary() + all_summ_values = [] + num_outputs = min(batch_size, max_outputs) + for i in range(num_outputs): + image_summ = tf.Summary.Image() + image_summ.height = height + image_summ.width = width + image_summ.colorspace = channels # 1: grayscale, 3: RGB + try: + image_summ.encoded_image_string = _encode_gif(images[i], fps) + except (IOError, OSError) as e: + tf.logging.warning( + "Unable to encode images to a gif string because either ffmpeg is " + "not installed or ffmpeg returned an error: %s. Falling back to an " + "image summary of the first frame in the sequence.", e) + try: + from PIL import Image # pylint: disable=g-import-not-at-top + import io # pylint: disable=g-import-not-at-top + with io.BytesIO() as output: + Image.fromarray(images[i][0]).save(output, "PNG") + image_summ.encoded_image_string = output.getvalue() + except ImportError as e: + tf.logging.warning( + "Gif summaries requires ffmpeg or PIL to be installed: %s", e) + image_summ.encoded_image_string = "" + if num_outputs == 1: + summ_tag = "{}/gif".format(tag) + else: + summ_tag = "{}/gif/{}".format(tag, i) + curr_summ_value = tf.Summary.Value(tag=summ_tag, image=image_summ) + all_summ_values.append(curr_summ_value) + summ.value.add(tag=summ_tag, image=image_summ) + summ_str = summ.SerializeToString() + if return_summary_value: + return all_summ_values, summ_str + return summ_str + + +def gif_summary(name, tensor, max_outputs=3, fps=10, collections=None, + family=None): + """Outputs a `Summary` protocol buffer with gif animations. + + Args: + name: Name of the summary. + tensor: A 5-D `uint8` `Tensor` of shape `[batch_size, time, height, width, + channels]` where `channels` is 1 or 3. + max_outputs: Max number of batch elements to generate gifs for. + fps: frames per second of the animation + collections: Optional list of tf.GraphKeys. The collections to add the + summary to. Defaults to [tf.GraphKeys.SUMMARIES] + family: Optional; if provided, used as the prefix of the summary tag name, + which controls the tab name used for display on Tensorboard. + + Returns: + A scalar `Tensor` of type `string`. The serialized `Summary` protocol + buffer. 
+ + Raises: + ValueError: if the given tensor has the wrong shape. + """ + tensor = tf.convert_to_tensor(tensor) + if len(tensor.get_shape()) != 5: + raise ValueError("Assuming videos given as tensors in the format " + "[batch, time, height, width, channels] but got one " + "of shape: %s" % str(tensor.get_shape())) + tensor = tf.cast(tensor, tf.uint8) + if distribute_summary_op_util.skip_summary(): + return tf.constant("") + with summary_op_util.summary_scope( + name, family, values=[tensor]) as (tag, scope): + val = tf.py_func( + py_gif_summary, + [tag, tensor, max_outputs, fps], + tf.string, + stateful=False, + name=scope) + summary_op_util.collect(val, collections, [tf.GraphKeys.SUMMARIES]) + return val + + + + +def tinyify(array, tiny_mode, small_mode): + if tiny_mode: + return [1 for _ in array] + if small_mode: + return [max(x // 4, 1) for x in array] + return array + + +def get_gaussian_tensor(mean, log_var): + z = tf.random_normal(tf.shape(mean), 0, 1, dtype=tf.float32) + z = mean + tf.exp(log_var / 2.0) * z + return z + + +def conv_latent_tower(images, time_axis, latent_channels=1, min_logvar=-5, + is_training=False, random_latent=False, + tiny_mode=False, small_mode=False): + """Builds convolutional latent tower for stochastic model. + + At training time this tower generates a latent distribution (mean and std) + conditioned on the entire video. This latent variable will be fed to the + main tower as an extra variable to be used for future frames prediction. + At inference time, the tower is disabled and only returns latents sampled + from N(0,1). + If the multi_latent flag is on, a different latent for every timestep would + be generated. + + Args: + images: tensor of ground truth image sequences + time_axis: the time axis in images tensor + latent_channels: number of latent channels + min_logvar: minimum value for log_var + is_training: whether or not it is training mode + random_latent: whether or not generate random latents + tiny_mode: whether or not it is tiny_mode. tiny_mode sets the number + of conv channels to 1 at each layer. useful for testing the + integration tests. + small_mode: whether or not it is small_mode. small mode is the same model + with less conv and lstm layers and also lower number of channels. + suitable for videos with less complexity and testing. + Returns: + latent_mean: predicted latent mean + latent_logvar: predicted latent log variance + """ + conv_size = tinyify([32, 64, 64], tiny_mode, small_mode) + with tf.variable_scope("latent", reuse=tf.AUTO_REUSE): + images = tf.to_float(images) + images = tf.unstack(images, axis=time_axis) + images = tf.concat(images, axis=3) + + x = images + x = common_layers.make_even_size(x) + x = tfl.conv2d(x, conv_size[0], [3, 3], strides=(2, 2), + padding="SAME", activation=tf.nn.relu, name="latent_conv1") + x = contrib.layers().layer_norm(x) + if not small_mode: + x = tfl.conv2d(x, conv_size[1], [3, 3], strides=(2, 2), + padding="SAME", activation=tf.nn.relu, name="latent_conv2") + x = contrib.layers().layer_norm(x) + x = tfl.conv2d(x, conv_size[2], [3, 3], strides=(1, 1), + padding="SAME", activation=tf.nn.relu, name="latent_conv3") + x = contrib.layers().layer_norm(x) + + nc = latent_channels + mean = tfl.conv2d(x, nc, [3, 3], strides=(2, 2), + padding="SAME", activation=None, name="latent_mean") + logv = tfl.conv2d(x, nc, [3, 3], strides=(2, 2), + padding="SAME", activation=tf.nn.relu, name="latent_std") + logvar = logv + min_logvar + + # No latent tower at inference time, just standard gaussian. 
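+    # (Returning zero mean and zero log-variance makes get_gaussian_tensor
+    #  above sample from N(0, 1).)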
+    if not is_training:
+      return tf.zeros_like(mean), tf.zeros_like(logvar)
+
+    # No latent in the first phase.
+    ret_mean, ret_logvar = tf.cond(
+        random_latent,
+        lambda: (tf.zeros_like(mean), tf.zeros_like(logvar)),
+        lambda: (mean, logvar))
+
+    return ret_mean, ret_logvar
+
+
+def beta_schedule(schedule, global_step, final_beta, decay_start, decay_end):
+  """Get KL multiplier (beta) based on the schedule."""
+  if decay_start > decay_end:
+    raise ValueError("decay_end is smaller than decay_start.")
+
+  # Since some of the TF schedules do not support incrementing a value,
+  # in all of the schedules, we anneal the beta from final_beta to zero
+  # and then reverse it at the bottom.
+  if schedule == "constant":
+    decayed_value = 0.0
+  elif schedule == "linear":
+    decayed_value = tf.train.polynomial_decay(
+        learning_rate=final_beta,
+        global_step=global_step - decay_start,
+        decay_steps=decay_end - decay_start,
+        end_learning_rate=0.0)
+  elif schedule == "noisy_linear_cosine_decay":
+    decayed_value = tf.train.noisy_linear_cosine_decay(
+        learning_rate=final_beta,
+        global_step=global_step - decay_start,
+        decay_steps=decay_end - decay_start)
+  # TODO(mechcoder): Add log_annealing schedule.
+  else:
+    raise ValueError("Unknown beta schedule.")
+
+  increased_value = final_beta - decayed_value
+  increased_value = tf.maximum(0.0, increased_value)
+
+  beta = tf.case(
+      pred_fn_pairs={
+          tf.less(global_step, decay_start): lambda: 0.0,
+          tf.greater(global_step, decay_end): lambda: final_beta},
+      default=lambda: increased_value)
+  return beta
+
+
+def extract_random_video_patch(videos, num_frames=-1):
+  """For every video, extract a random consecutive patch of num_frames.
+
+  Args:
+    videos: 5-D Tensor, (NTHWC)
+    num_frames: Integer, if -1 then the entire video is returned.
+  Returns:
+    video_patch: 5-D Tensor, (NTHWC) with T = num_frames.
+  Raises:
+    ValueError: If num_frames is greater than the number of total frames in
+      the video.
+  """
+  if num_frames == -1:
+    return videos
+  batch_size, num_total_frames, h, w, c = common_layers.shape_list(videos)
+  if num_total_frames < num_frames:
+    raise ValueError("Expected num_frames <= %d, got %d" %
+                     (num_total_frames, num_frames))
+
+  # Randomly choose start_inds for each video.
+  frame_start = tf.random_uniform(
+      shape=(batch_size,), minval=0, maxval=num_total_frames - num_frames + 1,
+      dtype=tf.int32)
+
+  # [start[0], start[0] + 1, ... start[0] + num_frames - 1] + ...
+  # [start[batch_size-1], ... start[batch_size-1] + num_frames - 1]
+  range_inds = tf.expand_dims(tf.range(num_frames), axis=0)
+  frame_inds = range_inds + tf.expand_dims(frame_start, axis=1)
+  frame_inds = tf.reshape(frame_inds, [-1])
+
+  # [0]*num_frames + [1]*num_frames + ... [batch_size-1]*num_frames
+  batch_inds = tf.expand_dims(tf.range(batch_size), axis=1)
+  batch_inds = tf.tile(batch_inds, [1, num_frames])
+  batch_inds = tf.reshape(batch_inds, [-1])
+
+  gather_inds = tf.stack((batch_inds, frame_inds), axis=1)
+  video_patches = tf.gather_nd(videos, gather_inds)
+  return tf.reshape(video_patches, (batch_size, num_frames, h, w, c))
+
+
+class VideoWriter(object):
+  """Base helper class for writing videos."""
+
+  def write(self, frame, encoded_frame=None):
+    """Writes a single video frame."""
+    raise NotImplementedError
+
+  def write_multi(self, frames, encoded_frames=None):
+    """Writes multiple video frames."""
+    if encoded_frames is None:
+      # Infinite iterator.
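+      # (iter(callable, sentinel) calls the lambda until it returns the
+      #  sentinel value 1, which never happens, so this yields None forever
+      #  and zip() below simply stops when `frames` is exhausted.)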
+ encoded_frames = iter(lambda: None, 1) + for (frame, encoded_frame) in zip(frames, encoded_frames): + self.write(frame, encoded_frame) + + def finish(self): + """Finishes writing frames and returns output, if any. + + Frees any resources acquired by the writer. + """ + pass + + def save_to_disk(self, output): + """Saves output to disk. + + Args: + output: result of finish(). + """ + raise NotImplementedError + + def finish_to_disk(self): + """Finishes writing frames and saves output to disk, if any.""" + output = self.finish() # pylint: disable=assignment-from-no-return + if output is not None: + self.save_to_disk(output) + + def __del__(self): + """Frees any resources acquired by the writer.""" + self.finish() + + +class WholeVideoWriter(VideoWriter): + """Helper class for writing whole videos.""" + + def __init__(self, fps, output_path=None, file_format="gif"): + self.fps = fps + self.output_path = output_path + self.file_format = file_format + self.proc = None + self._out_chunks = [] + self._err_chunks = [] + self._out_thread = None + self._err_thread = None + + def __init_ffmpeg(self, image_shape): + """Initializes ffmpeg to write frames.""" + import itertools # pylint: disable=g-import-not-at-top + from subprocess import Popen, PIPE # pylint: disable=g-import-not-at-top,g-multiple-import,g-importing-member + ffmpeg = "ffmpeg" + height, width, channels = image_shape + self.cmd = [ + ffmpeg, "-y", + "-f", "rawvideo", + "-vcodec", "rawvideo", + "-r", "%.02f" % self.fps, + "-s", "%dx%d" % (width, height), + "-pix_fmt", {1: "gray", 3: "rgb24"}[channels], + "-i", "-", + "-filter_complex", "[0:v]split[x][z];[x]fifo[w];[z]palettegen,fifo[y];" + "[w][y]paletteuse,fifo", + "-r", "%.02f" % self.fps, + "-f", self.file_format, + "-qscale", "0", + "-" + ] + self.proc = Popen( + self.cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, bufsize=-1 + ) + (self._out_thread, self._err_thread) = itertools.starmap( + self._start_reader_thread, [ + (self.proc.stdout, self._out_chunks), + (self.proc.stderr, self._err_chunks) + ] + ) + + def _start_reader_thread(self, stream, chunks): + """Starts a thread for reading output from FFMPEG. + + The thread reads consecutive chunks from the stream and saves them in + the given list. + + Args: + stream: output stream of the FFMPEG process. + chunks: list to save output chunks to. + + Returns: + Thread + """ + import io # pylint: disable=g-import-not-at-top + import threading # pylint: disable=g-import-not-at-top + def target(): + while True: + chunk = stream.read(io.DEFAULT_BUFFER_SIZE) + if not chunk: + break + chunks.append(chunk) + thread = threading.Thread(target=target) + thread.start() + return thread + + def write(self, frame, encoded_frame=None): + if self.proc is None: + self.__init_ffmpeg(frame.shape) + self.proc.stdin.write(frame.tostring()) + + def finish(self): + """Finishes transconding and returns the video. + + Returns: + bytes + + Raises: + IOError: in case of transcoding error. 
+ """ + if self.proc is None: + return None + self.proc.stdin.close() + for thread in (self._out_thread, self._err_thread): + thread.join() + (out, err) = [ + b"".join(chunks) for chunks in (self._out_chunks, self._err_chunks) + ] + self.proc.stdout.close() + self.proc.stderr.close() + if self.proc.returncode: + err = "\n".join([" ".join(self.cmd), err.decode("utf8")]) + raise IOError(err) + del self.proc + self.proc = None + return out + + def save_to_disk(self, output): + if self.output_path is None: + raise ValueError( + "This writer doesn't support saving to disk (output_path not " + "specified)." + ) + with tf.gfile.Open(self.output_path, "w") as f: + f.write(output) + + +class BatchWholeVideoWriter(VideoWriter): + """Helper class for writing videos in batch.""" + + def __init__(self, fps, path_template, file_format="gif"): + self.fps = fps + self.path_template = path_template + self.file_format = file_format + self.writers = None + + def write(self, batch_frame, batch_encoded_frame=None): + del batch_encoded_frame + if self.writers is None: + self.writers = [ + WholeVideoWriter( # pylint: disable=g-complex-comprehension + self.fps, self.path_template.format(i), self.file_format + ) + for i in range(len(batch_frame)) + ] + for i, frame in enumerate(batch_frame): + self.writers[i].write(frame) + + def finish(self): + outs = [w.finish() for w in self.writers] + return outs + + def save_to_disk(self, outputs): + for (writer, output) in zip(self.writers, outputs): + writer.save_to_disk(output) + + +class IndividualFrameWriter(VideoWriter): + """Helper class for writing individual video frames.""" + + def __init__(self, output_dir): + self.output_dir = output_dir + self._counter = 0 + + def write(self, frame=None, encoded_frame=None): + import os # pylint: disable=g-import-not-at-top + if encoded_frame is None: + raise ValueError("This writer only supports encoded frames.") + path = os.path.join(self.output_dir, "frame_%05d.png" % self._counter) + with tf.gfile.Open(path, "wb") as f: + f.write(encoded_frame) + self._counter += 1 diff --git a/tensor2tensor/layers/common_video_test.py b/tensor2tensor/layers/common_video_test.py new file mode 100644 index 000000000..d17b67ca1 --- /dev/null +++ b/tensor2tensor/layers/common_video_test.py @@ -0,0 +1,170 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for video utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np + +from tensor2tensor.layers import common_video +from tensor2tensor.utils import test_utils + +import tensorflow.compat.v1 as tf +tf.enable_eager_execution() + + +class CommonVideoTest(parameterized.TestCase, tf.test.TestCase): + + def _run_scheduled_sample_func(self, func, var, batch_size): + ground_truth_x = list(range(1, batch_size+1)) + generated_x = [-x for x in ground_truth_x] + ground_truth_x = tf.convert_to_tensor(ground_truth_x) + generated_x = tf.convert_to_tensor(generated_x) + ss_out = func(ground_truth_x, generated_x, batch_size, var) + output = self.evaluate([ground_truth_x, generated_x, ss_out]) + return output + + @test_utils.run_in_graph_and_eager_modes() + def testScheduledSampleProbStart(self): + ground_truth_x, _, ss_out = self._run_scheduled_sample_func( + common_video.scheduled_sample_prob, 1.0, 10) + self.assertAllEqual(ground_truth_x, ss_out) + + @test_utils.run_in_graph_and_eager_modes() + def testScheduledSampleProbMid(self): + _, _, ss_out = self._run_scheduled_sample_func( + common_video.scheduled_sample_prob, 0.5, 1000) + positive_count = np.sum(ss_out > 0) + self.assertAlmostEqual(positive_count / 1000.0, 0.5, places=1) + + @test_utils.run_in_graph_and_eager_modes() + def testScheduledSampleProbEnd(self): + _, generated_x, ss_out = self._run_scheduled_sample_func( + common_video.scheduled_sample_prob, 0.0, 10) + self.assertAllEqual(generated_x, ss_out) + + @test_utils.run_in_graph_and_eager_modes() + def testScheduledSampleCountStart(self): + ground_truth_x, _, ss_out = self._run_scheduled_sample_func( + common_video.scheduled_sample_count, 10, 10) + self.assertAllEqual(ground_truth_x, ss_out) + + @test_utils.run_in_graph_and_eager_modes() + def testScheduledSampleCountMid(self): + _, _, ss_out = self._run_scheduled_sample_func( + common_video.scheduled_sample_count, 5, 10) + positive_count = np.sum(ss_out > 0) + self.assertEqual(positive_count, 5) + + @test_utils.run_in_graph_and_eager_modes() + def testScheduledSampleCountEnd(self): + _, generated_x, ss_out = self._run_scheduled_sample_func( + common_video.scheduled_sample_count, 0, 10) + self.assertAllEqual(generated_x, ss_out) + + @test_utils.run_in_graph_and_eager_modes() + def testDynamicTileAndConcat(self): + # image = (1 X 4 X 4 X 1) + image = [[1, 2, 3, 4], + [2, 4, 5, 6], + [7, 8, 9, 10], + [7, 9, 10, 1]] + image_t = tf.expand_dims(tf.expand_dims(image, axis=0), axis=-1) + image_t = tf.cast(image_t, dtype=tf.float32) + + # latent = (1 X 2) + latent = np.array([[90, 100]]) + latent_t = tf.cast(tf.convert_to_tensor(latent), dtype=tf.float32) + + tiled = common_video.tile_and_concat( + image_t, latent_t) + tiled_np, image_np = self.evaluate([tiled, image_t]) + tiled_latent = tiled_np[0, :, :, -1] + self.assertAllEqual(tiled_np.shape, (1, 4, 4, 2)) + + self.assertAllEqual(tiled_np[:, :, :, :1], image_np) + self.assertAllEqual( + tiled_latent, + [[90, 90, 90, 90], + [100, 100, 100, 100], + [90, 90, 90, 90], + [100, 100, 100, 100]]) + + @test_utils.run_in_graph_mode_only() + def testGifSummary(self): + for c in (1, 3): + images_shape = (1, 12, 48, 64, c) # batch, time, height, width, channels + images = np.random.randint(256, size=images_shape).astype(np.uint8) + + with self.test_session(): + summary = common_video.gif_summary( + "gif", tf.convert_to_tensor(images), fps=10) + summary_string = 
summary.eval() + + summary = tf.Summary() + summary.ParseFromString(summary_string) + + self.assertEqual(1, len(summary.value)) + self.assertTrue(summary.value[0].HasField("image")) + encoded = summary.value[0].image.encoded_image_string + + self.assertEqual(encoded, common_video._encode_gif(images[0], fps=10)) # pylint: disable=protected-access + + def check_if_patch_exists(self, videos, video_patches, num_frames): + """Check that given patch is present in video.""" + for video, video_patch in zip(videos, video_patches): + total_frames = len(video) + is_present = [] + for start_ind in range(total_frames - num_frames + 1): + curr_patch = video[start_ind: start_ind + num_frames] + is_present.append(np.allclose(curr_patch, video_patch)) + self.assertTrue(np.any(is_present)) + + def testBasicLstm(self): + """Tests that the parameters of the LSTM are shared across time.""" + with tf.Graph().as_default(): + state = None + for _ in range(10): + inputs = tf.random_uniform(shape=(32, 16)) + _, state = common_video.basic_lstm( + inputs, state, num_units=100, name="basic") + num_params = np.sum([np.prod(v.shape) for v in tf.trainable_variables()]) + # 4 * ((100 + 16)*100 + 100) => 4 * (W_{xh} + W_{hh} + b) + self.assertEqual(num_params, 46800) + + @parameterized.named_parameters( + ("two_frames", 2), ("ten_frames", 10), ("default", -1)) + def testExtractRandomVideoPatch(self, num_frames=2): + with tf.Graph().as_default(): + rng = np.random.RandomState(0) + video_np = rng.randint(0, 255, size=(12, 20, 256, 256, 3)) + video = tf.convert_to_tensor(video_np) + video_patch = common_video.extract_random_video_patch( + video, num_frames=num_frames) + with tf.Session() as sess: + video_patch_np = sess.run(video_patch) + if num_frames != -1: + self.assertEqual(video_patch_np.shape, (12, num_frames, 256, 256, 3)) + self.check_if_patch_exists(video_np, video_patch_np, num_frames) + else: + self.assertTrue(np.allclose(video_np, video_patch_np)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/layers/discretization.py b/tensor2tensor/layers/discretization.py new file mode 100644 index 000000000..feb338dd4 --- /dev/null +++ b/tensor2tensor/layers/discretization.py @@ -0,0 +1,1535 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Discretization bottlenecks used to train discrete latent variables.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from functools import partial # pylint: disable=g-importing-member + +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_image_attention as cia +from tensor2tensor.layers import common_layers + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +import tensorflow_probability as tfp + +from tensorflow.python.training import moving_averages # pylint: disable=g-direct-tensorflow-import + + +def project_hidden(x, projection_tensors, hidden_size, num_blocks): + """Project encoder hidden state under num_blocks using projection tensors. + + Args: + x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size]. + projection_tensors: Projection tensors used to project the hidden state. + hidden_size: Dimension of the latent space. + num_blocks: Number of blocks in DVQ. + + Returns: + x_projected: Projected states of shape [batch_size, latent_dim, num_blocks, + hidden_size / num_blocks]. + """ + batch_size, latent_dim, _ = common_layers.shape_list(x) + x = tf.reshape(x, shape=[1, -1, hidden_size]) + x_tiled = tf.reshape( + tf.tile(x, multiples=[num_blocks, 1, 1]), + shape=[num_blocks, -1, hidden_size]) + x_projected = tf.matmul(x_tiled, projection_tensors) + x_projected = tf.transpose(x_projected, perm=[1, 0, 2]) + x_4d = tf.reshape(x_projected, [batch_size, latent_dim, num_blocks, -1]) + return x_4d + + +def slice_hidden(x, hidden_size, num_blocks): + """Slice encoder hidden state under num_blocks. + + Args: + x: Encoder hidden state of shape [batch_size, latent_dim, hidden_size]. + hidden_size: Dimension of the latent space. + num_blocks: Number of blocks in DVQ. + + Returns: + Sliced states of shape [batch_size, latent_dim, num_blocks, block_dim]. + """ + batch_size, latent_dim, _ = common_layers.shape_list(x) + block_dim = hidden_size // num_blocks + x_sliced = tf.reshape(x, + shape=[batch_size, latent_dim, num_blocks, block_dim]) + return x_sliced + + +def nearest_neighbor(x, + means, + block_v_size, + random_top_k=1, + soft_em=False, + num_samples=1, + sum_over_latents=False, + summary=True): + """Find the nearest element in means to elements in x. + + Args: + x: Continuous encodings of shape [batch_size, latent_dim, num_blocks, + block_dim]. + means: Embedding table of shape [num_blocks, block_v_size, block_dim]. + block_v_size: Number of table entries per block. + random_top_k: Noisy top-k if this is bigger than 1. + soft_em: If True then use soft EM rather than hard EM. + num_samples: Number of samples to take in soft EM. + sum_over_latents: Whether to sum over non-batch dimensions when calculating + negative entropy loss. Used only when doing soft EM. + summary: If True then record summary histogram of entropies. + + Returns: + Tensor with nearest element in mean encoded in one-hot notation + and distances. 
+ """ + batch_size, latent_dim, num_blocks, block_dim = common_layers.shape_list(x) + x = tf.reshape(x, [batch_size * latent_dim, num_blocks, block_dim]) + x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True) + means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True) + scalar_prod = tf.matmul( + tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1])) + scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2]) + dist = x_norm_sq + tf.transpose( + means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod + + # computing cluster probabilities + if soft_em: + num_blocks = common_layers.shape_list(dist)[1] + nearest_idx = tf.stack( + [ + tf.multinomial(-dist[:, i, :], num_samples=num_samples) + for i in range(num_blocks) + ], + axis=1) + nearest_hot = tf.one_hot(nearest_idx, depth=block_v_size) + neg_q_entropy = tf.reduce_sum( + nearest_hot * tf.expand_dims(tf.nn.log_softmax(-dist), 2), axis=2) + if sum_over_latents: + neg_q_entropy = tf.reduce_sum(neg_q_entropy, [1, 2]) + neg_q_entropy = tf.reduce_mean(neg_q_entropy, axis=0) + nearest_hot = tf.reduce_mean(nearest_hot, axis=-2) + if summary: + tf.summary.histogram("neg_q_entropy", tf.reshape(neg_q_entropy, [-1])) + else: + neg_q_entropy = 0. + if random_top_k > 1: + _, top_k_idx = tf.nn.top_k(-dist, k=random_top_k) + nearest_idx = tf.gather( + top_k_idx, + tf.random_uniform( + [1], minval=0, maxval=random_top_k - 1, dtype=tf.int32), + axis=-1) + else: + nearest_idx = tf.argmax(-dist, axis=-1) + nearest_hot = tf.one_hot(nearest_idx, block_v_size) + return nearest_hot, neg_q_entropy + + +def embedding_lookup(x, + means, + num_blocks, + block_v_size, + bottleneck_kind="dvq", + random_top_k=1, + soft_em=False, + num_samples=1, + do_hard_gumbel_softmax=False, + temperature_warmup_steps=150000, + num_flows=0, + approximate_gs_entropy=False, + sum_over_latents=False): + """Compute nearest neighbors and loss for training the embeddings via DVQ. + + Args: + x: Continuous encodings of shape [batch_size, latent_dim, num_blocks, + block_dim]. + means: Embedding table of shape [num_blocks, block_v_size, block_dim]. + num_blocks: Number of blocks in DVQ. + block_v_size: Number of table entries per block. + bottleneck_kind: Discrete bottleneck type. + random_top_k: Noisy top-k if this is bigger than 1. + soft_em: If True then use soft EM rather than hard EM. + num_samples: Number of samples to use for soft EM. + do_hard_gumbel_softmax: Whether to use hard or soft Gumbel-Softmax samples + for gumbel-softmax-dvq bottleneck. + temperature_warmup_steps: Number of steps it takes to decay temperature to + 0. Used only if bottleneck_kind is gumbel-softmax-dvq. + num_flows: Number of inverse autoregressive flows for gumbel-softmax-dvq + bottleneck. + approximate_gs_entropy: Whether to approximate the Gumbel-Softmax density + as a categorical distribution when calculating the sample entropy. Used + only if bottleneck_kind is gumbel-softmax-dvq. + sum_over_latents: Whether to sum over non-batch dimensions when calculating + negative entropy loss. Used only if soft EM or when bottleneck_kind is + gumbel-softmax-dvq. + + Returns: + x_means_hot: The nearest neighbor in one hot form, with shape + [batch_size * latent_dim, num_blocks, block_v_size]. + x_means: The nearest neighbor itself, with shape [batch_size * latent_dim, + num_blocks, block_dim]. + q_loss: Scalar Tensor representing codebook loss. + e_loss: Scalar Tensor representing commitment loss. 
+ neg_q_entropy: Scalar Tensor representing negative entropy of variational + approximation (0 if it is deterministic). + """ + if bottleneck_kind == "gumbel-softmax-dvq": + x_means_hot, neg_q_entropy = gumbel_softmax_nearest_neighbor_dvq( + x, + means, + block_v_size, + hard=do_hard_gumbel_softmax, + num_samples=num_samples, + temperature_warmup_steps=temperature_warmup_steps, + num_flows=num_flows, + approximate_gs_entropy=approximate_gs_entropy, + sum_over_latents=sum_over_latents) + else: + x_means_hot, neg_q_entropy = nearest_neighbor( + x, + means, + block_v_size, + random_top_k, + soft_em=soft_em, + num_samples=num_samples, + sum_over_latents=sum_over_latents) + x_means_hot_flat = tf.reshape(x_means_hot, [-1, num_blocks, block_v_size]) + x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means) + x_means = tf.transpose(x_means, [1, 0, 2]) + batch_size, latent_dim, num_blocks, block_dim = common_layers.shape_list(x) + x = tf.reshape(x, [batch_size * latent_dim, num_blocks, block_dim]) + + # Currently, we use the mean scaling for the commitment loss, as opposed to + # summing across all non-batch dimensions. + q_loss = tf.reduce_mean(tf.squared_difference(tf.stop_gradient(x), x_means)) + e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means))) + return x_means_hot, x_means, q_loss, e_loss, neg_q_entropy + + +def bit_to_int(x_bit, num_bits, base=2): + """Turn x_bit representing numbers bitwise (lower-endian) to int tensor. + + Args: + x_bit: Tensor containing numbers in a particular base to be converted to + int. + num_bits: Number of bits in the representation. + base: Base of the representation. + + Returns: + Integer representation of this number. + """ + x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits]))) + x_labels = [ + x_l[:, i] * tf.to_int32(base)**tf.to_int32(i) for i in range(num_bits)] + res = sum(x_labels) + return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1])) + + +def int_to_bit(x_int, num_bits, base=2): + """Turn x_int representing numbers into a bitwise (lower-endian) tensor. + + Args: + x_int: Tensor containing integer to be converted into base notation. + num_bits: Number of bits in the representation. + base: Base of the representation. + + Returns: + Corresponding number expressed in base. + """ + x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1)) + x_labels = [tf.floormod( + tf.floordiv(tf.to_int32(x_l), tf.to_int32(base)**i), tf.to_int32(base)) + for i in range(num_bits)] + res = tf.concat(x_labels, axis=-1) + return tf.to_float(res) + + +def int_to_bit_embed(x_int, num_bits, embedding_size, base=2): + """Turn x_int into a bitwise (lower-endian) tensor and embed densly.""" + shape = common_layers.shape_list(x_int) + inputs = int_to_bit(x_int, num_bits, base=base) + inputs = tf.reshape(inputs, shape[:-1] + [shape[-1] * 8]) + inputs = 2.0 * tf.to_float(inputs) - 1.0 # Move from 0/1 to -1/1. + return tf.layers.dense(inputs, embedding_size, name="int_to_bit_embed") + + +def embed(x, + hidden_size, + z_size, + filter_size, + bottleneck_kind="dvq", + soft_em=False, + num_blocks=2, + num_residuals=1, + block_v_size=None, + means=None, + name=None): + """Embedding function that takes discrete latent and returns embedding. + + Args: + x: Input to the discretization bottleneck. + hidden_size: Dimension of the latent state. + z_size: Number of bits, where discrete codes range from 1 to 2**z_size. + filter_size: Dimension to project embedding by. Used only if bottleneck_kind + is semhash. 
+ bottleneck_kind: Kind of discretization bottleneck to use; one of dvq, + semhash, gumbel-softmax (Default: dvq). + soft_em: If True then it uses a multi-sample version of EM (Default: False). + num_blocks: Number of blocks in DVQ (Default: 2). + num_residuals: Number of residuals (Default: 1). + block_v_size: Number of embedding entries per block (Default: None). + means: The embedding table for dvq (Default: None). + name: Name for the bottleneck scope. + + Returns: + Continuous embedding to be passed on to the decoder. + + Raises: + ValueError: For unknown or missing arguments. + """ + with tf.variable_scope(name, default_name="embed", reuse=tf.AUTO_REUSE): + if bottleneck_kind == "semhash": + c = int_to_bit(x, z_size) + h1a = tf.layers.dense(c, filter_size, name="vch1a") + h1b = tf.layers.dense(1.0 - c, filter_size, name="vch1b") + h1 = h1a + h1b + h1 = tf.layers.dense(h1, hidden_size, name="vch_final_linear") + + elif bottleneck_kind == "gumbel-softmax": + hot = tf.one_hot(x, 2**z_size) + h1 = tf.layers.dense(hot, hidden_size, name="dae_dense") + elif bottleneck_kind in ["dvq", "gumbel-softmax-dvq"]: + if block_v_size is None: + raise ValueError("Bottleneck kind is dvq but block_v_size is None.") + + if soft_em: + assert num_residuals == 1 + x_hot_flat = tf.reshape(x, shape=[-1, num_blocks, block_v_size]) + h1 = tf.matmul(tf.transpose(x_hot_flat, perm=[1, 0, 2]), means[0]) + h1 = tf.transpose(h1, perm=[1, 0, 2]) + new_shape = common_layers.shape_list(x) + new_shape[-1] = hidden_size + h1 = tf.reshape(h1, shape=new_shape) + else: + shape_x = common_layers.shape_list(x) + x_flat = tf.reshape(x, [-1, 1]) + c = int_to_bit(x_flat, num_bits=z_size, base=2) + shape = common_layers.shape_list(c) + new_shape = shape + new_shape[-1] = num_residuals + new_shape.append(num_blocks) + new_shape.append(int(z_size / (num_residuals * num_blocks))) + c = tf.to_int32(tf.reshape(c, shape=new_shape)) + h1_shape = shape_x + h1_shape.append(hidden_size) + h1 = tf.zeros(dtype=tf.float32, shape=h1_shape) + for i in range(num_residuals): + c_residual = bit_to_int( + c[:, :, i, :, :], + num_bits=int(z_size / (num_residuals * num_blocks)), + base=2) + c_hot = tf.one_hot(c_residual, depth=block_v_size, axis=-1) + c_hot_flat = tf.reshape(c_hot, shape=[-1, num_blocks, block_v_size]) + h1_residual = tf.matmul( + tf.transpose(c_hot_flat, perm=[1, 0, 2]), means[i]) + h1_residual = tf.transpose(h1_residual, perm=[1, 0, 2]) + h1_residual = tf.reshape(h1_residual, shape=h1_shape) + h1 += h1_residual + elif bottleneck_kind == "rounding": + h1 = x + else: + raise ValueError("Unknown bottleneck kind.") + + return h1 + + +def vae(x, z_size, name=None): + """Simple variational autoencoder without discretization. + + Args: + x: Input to the discretization bottleneck. + z_size: Number of bits, where discrete codes range from 1 to 2**z_size. + name: Name for the bottleneck scope. + + Returns: + Embedding function, latent, loss, mu and log_simga. 
+ """ + with tf.variable_scope(name, default_name="vae"): + mu = tf.layers.dense(x, z_size, name="mu") + log_sigma = tf.layers.dense(x, z_size, name="log_sigma") + shape = common_layers.shape_list(x) + epsilon = tf.random_normal([shape[0], shape[1], 1, z_size]) + z = mu + tf.exp(log_sigma / 2) * epsilon + kl = 0.5 * tf.reduce_mean( + tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1) + free_bits = z_size // 4 + kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0)) + return z, kl_loss, mu, log_sigma + + +def top_k_softmax(x, k): + """Calculate softmax(x), select top-k and rescale to sum to 1. + + Args: + x: Input to softmax over. + k: Number of top-k to select. + + Returns: + softmax(x) and maximum item. + """ + x = tf.nn.softmax(x) + top_x, _ = tf.nn.top_k(x, k=k + 1) + min_top = tf.reduce_min(top_x, axis=-1, keep_dims=True) + x = tf.nn.relu((x - min_top) + 1e-12) + x /= tf.reduce_sum(x, axis=-1, keep_dims=True) + return x, tf.reduce_max(top_x, axis=-1) + + +def gumbel_sample(shape): + """Sample from the Gumbel distribution, protect from overflows. + + Args: + shape: Shape of Gumbel samples. + + Returns: + Noise drawn from Gumbel distribution. + """ + uniform_samples = tf.random_uniform(shape, minval=0.00001, maxval=0.99998) + return -tf.log(-tf.log(uniform_samples)) + + +def gumbel_softmax(x, + z_size, + mode, + softmax_k=0, + temperature_warmup_steps=150000, + summary=True, + name=None): + """Gumbel softmax discretization bottleneck. + + Args: + x: Input to the discretization bottleneck. + z_size: Number of bits, where discrete codes range from 1 to 2**z_size. + mode: tf.estimator.ModeKeys. + softmax_k: If > 0 then do top-k softmax. + temperature_warmup_steps: Number of steps it takes to decay temperature to + 0. + summary: Whether to write summaries. + name: Name for the bottleneck scope. + + Returns: + Embedding function, discrete code, and loss. + """ + with tf.variable_scope(name, default_name="gumbel_softmax"): + m = tf.layers.dense(x, 2**z_size, name="mask") + if softmax_k > 0: + m, kl = top_k_softmax(m, softmax_k) + return m, m, 1.0 - tf.reduce_mean(kl) + logsm = tf.nn.log_softmax(m) + + # Gumbel-softmax sample. + gumbel_samples = gumbel_sample(common_layers.shape_list(m)) + steps = temperature_warmup_steps + gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5 + temperature = 1.2 - common_layers.inverse_lin_decay(steps) + + # 10% of the time keep reasonably high temperature to keep learning. + temperature = tf.cond( + tf.less(tf.random_uniform([]), 0.9), lambda: temperature, + lambda: tf.random_uniform([], minval=0.5, maxval=1.0)) + s = tf.nn.softmax((logsm + gumbel_samples) / temperature) + m = tf.nn.softmax(m) + kl = -tf.reduce_max(logsm, axis=-1) + + if summary: + tf.summary.histogram("max-log", tf.reshape(kl, [-1])) + + # Calculate the argmax and construct hot vectors. + maxvec = tf.reshape(tf.argmax(m, axis=-1), [-1]) + maxvhot = tf.stop_gradient(tf.one_hot(maxvec, 2**z_size)) + + # Add losses that prevent too few being used. + distrib = tf.reshape(logsm, [-1, 2**z_size]) * maxvhot + d_mean = tf.reduce_mean(distrib, axis=[0], keep_dims=True) + d_variance = tf.reduce_mean( + tf.squared_difference(distrib, d_mean), axis=[0]) + d_dev = -tf.reduce_mean(d_variance) + ret = s + + if mode != tf_estimator.ModeKeys.TRAIN: + ret = tf.reshape(maxvhot, common_layers.shape_list(s)) # Just hot @eval. 
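+    # Return the softmax over all 2**z_size codes, the sampled code (a hard
+    # one-hot vector outside of training), and a regularization loss combining
+    # the code-usage diversity term d_dev (scaled by 5.0) with the mean of the
+    # KL term above (scaled by 0.002).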
+ return m, ret, d_dev * 5.0 + tf.reduce_mean(kl) * 0.002 + + +def discrete_bottleneck(inputs, + hidden_size, + z_size, + filter_size, + mode=None, + bottleneck_kind="dvq", + num_blocks=2, + num_residuals=1, + reshape_method="slice", + projection_tensors=None, + beta=0.25, + ema=True, + means=None, + ema_count=None, + ema_means=None, + epsilon=1e-5, + decay=0.999, + random_top_k=1, + soft_em=False, + num_samples=1, + softmax_k=0, + temperature_warmup_steps=150000, + do_hard_gumbel_softmax=False, + num_flows=0, + approximate_gs_entropy=False, + sum_over_latents=False, + discrete_mix=0.5, + noise_dev=1., + startup_steps=50000, + summary=True, + name=None, + cond=True): + """Discretization bottleneck. + + Args: + inputs: Input to the bottleneck, a Tensor of shape [..., channels]. + hidden_size: Dimension of the dense output. + z_size: Number of bits, where discrete codes range from 1 to 2**z_size. + filter_size: Filter size in the embedding function. + mode: tf.estimator.ModeKeys. + bottleneck_kind: Kind of discretization bottleneck. One of dense, dvq + (decomposed vector quantization), gumbel-softmax, gumbel-softmax-dvq, + semhash, or vae. + num_blocks: Number of blocks. Used only if bottleneck_kind is DVQ. + num_residuals: Number of residual units used to compute nearest + neighbors. Used only if bottleneck_kind is DVQ. + reshape_method: Method to reshape. Used only if bottleneck_kind is DVQ. + projection_tensors: If the reshape method is project, then these are the + tensors used to project. + beta: Scale factor for codebook loss and EMA. Used only if bottleneck_kind + is DVQ. + ema: Whether to update embeddings using exponential moving averages. Used + only if bottleneck_kind is DVQ. + means: The embedding table. Used only if ema is True. + ema_count: Table of counts for each embedding corresponding to how many + examples in a batch it was the closest to. Used only if ema is True. + ema_means: Exponentially averaged version of the embeddings. Used only if + ema is True. + epsilon: Small value to avoid dividing by zero in EMA update. Used only if + ema is True. + decay: Decay factor for the exponential moving average. Used only if ema is + True. + random_top_k: Noisy top-k. Used only if bottleneck_kind is DVQ. + soft_em: Whether to use soft EM or hard EM. Used only if bottleneck_kind is + DVQ. + num_samples: Number of samples for soft EM. Used only if soft_em is True. + softmax_k: If > 0 then do top-k softmax. Used only if bottleneck_kind + is gumbel-softmax. + temperature_warmup_steps: Number of steps it takes to decay temperature to + 0. Used only if bottleneck_kind is gumbel-softmax or gumbel-softmax-dvq. + do_hard_gumbel_softmax: Whether to use hard or soft Gumbel-Softmax + samples. Used only if bottleneck_kind is gumbel-softmax-dvq. + num_flows: Number of inverse autoregresive flows. Used only if + bottleneck_kind is gumbel-softmax-dvq. + approximate_gs_entropy: Whether to approximate the Gumbel-Softmax density + as a categorical distribution when calculating the sample entropy. Used + only if bottleneck_kind is gumbel-softmax-dvq. + sum_over_latents: Whether to sum over all non-batch dimensions before + taking mean of entropy loss term. Used only if bottleneck kind is DVQ + or gumbel-softmax-dvq. + discrete_mix: Factor for mixing discrete and non-discrete input. Used only + if bottleneck_kind is semhash. + noise_dev: Noise stddev. Used only if bottleneck_kind is semhash. + startup_steps: Number of steps after which latent predictor is trained. 
Used + only if bottleneck_kind is semhash. + summary: Whether to write summaries. + name: Name for the bottleneck scope. + cond: A tf.bool condition on whether to update the codebook. + + Returns: + outputs_dense: Tensor of shape [..., output_dim]. The output dimension is + hidden_size if bottleneck_kind is gumbel-softmax, DVQ; filter_size if + bottleneck_kind is dense, semhash, vae. If bottleneck_kind is DVQ, + outputs_dense represents the codebook (means) indexed by outputs_discrete. + outputs_discrete: Tensor of shape [...]. Discrete codes, each an index in + [0, 2**z_size). It uses the hot representation if soft_em is True. + extra_loss: Scalar Tensor. Sum of codebook and commitment losses if + bottleneck_kind is DVQ; else zero. + embed_fn: Function embed with arguments partially filled in. + neg_q_entropy: Scalar Tensor representing negative entropy of variational + approximation (0 if it is deterministic). + + Raises: + ValueError: If projection_tensors is None for reshape_method project, or + ema_count or ema_means is None if ema is True, or unknown args. + """ + if bottleneck_kind in ["dvq", "gumbel-softmax-dvq"]: + assert means is not None + if hidden_size % num_blocks != 0: + raise ValueError("num_blocks does not divide hidden size") + + if z_size % num_residuals != 0: + raise ValueError("num_residuals does not divide embedding table size") + z_size_per_residual = int(z_size / num_residuals) + + if z_size_per_residual % num_blocks != 0: + raise ValueError("num_blocks does not divide embedding table size") + block_v_size = 2**int(z_size_per_residual / num_blocks) + + if ema: + if ema_count is None: + raise ValueError("ema_count is None but ema is True") + if ema_means is None: + raise ValueError("ema_means is None but ema is True") + else: + block_v_size = None + + with tf.variable_scope( + name, default_name="discrete_bottleneck", reuse=tf.AUTO_REUSE): + embed_fn = partial( + embed, + hidden_size=hidden_size, + z_size=z_size, + filter_size=filter_size, + bottleneck_kind=bottleneck_kind, + soft_em=soft_em, + num_blocks=num_blocks, + num_residuals=num_residuals, + block_v_size=block_v_size, + means=means, + name=name) + + if bottleneck_kind == "dense": + # Note discrete output is continuous here. 
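+      # With this kind the bottleneck is a plain linear projection, so both
+      # extra_loss and neg_q_entropy below are constant zeros.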
+ outputs_discrete = tf.layers.dense(inputs, z_size, name="vcc") + outputs_dense = tf.layers.dense( + outputs_discrete, filter_size, name="vch1") + extra_loss = tf.constant(0.0) + neg_q_entropy = tf.constant(0.0) + elif bottleneck_kind in ["dvq", "gumbel-softmax-dvq"]: + inputs_3d = inputs + if len(inputs.shape) == 4: + inputs_3d = tf.squeeze(inputs, axis=2) + if reshape_method == "slice": + x_reshaped = slice_hidden( + inputs_3d, hidden_size=hidden_size, num_blocks=num_blocks) + elif reshape_method == "project": + if projection_tensors is None: + raise ValueError( + "Projection tensors is None for reshape_method project") + x_reshaped = project_hidden( + inputs_3d, + projection_tensors=projection_tensors, + hidden_size=hidden_size, + num_blocks=num_blocks) + else: + raise ValueError("Unknown reshape_method") + + x_res = tf.reshape(x_reshaped, + [-1] + common_layers.shape_list(x_reshaped)[2:]) + x_means_hot = [] + x_means = 0 + extra_loss = 0 + for i in range(num_residuals): + x_means_hot_res, x_means_res, q_loss_res, e_loss_res, neg_q_entropy = ( + embedding_lookup( + x_reshaped, + means=means[i], + num_blocks=num_blocks, + block_v_size=block_v_size, + bottleneck_kind=bottleneck_kind, + random_top_k=random_top_k, + soft_em=soft_em, + num_samples=num_samples, + temperature_warmup_steps=temperature_warmup_steps, + do_hard_gumbel_softmax=do_hard_gumbel_softmax, + num_flows=num_flows, + approximate_gs_entropy=approximate_gs_entropy, + sum_over_latents=sum_over_latents)) + # Update the EMA variables. + if ema: + tf.logging.info("Using EMA with beta = {}".format(beta)) + updated_ema_count_res = moving_averages.assign_moving_average( + ema_count[i], + tf.where(cond, + tf.reduce_sum( + tf.reshape(x_means_hot_res, + shape=[-1, num_blocks, block_v_size]), + axis=0), ema_count[i]), + decay, + zero_debias=False) + + dw = tf.matmul( + tf.transpose(x_means_hot_res, perm=[1, 2, 0]), + tf.transpose(x_res, perm=[1, 0, 2])) + + updated_ema_means_res = moving_averages.assign_moving_average( + ema_means[i], tf.where(cond, dw, ema_means[i]), + decay, zero_debias=False) + n = tf.reduce_sum(updated_ema_count_res, axis=-1, keep_dims=True) + updated_ema_count_res = ( + (updated_ema_count_res + epsilon) / (n + 2**z_size * epsilon) * n) + updated_ema_means_res = updated_ema_means_res / tf.expand_dims( + updated_ema_count_res, axis=-1) + + with tf.control_dependencies([e_loss_res]): + update_means_res = tf.assign(means[i], + tf.where(cond, + updated_ema_means_res, + means[i])) + with tf.control_dependencies([update_means_res]): + extra_loss += beta * e_loss_res + else: + extra_loss += q_loss_res + beta * e_loss_res + + # Update the residuals. + x_res -= x_means_res + x_means += x_means_res + x_means_hot.append(x_means_hot_res) + + # Get the discrete latent representation. + x_means_hot = tf.stack(x_means_hot, axis=1) + x_means_idx = tf.argmax(x_means_hot, axis=-1) + + # Get the binary representation. + x_means_bits = int_to_bit( + x_means_idx, + num_bits=int(z_size / (num_residuals * num_blocks)), + base=2) + shape = common_layers.shape_list(x_means_bits) + new_shape = shape[:-2] + new_shape[-1] = z_size + x_means_bits = tf.reshape(x_means_bits, shape=new_shape) + outputs_discrete = bit_to_int( + tf.to_int32(x_means_bits), num_bits=z_size, base=2) + + # Adjust shape of discrete outputs. + inputs_shape = common_layers.shape_list(inputs) + outputs_discrete = tf.reshape(outputs_discrete, inputs_shape[:-1]) + + # If we're using soft EM then set discretes to the hot representation. 
+ if soft_em: + outputs_discrete = x_means_hot + outputs_discrete = tf.reshape(outputs_discrete, + inputs_shape[:-1] + [block_v_size]) + + # Reshape assuming hidden_size == inputs_shape[:-1]. + x_means = tf.reshape(x_means, inputs_shape) + outputs_dense = inputs + tf.stop_gradient(x_means - inputs) + elif bottleneck_kind == "gumbel-softmax": + _, outputs_hot, extra_loss = gumbel_softmax( + inputs, + z_size=z_size, + mode=mode, + softmax_k=softmax_k, + temperature_warmup_steps=temperature_warmup_steps, + summary=summary, + name=name) + outputs_discrete = tf.argmax(outputs_hot, axis=-1) + outputs_dense = tf.layers.dense( + outputs_hot, hidden_size, name="dae_dense") + neg_q_entropy = tf.constant(0.0) + elif bottleneck_kind == "semhash": + outputs_discrete = tf.layers.dense(inputs, z_size, name="vcc") + y_clean = common_layers.saturating_sigmoid(outputs_discrete) + if summary: + tf.summary.histogram("y_clean", tf.reshape(y_clean, [-1])) + if noise_dev > 0 and mode == tf_estimator.ModeKeys.TRAIN: + noise = tf.truncated_normal( + common_layers.shape_list(outputs_discrete), + mean=0.0, + stddev=noise_dev) + y = common_layers.saturating_sigmoid(outputs_discrete + noise) + else: + y = y_clean + d = tf.to_float(tf.less(0.5, y)) + y_discrete = tf.stop_gradient(d) + y - tf.stop_gradient(y) + pd = common_layers.inverse_exp_decay(startup_steps * 2) + pd *= discrete_mix + pd = pd if mode == tf_estimator.ModeKeys.TRAIN else 1.0 + c = tf.where( + tf.less(tf.random_uniform([common_layers.shape_list(y)[0]]), pd), + y_discrete, y) + outputs_dense_a = tf.layers.dense(c, filter_size, name="vch1a") + outputs_dense_b = tf.layers.dense(1.0 - c, filter_size, name="vch1b") + outputs_dense = outputs_dense_a + outputs_dense_b + outputs_dense = tf.layers.dense(outputs_dense, hidden_size, + name="vch_final_linear") + + dx = tf.to_int32(tf.stop_gradient(d)) + outputs_discrete = bit_to_int(dx, z_size) + extra_loss = tf.constant(0.0) + neg_q_entropy = tf.constant(0.0) + elif bottleneck_kind == "vae": + outputs_discrete, extra_loss, _, _ = vae(inputs, z_size, name="vae") + outputs_dense = tf.layers.dense( + outputs_discrete, filter_size, name="vch1") + neg_q_entropy = tf.constant(0.0) + else: + raise ValueError("Unknown discretization method.") + + return outputs_dense, outputs_discrete, extra_loss, embed_fn, neg_q_entropy + + +def predict_bits_with_lstm(prediction_source, state_size, total_num_bits, + target_bits=None, extra_inputs=None, + bits_at_once=8, temperature=1.0, dropout=0.1): + """Predict a sequence of bits (a latent) with LSTM, both training and infer. + + Given a tensor on which the predictions are based (prediction_source), we use + a single-layer LSTM with state of size state_size to predict total_num_bits, + which we predict in groups of size bits_at_once. During training, we use + target_bits as input to the LSTM (teacher forcing) and return the target_bits + together with the prediction loss. During inference, we sample with the given + temperature and return the predicted sequence and loss 0. + + Args: + prediction_source: a Tensor of shape [batch_size, ...] used to create + the initial state and the first input to the LSTM. + state_size: python integer, the size of the LSTM state. + total_num_bits: python integer, how many bits in total to predict. + target_bits: a tensor of shape [batch_size, total_num_bits] used during + training as the target to predict; each element should be -1 or 1. 
+    extra_inputs: a Tensor [batch_size, total_num_bits // bits_at_once, d]
+      of additional inputs, passed as additional LSTM inputs.
+    bits_at_once: python integer, how many bits to predict at once.
+    temperature: python float, temperature used for sampling during inference.
+    dropout: float, the amount of dropout to apply during training (0.1 default).
+
+  Returns:
+    a pair (bits, loss) with the predicted bit sequence, which is a Tensor of
+    shape [batch_size, total_num_bits] with elements either -1 or 1, and a loss
+    used to train the predictions against the provided target_bits.
+  """
+
+  with tf.variable_scope("predict_bits_with_lstm"):
+    # Layers and cell state creation.
+    lstm_cell = tf.nn.rnn_cell.LSTMCell(state_size)
+    discrete_predict = tf.layers.Dense(2**bits_at_once, name="discrete_predict")
+    discrete_embed = tf.layers.Dense(state_size, name="discrete_embed")
+    batch_size = common_layers.shape_list(prediction_source)[0]
+    layer_pred = tf.layers.flatten(prediction_source)
+    first_lstm_input = tf.layers.dense(layer_pred, state_size, name="istate")
+    c_state = tf.layers.dense(layer_pred, state_size, name="cstate")
+    m_state = tf.layers.dense(layer_pred, state_size, name="mstate")
+    state = (c_state, m_state)
+
+    # Prediction mode if no targets are given.
+    if target_bits is None:
+      outputs = []
+      lstm_input = first_lstm_input
+      for i in range(total_num_bits // bits_at_once):
+        if extra_inputs is not None:
+          lstm_input = tf.concat([lstm_input, extra_inputs[:, i, :]], axis=1)
+        output, state = lstm_cell(lstm_input, state)
+        discrete_logits = discrete_predict(output)
+        discrete_samples = common_layers.sample_with_temperature(
+            discrete_logits, temperature)
+        outputs.append(tf.expand_dims(discrete_samples, axis=1))
+        # One-hot depth matches the 2**bits_at_once inputs of discrete_embed.
+        lstm_input = discrete_embed(
+            tf.one_hot(discrete_samples, 2**bits_at_once))
+      outputs = tf.concat(outputs, axis=1)
+      outputs = int_to_bit(outputs, bits_at_once)
+      outputs = tf.reshape(outputs, [batch_size, total_num_bits])
+      return 2 * outputs - 1, 0.0
+
+    # Training mode, calculating loss.
+    assert total_num_bits % bits_at_once == 0
+    target_bits = tf.reshape(tf.maximum(tf.stop_gradient(target_bits), 0), [
+        batch_size, total_num_bits // bits_at_once, bits_at_once])
+    target_ints = bit_to_int(target_bits, bits_at_once)
+    tf.summary.histogram("target_integers", tf.reshape(target_ints, [-1]))
+    target_hot = tf.one_hot(target_ints, 2**bits_at_once, axis=-1)
+    target_embedded = discrete_embed(target_hot)
+    target_embedded = tf.nn.dropout(target_embedded, 1.0 - dropout)
+    teacher_input = tf.concat(
+        [tf.expand_dims(first_lstm_input, axis=1), target_embedded], axis=1)
+    outputs = []
+    for i in range(total_num_bits // bits_at_once):
+      lstm_input = teacher_input[:, i, :]
+      if extra_inputs is not None:
+        lstm_input = tf.concat([lstm_input, extra_inputs[:, i, :]], axis=1)
+      output, state = lstm_cell(lstm_input, state)
+      outputs.append(tf.expand_dims(output, axis=1))
+    outputs = tf.concat(outputs, axis=1)
+    outputs = tf.nn.dropout(outputs, 1.0 - dropout)
+    d_int_pred = discrete_predict(outputs)
+    pred_loss = tf.losses.sparse_softmax_cross_entropy(
+        logits=d_int_pred, labels=target_ints)
+    pred_loss = tf.reduce_mean(pred_loss)
+    return d_int_pred, pred_loss
+
+
+# New API for discretization bottlenecks:
+# * Each method is separate and provides 2 functions:
+#   * The [method]_bottleneck function returns discretized state.
+#   * The [method]_unbottleneck function moves from discretized state to dense.
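To make the pairing described in the comment above concrete, here is a minimal, hypothetical sketch of the "vq" pair defined below (`vq_discrete_bottleneck` and `vq_discrete_unbottleneck`). The tensor shapes, `bottleneck_bits` value, and session setup are illustrative assumptions, not part of this diff.

```python
# Minimal sketch (assumptions, not part of the diff): round-trip a hidden state
# through the "vq" bottleneck and back to a dense tensor.
import tensorflow.compat.v1 as tf
from tensor2tensor.layers import discretization

tf.disable_eager_execution()  # needed for Session usage when running under TF2
x = tf.random_normal([8, 16, 64])  # [batch, length, hidden_size]

# Discretize into one-hot codes over a 2**4 = 16 entry codebook, plus a VQ loss.
codes, vq_loss = discretization.vq_discrete_bottleneck(x, bottleneck_bits=4)

# Map the one-hot codes back to a dense state of the original width.
x_hat = discretization.vq_discrete_unbottleneck(codes, hidden_size=64)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  codes_np, loss_np, x_hat_np = sess.run([codes, vq_loss, x_hat])
  print(codes_np.shape, x_hat_np.shape, loss_np)  # (8, 16, 16) (8, 16, 64) ...
```

The `parametrized_bottleneck` / `parametrized_unbottleneck` helpers further down dispatch to these same functions based on `hparams.bottleneck_kind`.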
+ + +def get_vq_codebook(codebook_size, hidden_size): + """Get lookup table for VQ bottleneck.""" + with tf.variable_scope("vq", reuse=tf.AUTO_REUSE): + means = tf.get_variable( + name="means", + shape=[codebook_size, hidden_size], + initializer=tf.uniform_unit_scaling_initializer()) + + ema_count = tf.get_variable( + name="ema_count", + shape=[codebook_size], + initializer=tf.constant_initializer(0), + trainable=False) + + with tf.colocate_with(means): + ema_means = tf.get_variable( + name="ema_means", + initializer=tf.cond( + tf.is_variable_initialized(means), + means.read_value, + lambda: means.initial_value), + trainable=False) + + return means, ema_means, ema_count + + +def vq_nearest_neighbor(x, means, + soft_em=False, num_samples=10, temperature=None): + """Find the nearest element in means to elements in x.""" + bottleneck_size = common_layers.shape_list(means)[0] + x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True) + means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True) + scalar_prod = tf.matmul(x, means, transpose_b=True) + dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod + if soft_em: + x_means_idx = tf.multinomial(-dist, num_samples=num_samples) + x_means_hot = tf.one_hot( + x_means_idx, depth=common_layers.shape_list(means)[0]) + x_means_hot = tf.reduce_mean(x_means_hot, axis=1) + else: + if temperature is None: + x_means_idx = tf.argmax(-dist, axis=-1) + else: + x_means_idx = tf.multinomial(- dist / temperature, 1) + x_means_idx = tf.squeeze(x_means_idx, axis=-1) + if (common_layers.should_generate_summaries() and + not common_layers.is_xla_compiled()): + tf.summary.histogram("means_idx", tf.reshape(x_means_idx, [-1])) + x_means_hot = tf.one_hot(x_means_idx, bottleneck_size) + x_means_hot_flat = tf.reshape(x_means_hot, [-1, bottleneck_size]) + x_means = tf.matmul(x_means_hot_flat, means) + e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means))) + return x_means_hot, e_loss, dist + + +def vq_discrete_bottleneck(x, + bottleneck_bits, + beta=0.25, + decay=0.999, + epsilon=1e-5, + soft_em=False, + num_samples=10): + """Simple vector quantized discrete bottleneck.""" + bottleneck_size = 2**bottleneck_bits + x_means_hot, e_loss, _ = vq_body( + x, + bottleneck_size, + beta=beta, + decay=decay, + epsilon=epsilon, + soft_em=soft_em, + num_samples=num_samples) + return x_means_hot, e_loss + + +def vq_body(x, + codebook_size, + beta=0.25, + decay=0.999, + epsilon=1e-5, + soft_em=False, + num_samples=10, + temperature=None, + do_update=True): + """Discretize each x into one of codebook_size codes.""" + x_shape = common_layers.shape_list(x) + hidden_size = x_shape[-1] + means, ema_means, ema_count = get_vq_codebook(codebook_size, hidden_size) + x = tf.reshape(x, [-1, hidden_size]) + x_means_hot, e_loss, distances = vq_nearest_neighbor( + x, means, soft_em=soft_em, num_samples=num_samples, + temperature=temperature) + + def loss_with_update(): + """Update the ema variables and return loss triggering the update.""" + updated_ema_count = moving_averages.assign_moving_average( + ema_count, + tf.reduce_sum(tf.reshape(x_means_hot, shape=[-1, codebook_size]), + axis=0), + decay, + zero_debias=False) + + dw = tf.matmul(x_means_hot, x, transpose_a=True) + updated_ema_means = tf.identity( + moving_averages.assign_moving_average( + ema_means, dw, decay, zero_debias=False)) + n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True) + updated_ema_count = ( + (updated_ema_count + epsilon) / (n + codebook_size * epsilon) * n) + 
updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1) + with tf.control_dependencies([e_loss]): + update_means = means.assign(updated_ema_means) + with tf.control_dependencies([update_means]): + return beta * e_loss + + # Loss, also do update if requested. + if do_update: + loss = loss_with_update() + else: + loss = tf.cond(do_update, loss_with_update, lambda: beta * e_loss) + + d = tf.reshape(x_means_hot, x_shape[:-1] + [codebook_size]) + return d, loss, distances + + +def vq_loss(x, + targets, + codebook_size, + beta=0.25, + decay=0.999, + epsilon=1e-5, + soft_em=False, + num_samples=10, + temperature=None, + do_update=True): + """Compute the loss of large vocab tensors using a VQAE codebook. + + Args: + x: Tensor of inputs to be quantized to nearest code + targets: Tensor of target indices to target codes + codebook_size: Size of quantization codebook + beta: scalar float for moving averages + decay: scalar float for moving averages + epsilon: scalar float for moving averages + soft_em: boolean, whether to apply a soft sampling procedure + num_samples: if soft_em, number of samples to take + temperature: temperature if we want to sample nearest neighbors or None + do_update: whether to update the means; True by default, can be a Tensor + + Returns: + discrete_x: one-hot Tensor indicating which codebook element is closest to x + x_means: Tensor, on the forward pass: closest codebook element to x, on the + backwards pass: soft convex-combination of codebook elements by proximity + to x + target_means: the codebook elements corresponding to the targets + code_loss: loss driving x closer to its nearest codebook element + targets_loss: cross-entropy loss driving x closer to code corresponding to + target + """ + x_shape = common_layers.shape_list(x) + target_shape = common_layers.shape_list(targets) + hidden_size = x_shape[-1] + means, _, _ = get_vq_codebook(codebook_size, hidden_size) + x = tf.reshape(x, [-1, hidden_size]) + targets = tf.reshape(targets, [-1]) + one_hot_targets = tf.one_hot(targets, codebook_size) + target_means = tf.matmul(one_hot_targets, means) + + discrete_x, code_loss, distances = vq_body( + x, + codebook_size, + beta=beta, + decay=decay, + epsilon=epsilon, + soft_em=soft_em, + num_samples=num_samples, + temperature=temperature, + do_update=do_update) + + logits = -distances + targets_loss = tf.losses.sparse_softmax_cross_entropy( + logits=logits, labels=targets) + targets_loss = tf.reduce_mean(targets_loss) + + x_means = tf.matmul(discrete_x, means) + x_means = x + tf.stop_gradient(x_means - x) + + discrete_x = tf.reshape(discrete_x, x_shape[:-1] + [codebook_size]) + target_means = tf.reshape(target_means, target_shape + [hidden_size]) + return discrete_x, x_means, target_means, code_loss, targets_loss + + +def vq_discrete_unbottleneck(x, hidden_size): + """Simple undiscretization from vector quantized representation.""" + x_shape = common_layers.shape_list(x) + x = tf.to_float(x) + bottleneck_size = common_layers.shape_list(x)[-1] + means, _, _ = get_vq_codebook(bottleneck_size, hidden_size) + result = tf.matmul(tf.reshape(x, [-1, x_shape[-1]]), means) + return tf.reshape(result, x_shape[:-1] + [hidden_size]) + + +def gumbel_softmax_nearest_neighbor_dvq(x, + means, + block_v_size, + hard=False, + temperature_init=1.2, + num_samples=1, + temperature_warmup_steps=150000, + summary=True, + num_flows=0, + approximate_gs_entropy=False, + sum_over_latents=False): + """Sample from Gumbel-Softmax and compute neighbors and losses. 
+ + Args: + x: A `float`-like `Tensor` of shape [batch_size, latent_dim, num_blocks, + block_dim] containing the latent vectors to be compared to the codebook. + means: Embedding table of shape [num_blocks, block_v_size, block_dim]. + block_v_size: Number of discrete codes per block. + hard: Determines whether we take hard or soft Gumbel-Softmax samples + (Default: False). + temperature_init: Initial temperature used for Gumbel-Softmax samples, + after it which it decays to 0 (Default: 1.2). + num_samples: Number of samples drawn for each latent (Default: 1). + temperature_warmup_steps: Number of steps it takes to decay temperature to 0 + (Default: 150000). + summary: When `True`, we save histogram summaries of the KL term (Default: + True). + num_flows: Number of inverse autoregressive flows with Gumbel-Softmax + samples. + approximate_gs_entropy: When `True`, we approximate Gumbel-Softmax + density as categorical when calculating sample entropy (Default: False). + sum_over_latents: Whether to sum over non-batch dimensions when calculating + negative entropy loss. + + Returns: + x_means_assignments: A `float`-like `Tensor` containing the codebook + assignments, averaged over samples, with shape [batch_size * latent_dim, + num_blocks, block_v_size]. + neg_q_entropy: The negative entropy of the variational distribution, + averaged over samples. + """ + batch_size, latent_dim, num_blocks, block_dim = common_layers.shape_list(x) + + # Combine latent_dim and batch_size for computing distances. + x = tf.reshape(x, [-1, num_blocks, block_dim]) + + # Compute distances using (x - means)**2 = x**2 + means**2 - 2*x*means. + x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True) + means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True) + means_norm_sq = tf.transpose(means_norm_sq, perm=[2, 0, 1]) + scalar_prod = tf.matmul( + tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1])) + scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2]) + dist = x_norm_sq + means_norm_sq - 2 * scalar_prod + + # IAF requires latents to have their own dimension, so reshape dist from + # [batch_size * latent_dim, num_blocks, block_v_size] to + # [batch_size * num_blocks, latent_dim, block_v_size]. + dist = tf.reshape(dist, [batch_size, latent_dim, num_blocks, -1]) + dist = tf.reshape( + tf.transpose(dist, perm=[0, 2, 1, 3]), [-1, latent_dim, block_v_size]) + log_class_probs = tf.nn.log_softmax(-dist) + + sample_shape = [num_samples] + common_layers.shape_list(dist) + gumbel_samples = gumbel_sample(sample_shape) + + # Temperature decays linearly. + temperature = temperature_init - common_layers.inverse_lin_decay( + temperature_warmup_steps) + + # 10% of the time keep reasonably high temperature to keep learning. + temperature = tf.cond( + tf.less(tf.random_uniform([]), 0.9), lambda: temperature, + lambda: tf.random_uniform([], minval=0.5, maxval=1.0)) + + gumbel_softmax_samples = tf.nn.softmax( + (tf.expand_dims(log_class_probs, 0) + gumbel_samples) / temperature) + q_samples = tf.clip_by_value(gumbel_softmax_samples, 1e-6, 1 - 1e-6) + + if approximate_gs_entropy: + q_dist = tfp.distributions.Multinomial(total_count=1.0, logits=-dist) + else: + q_dist = tfp.distributions.RelaxedOneHotCategorical( + temperature, logits=-dist) + + # Take mean over samples to approximate entropy. 
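+  # Averaging log q(sample) over the sample dimension gives a Monte Carlo
+  # estimate of E_q[log q], i.e. the negative entropy of the variational
+  # distribution.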
+ neg_q_entropy = tf.reduce_mean(q_dist.log_prob(q_samples), 0) + if summary: + tf.summary.histogram("neg_q_entropy", tf.reshape(neg_q_entropy, [-1])) + if sum_over_latents: + neg_q_entropy = tf.reshape(neg_q_entropy, + [batch_size, num_blocks, latent_dim]) + neg_q_entropy = tf.reduce_sum(neg_q_entropy, [1, 2]) + neg_q_entropy = tf.reduce_mean(neg_q_entropy) + + if num_flows > 0: + hparams = iaf_hparams(hidden_size=512, filter_size=4096) + q_samples = tf.reshape(q_samples, [-1, latent_dim, block_v_size]) + for flow in range(num_flows): + shifted_samples = tf.pad(q_samples, [[0, 0], [1, 0], [0, 0]])[:, :-1, :] + + # Project samples from [batch_size, latent_size, block_v_size] to + # [batch_size, latent_size, hidden_size]. + shifted_samples = common_layers.dense(shifted_samples, + hparams.hidden_size) + # TODO(vafa): Include masking as a flag. + mask = True + if mask: + attention_type = cia.AttentionType.LOCAL_1D + else: + attention_type = cia.AttentionType.GLOBAL + ffn_output = cia.transformer_decoder_layers( + inputs=shifted_samples, + encoder_output=None, + num_layers=6, + hparams=hparams, + attention_type=attention_type, + name="transformer_" + str(flow)) + + # Project samples back to [batch_size, latent_size, block_v_size]. + ffn_output = common_layers.dense(ffn_output, block_v_size) + log_pi = tf.nn.log_softmax(ffn_output) + + # Flow 1: Adding log_pi to q_samples and dividing by the temperature. + # Note that we drop the last dimension of q_samples for centered-softmax, + # which we can do without recalculating probabilities because the last + # dimension of log_pi and q_samples are deterministic given the others. + # Flow 2: Centered-softmax. + chained_bijectors = tfp.bijectors.Chain([ + tfp.bijectors.SoftmaxCentered(), + tfp.bijectors.Affine( + shift=log_pi[:, :, :-1], + scale_identity_multiplier=1. / temperature) + ]) + q_samples = chained_bijectors.forward(q_samples[:, :, :-1]) + log_det = chained_bijectors.inverse_log_det_jacobian( + q_samples, event_ndims=1) + log_det = tf.reshape(log_det, + [num_samples, batch_size, num_blocks, latent_dim]) + if sum_over_latents: + log_det = tf.reduce_sum(log_det, axis=[2, 3]) + neg_q_entropy += tf.reduce_mean(log_det) + + q_samples = tf.reshape( + q_samples, + [num_samples, batch_size * num_blocks, latent_dim, block_v_size]) + + if hard: + x_means_idx = tf.argmax(q_samples, -1) + + # Take average of one-hot vectors over samples. + x_means_hot = tf.reduce_mean(tf.one_hot(x_means_idx, block_v_size), 0) + x_means_assignments = ( + tf.reduce_mean(q_samples, 0) + + tf.stop_gradient(x_means_hot - tf.reduce_mean(q_samples, 0))) + else: + x_means_assignments = tf.reduce_mean(gumbel_softmax_samples, 0) + + # Reshape assignments to [batch_size * latent_dim, num_blocks, + # block_v_size]. We have to transpose between reshapes to make sure the + # dimensions have the correct interpretation. + x_means_assignments = tf.reshape( + x_means_assignments, [batch_size, num_blocks, latent_dim, block_v_size]) + x_means_assignments = tf.transpose(x_means_assignments, [0, 2, 1, 3]) + x_means_assignments = tf.reshape( + x_means_assignments, [batch_size * latent_dim, num_blocks, block_v_size]) + + return x_means_assignments, neg_q_entropy + + +def gumbel_softmax_discrete_bottleneck(x, + bottleneck_bits, + beta=0.25, + decay=0.999, + epsilon=1e-5, + temperature_warmup_steps=150000, + hard=False, + summary=True): + """VQ-VAE using Gumbel-Softmax. 
+ + Different from `gumbel_softmax()` function as + this function calculates the KL by using the discrete entropy + instead of taking the argmax, and it also uses an exponential moving average + to update the codebook while the `gumbel_softmax()` function includes no + codebook update. + + Args: + x: A `float`-like `Tensor` containing the latent vectors to be compared to + the codebook, whose squared difference is used as the Gumbel-Softmax + logits. + bottleneck_bits: An `int` that sets the size of the bottleneck in `log_2`. + beta: Beta factor for commitment loss (Default: 0.25). + decay: Decay factor for exponential moving average (Default: 0.999). + epsilon: Small value to avoid dividing by zero in EMA update + (Default: 1e-5). + temperature_warmup_steps: Number of steps it takes to decay temperature to 0 + (Default: 150000). + hard: When `True`, we use hard Gumbel-Softmax samples and force + discrete latents by taking the argmax. When `False`, we use soft samples, + which we treat as codebook weights (Default: False). + summary: When `True`, we save histogram summaries of the KL term (Default: + True). + + Returns: + x_means_assignments: A `float`-like `Tensor` containing the codebook + assignments. When `hard == True`, this is one-hot, containing the arg-max + of the Gumbel-Softmax samples (and we use the straightthrough gradient). + Otherwise, it contains the Gumbel-Softmax samples exactly, which are + values from the `(K-1)`-simplex where `K` is the bottleneck size. + loss: The loss, which is the sum of the KL between the Gumbel-Softmax and + the uniform prior and the commitment loss multiplied by the beta factor. + We approximate the KL by using the entropy of a categorical distribution + instead of the Gumbel Softmax. + + """ + bottleneck_size = 2**bottleneck_bits + x_shape = common_layers.shape_list(x) + hidden_size = x_shape[-1] + means, ema_means, ema_count = get_vq_codebook(bottleneck_size, hidden_size) + x = tf.reshape(x, [-1, hidden_size]) + + bottleneck_size = common_layers.shape_list(means)[0] + x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True) + means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True) + scalar_prod = tf.matmul(x, means, transpose_b=True) + dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod + + class_probs = tf.nn.softmax(dist) + log_class_probs = tf.nn.log_softmax(dist) + gumbel_samples = gumbel_sample(common_layers.shape_list(dist)) + steps = temperature_warmup_steps + gumbel_samples *= common_layers.inverse_exp_decay(steps // 5) * 0.5 + temperature = 1.2 - common_layers.inverse_lin_decay(steps) + + # 10% of the time keep reasonably high temperature to keep learning. + temperature = tf.cond( + tf.less(tf.random_uniform([]), 0.9), lambda: temperature, + lambda: tf.random_uniform([], minval=0.5, maxval=1.0)) + gumbel_softmax_samples = tf.nn.softmax( + (log_class_probs + gumbel_samples) / temperature) + + # Calculate KL between q and a uniform prior. + kl = tf.reduce_sum( + class_probs * (log_class_probs - tf.log(1.0 / bottleneck_size)), -1) + if summary: + tf.summary.histogram("KL", tf.reshape(kl, [-1])) + + # Straight-through gradient estimation when we're using hard assignments. 
+ if hard: + x_means_idx = tf.reshape(tf.argmax(gumbel_softmax_samples, axis=-1), [-1]) + x_means_hot = tf.one_hot(x_means_idx, bottleneck_size) + x_means_assignments = gumbel_softmax_samples + tf.stop_gradient( + x_means_hot - gumbel_softmax_samples) + else: + x_means_assignments = gumbel_softmax_samples + x_means_assignments_flat = tf.reshape(x_means_assignments, + [-1, bottleneck_size]) + x_means = tf.matmul(x_means_assignments_flat, means) + commitment_loss = tf.reduce_mean( + tf.squared_difference(x, tf.stop_gradient(x_means))) + + # Update the ema variables. + updated_ema_count = moving_averages.assign_moving_average( + ema_count, + tf.reduce_sum( + tf.reshape(x_means_assignments, shape=[-1, bottleneck_size]), axis=0), + decay, + zero_debias=False) + + dw = tf.matmul(x_means_assignments, x, transpose_a=True) + updated_ema_means = tf.identity( + moving_averages.assign_moving_average( + ema_means, dw, decay, zero_debias=False)) + n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True) + updated_ema_count = ( + (updated_ema_count + epsilon) / (n + bottleneck_size * epsilon) * n) + updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1) + with tf.control_dependencies([commitment_loss]): + update_means = means.assign(updated_ema_means) + with tf.control_dependencies([update_means]): + loss = beta * commitment_loss + + # Add KL loss. + loss += tf.reduce_mean(kl) + + x_means_assignments = tf.reshape(x_means_assignments, + x_shape[:-1] + [bottleneck_size]) + return x_means_assignments, loss + + +def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise, + discretize_warmup_steps, mode): + """Simple discretization through tanh, flip bottleneck_noise many bits.""" + x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck") + d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0 + if mode == tf_estimator.ModeKeys.TRAIN: + x += tf.truncated_normal( + common_layers.shape_list(x), mean=0.0, stddev=0.2) + x = tf.tanh(x) + d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x) + if mode == tf_estimator.ModeKeys.TRAIN: + noise = tf.random_uniform(common_layers.shape_list(x)) + noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0 + d *= noise + d = common_layers.mix(d, x, discretize_warmup_steps, + mode == tf_estimator.ModeKeys.TRAIN) + return d, d0 + + +def tanh_discrete_unbottleneck(x, hidden_size): + """Simple un-discretization from tanh.""" + x = tf.layers.dense(x, hidden_size, name="tanh_discrete_unbottleneck") + return x + + +def isemhash_bottleneck(x, + bottleneck_bits, + bottleneck_noise, + discretize_warmup_steps, + mode, + isemhash_noise_dev=0.5, + isemhash_mix_prob=0.5): + """Improved semantic hashing bottleneck.""" + with tf.variable_scope("isemhash_bottleneck"): + x = tf.layers.dense(x, bottleneck_bits, name="dense") + y = common_layers.saturating_sigmoid(x) + if isemhash_noise_dev > 0 and mode == tf_estimator.ModeKeys.TRAIN: + noise = tf.truncated_normal( + common_layers.shape_list(x), mean=0.0, stddev=isemhash_noise_dev) + y = common_layers.saturating_sigmoid(x + noise) + d = tf.to_float(tf.less(0.5, y)) + y - tf.stop_gradient(y) + d = 2.0 * d - 1.0 # Move from [0, 1] to [-1, 1]. + if mode == tf_estimator.ModeKeys.TRAIN: # Flip some bits. 
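+      # Each bit of d is sign-flipped independently with probability
+      # bottleneck_noise, regularizing the discrete code during training.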
+ noise = tf.random_uniform(common_layers.shape_list(x)) + noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0 + d *= noise + d = common_layers.mix( + d, + 2.0 * y - 1.0, + discretize_warmup_steps, + mode == tf_estimator.ModeKeys.TRAIN, + max_prob=isemhash_mix_prob) + return d, 0.0 + + +def isemhash_unbottleneck(x, hidden_size, isemhash_filter_size_multiplier=1.0): + """Improved semantic hashing un-bottleneck.""" + filter_size = int(hidden_size * isemhash_filter_size_multiplier) + x = 0.5 * (x - 1.0) # Move from [-1, 1] to [0, 1]. + with tf.variable_scope("isemhash_unbottleneck"): + h1a = tf.layers.dense(x, filter_size, name="hidden1a") + h1b = tf.layers.dense(1.0 - x, filter_size, name="hidden1b") + h2 = tf.layers.dense(tf.nn.relu(h1a + h1b), filter_size, name="hidden2") + return tf.layers.dense(tf.nn.relu(h2), hidden_size, name="final") + + +def parametrized_bottleneck(x, hparams): + """Meta-function calling all the above bottlenecks with hparams.""" + if hparams.bottleneck_kind == "tanh_discrete": + d, _ = tanh_discrete_bottleneck( + x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5, + hparams.discretize_warmup_steps, hparams.mode) + return d, 0.0 + if hparams.bottleneck_kind == "isemhash": + return isemhash_bottleneck( + x, hparams.bottleneck_bits, hparams.bottleneck_noise * 0.5, + hparams.discretize_warmup_steps, hparams.mode, + hparams.isemhash_noise_dev, hparams.isemhash_mix_prob) + if hparams.bottleneck_kind == "vq": + return vq_discrete_bottleneck(x, hparams.bottleneck_bits, hparams.vq_beta, + hparams.vq_decay, hparams.vq_epsilon) + if hparams.bottleneck_kind == "em": + return vq_discrete_bottleneck( + x, + hparams.bottleneck_bits, + hparams.vq_beta, + hparams.vq_decay, + hparams.vq_epsilon, + soft_em=True, + num_samples=hparams.vq_num_samples) + if hparams.bottleneck_kind == "gumbel_softmax": + return gumbel_softmax_discrete_bottleneck( + x, + hparams.bottleneck_bits, + hparams.vq_beta, + hparams.vq_decay, + hparams.vq_epsilon, + hparams.temperature_warmup_steps, + hard=False, + summary=True) + + raise ValueError( + "Unsupported hparams.bottleneck_kind %s" % hparams.bottleneck_kind) + + +def parametrized_unbottleneck(x, hidden_size, hparams): + """Meta-function calling all the above un-bottlenecks with hparams.""" + if hparams.bottleneck_kind == "tanh_discrete": + return tanh_discrete_unbottleneck(x, hidden_size) + if hparams.bottleneck_kind == "isemhash": + return isemhash_unbottleneck(x, hidden_size, + hparams.isemhash_filter_size_multiplier) + if hparams.bottleneck_kind in ["vq", "em", "gumbel_softmax"]: + return vq_discrete_unbottleneck(x, hidden_size) + raise ValueError( + "Unsupported hparams.bottleneck_kind %s" % hparams.bottleneck_kind) + + +def iaf_hparams(hidden_size=512, filter_size=4096): + """Create hyperpameters for inverse autoregressive flows. + + Args: + hidden_size: Width of attention layers and neural network output layer. + filter_size: Hidden layer width for neural network. + + Returns: + hparams: Hyperpameters with basic presets for inverse autoregressive flows. + """ + hparams = common_hparams.basic_params1() + + # Attention hyperparameters. 
+ hparams.hidden_size = hidden_size + hparams.add_hparam("attention_key_channels", None) + hparams.add_hparam("attention_value_channels", None) + hparams.add_hparam("num_heads", 4) + hparams.add_hparam("attention_dropout", 0.1) + hparams.add_hparam("shared_rel", False) + hparams.add_hparam("block_width", 1) + hparams.add_hparam("block_length", 1) + hparams.add_hparam("q_filter_width", 1) + hparams.add_hparam("kv_filter_width", 1) + + # Preprocessing and postprocesing hyperparameters. + hparams.layer_preprocess_sequence = "n" + hparams.layer_prepostprocess_dropout = 0.1 + hparams.norm_type = "layer" + hparams.norm_epsilon = 1e-06 + hparams.layer_prepostprocess_dropout_broadcast_dims = "" + hparams.layer_postprocess_sequence = "da" + + # Feedforward neural network hyperparameters. + hparams.add_hparam("filter_size", filter_size) + hparams.add_hparam("ffn_layer", "conv_hidden_relu") + hparams.add_hparam("relu_dropout", 0.1) + return hparams diff --git a/tensor2tensor/layers/discretization_test.py b/tensor2tensor/layers/discretization_test.py new file mode 100644 index 000000000..d0957a03f --- /dev/null +++ b/tensor2tensor/layers/discretization_test.py @@ -0,0 +1,245 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
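For orientation before the tests, here is a minimal, hypothetical sketch of driving the hparams-based dispatcher `parametrized_bottleneck` defined above. The hparam names mirror the branches of that function, but the concrete values, the standalone graph setup, and the tensor shapes are illustrative assumptions, not a preset shipped with the library:

```python
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import estimator as tf_estimator

from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import discretization

# Hypothetical standalone setup; values are illustrative only.
hparams = common_hparams.basic_params1()
hparams.add_hparam("bottleneck_kind", "tanh_discrete")  # or "isemhash", "vq", "em", ...
hparams.add_hparam("bottleneck_bits", 8)
hparams.add_hparam("bottleneck_noise", 0.1)
hparams.add_hparam("discretize_warmup_steps", 1000)
hparams.add_hparam("mode", tf_estimator.ModeKeys.TRAIN)

tf.train.create_global_step()  # the warmup mixing reads the global step
x = tf.random_normal([2, 16, 64])                              # [batch, length, hidden]
d, _ = discretization.parametrized_bottleneck(x, hparams)      # [2, 16, 8], values in [-1, 1]
y = discretization.parametrized_unbottleneck(d, 64, hparams)   # back to [2, 16, 64]
```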
+ +"""Tests for discretization.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from tensor2tensor.layers import discretization +from tensor2tensor.utils import test_utils + +import tensorflow.compat.v1 as tf +tf.enable_eager_execution() + + +class DiscretizationTest(tf.test.TestCase): + """Tests for discretization layers.""" + + def setUp(self): + tf.set_random_seed(1234) + np.random.seed(123) + + @test_utils.run_in_graph_and_eager_modes() + def testBitToIntZeros(self): + x_bit = tf.zeros(shape=[1, 10], dtype=tf.float32) + x_int = tf.zeros(shape=[1], dtype=tf.int32) + diff = discretization.bit_to_int(x_bit, num_bits=10) - x_int + d = self.evaluate(diff) + self.assertEqual(d, 0) + + @test_utils.run_in_graph_and_eager_modes() + def testBitToIntOnes(self): + x_bit = tf.ones(shape=[1, 3], dtype=tf.float32) + x_int = 7 * tf.ones(shape=[1], dtype=tf.int32) + diff = discretization.bit_to_int(x_bit, num_bits=3) - x_int + d = self.evaluate(diff) + self.assertEqual(d, 0) + + @test_utils.run_in_graph_and_eager_modes() + def testIntToBitZeros(self): + x_bit = tf.zeros(shape=[1, 10], dtype=tf.float32) + x_int = tf.zeros(shape=[1], dtype=tf.int32) + diff = discretization.int_to_bit(x_int, num_bits=10) - x_bit + d = self.evaluate(diff) + self.assertTrue(np.all(d == 0)) + + @test_utils.run_in_graph_and_eager_modes() + def testIntToBitOnes(self): + x_bit = tf.ones(shape=[1, 3], dtype=tf.float32) + x_int = 7 * tf.ones(shape=[1], dtype=tf.int32) + diff = discretization.int_to_bit(x_int, num_bits=3) - x_bit + d = self.evaluate(diff) + self.assertTrue(np.all(d == 0)) + + @test_utils.run_in_graph_and_eager_modes() + def testProjectHidden(self): + hidden_size = 60 + block_dim = 20 + num_blocks = 3 + x = tf.zeros(shape=[1, 1, hidden_size], dtype=tf.float32) + projection_tensors = tf.random_normal( + shape=[num_blocks, hidden_size, block_dim], dtype=tf.float32) + x_projected = discretization.project_hidden(x, projection_tensors, + hidden_size, num_blocks) + x_projected_eval = self.evaluate(x_projected) + self.assertEqual(np.shape(x_projected_eval), (1, 1, num_blocks, block_dim)) + self.assertTrue(np.all(x_projected_eval == 0)) + + @test_utils.run_in_graph_and_eager_modes() + def testSliceHiddenZeros(self): + hidden_size = 60 + block_dim = 20 + num_blocks = 3 + x = tf.zeros(shape=[1, 1, hidden_size], dtype=tf.float32) + x_sliced = discretization.slice_hidden(x, hidden_size, num_blocks) + x_sliced_eval = self.evaluate(x_sliced) + self.assertEqual(np.shape(x_sliced_eval), (1, 1, num_blocks, block_dim)) + self.assertTrue(np.all(x_sliced_eval == 0)) + + @test_utils.run_in_graph_and_eager_modes() + def testSliceHiddenOnes(self): + hidden_size = 60 + block_dim = 20 + num_blocks = 3 + x = tf.ones(shape=[1, 1, hidden_size], dtype=tf.float32) + x_sliced = discretization.slice_hidden(x, hidden_size, num_blocks) + x_sliced_eval = self.evaluate(x_sliced) + self.assertEqual(np.shape(x_sliced_eval), (1, 1, num_blocks, block_dim)) + self.assertTrue(np.all(x_sliced_eval == 1)) + + @test_utils.run_in_graph_and_eager_modes() + def testNearestNeighbors(self): + x = tf.constant([[0, 0.9, 0], [0.8, 0., 0.]], dtype=tf.float32) + x = tf.reshape(x, [1, 1, 2, 3]) + means = tf.constant( + [[1, 0, 0], [0, 1, 0], [0, 0, 1], [9, 9, 9]], dtype=tf.float32) + means = tf.stack([means, means], axis=0) + x_means_hot, _ = discretization.nearest_neighbor( + x, means, block_v_size=4) + x_means_hot_test = np.array([[0, 1, 0, 0], [1, 0, 0, 0]]) + x_means_hot_test 
= np.expand_dims(x_means_hot_test, axis=0) + x_means_hot_eval = self.evaluate(x_means_hot) + self.assertEqual(np.shape(x_means_hot_eval), (1, 2, 4)) + self.assertTrue(np.all(x_means_hot_eval == x_means_hot_test)) + + @test_utils.run_in_graph_mode_only() + def testGetVQBottleneck(self): + bottleneck_bits = 2 + bottleneck_size = 2**bottleneck_bits + hidden_size = 3 + means, _, ema_count = discretization.get_vq_codebook( + bottleneck_size, hidden_size) + assign_op = means.assign(tf.zeros(shape=[bottleneck_size, hidden_size])) + means_new, _, _ = discretization.get_vq_codebook(bottleneck_size, + hidden_size) + with self.test_session() as sess: + tf.global_variables_initializer().run() + sess.run(assign_op) + self.assertTrue(np.all(sess.run(means_new) == 0)) + self.assertTrue(np.all(sess.run(ema_count) == 0)) + + @test_utils.run_in_graph_and_eager_modes() + def testVQNearestNeighbors(self): + x = tf.constant([[0, 0.9, 0], [0.8, 0., 0.]], dtype=tf.float32) + means = tf.constant( + [[1, 0, 0], [0, 1, 0], [0, 0, 1], [9, 9, 9]], dtype=tf.float32) + x_means_hot, _, _ = discretization.vq_nearest_neighbor(x, means) + x_means_hot_test = np.array([[0, 1, 0, 0], [1, 0, 0, 0]]) + x_means_hot_eval = self.evaluate(x_means_hot) + self.assertEqual(np.shape(x_means_hot_eval), (2, 4)) + self.assertTrue(np.all(x_means_hot_eval == x_means_hot_test)) + + def testVQDiscreteBottleneck(self): + x = tf.constant([[0, 0.9, 0], [0.8, 0., 0.]], dtype=tf.float32) + x_means_hot, _ = discretization.vq_discrete_bottleneck(x, bottleneck_bits=2) + self.evaluate(tf.global_variables_initializer()) + x_means_hot_eval = self.evaluate(x_means_hot) + self.assertEqual(np.shape(x_means_hot_eval), (2, 4)) + + def testVQDiscreteUnbottlenck(self): + x = tf.constant([[1, 0, 0, 0], [0, 0, 1, 0]], dtype=tf.int32) + x_means = discretization.vq_discrete_unbottleneck(x, hidden_size=3) + self.evaluate(tf.global_variables_initializer()) + x_means_eval = self.evaluate(x_means) + self.assertEqual(np.shape(x_means_eval), (2, 3)) + + def testGumbelSoftmaxDiscreteBottleneck(self): + x = tf.constant([[0, 0.9, 0], [0.8, 0., 0.]], dtype=tf.float32) + tf.add_to_collection(tf.GraphKeys.GLOBAL_STEP, tf.constant(1)) + x_means_hot, _ = discretization.gumbel_softmax_discrete_bottleneck( + x, bottleneck_bits=2) + self.evaluate(tf.global_variables_initializer()) + x_means_hot_eval = self.evaluate(x_means_hot) + self.assertEqual(np.shape(x_means_hot_eval), (2, 4)) + + @test_utils.run_in_graph_mode_only() + def testDiscreteBottleneckVQ(self): + hidden_size = 60 + z_size = 4 + x = tf.zeros(shape=[100, 1, hidden_size], dtype=tf.float32) + with tf.variable_scope("test", reuse=tf.AUTO_REUSE): + means = tf.get_variable("means", + shape=[1, 1, 2**z_size, hidden_size], + initializer=tf.constant_initializer(0.), + dtype=tf.float32) + ema_count = [] + ema_count_i = tf.get_variable( + "ema_count", + [1, 2**z_size], + initializer=tf.constant_initializer(0), + trainable=False) + ema_count.append(ema_count_i) + ema_means = [] + with tf.colocate_with(means): + ema_means_i = tf.get_variable("ema_means", + initializer=means.initialized_value()[0], + trainable=False) + ema_means.append(ema_means_i) + x_means_dense, x_means_hot, _, _, _ = discretization.discrete_bottleneck( + x, hidden_size, z_size, 32, means=means, num_blocks=1, + ema_means=ema_means, ema_count=ema_count, name="test") + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + x_means_dense_eval, x_means_hot_eval = sess.run( + [x_means_dense, x_means_hot]) + means_eval = sess.run(means) + 
self.assertEqual(x_means_dense_eval.shape, (100, 1, hidden_size)) + self.assertEqual(x_means_hot_eval.shape, (100, 1)) + self.assertTrue(np.all(means_eval == np.zeros( + (1, 1, 2**z_size, hidden_size)))) + + @test_utils.run_in_graph_mode_only() + def testDiscreteBottleneckVQCond(self): + hidden_size = 60 + z_size = 4 + x = tf.zeros(shape=[100, 1, hidden_size], dtype=tf.float32) + with tf.variable_scope("test2", reuse=tf.AUTO_REUSE): + means = tf.get_variable("means", + shape=[1, 1, 2**z_size, hidden_size], + initializer=tf.constant_initializer(0.), + dtype=tf.float32) + ema_count = [] + ema_count_i = tf.get_variable( + "ema_count", + [1, 2**z_size], + initializer=tf.constant_initializer(0), + trainable=False) + ema_count.append(ema_count_i) + ema_means = [] + with tf.colocate_with(means): + ema_means_i = tf.get_variable("ema_means", + initializer=means.initialized_value()[0], + trainable=False) + ema_means.append(ema_means_i) + cond = tf.cast(0.0, tf.bool) + x_means_dense, x_means_hot, _, _, _ = discretization.discrete_bottleneck( + x, hidden_size, z_size, 32, means=means, num_blocks=1, cond=cond, + ema_means=ema_means, ema_count=ema_count, name="test2") + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + x_means_dense_eval, x_means_hot_eval = sess.run( + [x_means_dense, x_means_hot]) + means_eval = sess.run(means) + self.assertEqual(x_means_dense_eval.shape, (100, 1, hidden_size)) + self.assertEqual(x_means_hot_eval.shape, (100, 1)) + self.assertAllClose(means_eval, np.zeros((1, 1, 2**z_size, + hidden_size))) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/layers/latent_layers.py b/tensor2tensor/layers/latent_layers.py new file mode 100644 index 000000000..bf1629d2f --- /dev/null +++ b/tensor2tensor/layers/latent_layers.py @@ -0,0 +1,759 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
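A quick aside on the pattern the discretization layers above rely on: the straight-through estimator, `d = x + tf.stop_gradient(quantized - x)`, which uses the hard value in the forward pass but routes gradients through the continuous `x`. A minimal self-contained illustration (not part of the library):

```python
import tensorflow.compat.v1 as tf

x = tf.constant([0.3, -0.7, 0.1])
hard = 2.0 * tf.to_float(tf.less(0.0, x)) - 1.0   # sign(x) in {-1, +1}
d = x + tf.stop_gradient(hard - x)                # forward: hard; backward: identity
grad = tf.gradients(tf.reduce_sum(d), x)[0]

with tf.Session() as sess:
  print(sess.run(d))     # [ 1. -1.  1.]
  print(sess.run(grad))  # [1. 1. 1.] -- gradients flow as if d were x
```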
+ +"""Utils for latent variable models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range # pylint: disable=redefined-builtin +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_image_attention as cia +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import transformer_layers +from tensor2tensor.utils import beam_search + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +import tensorflow_probability as tfp + +DO_SUMMARIES = True + + +def compress_self_attention_layer(x, hparams, name=None): + """Attend function.""" + with tf.variable_scope(name, default_name="compress_self_attention"): + x, xshape, _ = cia.maybe_reshape_4d_to_3d(x) + y = common_attention.multihead_attention( + common_layers.layer_preprocess(x, hparams), + None, + None, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, hparams.num_heads, + hparams.attention_dropout) + res = common_layers.layer_postprocess(x, y, hparams) + return tf.reshape(res, xshape) + + +def compute_nats_and_bits_per_dim(data_dim, + latent_dim, + average_reconstruction, + average_prior): + """Computes negative ELBO, which is an upper bound on the negative likelihood. + + Args: + data_dim: int-like indicating data dimensionality. + latent_dim: int-like indicating latent dimensionality. + average_reconstruction: Scalar Tensor indicating the reconstruction cost + averaged over all data dimensions and any data batches. + average_prior: Scalar Tensor indicating the negative log-prior probability + averaged over all latent dimensions and any data batches. + + Returns: + Tuple of scalar Tensors, representing the nats and bits per data dimension + (e.g., subpixels) respectively. + """ + with tf.name_scope(None, default_name="compute_nats_per_dim"): + data_dim = tf.cast(data_dim, average_reconstruction.dtype) + latent_dim = tf.cast(latent_dim, average_prior.dtype) + negative_log_likelihood = data_dim * average_reconstruction + negative_log_prior = latent_dim * average_prior + negative_elbo = negative_log_likelihood + negative_log_prior + nats_per_dim = tf.divide(negative_elbo, data_dim, name="nats_per_dim") + bits_per_dim = tf.divide(nats_per_dim, tf.log(2.), name="bits_per_dim") + return nats_per_dim, bits_per_dim + + +def multinomial_sample(x, vocab_size=None, sampling_method="random", + temperature=1.0): + """Multinomial sampling from a n-dimensional tensor. + + Args: + x: Tensor of shape [..., vocab_size]. Parameterizes logits of multinomial. + vocab_size: Number of classes in multinomial distribution. + sampling_method: String, "random" or otherwise deterministic. + temperature: Positive float. + + Returns: + Tensor of shape [...]. + """ + vocab_size = vocab_size or common_layers.shape_list(x)[-1] + if sampling_method == "random" and temperature > 0.0: + samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1) + else: + samples = tf.argmax(x, axis=-1) + reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1]) + return reshaped_samples + + +def ae_latent_softmax(latents_pred, latents_discrete_hot, vocab_size, hparams): + """Latent prediction and loss. + + Args: + latents_pred: Tensor of shape [..., depth]. + latents_discrete_hot: Tensor of shape [..., vocab_size]. + vocab_size: an int representing the vocab size. + hparams: HParams. 
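+      Expected to provide logit_normalization, sampling_method and
+      sampling_temp.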
+ + Returns: + sample: Tensor of shape [...], a sample from a multinomial distribution. + loss: Tensor of shape [...], the softmax cross-entropy. + """ + with tf.variable_scope("latent_logits"): + latents_logits = tf.layers.dense(latents_pred, vocab_size, + name="logits_dense") + if hparams.logit_normalization: + latents_logits *= tf.rsqrt(1e-8 + + tf.reduce_mean(tf.square(latents_logits))) + loss = tf.nn.softmax_cross_entropy_with_logits_v2( + labels=latents_discrete_hot, logits=latents_logits) + + # TODO(trandustin): tease this out from ae_latent_softmax. + # we use just the loss portion to anchor prior / encoder on text. + sample = multinomial_sample(latents_logits, + vocab_size, + hparams.sampling_method, + hparams.sampling_temp) + return sample, loss + + +def ae_latent_sample_beam(latents_dense_in, inputs, ed, embed, hparams): + """Samples from the latent space in the autoencoder. + + Args: + latents_dense_in: Tensor of shape [batch, length_q, ...]. Only the shape of + its first two dimensions are used. length_q is the latent length, which is + height * width * hparams.num_latents / (2**hparams.num_compress_steps). + inputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Encodings + to attend to in decoder. + ed: Tensor which broadcasts with shape [batch, hparams.num_heads, length_q, + length_kv]. Encoder-decoder attention bias. + embed: Callable which embeds discrete latent hot-vectors and a hidden size + and returns dense vectors. + hparams: HParams. + + Returns: + Tensor of shape [batch, length]. + """ + + def symbols_to_logits_fn(ids): + """Go from ids to logits.""" + ids = tf.expand_dims(ids, axis=2) # Ids start with added all-zeros. + latents_discrete = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0]]) + + with tf.variable_scope(tf.get_variable_scope(), reuse=False): + latents_dense = embed( + tf.one_hot(latents_discrete, depth=2**hparams.bottleneck_bits), + hparams.hidden_size) + latents_pred = transformer_latent_decoder( + latents_dense, inputs, ed, hparams, name="latent_prediction") + logits = tf.layers.dense( + latents_pred, 2**hparams.bottleneck_bits, name="logits_dense") + current_output_position = common_layers.shape_list(ids)[1] - 1 + logits = logits[:, current_output_position, :] + return logits + + initial_ids = tf.zeros([tf.shape(latents_dense_in)[0]], dtype=tf.int32) + length = tf.shape(latents_dense_in)[1] + ids, _, _ = beam_search.beam_search( + symbols_to_logits_fn, + initial_ids, + 1, + length, + 2**hparams.bottleneck_bits, + alpha=0.0, + eos_id=-1, + stop_early=False) + + res = tf.expand_dims(ids[:, 0, :], axis=2) # Pick first beam. + return res[:, 1:] # Remove the added all-zeros from ids. + + +def residual_block_layer(inputs, hparams): + """Residual block over inputs. + + Runs a residual block consisting of + conv: kernel_size x kernel_size + conv: 1x1 + dropout, add and normalize according to hparams.layer_postprocess_sequence. + + Args: + inputs: Tensor of shape [batch, height, width, hparams.hidden_size]. + hparams: HParams. + + Returns: + Tensor of shape [batch, height, width, hparams.hidden_size]. 
+ """ + kernel = (hparams.res_kernel_size, hparams.res_kernel_size) + x = inputs + for i in range(hparams.num_res_layers): + with tf.variable_scope("res_conv_%d" % i): + # kernel_size x kernel_size conv block + y = common_layers.conv_block( + common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"), + hparams.hidden_size, [((1, 1), kernel)], + strides=(1, 1), + padding="SAME", + name="residual_conv") + # 1x1 conv block + y = common_layers.conv_block( + y, + hparams.hidden_size, [((1, 1), (1, 1))], + strides=(1, 1), + padding="SAME", + name="residual_dense") + x = common_layers.layer_postprocess(x, y, hparams) + return x + + +def compress_encoder(inputs, + hparams, + strides=(2, 2), + kernel_size=(3, 3), + name=None): + """Encoder that compresses 2-D inputs by 2**num_compress_steps. + + Args: + inputs: Tensor of shape [batch, height, width, channels]. + hparams: HParams. + strides: Tuple, strides for conv block. + kernel_size: Tuple, kernel window size for conv block. + name: string, variable scope. + + Returns: + Tensor of shape [batch, latent_length, hparams.hidden_size], where + latent_length is + hparams.num_latents * (height*width) / 2**(hparams.num_compress_steps). + """ + with tf.variable_scope(name, default_name="compress"): + x = inputs + for i in range(hparams.num_compress_steps // 2): + with tf.variable_scope("compress_conv_%d" % i): + y = common_layers.conv_block( + common_layers.layer_norm( + x, hparams.hidden_size, name="lnorm"), + hparams.hidden_size, + dilation_rates_and_kernel_sizes=[((1, 1), kernel_size)], + strides=strides, + padding="SAME", + name="compress_conv_%d" % i) + y = tf.nn.dropout(y, 1.0 - hparams.dropout) + if hparams.do_compress_attend: + y = compress_self_attention_layer( + x, hparams, name="compress_selfatt_%d" % i) + y += x + x = y + + x = residual_block_layer(x, hparams) + + # If using multiple copies of latents, blow up the hidden size and then + # reshape to increase by num_latents. + shape_x = common_layers.shape_list(x) + x = tf.layers.dense(x, + hparams.num_latents * hparams.hidden_size, + name=name + "_dense") + return tf.reshape(x, [shape_x[0], + shape_x[1] * shape_x[2] * hparams.num_latents, + hparams.hidden_size]) + + +def compress_encoder_2d(x, hparams, name=None): + """Encoder that compresses 2-D inputs by 2**num_compress_steps. + + Args: + x: Tensor of shape [batch, height, width, channels]. + hparams: HParams. + name: string, variable scope. + + Returns: + Tensor of shape [batch, latent_length, hparams.hidden_size], where + latent_length is + hparams.num_latents * (height*width) / 2**(hparams.num_compress_steps). + """ + return compress_encoder( + x, + hparams, + strides=(2, 2), + kernel_size=(hparams.kernel_size, hparams.kernel_size), + name=name) + + +def compress_encoder_1d(x, hparams, name=None): + """Encoder that compresses 1-D inputs by 2**num_compress_steps. + + Args: + x: Tensor of shape [batch, length, channels]. + hparams: HParams. + name: string, variable scope. + + Returns: + Tensor of shape [batch, latent_length, hparams.hidden_size], where + latent_length is + hparams.num_latents * length / 2**hparams.num_compress_steps. + """ + x = tf.expand_dims(x, axis=2) + return compress_encoder(x, + hparams, + strides=(2, 1), + kernel_size=(hparams.kernel_size, 1), + name=name) + + +def decompress_decoder(inputs, + hparams, + strides=(2, 2), + kernel=(3, 3), + name=None): + """Decoder that decompresses 2-D inputs by 2**num_compress_steps. + + Args: + inputs: Tensor of shape [batch, compress_height, compress_width, channels]. 
+ hparams: HParams. + strides: Tuple, strides for conv block. + kernel: Tuple, kernel window size for conv block. + name: string, variable scope. + + Returns: + Tensor of shape [batch, height, width, hparams.hidden_size]. + """ + with tf.variable_scope(name, default_name="decompress"): + x = inputs + x = tf.layers.dense(x, hparams.hidden_size, name=name + "_dense") + x = residual_block_layer(x, hparams) + for i in range(hparams.num_compress_steps // 2): + j = hparams.num_compress_steps // 2 - i - 1 + with tf.variable_scope(name + "_%d" % j): + if hparams.do_decompress_attend: + y = compress_self_attention_layer( + x, hparams, name="decompress_selfatt") + x += y + y = tf.layers.conv2d_transpose( + x, + hparams.hidden_size, + kernel, + strides=strides, + padding="SAME", + activation=tf.nn.relu if i > 0 else None, + name="decompress_conv") + x = y + return x + + +def decompress_decoder_2d(x, hparams, name=None): + """Decoder that decompresses 2-D inputs by 2**num_compress_steps. + + Args: + x: Tensor of shape [batch, compress_height, compress_width, channels]. + hparams: HParams. + name: string, variable scope. + + Returns: + Tensor of shape [batch, height, width, hparams.hidden_size]. + """ + return decompress_decoder(x, hparams, + strides=(2, 2), + kernel=(hparams.kernel_size, hparams.kernel_size), + name=name) + + +def decompress_decoder_1d(x, hparams, name=None): + """Decoder that decompresses 1-D inputs by 2**num_compress_steps. + + Args: + x: Tensor of shape [batch, compress_length, channels]. + hparams: HParams. + name: string, variable scope. + + Returns: + Tensor of shape [batch, length, hparams.hidden_size]. + """ + x = tf.expand_dims(x, axis=2) + output = decompress_decoder(x, hparams, + strides=(2, 1), + kernel=(hparams.kernel_size, 1), + name=name) + return tf.squeeze(output, axis=2) + + +def transformer_text_encoder(inputs, + target_space, + hparams, + name=None): + """Transformer text encoder over inputs with unmasked full attention. + + Args: + inputs: Tensor of shape [batch, length, 1, hparams.hidden_size]. + target_space: int. Used for encoding inputs under a target space id. + hparams: HParams. + name: string, variable scope. + + Returns: + encoder_output: Tensor of shape [batch, length, hparams.hidden_size]. + ed: Tensor of shape [batch, 1, 1, length]. Encoder-decoder attention bias + for any padded tokens. + """ + with tf.variable_scope(name, default_name="transformer_text_encoder"): + inputs = common_layers.flatten4d3d(inputs) + [ + encoder_input, + encoder_self_attention_bias, + ed, + ] = transformer_layers.transformer_prepare_encoder( + inputs, target_space=target_space, hparams=hparams) + encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout) + encoder_output = transformer_layers.transformer_encoder( + encoder_input, encoder_self_attention_bias, hparams) + return encoder_output, ed + + +def transformer_image_decoder(targets, + encoder_output, + ed_attention_bias, + hparams, + name=None): + """Transformer image decoder over targets with local attention. + + Args: + targets: Tensor of shape [batch, ...], and whose size is batch * height * + width * hparams.num_channels * hparams.hidden_size. + encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size]. + ed_attention_bias: Tensor which broadcasts with shape [batch, + hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias. + hparams: HParams. + name: string, variable scope. + + Returns: + Tensor of shape [batch, height, width * hparams.num_channels, + hparams.hidden_size]. 
+ """ + with tf.variable_scope(name, default_name="transformer_dec"): + batch_size = common_layers.shape_list(targets)[0] + targets = tf.reshape(targets, [batch_size, + hparams.img_len, + hparams.img_len, + hparams.num_channels * hparams.hidden_size]) + decoder_input, _, _ = cia.prepare_decoder(targets, hparams) + decoder_output = cia.transformer_decoder_layers( + decoder_input, + encoder_output, + hparams.num_decoder_layers or hparams.num_hidden_layers, + hparams, + attention_type=hparams.dec_attention_type, + encoder_decoder_attention_bias=ed_attention_bias, + name="decoder") + decoder_output = tf.reshape(decoder_output, + [batch_size, + hparams.img_len, + hparams.img_len * hparams.num_channels, + hparams.hidden_size]) + return decoder_output + + +def transformer_latent_decoder(x, + encoder_output, + ed_attention_bias, + hparams, + name=None): + """Transformer decoder over latents using latent_attention_type. + + Args: + x: Tensor of shape [batch, length_q, hparams.hidden_size]. length_q is the + latent length, which is + height * width * hparams.num_latents / (2**hparams.num_compress_steps). + encoder_output: Tensor of shape [batch, length_kv, hparams.hidden_size]. + ed_attention_bias: Tensor which broadcasts with shape [batch, + hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias. + hparams: HParams. + name: string, variable scope. + + Returns: + Tensor of shape [batch, length_q, hparams.hidden_size]. + """ + with tf.variable_scope(name, default_name="transformer_latent_dec"): + batch_size = common_layers.shape_list(x)[0] + compressed_img_len = (hparams.img_len // + 2**(hparams.num_compress_steps // 2)) + x = tf.reshape(x, [batch_size, + compressed_img_len, + compressed_img_len * hparams.num_latents, + hparams.hidden_size]) + decoder_input, _, _ = cia.prepare_decoder(x, hparams) + decoder_output = cia.transformer_decoder_layers( + decoder_input, + encoder_output, + hparams.num_latent_layers or hparams.num_hidden_layers, + hparams, + attention_type=hparams.latent_attention_type, + encoder_decoder_attention_bias=ed_attention_bias, + name="decoder") + decoder_output = tf.reshape(decoder_output, + [batch_size, + compressed_img_len**2 * hparams.num_latents, + hparams.hidden_size]) + return decoder_output + + +def bottleneck_layer(inputs, + hparams, + name="discrete_bottleneck"): + """Computes latents given inputs (typically, compressed targets).""" + [ + latents_dense, + latents_discrete, + extra_loss, + embed_fn, + _, + ] = hparams.bottleneck(inputs=inputs, + filter_size=hparams.compress_filter_size, + name=name, + mode=hparams.mode) + if DO_SUMMARIES: + tf.summary.histogram("discrete_latents", + tf.reshape(latents_discrete, [-1])) + return latents_dense, latents_discrete, extra_loss, embed_fn + + +def latent_prediction_model(inputs, + ed_attention_bias, + latents_discrete, + latents_dense, + hparams, + vocab_size=None, + name=None): + """Transformer-based latent prediction model. + + It is an autoregressive decoder over latents_discrete given inputs. + + Args: + inputs: Tensor of shape [batch, length_kv, hparams.hidden_size]. Inputs to + attend to for the decoder on latents. + ed_attention_bias: Tensor which broadcasts with shape [batch, + hparams.num_heads, length_q, length_kv]. Encoder-decoder attention bias. + latents_discrete: Tensor of shape [batch, length_q, vocab_size]. + One-hot latents to compute log-probability of given inputs. + latents_dense: Tensor of shape [batch, length_q, hparams.hidden_size]. 
+ length_q is the latent length, which is + height * width * hparams.num_latents / (2**hparams.num_compress_steps). + hparams: HParams. + vocab_size: int or None. If None, it is 2**hparams.bottleneck_bits. + name: string, variable scope. + + Returns: + latents_pred: Tensor of shape [batch, length_q, hparams.hidden_size]. + latents_pred_loss: Tensor of shape [batch, length_q]. + """ + with tf.variable_scope(name, default_name="latent_prediction"): + if hparams.mode != tf_estimator.ModeKeys.PREDICT: + latents_pred = transformer_latent_decoder(tf.stop_gradient(latents_dense), + inputs, + ed_attention_bias, + hparams, + name) + if vocab_size is None: + vocab_size = 2**hparams.bottleneck_bits + if not hparams.soft_em: + # TODO(trandustin): latents_discrete is not one-hot from + # discrete_bottleneck unless hparams.soft_em is True. Refactor. + latents_discrete = tf.one_hot(latents_discrete, depth=vocab_size) + _, latent_pred_loss = ae_latent_softmax( + latents_pred, tf.stop_gradient(latents_discrete), vocab_size, hparams) + return latents_pred, latent_pred_loss + + +def transformer_autoencoder(inputs, + targets, + target_space, + hparams, + cache=None, + predict_mask=1.0): + """Auto-encoder using a Transformer decoder and a prior over latent sequences. + + Args: + inputs: Tensor of shape [batch, length, 1, hparams.hidden_size] or None. + targets: Tensor of shape [batch, ..., channels]. Ellipses may be 1 or 2 + dimensions denoting sequence length. + target_space: int. Used for encoding inputs under a target space id. + hparams: HParams. + cache: Tensor of shape [batch, length] or None. + predict_mask: Tensor masking whether to use gold targets or predictions. + + Returns: + decoder_output: Tensor of shape [batch, ..., hparams.hidden_size] presenting + pre-logit activations. After a transformation (`top` in `T2TModel`), it is + used with targets to compute the "training" (reconstruction) loss. + losses: dict of str to Tensors. There are three loss terms: "extra", + "extra_loss", and "latent_pred". The first is hard-coded to 0. The latter + two are Tensors of shape [batch]. + cache: Tensor of shape [batch, length], either the same as cache, or newly + computed if the cache input is None. 
+ """ + original_targets_shape = common_layers.shape_list(targets) + batch_size = original_targets_shape[0] + if len(original_targets_shape) == 4: + compress_fn = compress_encoder_2d + decompress_fn = decompress_decoder_2d + else: + compress_fn = compress_encoder_1d + decompress_fn = decompress_decoder_1d + + ed_attention_bias = None + if inputs is not None: + inputs, ed_attention_bias = transformer_text_encoder( + inputs, target_space, hparams, name="input_encoder") + + losses = {"extra": 0., + "extra_loss": 0., + "latent_pred": 0.} + if hparams.mode != tf_estimator.ModeKeys.PREDICT: + targets_compressed = compress_fn(targets, hparams, name="compress") + + if hparams.mode == tf_estimator.ModeKeys.TRAIN: + scale = common_layers.inverse_exp_decay(hparams.startup_steps) + else: + scale = 1.0 + scale = tf.to_float(tf.less(tf.random_uniform([batch_size]), scale)) + + latents_dense, latents_discrete, extra_loss, _ = bottleneck_layer( + targets_compressed, hparams) + extra_loss = scale * tf.reduce_mean(extra_loss) + + _, latents_pred_loss = latent_prediction_model( + inputs, ed_attention_bias, latents_discrete, latents_dense, hparams, + name="latent_pred") + latent_time = tf.less(hparams.mask_startup_steps, + tf.to_int32(tf.train.get_global_step())) + latents_pred_loss = scale * tf.reduce_mean(latents_pred_loss) + latents_pred_loss *= tf.to_float(latent_time) + + # Apply dropout noise for each data point and time step. + latents_dense_shape = common_layers.shape_list(latents_dense) + latents_dense = tf.nn.dropout( + latents_dense, + keep_prob=1 - hparams.latent_dropout, + noise_shape=[latents_dense_shape[0], latents_dense_shape[1], 1]) + + # TODO(trandustin): Can we combine extra and extra_loss? + losses = {"extra": 0., + "extra_loss": extra_loss, + "latent_pred": latents_pred_loss} + else: + # Set the latent length, which is num_latents times the number of latent + # pixels. The number of latent pixels is determined by a compression factor + # on the number of image pixels. 
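+    # For example, with img_len=8, num_latents=1 and num_compress_steps=2 (the
+    # tiny hparams used in the tests below), latent_len = 8 * 8 * 1 / 2**2 = 16.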
+ latent_len = ((hparams.img_len * hparams.img_len * hparams.num_latents) / + (2**hparams.num_compress_steps)) + _, _, _, embed_fn = bottleneck_layer(targets_compressed, hparams) + latents_dense = tf.zeros([batch_size, latent_len, 1, hparams.hidden_size]) + if cache is None: + cache = ae_latent_sample_beam(latents_dense, + inputs, + ed_attention_bias, + embed_fn, + hparams) + cache_one_hot = tf.one_hot(cache, depth=2**hparams.bottleneck_bits) + latents_dense = embed_fn(cache_one_hot, hparams.hidden_size) + + if len(original_targets_shape) == 4: + compressed_img_len = (hparams.img_len // + 2**(hparams.num_compress_steps // 2)) + latents_dense = tf.reshape(latents_dense, + [batch_size, + compressed_img_len, + compressed_img_len, + hparams.num_latents * hparams.hidden_size]) + + latents_dense = decompress_fn(latents_dense, hparams, name="decompress") + latents_dense = tf.reshape( + latents_dense, + [-1, hparams.img_len, hparams.img_len, hparams.hidden_size]) + + if hparams.use_gold_targets: + if hparams.mode == tf_estimator.ModeKeys.PREDICT: + masking = predict_mask + else: + masking = common_layers.inverse_exp_decay(hparams.mask_startup_steps) + targets, _, _ = cia.maybe_reshape_4d_to_3d(targets) + mask = tf.less(masking, + tf.random_uniform(common_layers.shape_list(targets)[:-1])) + mask = tf.expand_dims(tf.to_float(mask), 2) + latents_dense = mask * targets + (1.0 - mask) * latents_dense + + latents_dense = tf.reshape(latents_dense, original_targets_shape) + if hparams.decode_autoregressive: + decoder_output = transformer_image_decoder( + latents_dense, inputs, ed_attention_bias, hparams, name="decoder") + else: + decoder_output = latents_dense + return decoder_output, losses, cache + + +def iaf_flow(one_hot_assignments, + scale_weights, + scale_bias, + num_codes, + summary=True, + name=None): + """Performs a single IAF flow using scale and normalization transformations. + + Args: + one_hot_assignments: Assignments Tensor with shape [num_samples, batch_size, + latent_size, num_codes]. + scale_weights: Tensor corresponding to lower triangular matrix used to + autoregressively generate scale matrix from assignments. To ensure the + lower-triangular matrix has length of latent_size, scale_weights should + be a rank-one tensor with size latent_size * (latent_size + 1) / 2. + scale_bias: Bias tensor to be added to scale tensor, with shape + [latent_size, num_codes]. If scale weights are zero, initialize scale_bias + to be log(exp(1.) / 2. - 1) so initial transformation is identity. + num_codes: Number of codes in codebook. + summary: Whether to save summaries. + name: String used for name scope. + + Returns: + flow_output: Transformed one-hot assignments. + inverse_log_det_jacobian: Inverse log deteriminant of Jacobian corresponding + to transformation. + """ + with tf.name_scope(name, default_name="iaf"): + # Pad the one_hot_assignments by zeroing out the first latent dimension and + # shifting the rest down by one (and removing the last dimension). + padded_assignments = tf.pad( + one_hot_assignments, [[0, 0], [0, 0], [1, 0], [0, 0]])[:, :, :-1, :] + scale_bijector = tfp.distributions.bijectors.Affine( + scale_tril=tfp.math.fill_triangular(scale_weights)) + scale = scale_bijector.forward( + tf.transpose(padded_assignments, [0, 1, 3, 2])) + # Transpose the bijector output since it performs a batch matmul. 
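+    # (The lower-triangular scale acts on the latent-size axis, which was moved
+    # to the last position before the bijector and is moved back here.)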
+ scale = tf.transpose(scale, [0, 1, 3, 2]) + scale = tf.nn.softplus(scale) + scale = scale + tf.nn.softplus(scale_bias[tf.newaxis, tf.newaxis, ...]) + # Don't need last dimension since the transformation keeps it constant. + scale = scale[..., :-1] + + z = one_hot_assignments[..., :-1] + unnormalized_probs = tf.concat([z * scale, + one_hot_assignments[..., -1, tf.newaxis]], + axis=-1) + normalizer = tf.reduce_sum(unnormalized_probs, axis=-1) + flow_output = unnormalized_probs / (normalizer[..., tf.newaxis]) + inverse_log_det_jacobian = (-tf.reduce_sum(tf.log(scale), axis=-1) + + num_codes * tf.log(normalizer)) + if summary: + tf.summary.histogram("iaf/scale", tf.reshape(scale, [-1])) + tf.summary.histogram("iaf/inverse_log_det_jacobian", + tf.reshape(inverse_log_det_jacobian, [-1])) + return flow_output, inverse_log_det_jacobian diff --git a/tensor2tensor/layers/latent_layers_test.py b/tensor2tensor/layers/latent_layers_test.py new file mode 100644 index 000000000..7a2c59275 --- /dev/null +++ b/tensor2tensor/layers/latent_layers_test.py @@ -0,0 +1,174 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for layers in latent variable models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +from tensor2tensor.layers import common_image_attention as cia +from tensor2tensor.layers import discretization +from tensor2tensor.layers import latent_layers +from tensor2tensor.models import transformer +from tensor2tensor.utils import test_utils + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +tf.enable_eager_execution() + + +def imagetransformer_latent_tiny(): + """Tiny set of hparams for a latent image model.""" + hparams = transformer.transformer_small() + hparams.batch_size = 2 + hparams.num_hidden_layers = 3 + hparams.hidden_size = 16 + hparams.filter_size = 32 + hparams.compress_filter_size = 64 + hparams.ffn_layer = "conv_hidden_relu" + hparams.layer_prepostprocess_dropout = 0.2 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.dropout = 0.3 + hparams.pos = "timing" + hparams.num_encoder_layers = 1 + hparams.num_decoder_layers = 2 + hparams.use_pad_remover = False + hparams.add_hparam("logit_normalization", True) + hparams.add_hparam("bottleneck_kind", "dvq") + hparams.add_hparam("bottleneck_bits", 4) + hparams.add_hparam("num_residuals", 1) + hparams.add_hparam("use_gold_targets", False) + hparams.add_hparam("do_compress_attend", False) + hparams.add_hparam("do_decompress_attend", False) + hparams.add_hparam("drop_inputs", False) + hparams.add_hparam("num_compress_steps", 2) + hparams.add_hparam("startup_steps", 10000) + hparams.add_hparam("mask_startup_steps", 50000) + hparams.add_hparam("latent_dropout", 0.0) + hparams.add_hparam("decode_autoregressive", False) + hparams.add_hparam("vq_beta", 0.25) + hparams.add_hparam("vq_epsilon", 1e-5) 
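+  # vq_beta weights the commitment loss; vq_decay and vq_epsilon govern the
+  # codebook EMA update (when enabled) in discretization.discrete_bottleneck,
+  # which is wired up via functools.partial in the test below.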
+ hparams.add_hparam("vq_decay", 0.999) + hparams.add_hparam("ema", False) + hparams.add_hparam("soft_em", True) + hparams.add_hparam("num_samples", 1) + hparams.add_hparam("num_latent_layers", 2) + hparams.add_hparam("num_res_layers", 2) + hparams.add_hparam("res_kernel_size", 3) + hparams.add_hparam("num_blocks", 1) + hparams.add_hparam("reshape_method", "slice") + hparams.add_hparam("shared_rel", False) + hparams.add_hparam("block_size", 1) + hparams.add_hparam("kernel_size", 3) + hparams.add_hparam("img_len", 8) + hparams.add_hparam("num_channels", 1) + hparams.add_hparam("local_and_global_att", False) + hparams.add_hparam("block_length", 32) + hparams.add_hparam("block_width", 128) + hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_1D) + hparams.add_hparam("latent_attention_type", cia.AttentionType.GLOBAL) + hparams.add_hparam("block_raster_scan", False) + hparams.add_hparam("num_latents", 1) + hparams.add_hparam("q_filter_width", 1) + hparams.add_hparam("kv_filter_width", 1) + return hparams + + +class LatentLayersTest(tf.test.TestCase): + + @test_utils.run_in_graph_and_eager_modes() + def testComputeBitsAndNats(self): + reconstruction_loss = tf.random_uniform(()) + prior_loss = tf.random_uniform(()) + data_dim = tf.random_uniform((), maxval=1000, dtype=tf.int32) + latent_dim = tf.random_uniform((), maxval=1000, dtype=tf.int32) + nats_per_dim, bits_per_dim = latent_layers.compute_nats_and_bits_per_dim( + data_dim, + latent_dim, + reconstruction_loss, + prior_loss) + + nats_per_dim_py, bits_per_dim_conv_py = self.evaluate( + [nats_per_dim, bits_per_dim * tf.log(2.)]) + self.assertAllClose(nats_per_dim_py, bits_per_dim_conv_py) + + @test_utils.run_in_graph_and_eager_modes() + def testTransformerAutoencoder(self): + hparams = imagetransformer_latent_tiny() + hparams.mode = tf_estimator.ModeKeys.TRAIN + block_dim = int(hparams.hidden_size // hparams.num_blocks) + block_v_size = 2**(hparams.bottleneck_bits / + (hparams.num_residuals * hparams.num_blocks)) + block_v_size = int(block_v_size) + means = tf.get_variable( + name="means", + shape=[hparams.num_residuals, + hparams.num_blocks, + block_v_size, + block_dim], + initializer=tf.uniform_unit_scaling_initializer()) + hparams.bottleneck = functools.partial( + discretization.discrete_bottleneck, + hidden_size=hparams.hidden_size, + z_size=hparams.bottleneck_bits, + filter_size=hparams.filter_size, + startup_steps=hparams.startup_steps, + bottleneck_kind=hparams.bottleneck_kind, + num_blocks=hparams.num_blocks, + num_residuals=hparams.num_residuals, + reshape_method=hparams.reshape_method, + beta=hparams.vq_beta, + decay=hparams.vq_decay, + soft_em=hparams.soft_em, + num_samples=hparams.num_samples, + epsilon=hparams.vq_epsilon, + ema=hparams.ema, + means=means) + + inputs = None + batch_size = hparams.batch_size + targets = tf.random_uniform([batch_size, + hparams.img_len, + hparams.img_len, + hparams.hidden_size], + minval=-1., maxval=1.) 
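+    # The random `targets` stand in for already-embedded image features of
+    # shape [batch, img_len, img_len, hidden_size]; no real data is needed.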
+ target_space_id = None + + tf.train.create_global_step() + decoder_output, losses, cache = latent_layers.transformer_autoencoder( + inputs, targets, target_space_id, hparams) + + self.assertEqual(set(losses), {"extra", "extra_loss", "latent_pred"}) + + self.evaluate(tf.global_variables_initializer()) + decoder_output_, extra_loss_, latent_pred_ = self.evaluate( + [decoder_output, losses["extra_loss"], losses["latent_pred"]]) + self.assertEqual(decoder_output_.shape, (batch_size, + hparams.img_len, + hparams.img_len, + hparams.hidden_size)) + self.assertEqual(extra_loss_.shape, (batch_size,)) + self.assertEqual(latent_pred_.shape, (batch_size,)) + self.assertAllGreaterEqual(extra_loss_, 0.) + self.assertAllGreaterEqual(latent_pred_, 0.) + self.assertEqual(cache, None) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/layers/message_passing_attention.py b/tensor2tensor/layers/message_passing_attention.py new file mode 100644 index 000000000..e2db7246a --- /dev/null +++ b/tensor2tensor/layers/message_passing_attention.py @@ -0,0 +1,935 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for attention.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import expert_utils + +import tensorflow.compat.v1 as tf + + +def multihead_graph_attention(query_antecedent, + memory_antecedent, + bias, + total_key_depth, + total_value_depth, + output_depth, + num_heads, + dropout_rate, + image_shapes=None, + attention_type="edge_vector", + name="multihead_graph_attention", + save_weights_to=None, + make_image_summary=True, + dropout_broadcast_dims=None, + adjacency_matrix=None, + num_edge_types=5, + vars_3d=False, + **kwargs): + """Multihead scaled-dot-product attention with input/output transformations. + + Args: + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: a Tensor with shape [batch, length_m, channels] or None + bias: bias Tensor (see attention_bias()) + total_key_depth: an integer + total_value_depth: an integer + output_depth: an integer + num_heads: an integer dividing total_key_depth and total_value_depth + dropout_rate: a floating point number + image_shapes: optional tuple of integer scalars. + see comments for attention_image_summary() + attention_type: a string, either "dot_product", "dot_product_relative", + "local_mask_right", "local_unmasked", "masked_dilated_1d", + "unmasked_dilated_1d", graph, or any attention function + with the signature (query, key, value, **kwargs) + name: an optional string. + save_weights_to: an optional dictionary to capture attention weights + for vizualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. 
+ dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + adjacency_matrix: an optional tensor of shape [batch, len_q, len_q] + containing edge vectors for attention + num_edge_types: number of edge types, an int + vars_3d: use 3-dimensional variables for input/output transformations + **kwargs (dict): Parameters for the attention function + + Returns: + The result of the attention transformation. The output shape is + [batch_size, length_q, output_depth] + + Raises: + ValueError: if the key depth or value depth are not divisible by the + number of attention heads. + """ + if total_key_depth % num_heads != 0: + raise ValueError("Key depth (%d) must be divisible by the number of " + "attention heads (%d)." % (total_key_depth, num_heads)) + if total_value_depth % num_heads != 0: + raise ValueError("Value depth (%d) must be divisible by the number of " + "attention heads (%d)." % (total_value_depth, num_heads)) + vars_3d_num_heads = num_heads if vars_3d else None + with tf.variable_scope( + name, + default_name="multihead_attention", + values=[query_antecedent, memory_antecedent]): + + q, k, v = common_attention.compute_qkv( + query_antecedent, + memory_antecedent, + total_key_depth, + total_value_depth, + vars_3d_num_heads=vars_3d_num_heads) + q = common_attention.split_heads(q, num_heads) + k = common_attention.split_heads(k, num_heads) + v = common_attention.split_heads(v, num_heads) + + key_depth_per_head = total_key_depth // num_heads + if not vars_3d: + q *= key_depth_per_head**-0.5 + + additional_returned_value = None + if callable(attention_type): # Generic way to extend multihead_attention + x = attention_type(q, k, v, **kwargs) + if isinstance(x, tuple): + x, additional_returned_value = x # Unpack + + elif attention_type == "edge_vector": + x = graph_attention( + q, + k, + v, + bias, + dropout_rate, + image_shapes, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=dropout_broadcast_dims, + adjacency_matrix=adjacency_matrix, + num_edge_types=num_edge_types) + + x = common_attention.combine_heads(x) + + # Set last dim specifically. + x.set_shape(x.shape.as_list()[:-1] + [total_value_depth]) + + if vars_3d: + o_var = tf.get_variable( + "o", [num_heads, total_value_depth // num_heads, output_depth]) + o_var = tf.reshape(o_var, [total_value_depth, output_depth]) + x = tf.tensordot(x, o_var, axes=1) + else: + x = common_layers.dense( + x, output_depth, use_bias=False, name="output_transform") + if additional_returned_value is not None: + return x, additional_returned_value + return x + + +@expert_utils.add_name_scope() +def make_edge_vectors(adjacency_matrix, + num_edge_types, + depth, + name=None): + """Gets edge vectors for the edge types in the adjacency matrix. + + Args: + adjacency_matrix: A [batch, num_nodes, num_nodes, num_edge_types] tensor. 
+ num_edge_types: Number of different edge types + depth: Number of channels + name: A optional string name for scoping + Returns: + A [batch, num_nodes, num_nodes, depth] vector of tensors + """ + with tf.variable_scope(name, default_name="edge_vectors"): + att_adj_vectors_shape = [num_edge_types, depth] + adjacency_matrix_shape = common_layers.shape_list(adjacency_matrix) + adj_vectors = ( + tf.get_variable( + "adj_vectors", + att_adj_vectors_shape, + initializer=tf.random_normal_initializer(0, depth**-0.5)) * + (depth**0.5)) + + att_adj_vectors = tf.matmul( + tf.reshape(tf.to_float(adjacency_matrix), [-1, num_edge_types]), + adj_vectors) + # Reshape to be [batch, num_nodes, num_nodes, depth]. + att_adj_vectors = tf.reshape(att_adj_vectors, [ + adjacency_matrix_shape[0], adjacency_matrix_shape[1], + adjacency_matrix_shape[2], depth + ]) + return att_adj_vectors + + +def graph_attention(q, + k, + v, + bias, + dropout_rate=0.0, + image_shapes=None, + name=None, + make_image_summary=True, + save_weights_to=None, + dropout_broadcast_dims=None, + adjacency_matrix=None, + num_edge_types=5): + """graph attention. + + Args: + q: a Tensor with shape [batch, heads, length_q, depth_k] + k: a Tensor with shape [batch, heads, length_kv, depth_k] + v: a Tensor with shape [batch, heads, length_kv, depth_v] + bias: bias Tensor (see attention_bias()) + dropout_rate: a floating point number + image_shapes: optional tuple of integer scalars. + see comments for attention_image_summary() + name: an optional string + make_image_summary: True if you want an image summary. + save_weights_to: an optional dictionary to capture attention weights + for vizualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + dropout_broadcast_dims: an optional list of integers less than 4 + specifying in which dimensions to broadcast the dropout decisions. + saves memory. + adjacency_matrix: optional matrix of [batch, length, length] ids indicating + edge type + num_edge_types: an int indicating number of edge types + Returns: + A Tensor of shape [batch, length, depth(q)] + """ + with tf.variable_scope( + name, default_name="dot_product_attention", values=[q, k, v]) as scope: + # [batch, num_heads, query_length, memory_length] + logits = tf.matmul(q, k, transpose_b=True) + if adjacency_matrix is not None: + key_head_depth = common_layers.shape_list(q)[-1] + adjacency_vectors = make_edge_vectors( + adjacency_matrix, + num_edge_types, + key_head_depth, + name=name) + # transposing q to be [batch, length_q, heads, depth_k] + # to allow for matmul with [batch, length_q, length_q, depth_k] + q_t = tf.transpose(q, [0, 2, 1, 3]) + adj_logits = tf.matmul(q_t, adjacency_vectors, transpose_b=True) + logits += tf.transpose(adj_logits, [0, 2, 1, 3]) + # [batch, depth, num_nodes, num_nodes] + if bias is not None: + logits += bias + weights = tf.nn.softmax(logits, name="attention_weights") + if save_weights_to is not None: + save_weights_to[scope.name] = weights + # dropping out the attention links for each of the heads + weights = common_layers.dropout_with_broadcast_dims( + weights, 1.0 - dropout_rate, broadcast_dims=dropout_broadcast_dims) + if common_layers.should_generate_summaries() and make_image_summary: + common_attention.attention_image_summary(weights, image_shapes) + return tf.matmul(weights, v) + + +def _compute_edge_transforms(node_states, + depth, + num_transforms, + name="transform"): + """Helper function that computes transformation for keys and values. 
+ + Let B be the number of batches. + Let N be the number of nodes in the graph. + Let D be the size of the node hidden states. + Let K be the size of the attention keys/queries (total_key_depth). + Let V be the size of the attention values (total_value_depth). + Let T be the total number of transforms (num_transforms). + + Computes the transforms for keys or values for attention. + * For each node N_j and edge type t, a key K_jt of size K is computed. When an + edge of type t goes from node N_j to any other node, K_jt is the key that is + in the attention process. + * For each node N_j and edge type t, a value V_jt of size V is computed. When + an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt) + produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt. + + Args: + node_states: A tensor of shape [B, L, D] + depth: An integer (K or V) + num_transforms: An integer (T), + name: A name for the function + + Returns: + x: A The attention keys or values for each node and edge type + (shape [B, N*T, K or V]) + """ + node_shapes = common_layers.shape_list(node_states) + x = common_layers.dense( + node_states, + depth * num_transforms, + use_bias=False, + name=name) + + batch = node_shapes[0] # B. + length = node_shapes[1] # N. + + # Making the fourth dimension explicit by separating the vectors of size + # K*T (in k) and V*T (in v) into two-dimensional matrices with shape [K, T] + # (in k) and [V, T] in v. + # + x = tf.reshape(x, [batch, length, num_transforms, depth]) + + # Flatten out the fourth dimension. + x = tf.reshape(x, [batch, length * num_transforms, depth]) + + return x + + +def compute_mpnn_qkv(node_states, + total_key_depth, + total_value_depth, + num_transforms): + """Computes query, key and value for edge matrices. + + Let B be the number of batches. + Let N be the number of nodes in the graph. + Let D be the size of the node hidden states. + Let K be the size of the attention keys/queries (total_key_depth). + Let V be the size of the attention values (total_value_depth). + Let T be the total number of transforms (num_transforms). + + Computes the queries, keys, and values for attention. + * For each node N_i in the graph, a query Q_i of size K is computed. This + query is used to determine the relative weights to give to each of the + node's incoming edges. + * For each node N_j and edge type t, a key K_jt of size K is computed. When an + edge of type t goes from node N_j to any other node, K_jt is the key that is + in the attention process. + * For each node N_j and edge type t, a value V_jt of size V is computed. When + an edge of type t goes from node N_j to node N_i, Attention(Q_i, K_jt) + produces a weight w_ijt. The message sent along this edge is w_ijt * V_jt. + + Args: + node_states: A Tensor with shape [B, N, D]. + total_key_depth: an integer (K). + total_value_depth: an integer (V). + num_transforms: a integer specifying number of transforms (T). This is + typically the number of edge types. + Returns: + q: The attention queries for each destination node (shape [B, N, K]). + k: The attention keys for each node and edge type (shape [B, N*T, K]). + v: The attention values for each node and edge type (shape [B, N*T, V]). + """ + + # node_states is initially a tensor with shape [B, N, D]. The call to dense + # creates a D x K kernel that serves as a fully-connected layer. 
+ # + # For each possible batch b and node n in the first two dimensions of + # node_states, the corresponding size-D vector (the third dimension of + # node_states) is the hidden state for node n in batch b. Each of these size-D + # vectors is multiplied by the kernel to produce an attention query of size K. + # The result is a tensor of size [B, N, K] containing the attention queries + # for each node in each batch. + q = common_layers.dense( + node_states, total_key_depth, use_bias=False, name="q_mpnn") + + # Creates the attention keys in a manner similar to the process of creating + # the attention queries. One key is created for each type of outgoing edge the + # corresponding node might have, meaning k will have shape [B, N, K*T]. + k = _compute_edge_transforms(node_states, + total_key_depth, + num_transforms, + name="k_mpnn") + v = _compute_edge_transforms(node_states, + total_value_depth, + num_transforms, + name="v_mpnn") + + return q, k, v + + +def sparse_message_pass_batched(node_states, + adjacency_matrices, + num_edge_types, + hidden_size, + use_bias=True, + average_aggregation=False, + name="sparse_ggnn_batched"): + """Identical to sparse_ggnn except that each input has a batch dimension. + + B = The batch size. + N = The number of nodes in each batch. + H = The size of the hidden states. + T = The number of edge types. + + Args: + node_states: Initial states of each node in the graph. Shape: [B, N, H] + adjacency_matrices: Adjacency matrices of directed edges for each edge + type and batch. Shape: [B, N, N, T] (sparse). + num_edge_types: The number of edge types. T. + hidden_size: The size of the hidden layer. H. + use_bias: Whether to use bias in the hidden layer. + average_aggregation: How to aggregate the incoming node messages. If + average_aggregation is true, the messages are averaged. If it is false, + they are summed. + name: (optional) The scope within which tf variables should be created. + + Returns: + The result of one round of message-passing of shape [B, N, H]. + """ + + b, n = tf.shape(node_states)[0], tf.shape(node_states)[1] + + # Flatten the batch dimension of the node states. + node_states = tf.reshape(node_states, [b*n, hidden_size]) + + # Flatten the batch dimension of the adjacency matrices. + indices = adjacency_matrices.indices + new_index2 = indices[:, 3] # The edge type dimension. + + # Offset N x N adjacency matrix by the batch number in which it appears. + new_index0 = indices[:, 1] + indices[:, 0] * tf.cast(n, tf.int64) + new_index1 = indices[:, 2] + indices[:, 0] * tf.cast(n, tf.int64) + + # Combine these indices as triples. + new_indices = tf.stack([new_index0, new_index1, new_index2], axis=1) + + # Build the new sparse matrix. + new_shape = [tf.cast(b*n, tf.int64), tf.cast(b*n, tf.int64), num_edge_types] + adjacency_matrices = tf.SparseTensor(indices=new_indices, + values=adjacency_matrices.values, + dense_shape=new_shape) + + # Run a message-passing step and return the result with the batch dimension. + node_states = sparse_message_pass( + node_states, + adjacency_matrices, + num_edge_types, + hidden_size, + use_bias=use_bias, + average_aggregation=average_aggregation, + name=name) + return tf.reshape(node_states, [b, n, hidden_size]) + + +def sparse_message_pass(node_states, + adjacency_matrices, + num_edge_types, + hidden_size, + use_bias=True, + average_aggregation=False, + name="sparse_ggnn"): + """One message-passing step for a GNN with a sparse adjacency matrix. + + Implements equation 2 (the message passing step) in + [Li et al. 
2015](https://arxiv.org/abs/1511.05493). + + N = The number of nodes in each batch. + H = The size of the hidden states. + T = The number of edge types. + + Args: + node_states: Initial states of each node in the graph. Shape is [N, H]. + adjacency_matrices: Adjacency matrix of directed edges for each edge + type. Shape is [N, N, T] (sparse tensor). + num_edge_types: The number of edge types. T. + hidden_size: The size of the hidden state. H. + use_bias: Whether to use bias in the hidden layer. + average_aggregation: How to aggregate the incoming node messages. If + average_aggregation is true, the messages are averaged. If it is false, + they are summed. + name: (optional) The scope within which tf variables should be created. + + Returns: + The result of one step of Gated Graph Neural Network (GGNN) message passing. + Shape: [N, H] + """ + n = tf.shape(node_states)[0] + t = num_edge_types + incoming_edges_per_type = tf.sparse_reduce_sum(adjacency_matrices, axis=1) + + # Convert the adjacency matrix into shape [T, N, N] - one [N, N] adjacency + # matrix for each edge type. Since sparse tensor multiplication only supports + # two-dimensional tensors, we actually convert the adjacency matrix into a + # [T * N, N] tensor. + adjacency_matrices = tf.sparse_transpose(adjacency_matrices, [2, 0, 1]) + adjacency_matrices = tf.sparse_reshape(adjacency_matrices, [t * n, n]) + + # Multiply the adjacency matrix by the node states, producing a [T * N, H] + # tensor. For each (edge type, node) pair, this tensor stores the sum of + # the hidden states of the node's neighbors over incoming edges of that type. + messages = tf.sparse_tensor_dense_matmul(adjacency_matrices, node_states) + + # Rearrange this tensor to have shape [N, T * H]. The incoming states of each + # nodes neighbors are summed by edge type and then concatenated together into + # a single T * H vector. + messages = tf.reshape(messages, [t, n, hidden_size]) + messages = tf.transpose(messages, [1, 0, 2]) + messages = tf.reshape(messages, [n, t * hidden_size]) + + # Run each of those T * H vectors through a linear layer that produces + # a vector of size H. This process is equivalent to running each H-sized + # vector through a separate linear layer for each edge type and then adding + # the results together. + # + # Note that, earlier on, we added together all of the states of neighbors + # that were connected by edges of the same edge type. Since addition and + # multiplying by a linear layer are commutative, this process was equivalent + # to running each incoming edge through a linear layer separately and then + # adding everything at the end. + with tf.variable_scope(name, default_name="sparse_ggnn"): + final_node_states = common_layers.dense( + messages, hidden_size, use_bias=False) + + # Multiply the bias by for each edge type by the number of incoming nodes + # of that edge type. 
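+    # incoming_edges_per_type has shape [N, T] (a per-node count of incoming
+    # edges of each type) and bias has shape [T, H], so the matmul below adds
+    # an [N, H] bias in which each edge type's bias vector is scaled by the
+    # number of incoming edges of that type. For example, with T=2 and H=3, a
+    # node whose incoming_edges_per_type row is [2., 1.] receives
+    # 2*bias[0] + 1*bias[1].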
+ if use_bias: + bias = tf.get_variable("bias", initializer=tf.zeros([t, hidden_size])) + final_node_states += tf.matmul(incoming_edges_per_type, bias) + + if average_aggregation: + incoming_edges = tf.reduce_sum(incoming_edges_per_type, -1, keepdims=True) + incoming_edges = tf.tile(incoming_edges, [1, hidden_size]) + final_node_states /= incoming_edges + 1e-7 + + return tf.reshape(final_node_states, [n, hidden_size]) + + +def multihead_mpnn_attention(node_states, + total_key_depth, + total_value_depth, + output_depth, + num_heads, + adjacency_matrix=None, + num_edge_types=5, + num_transforms=None, + use_weighted_sum=False, + name="mpnn_attention"): + """Multihead scaled-dot-product attention with input/output transformations. + + Let B be the number of batches. + Let N be the number of nodes in the graph. + Let D be the size of the node hidden states. + Let K be the size of the attention keys/queries (total_key_depth). + Let V be the size of the attention values (total_value_depth). + Let O be the size of the attention output (output_depth). + Let H be the number of heads (num_heads). + Let T be the total number of transforms (num_transforms). + + The key and value depths are split across all of the heads. For example, if + the key depth is 6 and there are three heads, then the key for each head has + depth 2. + + Args: + node_states: A Tensor with shape [B, N, D] + total_key_depth: An integer (K). + total_value_depth: An integer (V). + output_depth: An integer (O). + num_heads: An integer (H). + adjacency_matrix: An Tensor of ints with shape [B, T, N, N]. If there is an + edge from node j to node i in batch b, then adjacency_matrix[b, i, j] + contains the type of that edge as an integer. Otherwise, it contains 0. + num_edge_types: An integer indicating number of edge types. + num_transforms: An integer indicating number of transforms (T). If None, + then num_transforms will be equal to num_edge_types. + use_weighted_sum: If False, will only use a single transform per edge type. + Otherwise, use a learned weighted sum of transforms per edge type. + name: A string. + + Returns: + The result of the attention transformation. The output shape is [B, N, O]. + + Raises: + ValueError: if the key depth or value depth are not divisible by the + number of attention heads. + """ + if total_key_depth % num_heads != 0: + raise ValueError("Key depth (%d) must be divisible by the number of " + "attention heads (%d)." % (total_key_depth, num_heads)) + if total_value_depth % num_heads != 0: + raise ValueError("Value depth (%d) must be divisible by the number of " + "attention heads (%d)." % (total_value_depth, num_heads)) + with tf.variable_scope( + name, default_name="multihead_mpnn_attention", values=[node_states]): + # If not explicitly set, use num_transforms set to num_edge_types. + num_transforms = ( + num_edge_types if num_transforms is None else num_transforms) + + # Create the query for each node's incoming edges. + # Create the keys/values for each node for each possible outgoing edge type. + q, k, v = compute_mpnn_qkv( + node_states, + total_key_depth, + total_value_depth, + num_transforms) + + q_shape = tf.shape(q) # As above, q_shape is [B, N, K]. + + # Divides each query/key/value into separate heads. Specifically, the + # query/key/value for each (batch, node) pair (i.e., the third dimensions + # of q, k, and v) are broken into H separate pieces. These pieces are used + # as the separate attention heads. The resulting tensors have shape + # [B, H, N, ?/H], where ? 
= K, K*T or V*T as appropriate. + q = common_attention.split_heads(q, num_heads) # Shape [B, H, N, K/H]. + k = common_attention.split_heads(k, num_heads) # Shape [B, H, N, K*T/H]. + v = common_attention.split_heads(v, num_heads) # Shape [B, H, N, V*T/H]. + key_depth_per_head = total_key_depth // num_heads + + # Ensures that the logits don't have too large of a magnitude. + q *= key_depth_per_head**-0.5 + + # Rearrange the dimensions so that the head is first. This will make + # subsequent steps easier (we loop over the head). + q = tf.transpose(q, [1, 0, 2, 3]) # Shape [H, B, N, K/H]. + k = tf.transpose(k, [1, 0, 2, 3]) # Shape [H, B, N, K*T/H]. + v = tf.transpose(v, [1, 0, 2, 3]) # Shape [H, B, N, V*T/H]. + + # Split the keys and values into separate per-edge-type keys and values. + k = tf.reshape(k, [ + num_heads, q_shape[0], q_shape[1], num_transforms, + total_key_depth // num_heads + ]) # Shape [H, B, N, T, K/H]. + k = tf.transpose(k, [0, 1, 3, 2, 4]) # Shape [H, B, T, N, K/H]. + + v = tf.reshape(v, [ + num_heads, q_shape[0], q_shape[1], num_transforms, + total_value_depth // num_heads + ]) # Shape [H, B, N, T, V/H]. + v = tf.transpose(v, [0, 1, 3, 2, 4]) # Shape [H, B, T, N, V/H]. + + # Perform attention for each head and combine the results into a list. + # head_outputs stores a list of tensors, each with shape [1, B, N, V/H]. + # The last dimension contains the values computed for each attention head. + # Each value was determined by computing attention over all of the + # incoming edges for node n, weighting the incoming values accordingly, + # and adding those weighted values together. + head_outputs = [] + for head_id in range(num_heads): + output = dot_product_mpnn_attention( + q[head_id], + k[head_id], + v[head_id], + adjacency_matrix, + num_edge_types, + num_transforms=num_transforms, + use_weighted_sum=use_weighted_sum) + + # Store this result in the list of attention results for each head. + # The call to expand_dims gives output shape [1, B, N, V/H], which will + # come in handy when we combine the heads together. + head_outputs.append(tf.expand_dims(output, axis=0)) + + # Combine the heads together into one tensor and rearrange the dimensions. + x = tf.concat(head_outputs, axis=0) # Shape [H, B, N, V/H]. + x = tf.transpose(x, [1, 0, 2, 3]) # Shape [B, H, N, V/H]. + + # Concatenate the values produced by each head together into one vector. + x = common_attention.combine_heads(x) # Shape [B, N, V]. + + # A fully-connected linear layer to convert from the value vectors of size V + # to output vectors of length O (the appropriate output length). + x = common_layers.dense( + x, output_depth, use_bias=False, name="output_transform") + return x + + +def dot_product_mpnn_attention(q, + k, + v, + adjacency_matrix, + num_edge_types, + num_transforms=None, + use_weighted_sum=False, + name=None): + """Dot product attention with edge vectors. + + Let B be the number of batches. + Let N be the number of nodes in the graph. + Let K be the size of the attention keys/queries. + Let V be the size of the attention values. + Let T be the total number of transforms (num_transforms). + + Args: + q: The query Tensor of shape [B, N, K]. + k: The key Tensor of shape [B, T, N, K]. + v: The value Tensor of shape [B, T, N, V]. + adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at + indices b, i, j, k is the indicator of the edge + from node j to node i in batch b. A standard adjacency matrix will only + have one edge type while a mutigraph will have multiple edge types. 
+ num_edge_types: An integer specifying number of edge types. + num_transforms: An integer indicating number of transforms (T). If None, + then num_transforms will be equal to num_edge_types. + use_weighted_sum: If False, will only use a single transform per edge type. + Otherwise, use a learned weighted sum of transforms per edge type. + name: A string. + + Returns: + A Tensor of shape [B, N, V] storing the result of computing attention + weights using the queries and keys and combining the values according to + those weights. + + Raises: + ValueError: if num_transforms doesn't equal num_edge_types and not using + weighted sum. + """ + with tf.variable_scope( + name, + default_name="dot_product_mpnn_attention", + values=[q, k, v, adjacency_matrix, num_edge_types]): + # If not explicitly set, use num_transforms set to num_edge_types. + num_transforms = ( + num_edge_types if num_transforms is None else num_transforms) + + if not use_weighted_sum and num_transforms != num_edge_types: + raise ValueError("num_transforms must equal num_edge_types unless " + "use_weighted_sum is True") + + # Computes the raw dot-product attention values between each query and + # the corresponding keys it needs to consider. + # + # This operation takes the dot product of (the query for + # each node) and (the key for each node for each possible edge type), + # creating an N x N matrix for each edge type. The entry at index (i, j) + # is the dot-product for the edge from node i to node j of the appropriate + # type. These dot products will eventually become attention weights + # specifying how much node i weights an edge of that type coming from node + # j. + all_edge_logits = tf.matmul( + tf.tile(tf.expand_dims(q, axis=1), [1, num_edge_types, 1, 1]), + k, + transpose_b=True) + + # The adjacency matrix assumes there is only one directed edge (i <- j) for + # each pair of nodes. If such an edge exists, it contains the integer + # type of that edge at position (i, j) of the adjacency matrix. + # + # Construct edge_vectors of shape [B, N, N, T]. + if use_weighted_sum: + # Use dense representation for edge vectors. + edge_vectors = make_edge_vectors( + adjacency_matrix, + num_edge_types, + num_transforms) + else: + # Generate one-hot vectors based on edge types. + # If there is an edge from node j to node i of type t, then index t of the + # last dimension is 1 for entry (i, j) of the second and third dimensions. + edge_vectors = tf.one_hot(adjacency_matrix, num_transforms) + + # Rearranging the dimensions to match the shape of all_edge_logits. + edge_vectors = tf.transpose(edge_vectors, [0, 3, 1, 2]) + + # Element-wise multiplies all_edge_logits and edge_vectors. + # + # In other words: all_edge_logits contains N x N matrices of query-key + # products. This element-wise multiplication zeroes out entries that do not + # correspond to actual edges in the graph of the appropriate edge type. + # all_edge_logits retains shape [B, T, N, N]. + all_edge_logits *= edge_vectors + + # Since there can only be one edge from node A to node B, we can collapse + # the T different adjacency matrices containing key-query pairs into one + # adjacency matrix. logits is [B, N, N]. + # TODO(dbieber): Use a reshape instead of reduce sum to attend over all + # edges instead of over all neighboring nodes to handle the multigraph case. 
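+    # Summing over the edge-type axis collapses all_edge_logits from
+    # [B, T, N, N] to [B, N, N]. Because the masking above zeroed out every
+    # entry that does not correspond to an actual edge, and each ordered node
+    # pair has at most one edge, the sum simply selects the logit of that
+    # edge's type.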
+ logits = tf.reduce_sum(all_edge_logits, axis=1) + + # For pairs of nodes with no edges between them, add a large negative bias + # to each location without an edge so that the softmax of entries with the + # value 0 become a small negative number instead. + bias = 0 + bias = tf.to_float(tf.equal( + tf.reduce_sum(adjacency_matrix, axis=-1), 0)) * -1e9 + logits += bias + + # Turn the raw key-query products into a probability distribution (or, + # in terms of attention, weights). The softmax is computed across the + # last dimension of logits. + compatibility = tf.nn.softmax(logits) # Shape [B, N, N]. + + # Computes a summary showing the attention matrix as an image. Does not do + # any work toward actually performing attention. + common_attention.attention_image_summary( + tf.expand_dims(compatibility, axis=1), None) + + # Repeats the attention matrix T times for each batch, producing + # a tensor with shape [B, T, N, N] where the [N, N] component is T + # repeats of the values found in compatibility. + edge_compatibility = tf.tile( + tf.expand_dims(compatibility, axis=1), [1, num_edge_types, 1, 1]) + + # Zeroes out the entries in edge_compatibility that do not correspond to + # actual edges. + edge_compatibility *= edge_vectors # Shape [B, T, N, N]. + + output = compute_values(edge_compatibility, v) + return output + + +def ggnn_fast_dense(node_states, + adjacency_matrix, + num_edge_types, + total_value_depth, + name=None): + """ggnn version of the MPNN from Gilmer et al. + + Let B be the number of batches. + Let D be the size of the node hidden states. + Let K be the size of the attention keys/queries. + Let V be the size of the output of the ggnn. + Let T be the number of transforms / edge types. + + Args: + node_states: The value Tensor of shape [B, T, N, D]. + adjacency_matrix: A Tensor of shape [B, N, N, T]. An entry at + indices b, i, j, k is the indicator of the edge from node j to node i in + batch b. A standard adjacency matrix will only have values of one, while a + mutigraph may have larger integer values. + num_edge_types: An integer specifying number of edge types. + total_value_depth: An integer (V) + name: A string. + + Returns: + A Tensor of shape [B, N, V] storing the result of computing attention + weights using the queries and keys and combining the values according to + those weights. + + Raises: + ValueError: if num_transforms doesn't equal num_edge_types and not using + weighted sum. + """ + # between the same nodes (with only one edge of each type. adjacency_matrix + # will need to be converted to shape [B, T, N, N]. + with tf.variable_scope( + name, + default_name="ggnn_fast_dense", + values=[node_states, adjacency_matrix, num_edge_types]): + nodes_shape = common_layers.shape_list(node_states) + v = _compute_edge_transforms(node_states, + total_value_depth, + num_edge_types, + name="v_mpnn") + v = tf.reshape(v, [nodes_shape[0], nodes_shape[1], num_edge_types, + total_value_depth + ]) # Shape [B, N, T, V]. + v = tf.transpose(v, [0, 2, 1, 3]) # Shape [B, T, N, V]. + + # Rearranging the dimensions to match the shape of all_edge_logits. + edge_vectors = tf.transpose(adjacency_matrix, [0, 3, 1, 2]) + output = compute_values(edge_vectors, v) + return output + + +def compute_values(edge_compatibility, v): + """Compute values. If edge compatibilities is just adjacency, we get ggnn. 
+ + Args: + edge_compatibility: A tensor of shape [batch, num_transforms, length, depth] + v: A tensor of shape [batch, num_transforms, length, depth] + + Returns: + output: A [batch, length, depth] tensor + """ + + # Computes the incoming value vectors for each node by weighting them + # according to the attention weights. These values are still segregated by + # edge type. + # Shape = [B, T, N, V]. + all_edge_values = tf.matmul(tf.to_float(edge_compatibility), v) + + # Combines the weighted value vectors together across edge types into a + # single N x V matrix for each batch. + output = tf.reduce_sum(all_edge_values, axis=1) # Shape [B, N, V]. + return output + + +def precompute_edge_matrices(adjacency, hparams): + """Precompute the a_in and a_out tensors. + + (we don't want to add to the graph everytime _fprop is called) + Args: + adjacency: placeholder of real valued vectors of shape [B, L, L, E] + hparams: HParams object + Returns: + edge_matrices: [batch, L * D, L * D] the dense matrix for message passing + viewed as a block matrix (L,L) blocks of size (D,D). Each plot is a function + of the edge vector of the adjacency matrix at that spot. + """ + batch_size, num_nodes, _, edge_dim = common_layers.shape_list(adjacency) + + # build the edge_network for incoming edges + with tf.variable_scope("edge_network"): + x = tf.reshape( + adjacency, [batch_size * num_nodes * num_nodes, edge_dim], + name="adj_reshape_in") + + for ip_layer in range(hparams.edge_network_layers): + name = "edge_network_layer_%d"%ip_layer + x = tf.layers.dense(common_layers.layer_preprocess(x, hparams), + hparams.edge_network_hidden_size, + activation=tf.nn.relu, + name=name) + x = tf.layers.dense(common_layers.layer_preprocess(x, hparams), + hparams.hidden_size**2, + activation=None, + name="edge_network_output") + + # x = [batch * l * l, d *d] + edge_matrices_flat = tf.reshape(x, [batch_size, num_nodes, + num_nodes, hparams.hidden_size, + hparams.hidden_size]) + + # reshape to [batch, l * d, l *d] + edge_matrices = tf.reshape( + tf.transpose(edge_matrices_flat, [0, 1, 3, 2, 4]), [ + -1, num_nodes * hparams.hidden_size, + num_nodes * hparams.hidden_size + ], + name="edge_matrices") + + return edge_matrices + + +def dense_message_pass(node_states, edge_matrices): + """Computes a_t from h_{t-1}, see bottom of page 3 in the paper. + + Args: + node_states: [B, L, D] tensor (h_{t-1}) + edge_matrices (tf.float32): [B, L*D, L*D] + + Returns: + messages (tf.float32): [B, L, D] For each pair + of nodes in the graph a message is sent along both the incoming and + outgoing edge. + """ + batch_size, num_nodes, node_dim = common_layers.shape_list(node_states) + + # Stack the nodes as a big column vector. + h_flat = tf.reshape( + node_states, [batch_size, num_nodes * node_dim, 1], name="h_flat") + + messages = tf.reshape( + tf.matmul(edge_matrices, h_flat), [batch_size * num_nodes, node_dim], + name="messages_matmul") + + message_bias = tf.get_variable("message_bias", shape=node_dim) + messages = messages + message_bias + messages = tf.reshape(messages, [batch_size, num_nodes, node_dim]) + return messages diff --git a/tensor2tensor/layers/modalities.py b/tensor2tensor/layers/modalities.py new file mode 100644 index 000000000..0d9894997 --- /dev/null +++ b/tensor2tensor/layers/modalities.py @@ -0,0 +1,1506 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Modalities, which specify a feature's domain. + +T2TModel applies a default transformation to each feature according to its +modality. Override them by specifying a model's +hparams.{bottom,loss,top,weights_fn}. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_audio +from tensor2tensor.layers import common_image_attention as cia +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.layers import discretization + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +import tensorflow_probability as tfp + + +class ModalityType(object): + """Types of modalities.""" + + AUDIO = "audio" + AUDIO_SPECTRAL = "audio_spectral" + CLASS_LABEL = "class_label" + CTC_SYMBOL = "ctc_symbol" # symbol with CTC loss + GENERIC_L2_LOSS = "generic_l2" # identity modality with L2 loss + IDENTITY = "identity" # identity top and bottom + IDENTITY_SYMBOL = "identity_symbol" # symbol with identity top and bottom + IMAGE = "image" + # images using channel compression for generation + IMAGE_CHANNEL_BOTTOM_IDENTITY = "image_channel_bottom_identity" + # images using channel compression for generation + IMAGE_CHANNEL_COMPRESS = "image_channel_compress" + IMAGE_CHANNEL_EMBEDDINGS_BOTTOM = "image_channel_embeddings_bottom" + MULTI_LABEL = "multi_label" + ONE_HOT_CLASS_LABEL = "one_hot_class_label" + REAL = "real" # real vectors + REAL_L2_LOSS = "real_l2" # real vectors with L2 as loss + # real vectors with log Poisson regression loss + REAL_LOG_POISSON_LOSS = "real_log_poisson" + SIGMOID_CLASS_LABEL = "sigmoid_class_label" # sigmoid cross-entropy loss + # sigmoid cross-entropy applied on max-pooling over timesteps + SIGMOID_MAX_POOLING_CLASS_LABEL = "sigmoid_max_pooling_class_label" + # softmax cross-entropy applied on average-pooling over timesteps + SOFTMAX_AVERAGE_POOLING_CLASS_LABEL = "softmax_average_pooling_class_label" + # softmax cross-entropy applied on last-timestep encoding + SOFTMAX_LAST_TIMESTEP_CLASS_LABEL = "softmax_last_timestep_class_label" + # softmax cross-entropy applied on max-pooling over timesteps + SOFTMAX_MAX_POOLING_CLASS_LABEL = "softmax_max_pooling_class_label" + SPEECH_RECOGNITION = "speech_recognition" + SYMBOL = "symbol" + SYMBOL_WEIGHTS_ALL = "symbol_weights_all" # symbol for features w/o 0-padding + SYMBOL_ONE_HOT = "symbol_one_hot" # symbol with one hot as embeddings + VIDEO = "video" + VIDEO_BITWISE = "video_bitwise" # video where bottom embeds pixels bitwise + VIDEO_IDENTITY = "video_identity" # video with identity top and bottom + VIDEO_L1 = "video_l1" # video with L2 loss + VIDEO_L2 = "video_l2" # video with L1 loss + # video with L1 loss and raw input (sequences of frames) + VIDEO_L1_RAW = "video_l1_raw" + # video with L2 loss and raw input (sequences of frames) + VIDEO_L2_RAW = "video_l2_raw" + # video with pixel noise on input during training + 
VIDEO_PIXEL_NOISE = "video_pixel_noise" + + @staticmethod + def get_choices(): + return [ + ModalityType.AUDIO, + ModalityType.AUDIO_SPECTRAL, + ModalityType.CLASS_LABEL, + ModalityType.CTC_SYMBOL, + ModalityType.GENERIC_L2_LOSS, + ModalityType.IDENTITY, + ModalityType.IDENTITY_SYMBOL, + ModalityType.IMAGE, + ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY, + ModalityType.IMAGE_CHANNEL_COMPRESS, + ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM, + ModalityType.MULTI_LABEL, + ModalityType.ONE_HOT_CLASS_LABEL, + ModalityType.REAL, + ModalityType.REAL_L2_LOSS, + ModalityType.REAL_LOG_POISSON_LOSS, + ModalityType.SIGMOID_CLASS_LABEL, + ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL, + ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL, + ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL, + ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL, + ModalityType.SPEECH_RECOGNITION, + ModalityType.SYMBOL, + ModalityType.SYMBOL_ONE_HOT, + ModalityType.SYMBOL_WEIGHTS_ALL, + ModalityType.VIDEO, + ModalityType.VIDEO_BITWISE, + ModalityType.VIDEO_IDENTITY, + ModalityType.VIDEO_L1, + ModalityType.VIDEO_L2, + ModalityType.VIDEO_L1_RAW, + ModalityType.VIDEO_L2_RAW, + ModalityType.VIDEO_PIXEL_NOISE, + ] + + +# Bottom transformations, applied to all features + + +def audio_bottom(x, model_hparams, vocab_size): + """Transform input from data space to model space. + + Args: + x: A Tensor with shape [batch, ...] + model_hparams: HParams, model hyperparmeters. + vocab_size: int, vocabulary size. + + Returns: + body_input: A Tensor with shape [batch, ?, ?, + model_hparams.hidden_size]. + """ + del vocab_size # unused arg + inputs = x + with tf.variable_scope("audio_modality"): + # TODO(aidangomez): Will need to sort out a better audio pipeline + def xnet_resblock(x, filters, res_relu, name): + """Xception block.""" + with tf.variable_scope(name): + # Typically audio samples are >100k samples in length and have a width + # of 2 or 4. Mono audio has a single channel while stereo has 2. + y = common_layers.separable_conv_block( + x, + filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], + first_relu=True, + padding="SAME", + force2d=True, + name="sep_conv_block") + y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2)) + return y + common_layers.conv_block( + x, + filters, [((1, 1), (1, 1))], + padding="SAME", + strides=(2, 2), + first_relu=res_relu, + force2d=True, + name="res_conv0") + + x = tf.to_float(inputs) / 255. + x.set_shape([None, None, None, 1]) + for i in range(model_hparams.audio_compression): + x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i) + return xnet_resblock(x, + model_hparams.hidden_size, + False, + "compress_block_final") + + +def audio_spectral_bottom(x, model_hparams, vocab_size): + """Transform input from data space to model space. + + Args: + x: A Tensor with shape [batch, ...] + model_hparams: HParams, model hyperparmeters. + vocab_size: int, vocabulary size. + + Returns: + body_input: A Tensor with shape [batch, ?, ?, + model_hparams.hidden_size]. 
+ """ + del vocab_size # unused arg + inputs = x + with tf.variable_scope("audio_spectral_modality"): + # TODO(aidangomez): Will need to sort out a better audio pipeline + def xnet_resblock(x, filters, res_relu, name): + """Xception-like block.""" + with tf.variable_scope(name): + # We only stride along the length dimension to preserve the spectral + # bins (which are tiny in dimensionality relative to length) + y = common_layers.separable_conv_block( + x, + filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], + first_relu=True, + padding="SAME", + force2d=True, + name="sep_conv_block") + y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 1)) + return y + common_layers.conv_block( + x, + filters, [((1, 1), (1, 1))], + padding="SAME", + strides=(2, 1), + first_relu=res_relu, + force2d=True, + name="res_conv0") + + # Bitcast back from int32 + x = tf.bitcast(inputs, tf.float32) + x.set_shape([None, None, None, 1]) + for i in range(model_hparams.audio_compression): + x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i) + return xnet_resblock(x, + model_hparams.hidden_size, + False, + "compress_block_final") + + +def class_label_bottom(x, model_hparams, vocab_size): + with tf.variable_scope("class_label_modality_%d_%d" % ( + vocab_size, model_hparams.hidden_size)): + multiplier = 1.0 + if model_hparams.multiply_embedding_mode == "sqrt_depth": + multiplier = model_hparams.hidden_size**0.5 + return common_layers.embedding(x, + vocab_size, + model_hparams.hidden_size, + multiplier=multiplier) + + +def class_label_targets_bottom(x, model_hparams, vocab_size): + with tf.variable_scope("class_label_modality_%d_%d" % ( + vocab_size, model_hparams.hidden_size)): + return tf.zeros([common_layers.shape_list(x)[0], + 1, + 1, + model_hparams.hidden_size]) + + +def identity_bottom(x, model_hparams, vocab_size): + del model_hparams, vocab_size # unused arg + return tf.to_float(x) + + +def image_bottom(x, model_hparams, vocab_size): + del model_hparams, vocab_size # unused arg + with tf.variable_scope("image_modality"): + if not tf.executing_eagerly(): + tf.summary.image( + "inputs", common_layers.tpu_safe_image_summary(x), max_outputs=2) + return tf.to_float(x) + + +def image_targets_bottom(x, model_hparams, vocab_size): + """Bottom transformation for target images.""" + pixel_embedding_size = 64 + inputs = x + with tf.variable_scope("image_modality"): + if not tf.executing_eagerly(): + tf.summary.image( + "targets_bottom", + common_layers.tpu_safe_image_summary(inputs), + max_outputs=1) + inputs_shape = common_layers.shape_list(inputs) + if len(inputs_shape) != 4: + raise ValueError("Assuming images given as int tensors in the format " + "[batch, height, width, channels] (256 values).") + # We embed each of 256=vocab_size possible pixel values. + embedding_var = tf.get_variable( + "pixel_embedding", + [vocab_size, pixel_embedding_size]) + hot_inputs = tf.one_hot(tf.to_int32(inputs), vocab_size) + hot_inputs = tf.reshape(hot_inputs, [-1, vocab_size]) + embedded = tf.matmul(hot_inputs, embedding_var) + # Let's now merge all channels that were embedded into a single vector. + merged_size = pixel_embedding_size * inputs_shape[3] + embedded = tf.reshape(embedded, inputs_shape[:3] + [merged_size]) + merged = tf.layers.dense( + embedded, + model_hparams.hidden_size, + name="merge_pixel_embedded_channels") + return merged + + +def _image_channel_compress_bottom(inputs, model_hparams, name="bottom"): + """Compresses channel-wise input pixels into whole pixel representions. 
+ + Perform conversion of RGB pixel values to a real number in the range -1 to + 1. This combines pixel channels to form a representation of shape + [img_len, img_len]. + + Args: + inputs: Tensor representing RGB pixel intensities as integers, of shape + [batch, img_len, img_len, channels]. + model_hparams: HParams, model hyperparmeters. + name: string, scope. + + Returns: + body_input: Tensor of shape + [batch, img_len, img_len, model_hparams.hidden_size]. + """ + num_channels = 3 + with tf.variable_scope(name): + inputs = tf.to_float(inputs) + hp = model_hparams + if hp.mode != tf_estimator.ModeKeys.PREDICT: + tf.summary.image( + "inputs", + common_layers.tpu_safe_image_summary(inputs), + max_outputs=2) + inputs = common_layers.convert_rgb_to_symmetric_real(inputs) + + # Reshape inputs to apply convolutions across [img_len, img_len*channels]. + inputs_shape = common_layers.shape_list(inputs) + inputs = tf.reshape( + inputs, [-1, inputs_shape[1], inputs_shape[2] * inputs_shape[3], 1]) + + # Compress RGB intensities for each pixel using a convolution. + outputs = tf.layers.conv2d( + inputs, + model_hparams.hidden_size, + kernel_size=(1, num_channels), + padding="VALID", + strides=(1, num_channels), + activation=tf.nn.relu, + name="conv_input") + return outputs + + +def image_channel_compress_bottom(x, model_hparams, vocab_size): + del vocab_size # unused arg + return _image_channel_compress_bottom(x, model_hparams, "input_bottom") + + +def image_channel_compress_targets_bottom(x, model_hparams, vocab_size): + del vocab_size # unused arg + return _image_channel_compress_bottom(x, model_hparams, "output_bottom") + + +def image_channel_embeddings_bottom(x, model_hparams, vocab_size): + """Bottom transformation for image targets.""" + del vocab_size # unused arg + inputs = tf.to_int32(x) + io_depth = model_hparams.num_channels + tshape = common_layers.shape_list(inputs) + hidden_size = model_hparams.hidden_size + target_embeddings = cia.get_channel_embeddings( + io_depth, inputs, hidden_size, "input_bottom") + return tf.reshape(target_embeddings, + [tshape[0], tshape[1], tshape[2] * io_depth, hidden_size]) + + +def make_targets_bottom(bottom): + def targets_bottom(x, model_hparams, vocab_size): + with tf.variable_scope("targets_bottom"): + return bottom(x, model_hparams, vocab_size) + return targets_bottom + + +def real_bottom(x, model_hparams, vocab_size): + del vocab_size # unused arg + with tf.variable_scope("real"): + return tf.layers.dense( + tf.to_float(x), model_hparams.hidden_size, name="bottom") + + +def speech_recognition_bottom(x, model_hparams, vocab_size): + """Use batchnorm instead of CMVN and shorten the stft with strided convs. + + Args: + x: float32 tensor with shape [batch_size, len, 1, freqs * channels] + model_hparams: HParams, model hyperparmeters. + vocab_size: int, vocabulary size. 
+ + Returns: + float32 tensor with shape [batch_size, shorter_len, 1, hidden_size] + """ + del vocab_size # unused arg + inputs = x + p = model_hparams + + num_mel_bins = p.audio_num_mel_bins + num_channels = 3 if p.audio_add_delta_deltas else 1 + + with tf.variable_scope("speech_recognition_modality"): + if p.audio_preproc_in_bottom: + # Compute filterbanks + with tf.variable_scope("fbanks"): + waveforms = tf.squeeze(inputs, [2, 3]) + mel_fbanks = common_audio.compute_mel_filterbank_features( + waveforms, + sample_rate=p.audio_sample_rate, + dither=p.audio_dither, + preemphasis=p.audio_preemphasis, + frame_length=p.audio_frame_length, + frame_step=p.audio_frame_step, + lower_edge_hertz=p.audio_lower_edge_hertz, + upper_edge_hertz=p.audio_upper_edge_hertz, + num_mel_bins=p.audio_num_mel_bins, + apply_mask=True) + if p.audio_add_delta_deltas: + mel_fbanks = common_audio.add_delta_deltas(mel_fbanks) + x = tf.reshape(mel_fbanks, + common_layers.shape_list(mel_fbanks)[:2] + + [num_mel_bins, num_channels]) + + nonpadding_mask = 1. - common_attention.embedding_to_padding(x) + num_of_nonpadding_elements = tf.reduce_sum( + nonpadding_mask) * num_mel_bins * num_channels + + # This replaces CMVN estimation on data + var_epsilon = 1e-09 + mean = tf.reduce_sum( + x, axis=[1], keepdims=True) / num_of_nonpadding_elements + variance = (num_of_nonpadding_elements * mean**2. - + 2. * mean * tf.reduce_sum(x, axis=[1], keepdims=True) + + tf.reduce_sum(x**2, axis=[1], keepdims=True) + ) / num_of_nonpadding_elements + x = (x - mean) * tf.rsqrt(variance + var_epsilon) * tf.expand_dims( + nonpadding_mask, -1) + else: + x = inputs + + # The convention is that the models are flattened along the spatial, + # dimensions, thus the speech preprocessor treats frequencies and + # channels as image colors (last axis) + x.set_shape([None, None, num_mel_bins, num_channels]) + + # TODO(chorowski): how to specify bottom's hparams and avoid hardcoding? + x = tf.pad(x, [[0, 0], [0, 8], [0, 0], [0, 0]]) + for _ in range(2): + x = tf.layers.conv2d( + x, 128, (3, 3), (2, 2), use_bias=False) + x = common_layers.layer_norm(x) + x = tf.nn.relu(x) + + xshape = common_layers.shape_list(x) + # apply a conv that will remove all frequencies and at the same time + # project the output into desired hidden_size + x = tf.pad(x, [[0, 0], [0, 2], [0, 0], [0, 0]]) + x = tf.layers.conv2d(x, p.hidden_size, (3, xshape[2]), use_bias=False) + + assert common_layers.shape_list(x)[2] == 1 + x = common_layers.layer_norm(x) + x = tf.nn.relu(x) + return x + + +def get_weights(model_hparams, vocab_size, hidden_dim=None): + """Create or get concatenated embedding or softmax variable. + + Args: + model_hparams: HParams, model hyperparmeters. + vocab_size: int, vocabulary size. + hidden_dim: dim of the variable. Defaults to _model_hparams' hidden_size + + Returns: + a list of num_shards Tensors. + """ + if hidden_dim is None: + hidden_dim = model_hparams.hidden_size + num_shards = model_hparams.symbol_modality_num_shards + shards = [] + for i in range(num_shards): + shard_size = (vocab_size // num_shards) + ( + 1 if i < vocab_size % num_shards else 0) + var_name = "weights_%d" % i + shards.append( + tf.get_variable( + var_name, [shard_size, hidden_dim], + initializer=tf.random_normal_initializer(0.0, hidden_dim**-0.5))) + if num_shards == 1: + ret = shards[0] + else: + ret = tf.concat(shards, 0) + # Convert ret to tensor. 
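+  # convert_gradient_to_tensor is an identity op whose gradient is forced to
+  # be a dense Tensor, so the gather in the symbol bottoms below
+  # back-propagates a single dense gradient into the (possibly sharded)
+  # embedding instead of IndexedSlices.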
+ if not tf.executing_eagerly(): + ret = common_layers.convert_gradient_to_tensor(ret) + return ret + + +def _symbol_bottom_simple(x, model_hparams, vocab_size, name, reuse): + """Bottom transformation for symbols.""" + with tf.variable_scope(name, reuse=reuse): + # Ensure the inputs are 3-D + if len(x.get_shape()) == 4: + x = tf.squeeze(x, axis=3) + while len(x.get_shape()) < 3: + x = tf.expand_dims(x, axis=-1) + + var = get_weights(model_hparams, vocab_size) + x = common_layers.dropout_no_scaling( + x, 1.0 - model_hparams.symbol_dropout) + ret = common_layers.gather(var, x) + if model_hparams.multiply_embedding_mode == "sqrt_depth": + ret *= model_hparams.hidden_size**0.5 + ret *= tf.expand_dims( + common_layers.cast_like(tf.not_equal(x, 0), ret), -1) + return ret + + +def symbol_bottom(x, model_hparams, vocab_size): + if (model_hparams.shared_embedding_and_softmax_weights or + model_hparams.get("shared_embedding")): + return _symbol_bottom_simple( + x, model_hparams, vocab_size, "shared", reuse=None) + return _symbol_bottom_simple( + x, model_hparams, vocab_size, "input_emb", reuse=None) + + +def symbol_targets_bottom(x, model_hparams, vocab_size): + """Bottom transformation for target symbols.""" + if (model_hparams.shared_embedding_and_softmax_weights or + model_hparams.get("shared_embedding")): + try: + return _symbol_bottom_simple( + x, model_hparams, vocab_size, "shared", reuse=True) + except ValueError: + # perhaps there were no inputs, and this is a new variable. + return _symbol_bottom_simple( + x, model_hparams, vocab_size, "shared", reuse=None) + else: + return _symbol_bottom_simple( + x, model_hparams, vocab_size, "target_emb", reuse=None) + + +def symbol_one_hot_bottom(x, model_hparams, vocab_size): + del model_hparams # unused arg + return tf.one_hot(x, vocab_size) + + +def video_bottom(x, model_hparams, vocab_size): + del model_hparams, vocab_size # unused arg + common_video.gif_summary("inputs", x, max_outputs=1) + x = common_layers.standardize_images(x) + return x + + +def video_targets_bottom(x, model_hparams, vocab_size): + del model_hparams, vocab_size # unused arg + common_video.gif_summary("targets", x, max_outputs=1) + x = common_layers.standardize_images(x) + return x + + +def video_bitwise_bottom(x, model_hparams, vocab_size): + """Bottom transformation for embedding video bitwise.""" + pixel_embedding_size = 64 + inputs = x + with tf.variable_scope("video_modality_bitwise", reuse=tf.AUTO_REUSE): + common_layers.summarize_video(inputs, "bottom") + # Embed bitwise. + assert vocab_size == 256 + embedded = discretization.int_to_bit_embed(inputs, 8, + pixel_embedding_size) + # Project. + return tf.layers.dense( + embedded, + model_hparams.hidden_size, + name="merge_pixel_embedded_frames") + + +def video_bitwise_targets_bottom(x, model_hparams, vocab_size): + """Bottom transformation for embedding target video bitwise.""" + pixel_embedding_size = 64 + inputs = x + with tf.variable_scope("video_modality_bitwise", reuse=tf.AUTO_REUSE): + common_layers.summarize_video(inputs, "targets_bottom") + # Embed bitwise. + assert vocab_size == 256 + embedded = discretization.int_to_bit_embed(inputs, 8, + pixel_embedding_size) + # Transpose and project. 
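+    # This mirrors video_bitwise_bottom above except for this step: the
+    # embedded target frames are rearranged by common_layers.time_to_channels
+    # (which, as the name suggests, folds the time axis into the channel
+    # axis) before the final dense projection.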
+ transposed = common_layers.time_to_channels(embedded) + return tf.layers.dense( + transposed, + model_hparams.hidden_size, + name="merge_pixel_embedded_frames") + + +def video_identity_bottom(x, model_hparams, vocab_size): + del model_hparams, vocab_size # unused arg + common_video.gif_summary("inputs", x, max_outputs=1) + return x + + +def video_identity_targets_bottom(x, model_hparams, vocab_size): + del model_hparams, vocab_size # unused arg + common_video.gif_summary("targets", x, max_outputs=1) + return x + + +def video_pixel_noise_bottom(x, model_hparams, vocab_size): + """Bottom transformation for video.""" + input_noise = getattr(model_hparams, "video_modality_input_noise", 0.25) + inputs = x + if model_hparams.mode == tf_estimator.ModeKeys.TRAIN: + background = tfp.stats.percentile(inputs, 50., axis=[0, 1, 2, 3]) + input_shape = common_layers.shape_list(inputs) + input_size = tf.reduce_prod(input_shape[:-1]) + input_mask = tf.multinomial( + tf.log([[input_noise, 1.-input_noise]]), input_size) + input_mask = tf.reshape(tf.cast(input_mask, tf.int32), + input_shape[:-1]+[1]) + inputs = inputs * input_mask + background * (1 - input_mask) + return video_bottom(inputs, model_hparams, vocab_size) + + +def convert_rgb_to_real(prediction, targets): + """Convert prediction and target from rgb to real.""" + prediction = tf.squeeze(prediction, axis=-1) + prediction = common_layers.convert_rgb_to_real(prediction) + targets = common_layers.convert_rgb_to_real(targets) + return prediction, targets + + +def video_raw_bottom(x, model_hparams, vocab_size): + del model_hparams, vocab_size # unused arg + common_video.gif_summary("inputs", x) + return common_layers.convert_rgb_to_real(x) + + +def video_raw_targets_bottom(x, model_hparams, vocab_size): + del model_hparams, vocab_size # unused arg + common_video.gif_summary("targets_bottom", x) + return common_layers.convert_rgb_to_real(x) + + +# Loss transformations, applied to target features + + +def ctc_symbol_loss(top_out, targets, model_hparams, vocab_size, weight_fn): + """Compute the CTC loss.""" + del model_hparams, vocab_size # unused arg + logits = top_out + with tf.name_scope("ctc_loss", values=[logits, targets]): + # For CTC we assume targets are 1d, [batch, length, 1, 1] here. 
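+    # Zero target ids are treated as padding: targets_lengths counts the
+    # non-zero labels per example, and the dense targets are converted to the
+    # sparse representation expected by tf.nn.ctc_loss before computing the
+    # per-example loss.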
+ targets_shape = targets.get_shape().as_list() + assert len(targets_shape) == 4 + assert targets_shape[2] == 1 + assert targets_shape[3] == 1 + targets = tf.squeeze(targets, axis=[2, 3]) + logits = tf.squeeze(logits, axis=[2, 3]) + targets_mask = 1 - tf.to_int32(tf.equal(targets, 0)) + targets_lengths = tf.reduce_sum(targets_mask, axis=1) + sparse_targets = tf.keras.backend.ctc_label_dense_to_sparse( + targets, targets_lengths) + xent = tf.nn.ctc_loss( + sparse_targets, + logits, + targets_lengths, + time_major=False, + preprocess_collapse_repeated=False, + ctc_merge_repeated=False) + weights = weight_fn(targets) + return tf.reduce_sum(xent), tf.reduce_sum(weights) + + +def generic_loss(top_out, targets, model_hparams, vocab_size, weights_fn): + """Compute loss numerator and denominator for one shard of output.""" + del vocab_size # unused arg + logits = top_out + logits = common_attention.maybe_upcast(logits, hparams=model_hparams) + cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.0) + return common_layers.padded_cross_entropy( + logits, + targets, + model_hparams.label_smoothing, + cutoff=cutoff, + weights_fn=weights_fn) + + +def generic_l2_loss(body_output, + targets, + model_hparams, + vocab_size, + weights_fn): + del model_hparams, vocab_size, weights_fn # unused arg + loss = tf.squared_difference(body_output, tf.to_float(targets)) + return tf.reduce_mean(loss), tf.constant(1.0) + + +def multi_label_loss(top_out, targets, model_hparams, vocab_size, weights_fn): + """Average loss over the labels.""" + del vocab_size # unused arg + logits = top_out + num_labels = tf.shape(targets)[1] + logits = tf.tile(logits, [1, num_labels, 1, 1, 1]) + + xent, weights = common_layers.padded_cross_entropy( + logits, + targets, + model_hparams.label_smoothing, + weights_fn=weights_fn, + reduce_sum=False, + ) + xent = tf.squeeze(xent, [2, 3]) + weights = tf.squeeze(weights, [2, 3]) + # average loss over all labels + loss = tf.reduce_sum(xent, axis=1) + weights = tf.reduce_sum(weights, axis=1) + loss /= (weights + 1e-8) + weights = tf.to_float(tf.greater(weights, 0.)) + + return tf.reduce_sum(loss*weights), tf.reduce_sum(weights) + + +def one_hot_class_label_loss(top_out, + targets, + model_hparams, + vocab_size, + weights_fn): + """Apply softmax cross-entropy between outputs and targets. + + Args: + top_out: logits Tensor with shape [batch, ?, ?, num_classes] + targets: one-hot encoding Tensor with shape [batch, ?, ?, num_classes] + model_hparams: HParams, model hyperparmeters. + vocab_size: int, vocabulary size. 
+ weights_fn: + + Returns: + loss_scale (cross-entropy), loss_denom + """ + del model_hparams, vocab_size # unused arg + loss_scale = tf.losses.softmax_cross_entropy( + onehot_labels=targets, logits=top_out) + weights = weights_fn(targets) + loss_denom = tf.reduce_sum(weights) + return loss_scale, loss_denom + + +def real_l2_loss(top_out, targets, model_hparams, vocab_size, weights_fn): + del model_hparams, vocab_size # unused arg + predictions = top_out + if (len(common_layers.shape_list(top_out)) != len( + common_layers.shape_list(targets))): + predictions = tf.squeeze(top_out, axis=[-1]) + with tf.name_scope("l2"): + weights = weights_fn(targets) + l2 = tf.pow(predictions - targets, 2) + return tf.reduce_sum(l2 * weights), tf.reduce_sum(weights) + + +def real_log_poisson_loss(top_out, + targets, + model_hparams, + vocab_size, + weights_fn): + """Poisson loss for real.""" + del model_hparams, vocab_size # unused arg + predictions = top_out + if (len(common_layers.shape_list(top_out)) != len( + common_layers.shape_list(targets))): + predictions = tf.squeeze(top_out, axis=[-1]) + with tf.name_scope("log_possion"): + weights = weights_fn(targets) + lp_loss = tf.nn.log_poisson_loss(targets, predictions) + return tf.reduce_sum(lp_loss * weights), tf.reduce_sum(weights) + + +def sigmoid_class_label_loss(top_out, + targets, + model_hparams, + vocab_size, + weights_fn): + """Loss for class label.""" + # Expect inputs of size [batch-size, timesteps, 1, num-classes], where the + # last dimension of num-classes represents logits for binary labels + del model_hparams, vocab_size # unused arg + loss_scale = tf.losses.sigmoid_cross_entropy( + multi_class_labels=targets, logits=top_out) + weights = weights_fn(targets) + loss_denom = tf.reduce_sum(weights) + return loss_scale, loss_denom + + +def sigmoid_max_pooling_class_label_loss(top_out, + targets, + model_hparams, + vocab_size, + weights_fn): + """Loss for class label.""" + # Expect inputs of size [batch-size, 1, 1, num-classes], where the + # last dimension of num-classes represents logits for binary labels + del model_hparams, vocab_size # unused arg + loss_scale = tf.losses.sigmoid_cross_entropy( + multi_class_labels=targets, logits=top_out) + weights = weights_fn(targets) + loss_denom = tf.reduce_sum(weights) + return loss_scale, loss_denom + + +def symbol_one_hot_loss(top_out, + targets, + model_hparams, + vocab_size, + weights_fn): + del model_hparams, weights_fn # unused arg + labels = tf.one_hot(targets, vocab_size) + loss = tf.nn.softmax_cross_entropy_with_logits( + logits=top_out, labels=labels) + return tf.reduce_mean(loss), tf.constant(1.0) + + +def video_loss(top_out, targets, model_hparams, vocab_size, weights_fn): + """Compute loss numerator and denominator for one shard of output.""" + del vocab_size # unused arg + logits = top_out + logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:]) + targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:]) + cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.01) + return common_layers.padded_cross_entropy( + logits, + targets, + model_hparams.label_smoothing, + cutoff=cutoff, + weights_fn=weights_fn) + + +def video_identity_loss(top_out, + targets, + model_hparams, + vocab_size, + weights_fn): + """Compute loss numerator and denominator for one shard of output.""" + del vocab_size # unused arg + # TODO(nikip): Try L2 loss + logits = top_out + logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:]) + targets = 
tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:]) + cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.01) + return common_layers.padded_cross_entropy( + logits, + targets, + model_hparams.label_smoothing, + cutoff=cutoff, + weights_fn=weights_fn) + + +def video_l1_internal_loss(logits, targets, model_hparams): + cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.2) + return tf.nn.relu(tf.abs(logits - targets) - cutoff) + + +def video_l1_loss(top_out, targets, model_hparams, vocab_size, weights_fn): + """Compute loss numerator and denominator for one shard of output.""" + del vocab_size # unused arg + logits = top_out + logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:-1]) + targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:]) + weights = weights_fn(targets) + # Shift targets by 0.5 so later just casting to int gives the prediction. + # So for int targets, say 0 and 7, we actually train to predict 0.5 and 7.5. + # Later (in merics or infer) this is cast to int anyway. Also, we have no + # loss beyond cutoff = 0.2 as these are already correct predictions. + targets = tf.to_float(targets) + 0.5 + loss = video_l1_internal_loss(logits, targets, model_hparams) + return tf.reduce_sum(loss * weights), tf.reduce_sum(weights) + + +def video_l2_internal_loss(logits, targets, model_hparams): + cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.2) + return tf.nn.relu( + tf.squared_difference(logits, targets) - cutoff * cutoff) + + +def video_l2_loss(top_out, targets, model_hparams, vocab_size, weights_fn): + """Compute loss numerator and denominator for one shard of output.""" + del vocab_size # unused arg + logits = top_out + logits = tf.reshape(logits, [-1] + common_layers.shape_list(logits)[2:-1]) + targets = tf.reshape(targets, [-1] + common_layers.shape_list(targets)[2:]) + weights = weights_fn(targets) + # Shift targets by 0.5 so later just casting to int gives the prediction. + # So for int targets, say 0 and 7, we actually train to predict 0.5 and 7.5. + # Later (in merics or infer) this is cast to int anyway. Also, we have no + # loss beyond cutoff = 0.2 as these are already correct predictions. + targets = tf.to_float(targets) + 0.5 + loss = video_l2_internal_loss(logits, targets, model_hparams) + return tf.reduce_sum(loss * weights), tf.reduce_sum(weights) + + +def video_l2_raw_loss(top_out, targets, model_hparams, vocab_size, weights_fn): + del model_hparams, vocab_size, weights_fn # unused arg + prediction, groundtruth = convert_rgb_to_real(top_out, targets) + loss = tf.losses.mean_squared_error(prediction, groundtruth) + return loss, tf.constant(1.0) + + +def video_l1_raw_loss(top_out, targets, model_hparams, vocab_size, weights_fn): + del model_hparams, vocab_size, weights_fn # unused arg + prediction, groundtruth = convert_rgb_to_real(top_out, targets) + loss = tf.losses.absolute_difference(prediction, groundtruth) + return loss, tf.constant(1.0) + + +# Top transformations, applied to target features + + +def is_pointwise(func): + """Decorator for whether the function is pointwise. + + An example of a pointwise function is a linear layer followed by + a softmax. Given a tensor [batch, length, height, depth] it operates + only on the last axis, on every point in [batch, length, height] fully + independently. In contrast, a classifier that first averages over length + and height is not pointwise, as it depends on the whole field. 
It is useful + to know if top functions are pointwise to speed up decoding in certain models. + + Args: + func: Function to decorate. + + Returns: + Original function with an attribute pointwise set to True. + """ + func.pointwise = True + return func + + +def class_label_top(body_output, targets, model_hparams, vocab_size): + """Transform inputs from model space to target space. + + Average over inner dims and a linear layer to logits. + + Args: + body_output: A Tensor with shape [batch, ?, ?, body_output_size]. + targets: + model_hparams: HParams, model hyperparmeters. + vocab_size: int, vocabulary size. + + Returns: + a Tensors, each with shape [batch_size, 1, 1, 1, vocab_size] + """ + del targets # unused arg + with tf.variable_scope("class_label_modality_%d_%d" % ( + vocab_size, model_hparams.hidden_size)): + x = body_output + x = tf.reduce_mean(x, axis=[1, 2], keepdims=True) + res = tf.layers.dense(x, vocab_size) + return tf.expand_dims(res, 3) + + +def identity_top(body_output, targets, model_hparams, vocab_size): + del targets, model_hparams, vocab_size # unused arg + return body_output + + +def image_top(body_output, targets, model_hparams, vocab_size): + """Top transformation for images.""" + del targets # unused arg + # TODO(lukaszkaiser): is this a universal enough way to get channels? + num_channels = model_hparams.problem.num_channels + with tf.variable_scope("rgb_softmax"): + body_output_shape = common_layers.shape_list(body_output) + reshape_shape = body_output_shape[:3] + reshape_shape.extend([num_channels, vocab_size]) + res = tf.layers.dense(body_output, vocab_size * num_channels) + res = tf.reshape(res, reshape_shape) + if not tf.get_variable_scope().reuse: + res_argmax = tf.argmax(res, axis=-1) + tf.summary.image( + "result", + common_layers.tpu_safe_image_summary(res_argmax), + max_outputs=1) + return res + + +def image_channel_compress_top(body_output, targets, model_hparams, vocab_size): + """Transforms body output to return logits. + + Args: + body_output: Tensor of shape [batch, img_len, img_len, depth]. + targets: + model_hparams: HParams, model hyperparmeters. + vocab_size: int, vocabulary size. + + Returns: + Tensor of shape [batch, img_len, img_len, channels, vocab_size]. 
+ """ + del targets # unused arg + with tf.variable_scope("image_channel_compress_modality"): + hidden_size = model_hparams.hidden_size + img_len = model_hparams.img_len + channels = 3 # RGB + batch = common_layers.shape_list(body_output)[0] + x = tf.layers.conv2d( + body_output, + hidden_size * channels, + kernel_size=(1, 1), + strides=(1, 1), + padding="VALID", + activation=tf.nn.relu, + name="decompress_conv") + x = tf.reshape(x, [batch, img_len, img_len * channels, hidden_size]) + x = common_layers.layer_preprocess(x, model_hparams) + x = tf.layers.dense(x, + vocab_size, + use_bias=True, + activation=None, + name="output_conv") + x = tf.reshape( + x, [batch, img_len, img_len, channels, vocab_size]) + return x + + +def image_channel_embeddings_top(body_output, + targets, + model_hparams, + vocab_size): + """Top transformation for images.""" + del targets # unused arg + with tf.variable_scope("image_channel_embeddings_bottom"): + img_len = model_hparams.img_len + channels = model_hparams.num_channels + x = tf.layers.dense( + body_output, 256, use_bias=True, activation=None, name="output_conv") + x = tf.reshape(x, + [-1, img_len, img_len, channels, vocab_size]) + return x + + +@is_pointwise +def real_top(body_output, targets, model_hparams, vocab_size): + del targets, model_hparams # unused arg + with tf.variable_scope("real"): + return tf.layers.dense(body_output, vocab_size, name="top") + + +def sigmoid_max_pooling_class_label_top(body_output, + targets, + model_hparams, + vocab_size): + """Transform inputs from model space to target space. + + Average over inner dims and a linear layer to logits. + + Args: + body_output: A Tensor with shape [batch, timesteps, 1, body_output_size]. + targets: + model_hparams: HParams, model hyperparmeters. + vocab_size: int, vocabulary size. + + Returns: + a Tensors, each with shape [batch_size, 1, 1, vocab_size] + """ + del targets # unused arg + with tf.variable_scope( + "sigmoid_max_pooling_class_symbol_modality_%d_%d" % ( + vocab_size, model_hparams.hidden_size)): + x = body_output + x = tf.reduce_max(x, axis=1, keepdims=True) + return tf.layers.dense(x, vocab_size) + + +def softmax_average_pooling_class_label_top(body_output, + targets, + model_hparams, + vocab_size): + """Loss for class label.""" + del targets # unused arg + with tf.variable_scope( + "softmax_average_pooling_onehot_class_label_modality_%d_%d" % ( + vocab_size, model_hparams.hidden_size)): + x = body_output + x = tf.reduce_mean(x, axis=1, keepdims=True) + return tf.layers.dense(x, vocab_size) + + +def softmax_last_timestep_class_label_top(body_output, + targets, + model_hparams, + vocab_size): + """Loss for class label.""" + del targets # unused arg + with tf.variable_scope( + "softmax_last_timestep_onehot_class_label_modality_%d_%d" % ( + vocab_size, model_hparams.hidden_size)): + x = body_output + x = tf.expand_dims(x[:, -1], 1) # Pick the last timestep + return tf.layers.dense(x, vocab_size) + + +def softmax_max_pooling_class_label_top(body_output, + targets, + model_hparams, + vocab_size): + """Loss for class label.""" + del targets # unused arg + with tf.variable_scope( + "softmax_max_pooling_onehot_class_label_modality_%d_%d" % ( + vocab_size, model_hparams.hidden_size)): + x = body_output + x = tf.reduce_max(x, axis=1, keepdims=True) + return tf.layers.dense(x, vocab_size) + + +@is_pointwise +def symbol_top(body_output, targets, model_hparams, vocab_size): + """Generate logits. + + Args: + body_output: A Tensor with shape + [batch, p0, p1, model_hparams.hidden_size]. 
+ targets: Unused. + model_hparams: HParams, model hyperparmeters. + vocab_size: int, vocabulary size. + + Returns: + logits: A Tensor with shape [batch, p0, p1, ?, vocab_size]. + """ + del targets # unused arg + if model_hparams.shared_embedding_and_softmax_weights: + scope_name = "shared" + reuse = tf.AUTO_REUSE + else: + scope_name = "softmax" + reuse = False + with tf.variable_scope(scope_name, reuse=reuse): + body_output_shape = common_layers.shape_list(body_output) + var = get_weights(model_hparams, vocab_size, body_output_shape[-1]) + if (model_hparams.factored_logits and + model_hparams.mode == tf_estimator.ModeKeys.TRAIN): + # insert channels dimension + body_output = tf.expand_dims(body_output, 3) + return common_layers.FactoredTensor(body_output, var) + else: + body_output = tf.reshape(body_output, [-1, body_output_shape[-1]]) + logits = tf.matmul(body_output, var, transpose_b=True) + return tf.reshape(logits, + body_output_shape[:-1] + [1, vocab_size]) + + +@is_pointwise +def symbol_one_hot_top(body_output, targets, model_hparams, vocab_size): + del targets, model_hparams, vocab_size # unused arg + return body_output + + +def video_top(body_output, targets, model_hparams, vocab_size): + """Top transformation for video.""" + del targets # unused arg + num_channels = model_hparams.problem.num_channels + shape = common_layers.shape_list(body_output) + reshape_shape = shape[:-1] + [num_channels, vocab_size] + res = tf.reshape(body_output, reshape_shape) + # Calculate argmax so as to have a summary with the produced images. + x = tf.argmax(tf.reshape(res, [-1, vocab_size]), axis=-1) + x = tf.reshape(x, shape[:-1] + [num_channels]) + common_video.gif_summary("results", x, max_outputs=1) + return res + + +def video_l1_top(body_output, targets, model_hparams, vocab_size): + """Top transformation for video.""" + del targets, vocab_size # unused arg + num_channels = model_hparams.problem.num_channels + num_frames = model_hparams.video_num_target_frames + with tf.variable_scope("rgb"): + body_output_shape = common_layers.shape_list(body_output) + res = tf.layers.dense(body_output, num_channels * num_frames, name="cast") + res = tf.reshape(res, body_output_shape[:3] + [num_channels, num_frames]) + res = tf.transpose(res, [0, 4, 1, 2, 3]) # Move frames next to batch. + if not tf.get_variable_scope().reuse: + res_argmax = res[:, -1, :, :, :] + tf.summary.image( + "result", + common_layers.tpu_safe_image_summary(res_argmax), + max_outputs=1) + return tf.expand_dims(res, axis=-1) # Add an axis like in perplexity. 
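+
+
+# Illustrative example (a sketch, not part of the library API): how a top
+# transformation turns body output into logits. `common_hparams` and the shapes
+# below are assumptions made for illustration only.
+#
+#   hparams = common_hparams.basic_params1()
+#   hparams.hidden_size = 64
+#   body_output = tf.zeros([8, 1, 1, hparams.hidden_size])
+#   logits = class_label_top(body_output, None, hparams, vocab_size=10)
+#   # logits has shape [8, 1, 1, 1, 10] and can be scored against integer
+#   # class targets with the default loss returned by get_loss().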
+ + +def video_raw_top(body_output, targets, model_hparams, vocab_size): + del targets, model_hparams, vocab_size # unused arg + frames = body_output + if isinstance(body_output, list): + frames = tf.stack(body_output, axis=1) + rgb_frames = common_layers.convert_real_to_rgb(frames) + common_video.gif_summary("body_output", rgb_frames) + return tf.expand_dims(rgb_frames, axis=-1) + + +# Utility functions similar to tf.keras for default transformations + + +def get_bottom(modality_type, value=None): + """Gets default bottom transformation; if none available, return value.""" + if modality_type == ModalityType.AUDIO: + return audio_bottom + elif modality_type == ModalityType.AUDIO_SPECTRAL: + return audio_spectral_bottom + elif modality_type in (ModalityType.CLASS_LABEL, + ModalityType.MULTI_LABEL, + ModalityType.ONE_HOT_CLASS_LABEL, + ModalityType.SIGMOID_CLASS_LABEL, + ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL, + ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL, + ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL, + ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL): + return class_label_bottom + elif modality_type in (ModalityType.CTC_SYMBOL, + ModalityType.SYMBOL, + ModalityType.SYMBOL_WEIGHTS_ALL): + return symbol_bottom + elif modality_type in (ModalityType.GENERIC_L2_LOSS, + ModalityType.IDENTITY, + ModalityType.IDENTITY_SYMBOL, + ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM): + return identity_bottom + elif modality_type == ModalityType.IMAGE: + return image_bottom + elif modality_type in (ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY, + ModalityType.IMAGE_CHANNEL_COMPRESS): + return image_channel_compress_bottom + elif modality_type in (ModalityType.REAL, + ModalityType.REAL_L2_LOSS, + ModalityType.REAL_LOG_POISSON_LOSS): + return real_bottom + elif modality_type == ModalityType.SPEECH_RECOGNITION: + return speech_recognition_bottom + elif modality_type == ModalityType.SYMBOL_ONE_HOT: + return symbol_one_hot_bottom + elif modality_type in (ModalityType.VIDEO, + ModalityType.VIDEO_L1, + ModalityType.VIDEO_L2): + return video_bottom + elif modality_type == ModalityType.VIDEO_BITWISE: + return video_bitwise_bottom + elif modality_type == ModalityType.VIDEO_IDENTITY: + return video_identity_bottom + elif modality_type in (ModalityType.VIDEO_L1_RAW, + ModalityType.VIDEO_L2_RAW): + return video_raw_bottom + elif modality_type == ModalityType.VIDEO_PIXEL_NOISE: + return video_pixel_noise_bottom + return value + + +def get_loss(modality_type, value=None): + """Gets default loss transformation; if none available, return value.""" + if modality_type in (ModalityType.AUDIO, + ModalityType.AUDIO_SPECTRAL, + ModalityType.CLASS_LABEL, + ModalityType.IDENTITY, + ModalityType.IDENTITY_SYMBOL, + ModalityType.IMAGE, + ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY, + ModalityType.IMAGE_CHANNEL_COMPRESS, + ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM, + ModalityType.REAL, + ModalityType.SPEECH_RECOGNITION, + ModalityType.SYMBOL, + ModalityType.SYMBOL_WEIGHTS_ALL): + return generic_loss + elif modality_type == ModalityType.CTC_SYMBOL: + return ctc_symbol_loss + elif modality_type == ModalityType.GENERIC_L2_LOSS: + return generic_l2_loss + elif modality_type == ModalityType.MULTI_LABEL: + return multi_label_loss + elif modality_type in (ModalityType.ONE_HOT_CLASS_LABEL, + ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL, + ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL, + ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL): + return one_hot_class_label_loss + elif modality_type == ModalityType.REAL_L2_LOSS: + return 
real_l2_loss + elif modality_type == ModalityType.REAL_LOG_POISSON_LOSS: + return real_log_poisson_loss + elif modality_type == ModalityType.SIGMOID_CLASS_LABEL: + return sigmoid_class_label_loss + elif modality_type == ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL: + return sigmoid_max_pooling_class_label_loss + elif modality_type == ModalityType.SYMBOL_ONE_HOT: + return symbol_one_hot_loss + elif modality_type in (ModalityType.VIDEO, + ModalityType.VIDEO_BITWISE, + ModalityType.VIDEO_PIXEL_NOISE): + return video_loss + elif modality_type == ModalityType.VIDEO_IDENTITY: + return video_identity_loss + elif modality_type == ModalityType.VIDEO_L1: + return video_l1_loss + elif modality_type == ModalityType.VIDEO_L1_RAW: + return video_l1_raw_loss + elif modality_type == ModalityType.VIDEO_L2: + return video_l2_loss + elif modality_type == ModalityType.VIDEO_L2_RAW: + return video_l2_raw_loss + return value + + +def get_name(modality_type, value=None): + """Gets default name for transformations; if none available, return value.""" + # For legacy reasons, modalities vary in their naming scheme. Future plans are + # to remove any need for get_name. We do not recommend using it. + if modality_type == ModalityType.AUDIO: + return lambda model_hparams, vocab_size: "audio_modality" + elif modality_type == ModalityType.AUDIO_SPECTRAL: + return lambda model_hparams, vocab_size: "audio_spectral_modality" + elif modality_type == ModalityType.GENERIC_L2_LOSS: + return lambda model_hparams, vocab_size: "generic_l2_loss_modality" + elif modality_type == ModalityType.IDENTITY: + return lambda model_hparams, vocab_size: "identity_modality" + elif modality_type == ModalityType.IMAGE: + return lambda model_hparams, vocab_size: "image_modality" + elif modality_type == ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY: + return (lambda model_hparams, vocab_size: # pylint: disable=g-long-lambda + "image_channel_bottom_identity_modality") + elif modality_type == ModalityType.IMAGE_CHANNEL_COMPRESS: + return lambda model_hparams, vocab_size: "image_channel_compress_modality" + elif modality_type == ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM: + return lambda model_hparams, vocab_size: "image_channel_embeddings_bottom" + elif modality_type == ModalityType.REAL: + return lambda model_hparams, vocab_size: "real_modality" + elif modality_type == ModalityType.REAL_L2_LOSS: + return lambda model_hparams, vocab_size: "real_l2_loss_modality" + elif modality_type == ModalityType.REAL_LOG_POISSON_LOSS: + return lambda model_hparams, vocab_size: "real_log_poisson_loss_modality" + elif modality_type == ModalityType.SPEECH_RECOGNITION: + return lambda model_hparams, vocab_size: "speech_recognition_modality" + elif modality_type == ModalityType.VIDEO: + return lambda model_hparams, vocab_size: "video_modality" + elif modality_type == ModalityType.VIDEO_BITWISE: + return lambda model_hparams, vocab_size: "video_modality_bitwise" + elif modality_type == ModalityType.VIDEO_IDENTITY: + return lambda model_hparams, vocab_size: "video_modality_identity" + elif modality_type == ModalityType.VIDEO_L1: + return lambda model_hparams, vocab_size: "video_modality_l1" + elif modality_type == ModalityType.VIDEO_L1_RAW: + return lambda model_hparams, vocab_size: "video_modality_l1_raw" + elif modality_type == ModalityType.VIDEO_L2: + return lambda model_hparams, vocab_size: "video_modality_l2" + elif modality_type == ModalityType.VIDEO_L2_RAW: + return lambda model_hparams, vocab_size: "video_modality_l2_raw" + elif modality_type == 
ModalityType.VIDEO_PIXEL_NOISE: + return lambda model_hparams, vocab_size: "video_modality_pixel_noise" + elif modality_type in (ModalityType.CLASS_LABEL, + ModalityType.MULTI_LABEL, + ModalityType.ONE_HOT_CLASS_LABEL): + def name(model_hparams, vocab_size): + return "class_label_modality_%d_%d" % (vocab_size, + model_hparams.hidden_size) + return name + elif modality_type in (ModalityType.CTC_SYMBOL, + ModalityType.IDENTITY_SYMBOL, + ModalityType.SYMBOL, + ModalityType.SYMBOL_WEIGHTS_ALL, + ModalityType.SYMBOL_ONE_HOT): + def name(model_hparams, vocab_size): + return "symbol_modality_%d_%d" % (vocab_size, model_hparams.hidden_size) + return name + elif modality_type == ModalityType.SIGMOID_CLASS_LABEL: + def name(model_hparams, vocab_size): + return "sigmoid_class_symbol_modality_%d_%d" % (vocab_size, + model_hparams.hidden_size) + return name + elif modality_type == ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL: + def name(model_hparams, vocab_size): + return "sigmoid_max_pooling_class_symbol_modality_%d_%d" % ( + vocab_size, model_hparams.hidden_size) + return name + elif modality_type == ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL: + def name(model_hparams, vocab_size): + return "softmax_average_pooling_onehot_class_label_modality_%d_%d" % ( + vocab_size, model_hparams.hidden_size) + return name + elif modality_type == ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL: + def name(model_hparams, vocab_size): + return "softmax_last_timestep_onehot_class_label_modality_%d_%d" % ( + vocab_size, model_hparams.hidden_size) + return name + elif modality_type == ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL: + def name(model_hparams, vocab_size): + return "softmax_max_pooling_onehot_class_label_modality_%d_%d" % ( + vocab_size, model_hparams.hidden_size) + return name + return value + + +def get_targets_bottom(modality_type, value=None): + """Gets default bottom transformation for targets; if none, return value.""" + if modality_type == ModalityType.AUDIO: + return make_targets_bottom(audio_bottom) + elif modality_type == ModalityType.AUDIO_SPECTRAL: + return make_targets_bottom(audio_spectral_bottom) + elif modality_type in (ModalityType.CLASS_LABEL, + ModalityType.MULTI_LABEL, + ModalityType.ONE_HOT_CLASS_LABEL, + ModalityType.SIGMOID_CLASS_LABEL, + ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL, + ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL, + ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL, + ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL): + return class_label_targets_bottom + elif modality_type in (ModalityType.CTC_SYMBOL, + ModalityType.SYMBOL, + ModalityType.SYMBOL_WEIGHTS_ALL): + return symbol_targets_bottom + elif modality_type in (ModalityType.GENERIC_L2_LOSS, + ModalityType.IDENTITY_SYMBOL): + return identity_bottom + elif modality_type == ModalityType.IDENTITY: + return make_targets_bottom(identity_bottom) + elif modality_type == ModalityType.IMAGE: + return image_targets_bottom + elif modality_type in (ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY, + ModalityType.IMAGE_CHANNEL_COMPRESS): + return image_channel_compress_targets_bottom + elif modality_type == ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM: + return image_channel_embeddings_bottom + elif modality_type in (ModalityType.REAL, + ModalityType.REAL_L2_LOSS, + ModalityType.REAL_LOG_POISSON_LOSS): + return make_targets_bottom(real_bottom) + elif modality_type == ModalityType.SPEECH_RECOGNITION: + return make_targets_bottom(speech_recognition_bottom) + elif modality_type == ModalityType.SYMBOL_ONE_HOT: + return 
symbol_one_hot_bottom + elif modality_type in (ModalityType.VIDEO, + ModalityType.VIDEO_L1, + ModalityType.VIDEO_L2): + return video_targets_bottom + elif modality_type == ModalityType.VIDEO_BITWISE: + return video_bitwise_targets_bottom + elif modality_type == ModalityType.VIDEO_IDENTITY: + return video_identity_targets_bottom + elif modality_type in (ModalityType.VIDEO_L1_RAW, + ModalityType.VIDEO_L2_RAW): + return video_raw_targets_bottom + elif modality_type == ModalityType.VIDEO_PIXEL_NOISE: + return make_targets_bottom(video_pixel_noise_bottom) + return value + + +def get_top(modality_type, value=None): + """Gets default top transformation; if none available, return value.""" + if modality_type in (ModalityType.AUDIO, + ModalityType.AUDIO_SPECTRAL, + ModalityType.GENERIC_L2_LOSS, + ModalityType.IDENTITY, + ModalityType.IDENTITY_SYMBOL, + ModalityType.IMAGE_CHANNEL_BOTTOM_IDENTITY, + ModalityType.SPEECH_RECOGNITION, + ModalityType.VIDEO_IDENTITY): + return identity_top + elif modality_type in (ModalityType.CLASS_LABEL, + ModalityType.MULTI_LABEL, + ModalityType.ONE_HOT_CLASS_LABEL, + ModalityType.SIGMOID_CLASS_LABEL): + return class_label_top + elif modality_type in (ModalityType.CTC_SYMBOL, + ModalityType.SYMBOL, + ModalityType.SYMBOL_WEIGHTS_ALL): + return symbol_top + elif modality_type == ModalityType.IMAGE: + return image_top + elif modality_type == ModalityType.IMAGE_CHANNEL_COMPRESS: + return image_channel_compress_top + elif modality_type == ModalityType.IMAGE_CHANNEL_EMBEDDINGS_BOTTOM: + return image_channel_embeddings_top + elif modality_type in (ModalityType.REAL, + ModalityType.REAL_L2_LOSS, + ModalityType.REAL_LOG_POISSON_LOSS): + return real_top + elif modality_type == ModalityType.SIGMOID_MAX_POOLING_CLASS_LABEL: + return sigmoid_max_pooling_class_label_top + elif modality_type == ModalityType.SOFTMAX_AVERAGE_POOLING_CLASS_LABEL: + return softmax_average_pooling_class_label_top + elif modality_type == ModalityType.SOFTMAX_LAST_TIMESTEP_CLASS_LABEL: + return softmax_last_timestep_class_label_top + elif modality_type == ModalityType.SOFTMAX_MAX_POOLING_CLASS_LABEL: + return softmax_max_pooling_class_label_top + elif modality_type == ModalityType.SYMBOL_ONE_HOT: + return symbol_one_hot_top + elif modality_type in (ModalityType.VIDEO, + ModalityType.VIDEO_BITWISE, + ModalityType.VIDEO_PIXEL_NOISE): + return video_top + elif modality_type in (ModalityType.VIDEO_L1, + ModalityType.VIDEO_L2): + return video_l1_top + elif modality_type in (ModalityType.VIDEO_L1_RAW, + ModalityType.VIDEO_L2_RAW): + return video_raw_top + return value + + +def get_weights_fn(modality_type, value=None): + """Gets default weights function; if none available, return value.""" + if modality_type in (ModalityType.CTC_SYMBOL, + ModalityType.IDENTITY_SYMBOL, + ModalityType.MULTI_LABEL, + ModalityType.SYMBOL, + ModalityType.SYMBOL_ONE_HOT): + return common_layers.weights_nonzero + elif modality_type in ModalityType.get_choices(): + return common_layers.weights_all + return value diff --git a/tensor2tensor/layers/modalities_test.py b/tensor2tensor/layers/modalities_test.py new file mode 100644 index 000000000..dbaf3c68e --- /dev/null +++ b/tensor2tensor/layers/modalities_test.py @@ -0,0 +1,164 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for Modalities.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import modalities +from tensor2tensor.utils import expert_utils +from tensor2tensor.utils import test_utils + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +tf.enable_eager_execution() + + +class ModalityTest(tf.test.TestCase): + + @test_utils.run_in_graph_and_eager_modes() + def testGetForAllModalities(self): + for modality in modalities.ModalityType.get_choices(): + bottom = modalities.get_bottom(modality) + loss = modalities.get_loss(modality) + name = modalities.get_name(modality) + targets_bottom = modalities.get_targets_bottom(modality) + top = modalities.get_top(modality) + weights_fn = modalities.get_weights_fn(modality) + self.assertIsNotNone(bottom, + msg="{} has no default bottom".format(modality)) + self.assertIsNotNone(loss, msg="{} has no default loss".format(modality)) + self.assertIsNotNone(name, msg="{} has no default name".format(modality)) + self.assertIsNotNone( + targets_bottom, + msg="{} has no default targets_bottom".format(modality)) + self.assertIsNotNone(top, msg="{} has no default top".format(modality)) + self.assertIsNotNone(weights_fn, + msg="{} has no default weights_fn".format(modality)) + + @test_utils.run_in_graph_and_eager_modes() + def testSymbolModalityInputs(self): + batch_size = 10 + num_datashards = 5 + length = 5 + vocab_size = 5000 + hidden_size = 9 + model_hparams = common_hparams.basic_params1() + model_hparams.hidden_size = hidden_size + model_hparams.mode = tf_estimator.ModeKeys.TRAIN + x = np.random.randint( + vocab_size, size=(batch_size, length, 1, 1)) + data_parallelism = expert_utils.Parallelism( + ["/device:CPU:0"] * num_datashards) + xs = tf.split(x, num_datashards) + sharded_output = data_parallelism( + modalities.get_bottom(modalities.ModalityType.SYMBOL), + xs, + model_hparams, + vocab_size) + output = tf.concat(sharded_output, 0) + self.evaluate(tf.global_variables_initializer()) + res = self.evaluate(output) + self.assertEqual(res.shape, (batch_size, length, 1, hidden_size)) + + @test_utils.run_in_graph_and_eager_modes() + def testSymbolModalityTargets(self): + batch_size = 10 + num_datashards = 5 + length = 6 + height = 7 + hidden_size = 9 + vocab_size = 11 + model_hparams = common_hparams.basic_params1() + model_hparams.hidden_size = hidden_size + model_hparams.mode = tf_estimator.ModeKeys.TRAIN + body_output = np.random.randint( + 100, size=(batch_size, length, height, hidden_size)) + targets = np.random.randint( + vocab_size, size=(batch_size, length, height, 1)) + data_parallelism = expert_utils.Parallelism( + ["/device:CPU:0"] * num_datashards) + sharded_body_output = tf.split(tf.to_float(body_output), num_datashards) + sharded_targets = tf.split(targets, num_datashards) + sharded_logits = data_parallelism( + modalities.get_top(modalities.ModalityType.SYMBOL), + sharded_body_output, + sharded_targets, + model_hparams, + vocab_size) + 
sharded_loss_num, sharded_loss_den = data_parallelism( + modalities.get_loss(modalities.ModalityType.SYMBOL), + sharded_logits, + sharded_targets, + model_hparams, + vocab_size, + modalities.get_weights_fn(modalities.ModalityType.SYMBOL)) + train_loss = (tf.add_n(sharded_loss_num) / + tf.maximum(1.0, tf.add_n(sharded_loss_den))) + logits = tf.concat(sharded_logits, 0) + self.evaluate(tf.global_variables_initializer()) + res1, res2 = self.evaluate((logits, train_loss)) + self.assertEqual(res1.shape, (batch_size, length, height, 1, vocab_size)) + self.assertEqual(res2.shape, ()) + + @test_utils.run_in_graph_mode_only() + def testSymbolModalityTargetsFactored(self): + batch_size = 10 + num_datashards = 5 + length = 6 + height = 7 + hidden_size = 9 + vocab_size = 11 + model_hparams = common_hparams.basic_params1() + model_hparams.factored_logits = True + model_hparams.hidden_size = hidden_size + model_hparams.mode = tf_estimator.ModeKeys.TRAIN + body_output = np.random.randint( + 100, size=(batch_size, length, height, hidden_size)) + targets = np.random.randint( + vocab_size, size=(batch_size, length, height, 1)) + data_parallelism = expert_utils.Parallelism( + ["/device:CPU:0"] * num_datashards) + with self.test_session() as session: + sharded_body_output = tf.split(tf.to_float(body_output), num_datashards) + sharded_targets = tf.split(targets, num_datashards) + sharded_logits = data_parallelism( + modalities.get_top(modalities.ModalityType.SYMBOL), + sharded_body_output, + sharded_targets, + model_hparams, + vocab_size) + sharded_loss_num, sharded_loss_den = data_parallelism( + modalities.get_loss(modalities.ModalityType.SYMBOL), + sharded_logits, + sharded_targets, + model_hparams, + vocab_size, + modalities.get_weights_fn(modalities.ModalityType.SYMBOL)) + train_loss = (tf.add_n(sharded_loss_num) / + tf.maximum(1.0, tf.add_n(sharded_loss_den))) + logits = tf.concat(sharded_logits, 0) + session.run(tf.global_variables_initializer()) + res1, res2 = session.run((logits, train_loss)) + self.assertEqual(res1.shape, (batch_size, length, height, 1, vocab_size)) + self.assertEqual(res2.shape, ()) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/layers/ngram.py b/tensor2tensor/layers/ngram.py new file mode 100644 index 000000000..05dcd54d8 --- /dev/null +++ b/tensor2tensor/layers/ngram.py @@ -0,0 +1,92 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""N-gram layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import tensorflow.compat.v1 as tf + + +class NGram(tf.keras.layers.Layer): + r"""N-gram layer. + + The layer takes as input an integer Tensor of shape [..., length], each + element of which is a token index in [0, input_dim). It returns a real-valued + Tensor of shape [..., num_ngrams], counting the number of times each n-gram + appears in a batch element. 
The total number of n-grams is + + ```none + num_ngrams = \sum_{minval <= n < maxval} input_dim^n. + ``` + """ + + def __init__(self, input_dim, minval, maxval, **kwargs): + """Constructs layer. + + Args: + input_dim: int > 0. Size of the vocabulary, i.e. maximum integer index + + 1. + minval: Lowest inclusive value of n for computing n-grams. For example, + setting it to 1 will compute starting from unigrams. + maxval: Highest non-inclusive value of n for computing n-grams. For + example, setting it to 3 will compute at most bigrams. + **kwargs: kwargs of parent class. + """ + super(NGram, self).__init__(**kwargs) + self.input_dim = input_dim + self.minval = minval + self.maxval = maxval + + def call(self, inputs): + batch_shape = tf.shape(inputs)[:-1] + length = tf.shape(inputs)[-1] + ngram_range_counts = [] + for n in range(self.minval, self.maxval): + # Reshape inputs from [..., length] to [..., 1, length // n, n], dropping + # remainder elements. Each n-vector is an ngram. + reshaped_inputs = tf.reshape( + inputs[..., :(n * (length // n))], + tf.concat([batch_shape, [1], (length // n)[tf.newaxis], [n]], 0)) + # Count the number of times each ngram appears in the input. We do so by + # checking whether each n-vector in the input is equal to each n-vector + # in a Tensor of all possible ngrams. The comparison is batched between + # the input Tensor of shape [..., 1, length // n, n] and the ngrams Tensor + # of shape [..., input_dim**n, 1, n]. + ngrams = tf.reshape( + list(np.ndindex((self.input_dim,) * n)), + [1] * (len(inputs.shape)-1) + [self.input_dim**n, 1, n]) + is_ngram = tf.equal( + tf.reduce_sum(tf.cast(tf.equal(reshaped_inputs, ngrams), tf.int32), + axis=-1), + n) + ngram_counts = tf.reduce_sum(tf.cast(is_ngram, tf.float32), axis=-1) + ngram_range_counts.append(ngram_counts) + return tf.concat(ngram_range_counts, axis=-1) + + def compute_output_shape(self, input_shape): + input_shape = tf.TensorShape(input_shape) + num_ngrams = sum([self.input_dim**n + for n in range(self.minval, self.maxval)]) + return input_shape[:-1].concatenate(num_ngrams) + + def get_config(self): + config = {'minval': self.minval, + 'maxval': self.maxval} + base_config = super(NGram, self).get_config() + return dict(list(base_config.items()) + list(config.items())) diff --git a/tensor2tensor/layers/ngram_test.py b/tensor2tensor/layers/ngram_test.py new file mode 100644 index 000000000..3dce37268 --- /dev/null +++ b/tensor2tensor/layers/ngram_test.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
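+
+# Illustrative sketch (assumed usage, not part of the original tests): NGram
+# counts n-grams over consecutive, non-overlapping windows of the input.
+# Assuming input_dim=3, minval=1, maxval=3:
+#
+#   layer = ngram.NGram(input_dim=3, minval=1, maxval=3)
+#   counts = layer(tf.constant([[0, 0, 1]], dtype=tf.int32))
+#   # counts == [[2., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.]]
+#   # i.e. unigram counts for tokens 0/1/2, then counts over the 9 possible
+#   # bigrams, of which only (0, 0) occurs once.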
+ +"""Tests for n-gram layer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import ngram + +from tensor2tensor.utils import test_utils + +import tensorflow.compat.v1 as tf +tf.enable_eager_execution() + + +class NGramTest(tf.test.TestCase): + + @test_utils.run_in_graph_and_eager_modes() + def testNGramLayerShape(self): + batch_size = 2 + length = 8 + vocab_size = 3 + minval = 1 + maxval = 4 + inputs = tf.random_uniform( + [batch_size, length], minval=0, maxval=vocab_size, dtype=tf.int32) + layer = ngram.NGram(vocab_size, minval, maxval) + outputs = layer(inputs) + outputs_val = self.evaluate(outputs) + num_ngrams = sum([vocab_size**n for n in range(minval, maxval)]) + self.assertEqual(outputs_val.shape, (batch_size, num_ngrams)) + + @test_utils.run_in_graph_and_eager_modes() + def testNGramLayerOutput(self): + inputs = tf.constant( + [[0, 0, 0, 0, 1], + [2, 1, 2, 1, 0]], dtype=tf.int32) + layer = ngram.NGram(3, minval=1, maxval=3) + outputs = layer(inputs) + expected_outputs = tf.constant( + [[4., 1., 0., 2., 0., 0., 0., 0., 0., 0., 0., 0.], + [1., 2., 2., 0., 0., 0., 0., 0., 0., 0., 2., 0.]], dtype=tf.float32) + outputs_val, expected_outputs_val = self.evaluate( + [outputs, expected_outputs]) + self.assertAllEqual(outputs_val, expected_outputs_val) + +if __name__ == "__main__": + tf.test.main() + diff --git a/tensor2tensor/layers/transformer_glow_layers.py b/tensor2tensor/layers/transformer_glow_layers.py new file mode 100644 index 000000000..12c963fcb --- /dev/null +++ b/tensor2tensor/layers/transformer_glow_layers.py @@ -0,0 +1,444 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Glow operations for text. + +Adapted glow operations from tensor2tensor.models.research.glow_ops to be used +as a prior in Text VAEs (specifically for MT). Supports: +1. Log determinant Jacobian computation with variable length data and masking. +2. Transformer instead of convolution as a basic transformation. +3. Every transformation (affine, split) conditions on the source + sentence. +4. Three different split functions in affine coupling. +5. Multi-head 1x1 convolution. +6. Actnorm with weight normalization. 
+ +Implementation based on Ma et al., 2019: https://arxiv.org/pdf/1909.02480.pdf +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import numpy as np +import scipy +from tensor2tensor.layers import common_layers +import tensor2tensor.layers.transformer_glow_layers_ops as gops +import tensorflow.compat.v1 as tf + + +def actnorm(name, x, x_mask, inverse, init, logscale_factor=3.0): + """Activation normalization, returns logabsdet of shape [B].""" + eps = tf.keras.backend.epsilon() + n_channels = common_layers.shape_list(x)[2] + + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + x_mean, x_var = gops.moments_over_bl(x, x_mask) + b = gops.get_variable_ddi( + "b", (n_channels), -x_mean, init, tf.zeros_initializer) + log_w_init = -0.5 * tf.log(x_var + eps) / logscale_factor + log_w = gops.get_variable_ddi( + "log_w", (n_channels), log_w_init, init, + tf.zeros_initializer) * logscale_factor + + if not inverse: + x = (x + b) * tf.exp(log_w) + else: + x = x * tf.exp(-log_w) - b + + x_length = tf.reduce_sum(x_mask, -1) + logabsdet = x_length * tf.reduce_sum(log_w) + if inverse: + logabsdet *= -1 + return x, logabsdet + + +def multihead_invertible_1x1_conv_np( + name, x, x_mask, multihead_split, inverse, dtype): + """Multi-head 1X1 convolution on x.""" + batch_size, length, n_channels_all = common_layers.shape_list(x) + assert n_channels_all % 32 == 0 + n_channels = 32 + n_1x1_heads = n_channels_all // n_channels + + def get_init_np(): + """Initializer function for multihead 1x1 parameters using numpy.""" + results = [] + for _ in range(n_1x1_heads): + random_matrix = np.random.rand(n_channels, n_channels) + np_w = scipy.linalg.qr(random_matrix)[0].astype("float32") + np_p, np_l, np_u = scipy.linalg.lu(np_w) + np_s = np.diag(np_u) + np_sign_s = np.sign(np_s)[np.newaxis, :] + np_log_s = np.log(np.abs(np_s))[np.newaxis, :] + np_u = np.triu(np_u, k=1) + results.append( + np.concatenate([np_p, np_l, np_u, np_sign_s, np_log_s], axis=0)) + return tf.convert_to_tensor(np.stack(results, axis=0)) + + def get_mask_init(): + ones = tf.ones([n_1x1_heads, n_channels, n_channels], dtype=dtype) + l_mask = tf.matrix_band_part(ones, -1, 0) - tf.matrix_band_part(ones, 0, 0) + u_mask = tf.matrix_band_part(ones, 0, -1) - tf.matrix_band_part(ones, 0, 0) + return tf.stack([l_mask, u_mask], axis=0) + + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + params = tf.get_variable("params", initializer=get_init_np, dtype=dtype) + mask_params = tf.get_variable( + "mask_params", initializer=get_mask_init, dtype=dtype, trainable=False) + + p = tf.stop_gradient(params[:, :n_channels, :]) + l = params[:, n_channels : 2*n_channels, :] + u = params[:, 2*n_channels : 3*n_channels, :] + sign_s = tf.stop_gradient(params[:, 3*n_channels, :]) + log_s = params[:, 3*n_channels+1, :] + + l_mask = mask_params[0] + u_mask = mask_params[1] + + l_diag = l * l_mask + ( + tf.eye(n_channels, n_channels, [n_1x1_heads], dtype=dtype)) + u_diag = u * u_mask + ( + tf.matrix_diag(sign_s * tf.exp(log_s))) + w = tf.matmul(p, tf.matmul(l_diag, u_diag)) + + if multihead_split == "a": + x = tf.reshape(x, [batch_size, length, n_channels, n_1x1_heads]) + x = tf.transpose(x, [3, 0, 1, 2]) + elif multihead_split == "c": + x = tf.reshape(x, [batch_size, length, n_1x1_heads, n_channels]) + x = tf.transpose(x, [2, 0, 1, 3]) + else: + raise ValueError("Multihead split not supported.") + # [n_1x1_heads, batch_size, length, n_channels] + + if not inverse: + # [n_1x1_heads, 1, 
n_channels, n_channels] + x = tf.matmul(x, w[:, tf.newaxis, :, :]) + else: + w_inv = tf.matrix_inverse(w) + x = tf.matmul(x, w_inv[:, tf.newaxis, :, :]) + + if multihead_split == "a": + x = tf.transpose(x, [1, 2, 3, 0]) + x = tf.reshape(x, [batch_size, length, n_channels * n_1x1_heads]) + elif multihead_split == "c": + x = tf.transpose(x, [1, 2, 0, 3]) + x = tf.reshape(x, [batch_size, length, n_1x1_heads * n_channels]) + else: + raise ValueError("Multihead split not supported.") + + x_length = tf.reduce_sum(x_mask, -1) + logabsdet = x_length * tf.reduce_sum(log_s) + if inverse: + logabsdet *= -1 + return x, logabsdet + + +def coupling(*args, **kwargs): + """Coupling transform layer.""" + prior_type = kwargs["hparams"].prior_type + posterior_type = kwargs["hparams"].posterior_type + if prior_type == "affine" or posterior_type == "affine": + return affine_coupling(*args, **kwargs) + elif prior_type == "additive" or posterior_type == "additive": + return additive_coupling(*args, **kwargs) + + +def additive_coupling( + name, x, x_mask, inverse, split_dim, identity_first, init, + decoder_self_attention_bias=None, **kwargs): + """Additive coupling transform layer.""" + hparams = kwargs["hparams"] + batch_size, length, n_channels = common_layers.shape_list(x) + assert hparams.scale_width > 0.0 and hparams.scale_width < 1.0 + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + x_id, x_tr, _, n_transform, bias, mask = gops.split_coupling( + x, x_mask, split_dim, identity_first, decoder_self_attention_bias) + z_id = x_id + + loc = gops.transformer_decoder_block( + "theta_tr", + n_layers=hparams.n_layers_transform_params, + x=x_id, + x_mask=mask, + output_size=n_transform, + init=init, + decoder_self_attention_bias=bias, + **kwargs) + if not inverse: + z_tr = x_tr + loc + else: + z_tr = x_tr - loc + logabsdet = tf.constant(0.0, dtype=tf.float32) + + tf.summary.histogram("_loc", tf.boolean_mask(loc, mask)) + result = gops.join_coupling(z_id, z_tr, split_dim, identity_first) + result = tf.reshape(result, [batch_size, length, n_channels]) + return result, logabsdet + + +def affine_coupling( + name, x, x_mask, inverse, split_dim, identity_first, init, + decoder_self_attention_bias=None, **kwargs): + """Affine coupling transform layer. + + Args: + name: variable scope. + x: 3-D Tensor, shape=[B, L, C]. + x_mask : 2-D Tensor, shape=[B, L]. + inverse: Forward or inverse pass. + split_dim: which dimension to split + (time, channel_continuous, channel_alternate). + identity_first: True means the first half remains constant. False for 2nd. + init: init. + decoder_self_attention_bias: bias. + **kwargs: additional arguments. Contains hparams, encoder_output and + encoder_decoder_attention_bias. + + Returns: + z: data transformed by the affine coupling layer. shape=[B, L, C] + logabsdets: Log absolute determinant Jacobian. 
shape=[B] + """ + hparams = kwargs["hparams"] + batch_size, length, n_channels = common_layers.shape_list(x) + assert hparams.scale_width > 0.0 and hparams.scale_width < 1.0 + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + x_id, x_tr, _, n_transform, bias, mask = gops.split_coupling( + x, x_mask, split_dim, identity_first, decoder_self_attention_bias) + z_id = x_id + + transform_params = gops.transformer_decoder_block( + "theta_tr", + n_layers=hparams.n_layers_transform_params, + x=x_id, + x_mask=mask, + output_size=n_transform*2, + init=init, + decoder_self_attention_bias=bias, + **kwargs) + loc, unconstrained_scale = tf.split(transform_params, 2, axis=-1) + scale = tf.sigmoid(unconstrained_scale + 2.0) + if not inverse: + z_tr = (x_tr + loc) * scale + else: + z_tr = x_tr / scale - loc + + logabsdet = gops.reduce_sum_over_lc(tf.log(scale), mask) # [B] + if inverse: + logabsdet *= -1 + + tf.summary.histogram("_loc", tf.boolean_mask(loc, mask)) + tf.summary.histogram("_scale", tf.boolean_mask(scale, mask)) + result = gops.join_coupling(z_id, z_tr, split_dim, identity_first) + result = tf.reshape(result, [batch_size, length, n_channels]) + return result, logabsdet + + +def flow_step_glow(name, x, x_mask, split_dims, inverse, init, dtype, **kwargs): + """One step of flow.""" + conv_fn = multihead_invertible_1x1_conv_np + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + reversible_ops = [] + for _, split_dim in enumerate(split_dims): + identity_first = True + reversible_ops += [functools.partial(actnorm, name="actnorm", init=init)] + if split_dim in "ca": + multihead_split = "a" if split_dim == "c" else "c" + reversible_ops += [functools.partial( + conv_fn, name="conv_{}".format(multihead_split), + multihead_split=multihead_split, dtype=dtype)] + reversible_ops += [functools.partial( + coupling, name="coupling_{}".format(split_dim), + split_dim=split_dim, identity_first=identity_first, init=init, + **kwargs)] + if inverse: + reversible_ops = reversible_ops[::-1] + + logabsdets = tf.constant(0.0, dtype=dtype) + for reversible_op in reversible_ops: + x, logabsdet = reversible_op(x=x, x_mask=x_mask, inverse=inverse) + logabsdets += logabsdet + return x, logabsdets + + +def flow_level( + name, x, x_mask, depth, split_dims, prior, inverse, init, dtype, **kwargs): + """One level of flow.""" + flow_step_fn = flow_step_glow + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + reversible_ops = [] + for step in np.arange(depth): + reversible_ops += [functools.partial( + flow_step_fn, name="{}_step".format(step), split_dims=split_dims, + init=init, dtype=dtype, **kwargs)] + if prior: + reversible_ops += [functools.partial( + coupling, name="{}_prior".format(depth), split_dim="c", + identity_first=True, init=init, **kwargs)] + if inverse: + reversible_ops = reversible_ops[::-1] + + logabsdets = tf.constant(0.0, dtype=dtype) + for reversible_op in reversible_ops: + x, logabsdet = reversible_op(x=x, x_mask=x_mask, inverse=inverse) + logabsdets += logabsdet + return x, logabsdets + + +def split(name, x, x_mask, inverse, temp=1.0, dtype=tf.float32, z=None): + """Splits / concatenates x into x1 and x2 across number of channels. + + x2 is modelled with a standard gaussian distribution. + Args: + name: variable scope. + x: 3-D Tensor, shape=[B, L, C]. + x_mask: 2-D Tensor, shape=[B, L]. + inverse: forward or inverse pass. + temp: only used for inverse pass. temperature for sampling. + dtype: dtype + z: used in inverse pass to check invertibility. 
+ + Returns: + x: if forward, returns the 1st half of the channel dimensions. + if inverse, return concat[input, N(0,1)] + z: second half of the channel dimensions. modelled as standard normal. + log_p: log p(x2; N(0,1)), shape=[B] + """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + if not inverse: + x1, x2 = tf.split(x, 2, axis=-1) + log_p = gops.standard_normal_density(x2, x_mask) + return x1, x2, log_p + else: + if z is None: + x2 = tf.random.normal( + common_layers.shape_list(x), stddev=temp, dtype=dtype) + else: + x2 = z + log_p = gops.standard_normal_density(x2, x_mask) + return tf.concat([x, x2], 2), None, log_p + + +def squeeze(name, x, factor, inverse): + """Temporal squeezing of x to increase the number of channels.""" + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + if factor == 1: + return x + batch_size, length, n_channels = common_layers.shape_list(x) + if not inverse: + x = tf.reshape(x, [batch_size, length//factor, factor, n_channels]) + # transposing groups neighbouring elements together. + x = tf.transpose(x, [0, 1, 3, 2]) + x = tf.reshape(x, [batch_size, length//factor, n_channels*factor]) + else: + x = tf.reshape(x, (batch_size, length, n_channels//factor, factor)) + x = tf.transpose(x, [0, 1, 3, 2]) + x = tf.reshape(x, (batch_size, length*factor, n_channels//factor)) + return x + + +def glow( + name, x, max_x_mask, max_self_attn_bias, inverse, init, dtype=tf.float32, + split_zs=None, temp=1.0, **kwargs): + """Multi-scale glow model. Flow + (n_levels-1)*(Split + Squeeze + Flow). + + Note the original glow's ordering is Squeeze + Flow + Split. + + Args: + name: variable scope. + x: 3-D Tensor, shape=[B, L, C]. The length dimension is padded to the + closest multiple of factor**n_levels. + max_x_mask : 2-D Tensor, shape=[B, L]. Binary mask indicating padding. + max_self_attn_bias : 4-D Tensor, shape=[B, 1, 1, L]. + inverse: forward or inverse pass. + init: init. + dtype: dtype. + split_zs: intermediate latents modelled as a standard normal. + temp: Only used in inverse. Temperature for sampling. + **kwargs: additional arguments. Contains hparams, disable_dropout, + encoder_output and encoder_decoder_attention_bias. + + Returns: + x: if forward, data transformed to the base distribution. + if inverse, base transformed to the data (latent) distribution. + logabsdets: log absolute determinant Jacobian. [B] + log_ps: log probability in the base distribution. [B] + split_zs: all intermediate latents (only used to check invertibility.) 
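+
+  Example (illustrative sketch; the variable names are assumptions, not part
+  of the library):
+    # Density estimation over padded targets x of shape [B, L, C]:
+    z, logabsdets, log_ps, _ = glow(
+        "glow", x, x_mask, self_attn_bias, inverse=False, init=False,
+        hparams=hparams, disable_dropout=True, encoder_output=enc_out,
+        encoder_decoder_attention_bias=enc_dec_bias)
+    log_likelihood = log_ps + logabsdets  # per-sentence log density, shape [B]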
+ """ + assert x.shape.rank == 3 + hparams = kwargs["hparams"] + factor = hparams.factor + if hparams.depths: + depths = [int(depth_str) for depth_str in hparams.depths.split("/")] + else: + depths = [] + split_plans = hparams.split_plans.split("/") + n_levels = len(depths) + logabsdets = tf.constant(0.0, dtype=dtype) + log_ps = tf.constant(0.0, dtype=dtype) + with tf.variable_scope(name, use_resource=True, reuse=tf.AUTO_REUSE): + if not inverse: # z -> e (density estimation) + x_mask, self_attn_bias = max_x_mask, max_self_attn_bias + split_zs = [] + for level in range(n_levels): + if level > 0: + x, z, log_p_z = split( + "{}_split".format(level), x, x_mask, inverse, dtype) + log_ps += log_p_z + split_zs.append(z) + + x = squeeze("{}_squeeze".format(level), x, factor, inverse) + x_mask = max_x_mask[:, ::factor**level] + self_attn_bias = max_self_attn_bias[..., ::factor**level] + + prior = level < n_levels - 1 + x, logabsdet = flow_level( + "{}_level".format(level), x, x_mask, depths[level], + split_plans[level], prior, inverse, init, dtype, + decoder_self_attention_bias=self_attn_bias, **kwargs) + logabsdets += logabsdet # (B) + + log_p_base = gops.standard_normal_density(x, x_mask) + log_ps += log_p_base + return x, logabsdets, log_ps, split_zs + + else: # e -> z (sampling) + x_mask = max_x_mask[:, ::factor**(n_levels-1)] + log_p_base = gops.standard_normal_density(x, x_mask) + log_ps += log_p_base + if split_zs is None: + split_zs = [None] * (n_levels-1) + + for level in reversed(range(n_levels)): + x_mask = max_x_mask[:, ::factor**level] + self_attn_bias = max_self_attn_bias[..., ::factor**level] + prior = level < n_levels - 1 + x, logabsdet = flow_level( + "{}_level".format(level), x, x_mask, depths[level], + split_plans[level], prior, inverse, init, dtype, + decoder_self_attention_bias=self_attn_bias, **kwargs) + logabsdets += logabsdet + + if level > 0: + x = squeeze("{}_squeeze".format(level), x, factor, inverse) + x_mask = max_x_mask[:, ::factor**(level-1)] + x, _, log_p_z = split( + "{}_split".format(level), x, x_mask, inverse, temp=temp, + dtype=dtype, z=split_zs[level-1]) + log_ps += log_p_z + + return x, logabsdets, log_ps, None diff --git a/tensor2tensor/layers/transformer_glow_layers_ops.py b/tensor2tensor/layers/transformer_glow_layers_ops.py new file mode 100644 index 000000000..58bae3831 --- /dev/null +++ b/tensor2tensor/layers/transformer_glow_layers_ops.py @@ -0,0 +1,297 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
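+
+# Masking convention used throughout this module: reductions take a binary
+# x_mask of shape [B, L] alongside x of shape [B, L, C], so that, informally,
+#
+#   reduce_sum_over_lc(x, x_mask)[b] = sum over l, c of x[b, l, c] * x_mask[b, l]
+#
+# and padded time steps contribute nothing to sums, means, or moments.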
+ +"""Additional operations for transformer_glow_layers.py.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import math +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.models.transformer import transformer_decoder_layer +import tensorflow.compat.v1 as tf +import tensorflow_probability as tfp + + +def dense(name, x, n_out, dtype=tf.float32, init_w=0.05): + """Dense layer.""" + n_in = common_layers.shape_list(x)[2] + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + w = tf.get_variable( + "w", [n_in, n_out], dtype, + initializer=tf.random_normal_initializer(0.0, init_w), trainable=True) + b = tf.get_variable( + "b", [n_out,], dtype, initializer=tf.zeros_initializer, trainable=True) + x = tf.matmul(x, w) + b + return x + + +def dense_weightnorm( + name, x, n_out, x_mask, init_scale, init, dtype=tf.float32): + """Dense layer with weight normalization.""" + n_in = common_layers.shape_list(x)[2] + eps = tf.keras.backend.epsilon() + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + v = tf.get_variable( + "v", [n_in, n_out], dtype, + initializer=tf.random_normal_initializer(0, 0.05), trainable=True) + v = v / tf.norm(v, axis=0, keepdims=True) + t = tf.matmul(x, v) # [B, L, n_out] + mean, var = moments_over_bl(t, x_mask) + g_init = init_scale / (tf.sqrt(var) + eps) + g = get_variable_ddi( + "g", [n_out], g_init, init, + initializer=tf.zeros_initializer, dtype=dtype, trainable=True) + b = get_variable_ddi( + "b", [n_out], -mean*g_init, init, + initializer=tf.zeros_initializer, dtype=dtype, trainable=True) + w = g * v + y = tf.matmul(x, w) + b + tf.summary.histogram("_g", g) + return y + + +def transformer_decoder_block(name, + n_layers, + x, + x_mask, + output_size, + init, + **kwargs): + """A transformation block composed of transformer decoder layers. + + Args: + name: variable scope. + n_layers: number of transformer layers. + x: input to transformation. + x_mask: mask. + output_size: output dimensionality. + init: data-dependent init for weightnorm parameters. + **kwargs: Constains hparams, encoder_output, + encoder_decoder_attention_bias and decoder_self_attention_bias + + Returns: + outputs: Tensor of shape [batch_size, length, output_size]. + """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + hparams = kwargs.pop("hparams") + disable_dropout = kwargs.pop("disable_dropout") + if disable_dropout: + hparams = copy.deepcopy(hparams) + hparams.attention_dropout = 0.0 + hparams.layer_prepostprocess_dropout = 0.0 + hparams.relu_dropout = 0.0 + n_channels = common_layers.shape_list(x)[-1] + if n_channels != hparams.hidden_size: + hparams = copy.deepcopy(hparams) + hparams.hidden_size = n_channels + + outputs = common_attention.add_timing_signal_1d(x) + with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE): + for layer_idx in range(n_layers): + outputs = transformer_decoder_layer( + decoder_input=outputs, + layer_idx=layer_idx, + hparams=hparams, + **kwargs) + outputs = common_layers.layer_preprocess(outputs, hparams) + outputs = dense_weightnorm( + "h2o", outputs, output_size, x_mask, init_scale=0.0, init=init) + return outputs + + +def reduce_sum_over_lc(x, x_mask): + """Returns sum of x (over L and C) given the actual length and pad. + + Args: + x: input. (B,L,C) + x_mask: binary padding mask. (B,L) + + Returns: + sum of x. 
(B) + """ + + if x.shape.rank == 3 and x_mask.shape.rank == 2: + x_mask = x_mask[..., tf.newaxis] + else: + tf.logging.info("x: {}, x_mask: {}".format(x.shape.rank, x_mask.shape.rank)) + raise ValueError("Dimension not supported.") + + mean = x * x_mask + return tf.reduce_sum(mean, axis=[1, 2]) # sum over L, C + + +def reduce_sum_over_l(x, x_mask): + """Returns sum of x (over L) given the actual length and pad. + + Args: + x: input. (B,L,C) + x_mask: binary padding mask. (B,L) + + Returns: + sum of x. (B,C) + """ + + if x.shape.rank == 3 and x_mask.shape.rank == 2: + x_mask = x_mask[..., tf.newaxis] + else: + tf.logging.info("x: {}, x_mask: {}".format(x.shape.rank, x_mask.shape.rank)) + raise ValueError("Dimension not supported.") + + mean = x * x_mask + return tf.reduce_sum(mean, axis=-2) # sum over L + + +def reduce_mean_over_l(x, x_mask): + """Returns mean of x (over L) given the actual length and pad.""" + return reduce_sum_over_l(x, x_mask) / tf.reduce_sum(x_mask, 1, keepdims=True) + + +def reduce_mean_over_bl(x, x_mask): + """Returns average of x (over B and L) given the actual length and pad. + + Args: + x: input. (B,L,C) + x_mask: binary padding mask. (B,L) + + Returns: + mean of x. (C) + """ + + if x.shape.rank == 3 and x_mask.shape.rank == 2: + x_mask = x_mask[..., tf.newaxis] + else: + tf.logging.info("x: {}, x_mask: {}".format(x.shape.rank, x_mask.shape.rank)) + raise ValueError("Dimension not supported.") + + mean = x * x_mask + mean = tf.reduce_sum(mean, axis=[0, 1]) # sum over B, L + return mean / tf.reduce_sum(x_mask) + + +def reduce_mean_over_l_sum_over_c(x, x_mask): + """Returns mean of x over L and sum over C.""" + mean = reduce_sum_over_lc(x, x_mask) + return mean / tf.reduce_sum(x_mask, 1) + + +def reduce_mean_over_bl_sum_over_c(x, x_mask): + """Returns mean of x over B and L and sum over C.""" + mean = reduce_mean_over_bl(x, x_mask) + return tf.reduce_sum(mean) + + +def moments_over_bl(x, x_mask): + """Returns mean and var of x over B and L.""" + mean = reduce_mean_over_bl(x, x_mask) + var = reduce_mean_over_bl((x-mean)**2, x_mask) + return mean, var + + +def standard_normal_density(x, x_mask, reduce_sum=False): + """Return standard normal distribution with same shape as x.""" + log_probs = -0.5 * (x**2 + math.log(math.pi * 2.0)) + if reduce_sum: + log_probs = reduce_mean_over_bl_sum_over_c(log_probs, x_mask) + else: + log_probs = reduce_sum_over_lc(log_probs, x_mask) + return log_probs + + +def standard_normal(x, name="normal"): + """Return standard normal distribution with same shape as x.""" + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + dist = tfp.distributions.Normal( + loc=tf.zeros_like(x), + scale=tf.ones_like(x), + allow_nan_stats=False) + return dist + + +def diagonal_normal(outputs, name="normal"): + """Split outputs into mu and log_sigma and return z.""" + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + loc, log_scale = tf.split(outputs, 2, axis=-1) + scale = tf.exp(log_scale) + dist = tfp.distributions.Normal( + loc=loc, + scale=scale + tf.keras.backend.epsilon(), + allow_nan_stats=False) + return dist + + +def split_coupling( + x, x_mask, split_dim, identity_first, decoder_self_attention_bias): + """Split function used in coupling flows.""" + n_channels = common_layers.shape_list(x)[-1] + if split_dim == "c": + n_transform = n_identity = n_channels // 2 + x_id = x[..., :n_identity] if identity_first else x[..., n_transform:] + x_tr = x[..., n_identity:] if identity_first else x[..., :n_transform] + bias, mask = decoder_self_attention_bias, 
x_mask + + elif split_dim == "a": + n_transform = n_identity = n_channels // 2 + x_id = x[..., 0::2] if identity_first else x[..., 1::2] + x_tr = x[..., 1::2] if identity_first else x[..., 0::2] + bias, mask = decoder_self_attention_bias, x_mask + + elif split_dim == "t": + n_transform = n_identity = n_channels + x_id = x[:, 0::2, :] if identity_first else x[:, 1::2, :] + x_tr = x[:, 1::2, :] if identity_first else x[:, 0::2, :] + bias, mask = decoder_self_attention_bias[..., 0::2], x_mask[..., 0::2] + + return x_id, x_tr, n_identity, n_transform, bias, mask + + +def join_coupling(z_id, z_tr, split_dim, identity_first): + """Reverse split function used in coupling flows.""" + assert z_id.shape.rank == 3 and z_tr.shape.rank == 3 + result = [z_id, z_tr] if identity_first else [z_tr, z_id] + if split_dim == "c": + result = tf.concat(result, axis=2) # concat in the channel dimension + elif split_dim == "a": + result = tf.stack(result, axis=3) # stack in the channel dimension + elif split_dim == "t": + result = tf.stack(result, axis=2) # stack in the time dimension + return result + + +def assign(w, initial_value): + w = w.assign(initial_value) + with tf.control_dependencies([w]): + return w + + +def get_variable_ddi( + name, shape, value, init, initializer=None, dtype=tf.float32, + regularizer=None, trainable=True): + """Wrapper for data-dependent initialization.""" + kwargs = {"trainable": trainable} + if initializer: + kwargs["initializer"] = initializer + if regularizer: + kwargs["regularizer"] = regularizer + w = tf.get_variable(name, shape, dtype, **kwargs) + if isinstance(init, bool): + if init: + return assign(w, value) + return w + else: + return tf.cond(init, lambda: assign(w, value), lambda: w) diff --git a/tensor2tensor/layers/transformer_glow_layers_ops_test.py b/tensor2tensor/layers/transformer_glow_layers_ops_test.py new file mode 100644 index 000000000..9bed12aca --- /dev/null +++ b/tensor2tensor/layers/transformer_glow_layers_ops_test.py @@ -0,0 +1,103 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
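+
+# Illustrative sketch (assumed usage, not part of the original tests):
+# gops.split_coupling and gops.join_coupling are inverses up to the final
+# reshape done in the coupling layers. For the alternating ("a") split:
+#
+#   x_id, x_tr, _, _, bias, mask = gops.split_coupling(
+#       x, x_mask, split_dim="a", identity_first=True,
+#       decoder_self_attention_bias=bias)
+#   y = gops.join_coupling(x_id, x_tr, split_dim="a", identity_first=True)
+#   # tf.reshape(y, tf.shape(x)) recovers x.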
+ +"""Tests for tensor2tensor.layers.transformer_flow_ops.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np + +from tensor2tensor.layers import transformer_glow_layers_ops as gops +from tensor2tensor.models import transformer +import tensorflow.compat.v1 as tf + +BATCH_SIZE = 10 +INPUT_LENGTH = 3 +TARGET_LENGTH = 16 +N_CHANNELS = 24 +HIDDEN_SIZE = 64 +N_1X1_HEADS = 4 + + +class TransformerFlowOpsTest(parameterized.TestCase, tf.test.TestCase): + + def get_data(self): + x = tf.random_normal((BATCH_SIZE, TARGET_LENGTH, N_CHANNELS), + mean=0.0, stddev=1.0) + x_lengths = np.random.randint(low=1, high=TARGET_LENGTH+1, size=BATCH_SIZE) + x_mask = tf.sequence_mask(x_lengths, maxlen=TARGET_LENGTH, dtype=tf.float32) + return x, x_mask + + def get_hparams(self): + hparams = transformer.transformer_small() + hparams.add_hparam("prior_type", "affine") + hparams.add_hparam("depths", "12") # infer n_levels from depths + hparams.add_hparam("split_plans", "tca") + hparams.add_hparam("factor", 2) # squeezing factor + hparams.add_hparam("n_layers_transform_params", 1) + hparams.add_hparam("n_layers_multiscale_prior", 3) + hparams.add_hparam("flow_num_heads", 4) + hparams.add_hparam("flow_num_1x1_heads", N_1X1_HEADS) + hparams.add_hparam("flow_hidden_size", 64) + hparams.add_hparam("flow_filter_size", 128) + hparams.add_hparam("cond_prior_on_src", True) + hparams.add_hparam("bottom_prior_std", False) + hparams.add_hparam("latent_size", N_CHANNELS) + hparams.add_hparam("scale_width", 0.999) + hparams.add_hparam("coupling_transform_ratio", 0.5) + hparams.add_hparam("actnorm_type", "actnorm") + hparams.add_hparam("actnorm_weightnorm", True) + hparams.add_hparam("perm_type", "1x1") + hparams.add_hparam("init_permutation", True) + hparams.causal_decoder_self_attention = False + hparams.hidden_size = HIDDEN_SIZE + return hparams + + def get_kwargs(self, hparams=None): + if hparams is None: + hparams = self.get_hparams() + encoder_output = tf.random.uniform( + (BATCH_SIZE, INPUT_LENGTH, HIDDEN_SIZE)) + encoder_decoder_attention_bias = tf.random.uniform( + (BATCH_SIZE, 1, 1, INPUT_LENGTH)) + decoder_self_attention_bias = tf.random.uniform( + (BATCH_SIZE, 1, 1, TARGET_LENGTH)) + kwargs = {"hparams": hparams, + "encoder_output": encoder_output, + "encoder_decoder_attention_bias": encoder_decoder_attention_bias, + "decoder_self_attention_bias": decoder_self_attention_bias} + return kwargs + + def test_dense_weightnorm(self): + x, x_mask = self.get_data() + x = tf.random_normal((BATCH_SIZE, TARGET_LENGTH, HIDDEN_SIZE), + mean=0.0, stddev=1.0) + y = gops.dense_weightnorm("wn", x, N_CHANNELS, x_mask, + init_scale=1.0, init=True) + + y_nopad = tf.boolean_mask(y, x_mask) + mean, var = tf.nn.moments(y_nopad, axes=[0]) + self.evaluate(tf.global_variables_initializer()) + x, x_mask, y, y_nopad, mean, var = ( + self.evaluate([x, x_mask, y, y_nopad, mean, var])) + self.assertEqual(y.shape, (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS)) + self.assertTrue(np.allclose(mean, 0.0, atol=1e-5)) + self.assertTrue(np.allclose(var, 1.0, atol=1e-5)) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/layers/transformer_glow_layers_test.py b/tensor2tensor/layers/transformer_glow_layers_test.py new file mode 100644 index 000000000..c4f97050a --- /dev/null +++ b/tensor2tensor/layers/transformer_glow_layers_test.py @@ -0,0 +1,333 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.layers.transformer_glow_layers. + +1. Actnorm test (zero mean and unit variance). +2. Invertibility tests for: + * actnorm + * actnorm with weight normalization + * 1x1 invertible convolution + * multi-head 1x1 invertible convolution + * affine coupling + * split + * 1 step of flow + * k steps of flow + * entire pipeline (tested up to 3 levels, 32 steps: tca/tca/ca, 12/12/8) +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tempfile +from absl.testing import parameterized +import numpy as np + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import transformer_glow_layers as glow +from tensor2tensor.layers import transformer_glow_layers_ops as gops +from tensor2tensor.models import transformer +import tensorflow.compat.v1 as tf + +BATCH_SIZE = 20 +INPUT_LENGTH = 3 +TARGET_LENGTH = 16 +N_CHANNELS = 256 +HIDDEN_SIZE = 64 +N_1X1_HEADS = 4 +DTYPE = tf.float32 + + +def float32_bottleneck(x): + return tf.cast(tf.cast(x, tf.float32), tf.float64) + + +def get_diff(l1, l2): + l2 = l2[::-1] + for i1, i2 in zip(l1, l2): + print (i1 - i2) + for i1, i2 in zip(l1, l2): + print (np.max(np.abs(i1 - i2))) + + +class TransformerGlowLayersTest(parameterized.TestCase, tf.test.TestCase): + + def get_hparams(self): + hparams = transformer.transformer_small() + hparams.add_hparam("prior_type", "affine") + hparams.add_hparam("factor", 2) # squeezing factor + hparams.add_hparam("n_layers_transform_params", 1) + hparams.add_hparam("n_1x1_heads", N_1X1_HEADS) + hparams.add_hparam("flow_num_1x1_heads", 4) + hparams.add_hparam("flow_num_heads", 4) + hparams.add_hparam("flow_hidden_size", 64) + hparams.add_hparam("flow_filter_size", 128) + hparams.add_hparam("flow_layer_prepostprocess_dropout", 0.0) + hparams.add_hparam("flow_attention_dropout", 0.0) + hparams.add_hparam("flow_relu_dropout", 0.0) + hparams.add_hparam("latent_size", N_CHANNELS) + hparams.add_hparam("use_weightnorm", True) + hparams.add_hparam("kl_startup_steps", 2000) + hparams.add_hparam("affine_scale", "glow") + hparams.add_hparam("scale_width", 0.999) + hparams.add_hparam("step_fn", "glow") # glow / chunting + hparams.add_hparam("conv_fn", "np") # np / tf + hparams.add_hparam("posterior_type", "diagonal_normal") + hparams.causal_decoder_self_attention = False + hparams.hidden_size = HIDDEN_SIZE + hparams.weight_dtype = "float32" + hparams.add_hparam("pos_attn", False) + return hparams + + def get_data(self): + x = tf.random_normal( + (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS), dtype=DTYPE) + x_lengths = np.random.randint( + low=1, high=TARGET_LENGTH+1, size=BATCH_SIZE) + x_lengths = np.ceil(x_lengths / 4.0) * 4.0 + x_lengths = x_lengths.astype(int) + x_mask = tf.sequence_mask(x_lengths, maxlen=TARGET_LENGTH, dtype=DTYPE) + return x, x_mask, x_lengths + + def get_kwargs(self, x_mask, hparams=None): + if hparams is None: + hparams = self.get_hparams() + 
encoder_output = tf.random.uniform( + (BATCH_SIZE, INPUT_LENGTH, HIDDEN_SIZE), dtype=DTYPE) + encoder_decoder_attention_bias = tf.zeros( + (BATCH_SIZE, 1, 1, INPUT_LENGTH), dtype=DTYPE) + decoder_self_attention_bias = 1.0 - x_mask[:, tf.newaxis, tf.newaxis, :] + decoder_self_attention_bias *= -1e9 + kwargs = {"hparams": hparams, + "encoder_output": encoder_output, + "encoder_decoder_attention_bias": encoder_decoder_attention_bias, + "decoder_self_attention_bias": decoder_self_attention_bias} + return kwargs + + def test_actnorm(self): + _, x_mask, _ = self.get_data() + x = tf.random_normal((BATCH_SIZE, TARGET_LENGTH, N_CHANNELS), + mean=50.0, stddev=10.0, dtype=DTYPE) + x_act, logabsdet = glow.actnorm( + "actnorm", x, x_mask, inverse=False, init=True) + + x_act_nopad = tf.boolean_mask(x_act, x_mask) + x_mean, x_var = tf.nn.moments(x_act_nopad, axes=[0]) + self.evaluate(tf.global_variables_initializer()) + x, x_act, logabsdet, x_mean, x_var = ( + self.evaluate([x, x_act, logabsdet, x_mean, x_var])) + self.assertEqual(x_act.shape, (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS)) + self.assertEqual(logabsdet.shape, (BATCH_SIZE,)) + self.assertTrue(np.allclose(x_mean, 0.0, atol=1e-5)) + self.assertTrue(np.allclose(x_var, 1.0, atol=1e-5)) + + def test_actnorm_invertibility(self): + name = "actnorm" + x, x_mask, _ = self.get_data() + + x_inv, logabsdet = glow.actnorm( + name, x, x_mask, inverse=False, init=False) + x_inv_inv, logabsdet_inv = glow.actnorm( + name, x_inv, x_mask, inverse=True, init=False) + self.evaluate(tf.global_variables_initializer()) + x, x_inv, x_inv_inv, x_mask, logabsdet, logabsdet_inv = ( + self.evaluate( + [x, x_inv, x_inv_inv, x_mask, logabsdet, logabsdet_inv])) + diff = x - x_inv_inv + logabsdet_sum = logabsdet + logabsdet_inv + self.assertEqual(x.shape, (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS)) + self.assertEqual(x_inv.shape, (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS)) + self.assertEqual(x_inv_inv.shape, (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS)) + self.assertTrue(np.allclose(diff, 0.0, atol=1e-5)) + self.assertTrue(np.allclose(logabsdet_sum, 0.0, atol=1e-5)) + + @parameterized.parameters( + (glow.multihead_invertible_1x1_conv_np, "a"), + (glow.multihead_invertible_1x1_conv_np, "c"), + ) + def test_multi_1x1_invertibility( + self, func, multihead_split): + name = "multi_1x1" + x, x_mask, _ = self.get_data() + + x_inv, logabsdet = func( + name, x, x_mask, multihead_split, inverse=False, dtype=DTYPE) + x_inv_inv, logabsdet_inv = func( + name, x_inv, x_mask, multihead_split, inverse=True, dtype=DTYPE) + self.evaluate(tf.global_variables_initializer()) + x, x_mask, x_inv, x_inv_inv, logabsdet, logabsdet_inv = ( + self.evaluate( + [x, x_mask, x_inv, x_inv_inv, logabsdet, logabsdet_inv])) + diff = x - x_inv_inv + logabsdet_sum = logabsdet + logabsdet_inv + logabsdet_ = logabsdet / np.sum(x_mask, -1) + self.assertTrue(np.allclose(diff, 0.0, atol=1e-5)) + self.assertTrue(np.allclose(logabsdet_, 0.0, atol=1e-5)) + self.assertTrue(np.allclose(logabsdet_sum, 0.0, atol=1e-5)) + + @parameterized.parameters( + (glow.additive_coupling, "c"), + (glow.additive_coupling, "t"), + (glow.additive_coupling, "a"), + (glow.affine_coupling, "c"), + (glow.affine_coupling, "t"), + (glow.affine_coupling, "a"), + ) + def test_coupling_invertibility(self, func, split_dim): + name = "affine" + x, x_mask, _ = self.get_data() + kwargs = self.get_kwargs(x_mask) + + x_inv, logabsdet = func( + name, x, x_mask, split_dim=split_dim, + identity_first=True, inverse=False, init=False, disable_dropout=True, + **kwargs) + 
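# --- Illustrative sketch, not part of the diff above ---------------------
# Why the inverse call below should undo the forward call just made, and why
# the two log-determinants cancel: a toy affine coupling in NumPy. The
# identity half parameterises a scale/shift applied to the other half, so the
# inverse is exact and logabsdet_forward + logabsdet_inverse == 0. All names
# here are assumptions for the example.
import numpy as np

rng = np.random.default_rng(0)
x_id, x_tr = rng.normal(size=(5, 8)), rng.normal(size=(5, 8))
w = 0.1 * rng.normal(size=(8, 16))

h = x_id @ w                                    # any function of the identity half
log_scale, shift = h[:, :8], h[:, 8:]

y_tr = x_tr * np.exp(log_scale) + shift         # forward transform
logabsdet = log_scale.sum(-1)

x_tr_rec = (y_tr - shift) * np.exp(-log_scale)  # inverse transform
logabsdet_inv = -log_scale.sum(-1)

assert np.allclose(x_tr_rec, x_tr)
assert np.allclose(logabsdet + logabsdet_inv, 0.0)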
x_inv_inv, logabsdet_inv = func( + name, x_inv, x_mask, split_dim=split_dim, + identity_first=True, inverse=True, init=False, disable_dropout=True, + **kwargs) + self.evaluate(tf.global_variables_initializer()) + x, x_mask, x_inv, x_inv_inv, logabsdet, logabsdet_inv = ( + self.evaluate( + [x, x_mask, x_inv, x_inv_inv, logabsdet, logabsdet_inv])) + diff = x - x_inv_inv + logabsdet_sum = logabsdet + logabsdet_inv + self.assertTrue(np.allclose(diff, 0.0, atol=1e-5)) + self.assertTrue(np.allclose(logabsdet_sum, 0.0, atol=1e-5)) + + def test_split(self): + x, x_mask, _ = self.get_data() + + x_inv, z, log_p = glow.split( + "split", x, x_mask, inverse=False) + x_inv_inv, _, log_p_inv = glow.split( + "split", x_inv, x_mask, z=z, inverse=True) + self.evaluate(tf.global_variables_initializer()) + x, x_inv, x_inv_inv, z, log_p, log_p_inv = self.evaluate( + [x, x_inv, x_inv_inv, z, log_p, log_p_inv]) + diff = x - x_inv_inv + log_p_diff = log_p - log_p_inv + self.assertEqual( + x_inv.shape, (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS//2)) + self.assertEqual( + z.shape, (BATCH_SIZE, TARGET_LENGTH, N_CHANNELS//2)) + self.assertTrue(np.allclose(diff, 0.0, atol=1e-5)) + self.assertTrue(np.allclose(log_p_diff, 0.0, atol=1e-5)) + + def test_flow_invertibility(self): + name = "flow_step" + split_dims = "cat" + x, x_mask, _ = self.get_data() + kwargs = self.get_kwargs(x_mask) + x_inv, logabsdet = glow.flow_step_glow( + name, x, x_mask, split_dims, inverse=False, init=False, dtype=DTYPE, + disable_dropout=True, **kwargs) + x_inv_inv, logabsdet_inv = glow.flow_step_glow( + name, x_inv, x_mask, split_dims, inverse=True, init=False, + dtype=DTYPE, disable_dropout=True, **kwargs) + self.evaluate(tf.global_variables_initializer()) + x, x_mask, x_inv, x_inv_inv, logabsdet, logabsdet_inv = ( + self.evaluate( + [x, x_mask, x_inv, x_inv_inv, logabsdet, logabsdet_inv])) + diff = x - x_inv_inv + logabsdet_sum = logabsdet + logabsdet_inv + self.assertTrue(np.allclose(diff, 0.0, atol=2e-5)) + self.assertTrue(np.allclose(logabsdet_sum, 0.0, atol=7e-5)) + + @parameterized.parameters( + ("1", "cat", "affine"), + ("1/1", "cat/cat", "affine"), + ("1/1/1", "cat/cat/ca", "affine"), + ) + def test_aaa_glow_training(self, depths, split_plans, prior_type): + with tf.Graph().as_default(): + _, x_mask, _ = self.get_data() + x = tf.random_normal((BATCH_SIZE, TARGET_LENGTH, N_CHANNELS), + mean=10.0, stddev=3.0, dtype=DTYPE) + bias = common_attention.attention_bias_ignore_padding(1.0 - x_mask) + hparams = self.get_hparams() + hparams.prior_type = prior_type + hparams.depths = depths + hparams.split_plans = split_plans + n_levels = len(hparams.depths.split("/")) + kwargs = self.get_kwargs(x_mask, hparams) + _ = kwargs.pop("decoder_self_attention_bias") + + x_inv, _, _, _ = glow.glow( + "glow", x, x_mask, bias, inverse=False, init=True, + disable_dropout=True, **kwargs) + curr_dir = tempfile.mkdtemp() + model_path = os.path.join(curr_dir, "model") + + with tf.Session() as session: + saver = tf.train.Saver() + session.run(tf.global_variables_initializer()) + session.run(x_inv) + saver.save(session, model_path) + + with tf.Graph().as_default(): + _, x_mask, _ = self.get_data() + x = tf.random_normal((BATCH_SIZE, TARGET_LENGTH, N_CHANNELS), + mean=10.0, stddev=3.0, dtype=DTYPE) + bias = common_attention.attention_bias_ignore_padding(1.0 - x_mask) + hparams = self.get_hparams() + hparams.depths = depths + hparams.split_plans = split_plans + kwargs = self.get_kwargs(x_mask, hparams) + _ = kwargs.pop("decoder_self_attention_bias") + log_q_z = 
gops.standard_normal_density(x, x_mask) + log_q_z = tf.reduce_sum(log_q_z) / tf.reduce_sum(x_mask) + + x_inv, logabsdets, log_ps, zs = glow.glow( + "glow", x, x_mask, bias, inverse=False, init=False, + disable_dropout=True, **kwargs) + x_inv_inv, logabsdets_inv, log_ps_inv, _ = glow.glow( + "glow", x_inv, x_mask, bias, inverse=True, split_zs=zs, init=False, + disable_dropout=True, **kwargs) + logabsdets = tf.reduce_sum( + logabsdets, axis=0) / tf.reduce_sum(x_mask) + logabsdets_inv = tf.reduce_sum( + logabsdets_inv, axis=0) / tf.reduce_sum(x_mask) + log_ps = tf.reduce_sum(log_ps, axis=0) / tf.reduce_sum(x_mask) + log_ps_inv = tf.reduce_sum(log_ps_inv, axis=0) / tf.reduce_sum(x_mask) + + with tf.Session() as session: + saver = tf.train.Saver() + saver.restore(session, model_path) + (x, x_inv, x_inv_inv, log_q_z, logabsdets, log_ps, + logabsdets_inv, log_ps_inv) = session.run([ + x, x_inv, x_inv_inv, log_q_z, logabsdets, log_ps, + logabsdets_inv, log_ps_inv]) + diff = x - x_inv_inv + log_ps_diff = log_ps - log_ps_inv + logabsdets_sum = logabsdets + logabsdets_inv + self.assertEqual( + x_inv.shape, + (BATCH_SIZE, TARGET_LENGTH//(2**(n_levels-1)), N_CHANNELS)) + print (np.max(np.abs(diff))) + print (np.max(np.abs(log_ps_diff))) + print (np.max(np.abs(logabsdets_sum))) + self.assertTrue(np.allclose(diff, 0.0, atol=1e-4), + msg=np.max(np.abs(diff))) + self.assertTrue(np.allclose(log_ps_diff, 0.0, atol=1e-4), + msg=np.max(np.abs(log_ps_diff))) + self.assertTrue(np.allclose(logabsdets_sum, 0.0, atol=1e-4), + msg=np.max(np.abs(logabsdets_sum))) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/layers/transformer_layers.py b/tensor2tensor/layers/transformer_layers.py new file mode 100644 index 000000000..327f8e591 --- /dev/null +++ b/tensor2tensor/layers/transformer_layers.py @@ -0,0 +1,400 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Commonly re-used transformer layers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import expert_utils +from tensor2tensor.utils import mlperf_log + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +# TODO(lukaszkaiser): remove this function when not needed any more. +def layers(): + return common_layers.layers() + + +def transformer_prepare_encoder(inputs, target_space, hparams, features=None, + type_ids=None, num_types=None, + reuse_target_embedding=tf.AUTO_REUSE): + """Prepare one shard of the model for the encoder. + + Args: + inputs: a Tensor. + target_space: a Tensor. + hparams: run hyperparameters + features: optionally pass the entire features dictionary as well. + This is needed now for "packed" datasets. 
+ type_ids: optional, an int64 Tensor of shape [batch, length] that allows + for adding type embeddings, similar to positional embeddings. + num_types: optional, an int that decides the number of types in type_ids. + reuse_target_embedding: option to reuse variable name in the case that + symbol modalities are reused between inputs/targets. + + Returns: + encoder_input: a Tensor, bottom of encoder stack + encoder_self_attention_bias: a bias tensor for use in encoder self-attention + encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder + attention + """ + ishape_static = inputs.shape.as_list() + encoder_input = inputs + if features and "inputs_segmentation" in features: + # Packed dataset. Keep the examples from seeing each other. + inputs_segmentation = features["inputs_segmentation"] + inputs_position = features["inputs_position"] + targets_segmentation = features["targets_segmentation"] + if (hasattr(hparams, "unidirectional_encoder") and + hparams.unidirectional_encoder): + tf.logging.info("Using unidirectional encoder") + encoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle( + common_layers.shape_list(inputs)[1])) + else: + encoder_self_attention_bias = ( + common_attention.attention_bias_same_segment( + inputs_segmentation, inputs_segmentation)) + encoder_decoder_attention_bias = ( + common_attention.attention_bias_same_segment(targets_segmentation, + inputs_segmentation)) + else: + encoder_padding = common_attention.embedding_to_padding(encoder_input) + ignore_padding = common_attention.attention_bias_ignore_padding( + encoder_padding) + if (hasattr(hparams, "unidirectional_encoder") and + hparams.unidirectional_encoder): + tf.logging.info("Using unidirectional encoder") + encoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle( + common_layers.shape_list(inputs)[1])) + else: + # Usual case - not a packed dataset. + encoder_self_attention_bias = ignore_padding + encoder_decoder_attention_bias = ignore_padding + inputs_position = None + if hparams.proximity_bias: + encoder_self_attention_bias += common_attention.attention_bias_proximal( + common_layers.shape_list(inputs)[1]) + if target_space is not None and hparams.get("use_target_space_embedding", + True): + # Append target_space_id embedding to inputs. 
+ emb_target_space = common_layers.embedding( + target_space, + 32, + ishape_static[-1], + name="target_space_embedding", + dtype=hparams.get("activation_dtype", "float32"), + reuse=reuse_target_embedding) + emb_target_space = tf.reshape(emb_target_space, [1, 1, -1]) + encoder_input += emb_target_space + if hparams.pos == "timing": + if inputs_position is not None: + encoder_input = common_attention.add_timing_signal_1d_given_position( + encoder_input, inputs_position) + else: + encoder_input = common_attention.add_timing_signal_1d(encoder_input) + elif hparams.pos == "timing_from_features": + encoder_input = common_attention.add_timing_signals_from_features( + encoder_input, features, hparams.position_features) + elif hparams.pos == "emb": + encoder_input = common_attention.add_positional_embedding( + encoder_input, hparams.max_length, "inputs_positional_embedding", + inputs_position) + + # Add type embeddings + if type_ids is not None: + if not num_types: + raise ValueError("Need to set num_types as well.") + encoder_input = common_attention.add_positional_embedding( + encoder_input, num_types, "inputs_type_embedding", type_ids) + + encoder_self_attention_bias = common_layers.cast_like( + encoder_self_attention_bias, encoder_input) + encoder_decoder_attention_bias = common_layers.cast_like( + encoder_decoder_attention_bias, encoder_input) + return (encoder_input, encoder_self_attention_bias, + encoder_decoder_attention_bias) + + +def transformer_encoder(encoder_input, + encoder_self_attention_bias, + hparams, + name="encoder", + nonpadding=None, + save_weights_to=None, + make_image_summary=True, + losses=None, + attn_bias_for_padding=None): + """A stack of transformer layers. + + Args: + encoder_input: a Tensor + encoder_self_attention_bias: bias Tensor for self-attention + (see common_attention.attention_bias()) + hparams: hyperparameters for model + name: a string + nonpadding: optional Tensor with shape [batch_size, encoder_length] + indicating what positions are not padding. This must either be + passed in, which we do for "packed" datasets, or inferred from + encoder_self_attention_bias. The knowledge about padding is used + for pad_remover(efficiency) and to mask out padding in convolutional + layers. + save_weights_to: an optional dictionary to capture attention weights + for visualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. + losses: optional list onto which to append extra training losses + attn_bias_for_padding: Padded attention bias in case a unidirectional + encoder is being used where future attention is masked. 
+ + Returns: + y: a Tensors + """ + x = encoder_input + attention_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "attention_dropout_broadcast_dims", ""))) + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS, + value=hparams.num_encoder_layers or hparams.num_hidden_layers) + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT, + value=hparams.attention_dropout) + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_ATTENTION_DENSE, + value={ + "use_bias": "false", + "num_heads": hparams.num_heads, + "hidden_size": hparams.hidden_size + }) + + with tf.variable_scope(name): + if nonpadding is not None: + padding = 1.0 - nonpadding + else: + attention_bias = encoder_self_attention_bias + if attn_bias_for_padding is not None: + attention_bias = attn_bias_for_padding + padding = common_attention.attention_bias_to_padding(attention_bias) + nonpadding = 1.0 - padding + pad_remover = None + if hparams.use_pad_remover and not common_layers.is_xla_compiled(): + pad_remover = expert_utils.PadRemover(padding) + for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): + with tf.variable_scope("layer_%d" % layer): + with tf.variable_scope("self_attention"): + if layer < hparams.get("num_area_layers", 0): + max_area_width = hparams.get("max_area_width", 1) + max_area_height = hparams.get("max_area_height", 1) + memory_height = hparams.get("memory_height", 1) + else: + max_area_width = 1 + max_area_height = 1 + memory_height = 1 + y = common_attention.multihead_attention( + common_layers.layer_preprocess(x, hparams), + None, + encoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + max_relative_position=hparams.max_relative_position, + heads_share_relative_embedding=( + hparams.heads_share_relative_embedding), + add_relative_to_values=hparams.add_relative_to_values, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + max_length=hparams.get("max_length"), + vars_3d=hparams.get("attention_variables_3d"), + activation_dtype=hparams.get("activation_dtype", "float32"), + weight_dtype=hparams.get("weight_dtype", "float32"), + hard_attention_k=hparams.get("hard_attention_k", 0), + gumbel_noise_weight=hparams.get("gumbel_noise_weight", 0.0), + max_area_width=max_area_width, + max_area_height=max_area_height, + memory_height=memory_height, + area_key_mode=hparams.get("area_key_mode", "none"), + area_value_mode=hparams.get("area_value_mode", "none"), + training=(hparams.get("mode", tf_estimator.ModeKeys.TRAIN) + == tf_estimator.ModeKeys.TRAIN)) + x = common_layers.layer_postprocess(x, y, hparams) + with tf.variable_scope("ffn"): + y = transformer_ffn_layer( + common_layers.layer_preprocess(x, hparams), + hparams, + pad_remover, + conv_padding="SAME", + nonpadding_mask=nonpadding, + losses=losses) + x = common_layers.layer_postprocess(x, y, hparams) + # if normalization is done in layer_preprocess, then it should also be done + # on the output, since the output can grow very large, being the sum of + # a whole stack of unnormalized layer outputs. 
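# --- Illustrative sketch, not part of the diff above ---------------------
# The residual pattern the loop above implements with the usual "n" preprocess
# and "da" (dropout + add) postprocess settings: each sublayer sees a
# layer-normalised input and its output is added back to the un-normalised
# residual stream, which is why the stack output still needs the final
# layer_preprocess applied below. Dropout is omitted; NumPy stand-in only.
import numpy as np

def layer_norm(x, eps=1e-6):
  mu = x.mean(-1, keepdims=True)
  return (x - mu) / np.sqrt(x.var(-1, keepdims=True) + eps)

def residual_sublayer(x, sublayer):
  return x + sublayer(layer_norm(x))    # preprocess -> sublayer -> add

rng = np.random.default_rng(0)
x = rng.normal(size=(2, 5, 8))
for _ in range(4):                      # stand-in for attention + ffn sublayers
  x = residual_sublayer(x, lambda h: 0.5 * h)
y = layer_norm(x)                       # final normalisation of the stack output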
+ mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_NORM, + value={"hidden_size": hparams.hidden_size}) + return common_layers.layer_preprocess(x, hparams) + + +def transformer_ffn_layer(x, + hparams, + pad_remover=None, + conv_padding="LEFT", + nonpadding_mask=None, + losses=None, + cache=None, + decode_loop_step=None, + readout_filter_size=0, + layer_collection=None): + """Feed-forward layer in the transformer. + + Args: + x: a Tensor of shape [batch_size, length, hparams.hidden_size] + hparams: hyperparameters for model + pad_remover: an expert_utils.PadRemover object tracking the padding + positions. If provided, when using convolutional settings, the padding + is removed before applying the convolution, and restored afterward. This + can give a significant speedup. + conv_padding: a string - either "LEFT" or "SAME". + nonpadding_mask: an optional Tensor with shape [batch_size, length]. + needed for convolutional layers with "SAME" padding. + Contains 1.0 in positions corresponding to nonpadding. + losses: optional list onto which to append extra training losses + cache: dict, containing tensors which are the results of previous + attentions, used for fast decoding. + decode_loop_step: An integer, step number of the decoding loop. + Only used for inference on TPU. + readout_filter_size: if it's greater than 0, then it will be used instead of + filter_size + layer_collection: A tensorflow_kfac.LayerCollection. Only used by the + KFAC optimizer. Default is None. + + + Returns: + a Tensor of shape [batch_size, length, hparams.hidden_size] + + Raises: + ValueError: If losses arg is None, but layer generates extra losses. + """ + ffn_layer = hparams.ffn_layer + relu_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "relu_dropout_broadcast_dims", ""))) + if ffn_layer == "conv_hidden_relu": + # Backwards compatibility + ffn_layer = "dense_relu_dense" + if ffn_layer == "dense_relu_dense": + # In simple convolution mode, use `pad_remover` to speed up processing. + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_FFN_FILTER_DENSE, + value={ + "filter_size": hparams.filter_size, + "use_bias": "True", + "activation": mlperf_log.RELU + }) + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_FFN_OUTPUT_DENSE, + value={ + "hidden_size": hparams.hidden_size, + "use_bias": "True", + }) + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_RELU_DROPOUT, value=hparams.relu_dropout) + if pad_remover: + original_shape = common_layers.shape_list(x) + # Collapse `x` across examples, and remove padding positions. + x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0)) + x = tf.expand_dims(pad_remover.remove(x), axis=0) + conv_output = common_layers.dense_relu_dense( + x, + hparams.filter_size, + hparams.hidden_size, + dropout=hparams.relu_dropout, + dropout_broadcast_dims=relu_dropout_broadcast_dims, + layer_collection=layer_collection) + if pad_remover: + # Restore `conv_output` to the original shape of `x`, including padding. 
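# --- Illustrative sketch, not part of the diff above ---------------------
# What the remove/restore pair around the feed-forward is doing conceptually:
# padded positions are dropped before the position-wise dense layers so no
# compute is spent on them, then zeros are scattered back so the output shape
# matches the input. NumPy stand-in for expert_utils.PadRemover; names are
# assumptions for the example.
import numpy as np

x = np.arange(12, dtype=float).reshape(6, 2)    # flattened [batch*length, hidden]
padding = np.array([0, 0, 1, 0, 1, 1], float)   # 1.0 where the position is padding

keep = np.flatnonzero(padding < 1e-9)
compact = x[keep]                               # "remove": real positions only
ffn_out = 2.0 * compact                         # stand-in for dense_relu_dense

restored = np.zeros_like(x)                     # "restore": padded rows become 0
restored[keep] = ffn_out
assert restored.shape == x.shape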
+ conv_output = tf.reshape( + pad_remover.restore(tf.squeeze(conv_output, axis=0)), original_shape) + return conv_output + elif ffn_layer == "conv_relu_conv": + return common_layers.conv_relu_conv( + x, + readout_filter_size or hparams.filter_size, + hparams.hidden_size, + first_kernel_size=hparams.conv_first_kernel, + second_kernel_size=1, + padding=conv_padding, + nonpadding_mask=nonpadding_mask, + dropout=hparams.relu_dropout, + cache=cache, + decode_loop_step=decode_loop_step) + elif ffn_layer == "parameter_attention": + return common_attention.parameter_attention( + x, hparams.parameter_attention_key_channels or hparams.hidden_size, + hparams.parameter_attention_value_channels or hparams.hidden_size, + hparams.hidden_size, readout_filter_size or hparams.filter_size, + hparams.num_heads, + hparams.attention_dropout) + elif ffn_layer == "conv_hidden_relu_with_sepconv": + return common_layers.conv_hidden_relu( + x, + readout_filter_size or hparams.filter_size, + hparams.hidden_size, + kernel_size=(3, 1), + second_kernel_size=(31, 1), + padding="LEFT", + dropout=hparams.relu_dropout) + elif ffn_layer == "sru": + return common_layers.sru(x) + elif ffn_layer == "local_moe_tpu": + overhead = hparams.moe_overhead_eval + if hparams.mode == tf_estimator.ModeKeys.TRAIN: + overhead = hparams.moe_overhead_train + ret, loss = expert_utils.local_moe_tpu( + x, + hparams.filter_size // 2, + hparams.hidden_size, + hparams.moe_num_experts, + overhead=overhead, + loss_coef=hparams.moe_loss_coef) + elif ffn_layer == "local_moe": + overhead = hparams.moe_overhead_eval + if hparams.mode == tf_estimator.ModeKeys.TRAIN: + overhead = hparams.moe_overhead_train + ret, loss = expert_utils.local_moe( + x, + True, + expert_utils.ffn_expert_fn(hparams.hidden_size, [hparams.filter_size], + hparams.hidden_size), + hparams.moe_num_experts, + k=hparams.moe_k, + hparams=hparams) + losses.append(loss) + return ret + else: + assert ffn_layer == "none" + return x diff --git a/tensor2tensor/layers/transformer_memory.py b/tensor2tensor/layers/transformer_memory.py new file mode 100644 index 000000000..b35707128 --- /dev/null +++ b/tensor2tensor/layers/transformer_memory.py @@ -0,0 +1,393 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The memory unit for Transformer.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_layers +import tensorflow.compat.v1 as tf + + +class RecurrentMemory(object): + """Base class for recurrent memory. + + This class defines the memory interface, but behaves like a no-op. + """ + + def pre_attention(self, segment, query_antecedent, memory_antecedent, bias): + """Called prior to self-attention, to incorporate memory items. + + Args: + segment: an integer Tensor with shape [batch] + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: must be None. 
Attention normally allows this to be a + Tensor with shape [batch, length_m, channels], but we currently only + support memory for decoder-side self-attention. + bias: bias Tensor (see attention_bias()) + Returns: + (data, new_query_antecedent, new_memory_antecedent, new_bias) + """ + del segment + return None, query_antecedent, memory_antecedent, bias + + def post_attention(self, token, x): + """Called after self-attention. The memory can be updated here. + + Args: + token: Data returned by pre_attention, which can be used to carry over + state related to the current memory operation. + x: a Tensor of data after self-attention and feed-forward + Returns: + a (possibly modified) version of the input x + """ + assert token is None + return x + + +class RecentTokensMemory(RecurrentMemory): + """A memory module that caches features for recent tokens. + + When the number of tokens cached is equal to the chunk size, this is + equivalent to the memory used by Transformer-XL + (https://arxiv.org/abs/1901.02860) + """ + + def __init__(self, name, hparams): + hidden_size = hparams.hidden_size + self.chunk_length = hparams.split_targets_chunk_length + assert self.chunk_length > 0, "Chunking is required to use recurrent memory" + + if hasattr(hparams, "num_memory_items") and hparams.num_memory_items > 0: + self.tokens_to_cache = hparams.num_memory_items + else: + self.tokens_to_cache = self.chunk_length + + # TODO(kitaev): The implementation of the chunking code makes it somewhat + # convoluted to figure out how many actual sequences we can have per batch. + # The data pipeline should be revisited at some point. + if (hasattr(hparams, "recurrent_memory_batch_size") + and hparams.recurrent_memory_batch_size > 0): + batch_size_in_sequences = hparams.recurrent_memory_batch_size + else: + batch_size_in_sequences = hparams.batch_size / hparams.max_length + + memory_shape = [batch_size_in_sequences, self.tokens_to_cache, hidden_size] + bias_shape = [batch_size_in_sequences, 1, 1, self.tokens_to_cache] + + with tf.variable_scope(name): + self.previous_segment = tf.get_variable( + "memsegment", (batch_size_in_sequences,), + dtype=tf.int32, trainable=False, + collections=[tf.GraphKeys.LOCAL_VARIABLES], + initializer=tf.constant_initializer(0)) + + self.previous_vals = tf.get_variable( + "memvals", memory_shape, + dtype=tf.float32, trainable=False, + collections=[tf.GraphKeys.LOCAL_VARIABLES], + initializer=tf.constant_initializer(.0)) + + self.previous_bias = tf.get_variable( + "membias", bias_shape, + dtype=tf.float32, trainable=False, + collections=[tf.GraphKeys.LOCAL_VARIABLES], + initializer=tf.constant_initializer(-1e9)) + + def pre_attention(self, segment, query_antecedent, memory_antecedent, bias): + """Called prior to self-attention, to incorporate memory items. + + Args: + segment: an integer Tensor with shape [batch] + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: must be None. Attention normally allows this to be a + Tensor with shape [batch, length_m, channels], but we currently only + support memory for decoder-side self-attention. 
+ bias: bias Tensor (see attention_bias()) + Returns: + (data, new_query_antecedent, new_memory_antecedent, new_bias) + """ + assert memory_antecedent is None, "We only support language modeling" + + # In eval mode, batch size may be variable + memory_batch_size = tf.shape(self.previous_vals)[0] + current_batch_size = tf.shape(query_antecedent)[0] + amount_to_pad = memory_batch_size - current_batch_size + + # If segment id is zero, don't attend back to the memory + previous_bias = self.previous_bias[:current_batch_size, :, :, :] + tf.cast( + tf.equal(segment[:, None, None, None], 0), tf.float32) * -1e9 + + sliced_previous_vals = self.previous_vals[:current_batch_size, :, :] + + new_memory_antecedent = tf.concat( + [tf.stop_gradient(sliced_previous_vals), query_antecedent], 1) + new_bias = tf.concat([ + tf.tile(tf.stop_gradient(previous_bias), [1, 1, self.chunk_length, 1]), + tf.tile(bias, [current_batch_size, 1, 1, 1]), + ], -1) + + remember_segment = tf.pad(segment, [[0, amount_to_pad]]) + # TODO(kitaev): The code assumes that we always either increment the chunk + # number or reset it to zero. This assumption will not hold if we re-run the + # model for each token, e.g. for autoregressive greedy/beam/sampling decode. + remember_vals = tf.pad(query_antecedent, + [[0, amount_to_pad], [0, 0], [0, 0]]) + # Query position is on axis -2 for bias: as long as a token can be attended + # to from at least one query position (i.e. it's not padding), memorize it. + remember_bias = tf.tile( + tf.reduce_max(bias, -2, keepdims=True), [memory_batch_size, 1, 1, 1]) + # Assume that query_antecedent is always a full chunk (i.e. not truncated) + if self.chunk_length < self.tokens_to_cache: + remember_vals = tf.concat([self.previous_vals, remember_vals], 1) + remember_bias = tf.concat([ + self.previous_bias - 1e9 * tf.cast( + tf.equal( + tf.pad(segment, [[0, amount_to_pad]])[:, None, None, None], + 0), tf.float32), + remember_bias + ], -1) + if self.chunk_length != self.tokens_to_cache: + remember_vals = remember_vals[:, -self.tokens_to_cache:, :] + remember_bias = remember_bias[:, :, :, -self.tokens_to_cache:] + token = (remember_segment, remember_vals, remember_bias) + + return token, query_antecedent, new_memory_antecedent, new_bias + + def post_attention(self, token, x): + """Called after self-attention. The memory can be updated here. + + Args: + token: Data returned by pre_attention, which can be used to carry over + state related to the current memory operation. + x: a Tensor of data after self-attention and feed-forward + Returns: + a (possibly modified) version of the input x + """ + with tf.control_dependencies([ + self.previous_segment.assign(token[0]), + self.previous_vals.assign(token[1]), + self.previous_bias.assign(token[2]), + ]): + return tf.identity(x) + + +class TransformerMemory(object): + """Implements the Memory module. + + Based on Neural Turing Machines: arXiv:1410.5401 [cs.NE] + """ + + def __init__(self, batch_size, key_depth, val_depth, memory_size, + sharpen_factor=1., name="neural_memory"): + """Initialize the memory object. + + Args: + batch_size: the batch size. + key_depth: the depth of the memory keys. + val_depth: the depth of the memory values. + memory_size: the number of items in the memory. + sharpen_factor: the sharpen_factor for addressing the memory. + name: the optional variable scope. 
+ """ + self.name = name + self.batch_size = batch_size + self.key_depth = key_depth + self.val_depth = val_depth + self.memory_size = memory_size + self.sharpen_factor = sharpen_factor + with tf.variable_scope(name): + self.segment_number = tf.get_variable( + "segment_number", [self.batch_size], + dtype=tf.int32, trainable=False, + initializer=tf.constant_initializer(100000)) + self.mem_vals = tf.get_variable( + "memvals", [self.batch_size, self.memory_size, self.val_depth], + dtype=tf.float32, trainable=False, + initializer=tf.constant_initializer(.0)) + self.mean_logits = tf.get_variable( + "meanlogits", [self.batch_size, self.memory_size], + dtype=tf.float32, trainable=False, + initializer=tf.constant_initializer(.0)) + + def _norm(self, x): + """Compute the safe norm.""" + return tf.sqrt(tf.reduce_sum(tf.square(x), keepdims=True, axis=-1) + 1e-7) + + def _address_content(self, x): + """Address the memory based on content similarity. + + Args: + x: a tensor in the shape of [batch_size, length, depth]. + Returns: + the logits for each memory entry [batch_size, length, memory_size]. + """ + mem_keys = tf.layers.dense(self.mem_vals, self.key_depth, + bias_initializer=tf.constant_initializer(1.0), + name="mem_key") + mem_query = tf.layers.dense(x, self.key_depth, + bias_initializer=tf.constant_initializer(1.0), + name="mem_query") + norm = tf.matmul(self._norm(mem_query), self._norm(mem_keys), + transpose_b=True) + dot_product = tf.matmul(mem_query, mem_keys, transpose_b=True) + cos_dist = tf.div(dot_product, norm + 1e-7, name="cos_dist") + access_logits = self.sharpen_factor * cos_dist + return access_logits + + def read(self, x): + """Read from the memory. + + An external component can use the results via a simple MLP, + e.g., fn(x W_x + retrieved_mem W_m). + + Args: + x: a tensor in the shape of [batch_size, length, depth]. + Returns: + access_logits: the logits for accessing the memory in shape of + [batch_size, length, memory_size]. + retrieved_mem: the retrieved results in the shape of + [batch_size, length, val_depth]. + """ + access_logits = self._address_content(x) + weights = tf.nn.softmax(access_logits) + retrieved_mem = tf.reduce_sum( + tf.multiply(tf.expand_dims(weights, 3), + tf.expand_dims(self.mem_vals, axis=1)), axis=2) + return access_logits, retrieved_mem + + def write(self, x, access_logits): + """Write to the memory based on a combination of similarity and least used. + + Based on arXiv:1607.00036v2 [cs.LG]. + + Args: + x: a tensor in the shape of [batch_size, length, depth]. + access_logits: the logits for accessing the memory. + Returns: + the update op. 
+ """ + gamma = tf.layers.dense(x, 1, activation=tf.sigmoid, name="gamma") + write_logits = access_logits - gamma * tf.expand_dims(self.mean_logits, 1) + candidate_value = tf.layers.dense(x, self.val_depth, + activation=tf.nn.relu, + name="candidate_value") + erase_gates = tf.layers.dense(x, self.memory_size, + activation=tf.nn.sigmoid, + name="erase") + write_weights = tf.nn.softmax(write_logits) + erase_weights = tf.expand_dims(1 - erase_gates * write_weights, 3) + erase = tf.multiply(erase_weights, + tf.expand_dims(self.mem_vals, 1)) + addition = tf.multiply( + tf.expand_dims(write_weights, 3), + tf.expand_dims(candidate_value, 2)) + update_value_op = self.mem_vals.assign( + tf.reduce_mean(erase + addition, axis=1)) + with tf.control_dependencies([update_value_op]): + write_op = self.mean_logits.assign( + self.mean_logits * 0.1 + tf.reduce_mean(write_logits * 0.9, axis=1)) + return write_op + + def set(self, mem_vals, mean_logits): + set_op = tf.group([ + self.mem_vals.assign(mem_vals), + self.mean_logits.assign(mean_logits)]) + return set_op + + def get(self): + return self.mem_vals, self.mean_logits + + def update_segment_number(self, segment_number): + return self.segment_number.assign(segment_number) + + def reset(self, entries_to_reset): + """Reset the entries in the memory. + + Args: + entries_to_reset: a 1D tensor. + Returns: + the reset op. + """ + num_updates = tf.size(entries_to_reset) + update_vals = tf.scatter_update( + self.mem_vals, entries_to_reset, + tf.tile(tf.expand_dims( + tf.fill([self.memory_size, self.val_depth], .0), 0), + [num_updates, 1, 1])) + update_logits = tf.scatter_update( + self.mean_logits, entries_to_reset, + tf.tile(tf.expand_dims( + tf.fill([self.memory_size], .0), 0), + [num_updates, 1])) + reset_op = tf.group([update_vals, update_logits]) + return reset_op + + def pre_attention(self, segment_number, query_antecedent, + memory_antecedent, bias): + """Called prior to self-attention, to incorporate memory items. + + Args: + segment_number: an integer Tensor with shape [batch] + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: must be None. Attention normally allows this to be a + Tensor with shape [batch, length_m, channels], but we currently only + support memory for decoder-side self-attention. + bias: bias Tensor (see attention_bias()) + Returns: + (data, new_query_antecedent, new_memory_antecedent, new_bias) + """ + with tf.variable_scope(self.name + "/pre_attention", reuse=tf.AUTO_REUSE): + assert memory_antecedent is None, "We only support language modeling" + with tf.control_dependencies([ + tf.assert_greater_equal(self.batch_size, tf.size(segment_number))]): + difference = self.batch_size - tf.size(segment_number) + segment_number = tf.pad(segment_number, [[0, difference]]) + reset_op = self.reset(tf.reshape(tf.where( + tf.less(segment_number, self.segment_number)), [-1])) + memory_results = {} + with tf.control_dependencies([reset_op]): + with tf.control_dependencies([ + self.update_segment_number(segment_number)]): + x = tf.pad(query_antecedent, [ + [0, difference], [0, 0], [0, 0]]) + access_logits, retrieved_mem = self.read(x) + memory_results["x"] = x + memory_results["access_logits"] = access_logits + memory_results["retrieved_mem"] = retrieved_mem + return memory_results, query_antecedent, memory_antecedent, bias + + def post_attention(self, token, x): + """Called after self-attention. The memory can be updated here. 
+ + Args: + token: Data returned by pre_attention, which can be used to carry over + state related to the current memory operation. + x: a Tensor of data after self-attention and feed-forward + Returns: + a (possibly modified) version of the input x + """ + with tf.variable_scope(self.name + "/post_attention", reuse=tf.AUTO_REUSE): + depth = common_layers.shape_list(x)[-1] + actual_batch_size = common_layers.shape_list(x)[0] + memory_output = tf.gather(token["retrieved_mem"], + tf.range(actual_batch_size)) + output = tf.add(tf.layers.dense(x, depth, use_bias=False), + tf.layers.dense(memory_output, depth)) + with tf.control_dependencies([output]): + with tf.control_dependencies([ + self.write(token["x"], token["access_logits"])]): + return tf.identity(output) diff --git a/tensor2tensor/layers/transformer_memory_test.py b/tensor2tensor/layers/transformer_memory_test.py new file mode 100644 index 000000000..10155eb9c --- /dev/null +++ b/tensor2tensor/layers/transformer_memory_test.py @@ -0,0 +1,125 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.layers.transformer_memory.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +from tensor2tensor.layers import transformer_memory +import tensorflow.compat.v1 as tf + + +class TransformerMemoryTest(parameterized.TestCase, tf.test.TestCase): + + def testRead(self): + batch_size = 2 + key_depth = 3 + val_depth = 5 + memory_size = 4 + window_size = 6 + x_depth = 10 + memory = transformer_memory.TransformerMemory( + batch_size, key_depth, val_depth, memory_size) + x = tf.random_uniform([batch_size, window_size, x_depth], minval=1.0) + vals = tf.random_uniform([batch_size, memory_size, val_depth], minval=1.0) + logits = tf.random_uniform([batch_size, memory_size], minval=1.0) + update_op = memory.set(vals, logits) + with tf.control_dependencies([update_op]): + logits, retrieved_values = memory.read(x) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + logits_values, values = session.run([logits, retrieved_values]) + self.assertAllEqual([batch_size, window_size, memory_size], + logits_values.shape) + self.assertAllEqual([batch_size, window_size, val_depth], values.shape) + + def testWrite(self): + batch_size = 2 + key_depth = 3 + val_depth = 5 + memory_size = 4 + window_size = 6 + x_depth = 10 + memory = transformer_memory.TransformerMemory( + batch_size, key_depth, val_depth, memory_size) + x = tf.random_uniform([batch_size, window_size, x_depth], minval=1.0) + vals = tf.random_uniform([batch_size, memory_size, val_depth], minval=1.0) + logits = tf.random_uniform([batch_size, memory_size], minval=1.0) + update_op = memory.set(vals, logits) + with tf.control_dependencies([update_op]): + logits, _ = memory.read(x) + write_op = memory.write(x, logits) + mem_vals, mem_logits = memory.get() + with self.test_session() as 
session: + session.run(tf.global_variables_initializer()) + session.run(write_op) + updated_vals, updated_logits = session.run([mem_vals, mem_logits]) + self.assertAllEqual([batch_size, memory_size, val_depth], + updated_vals.shape) + self.assertAllEqual([batch_size, memory_size], updated_logits.shape) + + def testReset(self): + batch_size = 2 + key_depth = 3 + val_depth = 5 + memory_size = 4 + memory = transformer_memory.TransformerMemory( + batch_size, key_depth, val_depth, memory_size) + vals = tf.random_uniform([batch_size, memory_size, val_depth], minval=1.0) + logits = tf.random_uniform([batch_size, memory_size], minval=1.0) + update_op = memory.set(vals, logits) + reset_op = memory.reset([1]) + mem_vals, mem_logits = memory.get() + assert_op1 = tf.assert_equal(mem_vals[0], vals[0]) + assert_op2 = tf.assert_equal(mem_logits[0], logits[0]) + with tf.control_dependencies([assert_op1, assert_op2]): + all_zero1 = tf.reduce_sum(tf.abs(mem_vals[1])) + all_zero2 = tf.reduce_sum(tf.abs(mem_logits[1])) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + session.run(update_op) + session.run(reset_op) + zero1, zero2 = session.run([all_zero1, all_zero2]) + self.assertAllEqual(0, zero1) + self.assertAllEqual(0, zero2) + + def testLoss(self): + batch_size = 2 + key_depth = 5 + val_depth = 5 + memory_size = 4 + window_size = 3 + x_depth = 5 + memory = transformer_memory.TransformerMemory( + batch_size, key_depth, val_depth, memory_size) + x = tf.random_uniform([batch_size, window_size, x_depth], minval=.0) + memory_results, _, _, _ = ( + memory.pre_attention( + tf.random_uniform([batch_size], minval=0, maxval=1, dtype=tf.int32), + x, None, None)) + x = memory.post_attention(memory_results, x) + with tf.control_dependencies([tf.print("x", x)]): + is_nan = tf.reduce_any(tf.math.is_nan(x)) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + for _ in range(100): + is_nan_value, _ = session.run([is_nan, x]) + self.assertEqual(is_nan_value, False) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/layers/vq_discrete.py b/tensor2tensor/layers/vq_discrete.py new file mode 100644 index 000000000..31c9b1215 --- /dev/null +++ b/tensor2tensor/layers/vq_discrete.py @@ -0,0 +1,310 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
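# --- Illustrative sketch, not part of the diff above ---------------------
# The content-based read that testRead above exercises: queries are compared
# with memory keys by a (sharpened) cosine similarity, a softmax over memory
# slots turns the logits into read weights, and the retrieved value is the
# weighted sum of memory values. In the real module the keys and queries are
# dense projections of mem_vals and x; here they are plain random tensors.
import numpy as np

rng = np.random.default_rng(0)
batch, window, memory_size, key_depth, val_depth = 2, 6, 4, 3, 5
mem_keys = rng.normal(size=(batch, memory_size, key_depth))
mem_vals = rng.normal(size=(batch, memory_size, val_depth))
queries = rng.normal(size=(batch, window, key_depth))

def norm(x):
  return np.sqrt((x ** 2).sum(-1, keepdims=True) + 1e-7)

cos = (queries @ mem_keys.transpose(0, 2, 1)) / (
    norm(queries) @ norm(mem_keys).transpose(0, 2, 1) + 1e-7)
access_logits = 1.0 * cos                       # sharpen_factor == 1.0
weights = np.exp(access_logits)
weights /= weights.sum(-1, keepdims=True)       # softmax over memory slots
retrieved = weights @ mem_vals                  # [batch, window, val_depth]

assert access_logits.shape == (batch, window, memory_size)
assert retrieved.shape == (batch, window, val_depth)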
+ +"""Clean discrete bottleneck as in https://arxiv.org/abs/1805.11063.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from functools import partial +from tensor2tensor.layers import common_layers +import tensorflow.compat.v1 as tf +from tensorflow.python.training import moving_averages + + +class DiscreteBottleneck(object): + """Discrete bottleneck class.""" + + def __init__(self, hparams): + self.hparams = hparams + print ("self.hparams.z_size", self.hparams.z_size) + # Set the discretization bottleneck specific things here + self.hparams.z_size_per_residual = self.hparams.z_size // \ + self.hparams.num_residuals + print ("self.hparams.num_residuals", self.hparams.num_residuals) + self.hparams.block_dim = int( + self.hparams.hidden_size // self.hparams.num_blocks) + self.hparams.block_v_size = 2**( + self.hparams.z_size_per_residual / self.hparams.num_blocks) + self.hparams.block_v_size = int(self.hparams.block_v_size) + self.means = tf.get_variable( + name="means", + shape=[ + self.hparams.num_blocks, self.hparams.block_v_size, + self.hparams.block_dim + ], + initializer=tf.initializers.variance_scaling(distribution="uniform")) + + # Create the shadow variables if we are using EMA + if self.hparams.ema: + self.ema_count = tf.get_variable( + "ema_count", [self.hparams.num_blocks, self.hparams.block_v_size], + initializer=tf.constant_initializer(0), + trainable=False) + with tf.colocate_with(self.means): + self.ema_means = tf.get_variable( + "ema_means", + initializer=self.means.initialized_value(), + trainable=False) + + def slice_hidden(self, x): + """Slice encoder hidden state into block_dim. + + Args: + x: Encoder hidden state of shape [-1, hidden_size]. + + Returns: + Sliced states of shape [-1, num_blocks, block_dim]. + """ + x_sliced = tf.reshape( + x, shape=[-1, self.hparams.num_blocks, self.hparams.block_dim]) + return x_sliced + + def nearest_neighbor(self, x, means): + """Find the nearest element in means to elements in x. + + Args: + x: Batch of encoder continuous latent states sliced/projected into + shape [-1, num_blocks, block_dim]. + means: Embedding means of shape. + + Returns: + Tensor with nearest element in mean encoded in one-hot notation. 
+ """ + x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keep_dims=True) + means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keep_dims=True) + scalar_prod = tf.matmul( + tf.transpose(x, perm=[1, 0, 2]), tf.transpose(means, perm=[0, 2, 1])) + scalar_prod = tf.transpose(scalar_prod, perm=[1, 0, 2]) + dist = x_norm_sq + tf.transpose( + means_norm_sq, perm=[2, 0, 1]) - 2 * scalar_prod + + if self.hparams.soft_em: + nearest_idx = tf.stack( + [ + tf.multinomial( + -dist[:, i, :], num_samples=self.hparams.num_samples) + for i in range(self.hparams.num_blocks) + ], + axis=1) + nearest_hot = tf.one_hot(nearest_idx, depth=self.hparams.block_v_size) + nearest_hot = tf.reduce_mean(nearest_hot, axis=-2) + else: + if self.hparams.random_top_k > 1: + _, top_k_idx = tf.nn.top_k(-dist, k=self.hparams.random_top_k) + nearest_idx = tf.gather( + top_k_idx, + tf.random_uniform( + [1], + minval=0, + maxval=self.hparams.random_top_k - 1, + dtype=tf.int32), + axis=-1) + else: + if self.hparams.use_scales: + dist /= tf.reshape(self.hparams.scales, + [1, 1, self.hparams.moe_num_experts]) + nearest_idx = tf.argmax(-dist, axis=-1) + nearest_hot = tf.one_hot(nearest_idx, self.hparams.block_v_size) + return nearest_hot + + def embedding_lookup(self, x, means): + """Compute nearest neighbors and loss for training the embeddings. + + Args: + x: Batch of encoder continuous latent states sliced/projected into + shape + [-1, num_blocks, block_dim]. + means: Embedding means. + + Returns: + The nearest neighbor in one hot form, the nearest neighbor + itself, the + commitment loss, embedding training loss. + """ + x_means_hot = self.nearest_neighbor(x, means) + x_means_hot_flat = tf.reshape( + x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size]) + x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means) + x_means = tf.transpose(x_means, [1, 0, 2]) + q_loss = tf.reduce_mean( + tf.squared_difference(tf.stop_gradient(x), x_means)) + e_loss = tf.reduce_mean( + tf.squared_difference(x, tf.stop_gradient(x_means))) + return x_means_hot, x_means, q_loss, e_loss + + def bit_to_int(self, x_bit, num_bits, base=2): + """Turn x_bit representing numbers bitwise (lower-endian) to int tensor. + + Args: + x_bit: Tensor containing numbers in a particular base to be + converted to + int. + num_bits: Number of bits in the representation. + base: Base of the representation. + + Returns: + Integer representation of this number. + """ + x_l = tf.stop_gradient(tf.to_int32(tf.reshape(x_bit, [-1, num_bits]))) + # pylint: disable=g-complex-comprehension + x_labels = [ + x_l[:, i] * tf.to_int32(base)**tf.to_int32(i) for i in range(num_bits)] + res = sum(x_labels) + return tf.to_int32(tf.reshape(res, common_layers.shape_list(x_bit)[:-1])) + + def int_to_bit(self, x_int, num_bits, base=2): + """Turn x_int representing numbers into a bitwise (lower-endian) tensor. + + Args: + x_int: Tensor containing integer to be converted into base + notation. + num_bits: Number of bits in the representation. + base: Base of the representation. + + Returns: + Corresponding number expressed in base. + """ + x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1)) + # pylint: disable=g-complex-comprehension + x_labels = [ + tf.floormod( + tf.floordiv(tf.to_int32(x_l), + tf.to_int32(base)**i), tf.to_int32(base)) + for i in range(num_bits)] + res = tf.concat(x_labels, axis=-1) + return tf.to_float(res) + + def embed(self, x): + """Embedding function that takes discrete latent and returns embedding. 
+ + Args: + x: Input to the discretization bottleneck. + Returns: + Continuous embedding to be passed on to the decoder. + + Raises: + ValueError: For unknown or missing arguments. + """ + shape_x = common_layers.shape_list(x) + x_flat = tf.reshape(x, [-1, 1]) + c = self.int_to_bit(x_flat, num_bits=self.hparams.z_size, base=2) + shape = common_layers.shape_list(c) + new_shape = shape + new_shape.append(self.hparams.num_blocks) + new_shape.append(int(self.hparams.z_size / self.hparams.num_blocks)) + c = tf.to_int32(tf.reshape(c, shape=new_shape)) + h1_shape = shape_x + h1_shape.append(self.hparams.hidden_size) + h1 = tf.zeros(dtype=tf.float32, shape=h1_shape) + c_int = self.bit_to_int( + c, num_bits=int(self.hparams.z_size / self.hparams.num_blocks), base=2) + c_hot = tf.one_hot(c_int, depth=self.hparams.block_v_size, axis=-1) + c_hot_flat = tf.reshape( + c_hot, shape=[-1, self.hparams.num_blocks, self.hparams.block_v_size]) + h1 = tf.matmul(tf.transpose(c_hot_flat, perm=[1, 0, 2]), self.means) + h1 = tf.transpose(h1, perm=[1, 0, 2]) + h1 = tf.reshape(h1, shape=h1_shape) + h1_shape[0] = self.hparams.batch_size + h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2") + res = tf.layers.dense( + tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin") + return res + + def discrete_bottleneck(self, x): + """Discretization bottleneck for latent variables. + + Args: + x: Input to the discretization bottleneck. + + Returns: + Embedding to pass to the decoder, discrete latent, loss, and the + embedding + function. + + Raises: + ValueError: If projection_tensors is None for reshape_method + project, or + ema_count or ema_means is None if we are using ema, or unknown + args. + """ + x_reshaped = self.slice_hidden(x) + x_means_hot = [] + x_means = 0 + loss = 0 + x_means_hot, x_means, q_loss, e_loss = self.embedding_lookup( + x_reshaped, self.means) + + if self.hparams.ema: + tf.logging.info("Using EMA with beta = {}".format(self.hparams.beta)) + updated_ema_count = \ + moving_averages.assign_moving_average( + self.ema_count, + tf.reduce_sum( + tf.reshape( + x_means_hot, + shape=[-1, self.hparams.num_blocks, + self.hparams.block_v_size]), + axis=0), + self.hparams.decay, + zero_debias=False) + + dw = tf.matmul( + tf.transpose(x_means_hot, perm=[1, 2, 0]), + tf.transpose(x_reshaped, perm=[1, 0, 2])) + + updated_ema_means = \ + moving_averages.assign_moving_average( + self.ema_means, dw, self.hparams.decay, + zero_debias=False) + n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True) + updated_ema_count = ((updated_ema_count + self.hparams.epsilon) / ( + n + 2**self.hparams.z_size * self.hparams.epsilon) * n) + updated_ema_means = updated_ema_means / tf.expand_dims( + updated_ema_count, axis=-1) + + with tf.control_dependencies([e_loss]): + update_means = tf.assign(self.means, updated_ema_means) + with tf.control_dependencies([update_means]): + loss += self.hparams.beta * e_loss + else: + # Use a gradient based loss for learning the cluster centers + loss += q_loss + self.hparams.beta * e_loss + + # Get the discrete latent representation + x_means_idx = tf.argmax(x_means_hot, axis=-1) + + # Get the binary representation + num_bits = int(self.hparams.z_size // self.hparams.num_blocks) + x_means_bits = self.int_to_bit(x_means_idx, num_bits=num_bits, base=2) + x_discrete = self.bit_to_int( + tf.to_int32(x_means_bits), num_bits=self.hparams.z_size, base=2) + + # Reshape x_discrete + shape_x = common_layers.shape_list(x) + shape_discrete = shape_x[:-1] + x_discrete = 
tf.reshape(x_discrete, shape_discrete) + x_means = tf.reshape(x_means, shape=shape_x) + h1 = x + tf.stop_gradient(x_means - x) + + h2 = tf.layers.dense(tf.nn.relu(h1), self.hparams.filter_size, name="vch2") + res = tf.layers.dense( + tf.nn.relu(h2), self.hparams.hidden_size, name="vcfin") + embed_fn = partial(self.embed) + return { + "dense": res, + "discrete": x_discrete, + "loss": loss, + "embed": embed_fn + } diff --git a/tensor2tensor/layers/vqa_layers.py b/tensor2tensor/layers/vqa_layers.py new file mode 100644 index 000000000..7949eb662 --- /dev/null +++ b/tensor2tensor/layers/vqa_layers.py @@ -0,0 +1,348 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Some customization of common_attention.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import contrib + +import tensorflow.compat.v1 as tf + +from tensorflow.contrib import slim +from tensorflow.contrib.slim.python.slim.nets.resnet_v1 import resnet_v1_152 +from tensorflow.contrib.slim.python.slim.nets.resnet_v2 import resnet_v2_152 # pylint: disable=unused-import +from tensorflow.python.ops import inplace_ops + + +def summarize_tensors(tensor_dict, tag=None): + """Summarize the tensors. + + Args: + tensor_dict: a dictionary of tensors. + tag: name scope of the summary; defaults to tensors/. 
+ """ + if tag is None: + tag = "tensors/" + + for t_name in list(tensor_dict): + t = tensor_dict[t_name] + tf.summary.histogram(tag + t_name, t) + + +def image_embedding(images, + model_fn=resnet_v1_152, + trainable=True, + is_training=True, + weight_decay=0.0001, + batch_norm_decay=0.997, + batch_norm_epsilon=1e-5, + batch_norm_scale=True, + add_summaries=False, + reuse=False): + """Extract image features from pretrained resnet model.""" + + is_resnet_training = trainable and is_training + + batch_norm_params = { + "is_training": is_resnet_training, + "trainable": trainable, + "decay": batch_norm_decay, + "epsilon": batch_norm_epsilon, + "scale": batch_norm_scale, + } + + if trainable: + weights_regularizer = contrib.layers().l2_regularizer(weight_decay) + else: + weights_regularizer = None + + with tf.variable_scope(model_fn.__name__, [images], reuse=reuse) as scope: + with slim.arg_scope( + [slim.conv2d], + weights_regularizer=weights_regularizer, + trainable=trainable): + with slim.arg_scope( + [slim.conv2d], + weights_initializer=slim.variance_scaling_initializer(), + activation_fn=tf.nn.relu, + normalizer_fn=slim.batch_norm, + normalizer_params=batch_norm_params): + with slim.arg_scope([slim.batch_norm], + is_training=is_resnet_training, + trainable=trainable): + with slim.arg_scope([slim.max_pool2d], padding="SAME"): + net, end_points = model_fn( + images, num_classes=None, global_pool=False, + is_training=is_resnet_training, + reuse=reuse, scope=scope) + + if add_summaries: + for v in end_points.values(): + contrib.layers().summaries.summarize_activation(v) + + return net + + +def multihead_attention(query_antecedent, + memory_antecedent, + bias, + total_key_depth, + total_value_depth, + output_depth, + num_heads, + dropout_rate, + shared_rel=False, + max_relative_position=None, + image_shapes=None, + attention_type="dot_product", + block_length=128, + block_width=128, + q_filter_width=1, + kv_filter_width=1, + q_padding="VALID", + kv_padding="VALID", + cache=None, + gap_size=0, + num_memory_blocks=2, + name="multihead_attention", + save_weights_to=None, + make_image_summary=True, + dropout_broadcast_dims=None, + max_length=None, + vars_3d=False, + scale_dotproduct=True, + **kwargs): + """Multihead scaled-dot-product attention with input/output transformations. + + Args: + query_antecedent: a Tensor with shape [batch, length_q, channels] + memory_antecedent: a Tensor with shape [batch, length_m, channels] or None + bias: bias Tensor (see attention_bias()) + total_key_depth: an integer + total_value_depth: an integer + output_depth: an integer + num_heads: an integer dividing total_key_depth and total_value_depth + dropout_rate: a floating point number + shared_rel: boolean to share relative embeddings + max_relative_position: Maximum distance between inputs to generate + unique relation embeddings for. Only relevant + when using "dot_product_relative" attention. + image_shapes: optional tuple of integer scalars. + see comments for attention_image_summary() + attention_type: a string, either "dot_product", "dot_product_relative", + "local_mask_right", "local_unmasked", "masked_dilated_1d", + "unmasked_dilated_1d", graph, or any attention function + with the signature (query, key, value, **kwargs) + block_length: an integer - relevant for "local_mask_right" + block_width: an integer - relevant for "local_unmasked" + q_filter_width: An integer specifying how wide you want the query to be. + kv_filter_width: An integer specifying how wide you want the keys and values + to be. 
+    q_padding: One of "VALID", "SAME" or "LEFT". Default is VALID: No padding.
+    kv_padding: One of "VALID", "SAME" or "LEFT". Default is "VALID":
+      no padding.
+    cache: dict containing Tensors which are the results of previous
+        attentions, used for fast decoding. Expects the dict to contain two
+        keys ('k' and 'v'), for the initial call the values for these keys
+        should be empty Tensors of the appropriate shape.
+            'k' [batch_size, 0, key_channels]
+            'v' [batch_size, 0, value_channels]
+    gap_size: Integer option for dilated attention to indicate spacing between
+        memory blocks.
+    num_memory_blocks: Integer option to indicate how many memory blocks to look
+        at.
+    name: an optional string.
+    save_weights_to: an optional dictionary to capture attention weights
+      for visualization; the weights tensor will be appended there under
+      a string key created from the variable scope (including name).
+    make_image_summary: Whether to make an attention image summary.
+    dropout_broadcast_dims: an optional list of integers less than 4
+      specifying in which dimensions to broadcast the dropout decisions.
+      Saves memory.
+    max_length: an integer - needed by relative attention
+    vars_3d: use 3-dimensional variables for input/output transformations
+    scale_dotproduct: whether to normalize the attention product.
+    **kwargs (dict): Parameters for the attention function
+
+  Caching:
+    WARNING: For decoder self-attention, i.e. when memory_antecedent == None,
+    the caching assumes that the bias contains future masking.
+
+    The caching works by saving all the previous key and value values so that
+    you are able to send just the last query location to this attention
+    function. I.e. if the cache dict is provided it assumes the query is of the
+    shape [batch_size, 1, hidden_dim] rather than the full memory.
+
+  Returns:
+    The result of the attention transformation. The output shape is
+        [batch_size, length_q, hidden_dim]
+    unless the cache dict is provided in which case only the last memory
+    position is calculated and the output shape is [batch_size, 1, hidden_dim]
+    Optionally returns additional loss parameters (ex: load balance loss for
+    the experts) returned by the attention_type function.
+
+  Raises:
+    ValueError: if the key depth or value depth are not divisible by the
+      number of attention heads.
+  """
+  if total_key_depth % num_heads != 0:
+    raise ValueError("Key depth (%d) must be divisible by the number of "
+                     "attention heads (%d)." % (total_key_depth, num_heads))
+  if total_value_depth % num_heads != 0:
+    raise ValueError("Value depth (%d) must be divisible by the number of "
+                     "attention heads (%d)." % (total_value_depth, num_heads))
+  vars_3d_num_heads = num_heads if vars_3d else 0
+  with tf.variable_scope(name, default_name="multihead_attention",
+                         values=[query_antecedent, memory_antecedent]):
+
+    if cache is None or memory_antecedent is None:
+      q, k, v = common_attention.compute_qkv(
+          query_antecedent, memory_antecedent,
+          total_key_depth, total_value_depth, q_filter_width,
+          kv_filter_width, q_padding, kv_padding,
+          vars_3d_num_heads=vars_3d_num_heads)
+    if cache is not None:
+      if attention_type != "dot_product":
+        # TODO(petershaw): Support caching when using relative position
+        # representations, i.e. "dot_product_relative" attention.
+        raise NotImplementedError(
+            "Caching is not guaranteed to work with attention types other than"
+            " dot_product.")
+      if bias is None:
+        raise ValueError("Bias required for caching. 
See function docstring " + "for details.") + + if memory_antecedent is not None: + # Encoder-Decoder Attention Cache + q = common_attention.compute_attention_component( + query_antecedent, total_key_depth, + q_filter_width, q_padding, "q", + vars_3d_num_heads=vars_3d_num_heads) + k = cache["k_encdec"] + v = cache["v_encdec"] + else: + k = common_attention.split_heads(k, num_heads) + v = common_attention.split_heads(v, num_heads) + decode_loop_step = kwargs.get("decode_loop_step") + if decode_loop_step is None: + k = cache["k"] = tf.concat([cache["k"], k], axis=2) + v = cache["v"] = tf.concat([cache["v"], v], axis=2) + else: + # Inplace update is required for inference on TPU. + # Inplace_ops only supports inplace_update on the first dimension. + # The performance of current implementation is better than updating + # the tensor by adding the result of matmul(one_hot, + # update_in_current_step) + tmp_k = tf.transpose(cache["k"], perm=[2, 0, 1, 3]) + tmp_k = inplace_ops.alias_inplace_update( + tmp_k, decode_loop_step, tf.squeeze(k, axis=2)) + k = cache["k"] = tf.transpose(tmp_k, perm=[1, 2, 0, 3]) + tmp_v = tf.transpose(cache["v"], perm=[2, 0, 1, 3]) + tmp_v = inplace_ops.alias_inplace_update( + tmp_v, decode_loop_step, tf.squeeze(v, axis=2)) + v = cache["v"] = tf.transpose(tmp_v, perm=[1, 2, 0, 3]) + + q = common_attention.split_heads(q, num_heads) + if cache is None: + k = common_attention.split_heads(k, num_heads) + v = common_attention.split_heads(v, num_heads) + + key_depth_per_head = total_key_depth // num_heads + if not vars_3d: + if scale_dotproduct: + q *= key_depth_per_head**-0.5 + + additional_returned_value = None + if callable(attention_type): # Generic way to extend multihead_attention + x = attention_type(q, k, v, **kwargs) + if isinstance(x, tuple): + x, additional_returned_value = x # Unpack + elif attention_type == "dot_product": + x = common_attention.dot_product_attention( + q, k, v, bias, dropout_rate, image_shapes, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=dropout_broadcast_dims) + elif attention_type == "dot_product_relative": + x = common_attention.dot_product_attention_relative( + q, + k, + v, + bias, + max_relative_position, + dropout_rate, + image_shapes, + make_image_summary=make_image_summary) + elif attention_type == "dot_product_relative_v2": + x = common_attention.dot_product_self_attention_relative_v2( + q, + k, + v, + bias, + max_length, + dropout_rate, + image_shapes, + make_image_summary=make_image_summary, + dropout_broadcast_dims=dropout_broadcast_dims) + elif attention_type == "local_within_block_mask_right": + x = common_attention.masked_within_block_local_attention_1d( + q, k, v, block_length=block_length) + elif attention_type == "rel_local_mask_right": + x = common_attention.masked_rel_local_attention_1d( + q, k, v, block_length=block_length, + make_image_summary=make_image_summary, + dropout_rate=dropout_rate, + share_rel_embed=shared_rel) + elif attention_type == "local_mask_right": + x = common_attention.masked_local_attention_1d( + q, + k, + v, + block_length=block_length, + make_image_summary=make_image_summary) + elif attention_type == "local_unmasked": + x = common_attention.local_attention_1d( + q, k, v, block_length=block_length, filter_width=block_width) + elif attention_type == "masked_dilated_1d": + x = common_attention.masked_dilated_self_attention_1d( + q, k, v, block_length, block_width, + gap_size, num_memory_blocks) + else: + assert attention_type == "unmasked_dilated_1d" + 
x = common_attention.dilated_self_attention_1d( + q, k, v, block_length, block_width, + gap_size, num_memory_blocks) + x = common_attention.combine_heads(x) + + # Set last dim specifically. + x.set_shape(x.shape.as_list()[:-1] + [total_value_depth]) + + if vars_3d: + o_var = tf.get_variable( + "o", [num_heads, total_value_depth // num_heads, output_depth]) + o_var = tf.cast(o_var, x.dtype) + o_var = tf.reshape(o_var, [total_value_depth, output_depth]) + x = tf.tensordot(x, o_var, axes=1) + else: + x = common_layers.dense( + x, output_depth, use_bias=False, name="output_transform") + if additional_returned_value is not None: + return x, additional_returned_value + return x diff --git a/tensor2tensor/metrics/__init__.py b/tensor2tensor/metrics/__init__.py new file mode 100644 index 000000000..ff174dd63 --- /dev/null +++ b/tensor2tensor/metrics/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tensor2tensor/metrics/video_conditional_fvd.py b/tensor2tensor/metrics/video_conditional_fvd.py new file mode 100644 index 000000000..f6a089651 --- /dev/null +++ b/tensor2tensor/metrics/video_conditional_fvd.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Conditional FVD metric on video. + +FVD - Frechet Video Distance + +This is the metric that is inspired by FID, but applied to +video rather than to images. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + + +class VideoEvaluationDataset( + collections.namedtuple( + 'VideoEvaluationDataset', + ['n_input_frames', 'n_output_frames', 'get_video_batch_fn'])): + """Dataset for video evaluation. + + This tuple describes the video problem for Evaluation. + Args: + n_input_frames: number of frames passed to the model to condition on. + n_output_frames: number of frames that model should return. + get_video_batch_fn: function that accepts a batch size and returns a tensor + with real video, which should match [batch_size, N, height, width, + depth], where N is n_input_frames + n_output_frames. + """ + pass + + +class Model( + collections.namedtuple('Model', [ + 'apply_fn', 'load_fn', + ])): + """Model that should be evaluated. 
+ + Args: + apply_fn: will be called with a single tensor (floats between 0 and 255 + of shape [batch_size, n_input_frames, height, width, depth]), + that will contain input frames. + it should return a single tensor with output frames (floats + between 0 and 255, of shape + [batch_size, n_output_frames, height, width, depth]) + load_fn: Callable, that receives session as an argument. + Should load the variables from the checkpoint. + """ + pass + + +def evaluate_model(video_eval_dataset, model, num_batches, batch_size): + """Computes the FVD video metric. + + Args: + video_eval_dataset: VideoEvaluationDataset tuple with video and frames + information. + model: Model tuple with model to evaluate. + num_batches: number of batches to evaluate. + batch_size: number of videos to compute per batch. + + Returns: + FVD metric (float). + """ + del video_eval_dataset, model, num_batches, batch_size diff --git a/tensor2tensor/metrics/video_conditional_fvd_test.py b/tensor2tensor/metrics/video_conditional_fvd_test.py new file mode 100644 index 000000000..3f2723ec1 --- /dev/null +++ b/tensor2tensor/metrics/video_conditional_fvd_test.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for video_conditional_fvd.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.metrics import video_conditional_fvd +import tensorflow.compat.v1 as tf + + +class VideoConditionalFvdTest(tf.test.TestCase): + + def test_sample(self): + dataset = video_conditional_fvd.VideoEvaluationDataset( + n_input_frames=4, + n_output_frames=10, + get_video_batch_fn=None) + model = video_conditional_fvd.Model( + apply_fn=None, + load_fn=None) + video_conditional_fvd.evaluate_model(dataset, model, 10, 16) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/models/README.md b/tensor2tensor/models/README.md index 29b88484f..3da3e0f82 100644 --- a/tensor2tensor/models/README.md +++ b/tensor2tensor/models/README.md @@ -4,13 +4,13 @@ This directory contains T2T models, their hyperparameters, and a number of common layers and hyperparameter settings to help construct new models. Common building blocks are in `common_layers.py` and `common_attention.py`. Common hyperparameters are in `common_hparams.py`. Models are imported in -`models.py`. +`__init__.py`. ## Adding a new model. To add a model to the built-in set, create a new file (see, e.g., `neural_gpu.py`) and write your model class inheriting from `T2TModel` there and -decorate it with `registry.register_model`. Import it in `models.py`. +decorate it with `registry.register_model`. Import it in `__init__.py`. -It is now avaialable to use with the trainer binary (`t2t-trainer`) using the +It is now available to use with the trainer binary (`t2t-trainer`) using the `--model=model_name` flag. 
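The registration flow the README describes is easiest to see in a minimal sketch. Everything below is hypothetical (the file, class, and hparams-set names are made up); the essential steps are inheriting from `T2TModel`, decorating the class with `registry.register_model`, registering an hparams set, and importing the module in `__init__.py`.

```python
# my_tiny_net.py -- hypothetical example of the registration flow described
# in the README; the class and hparams-set names are made up.
from tensor2tensor.layers import common_hparams
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model

import tensorflow.compat.v1 as tf


@registry.register_model
class MyTinyNet(t2t_model.T2TModel):
  """One dense + ReLU layer over the bottom features."""

  def body(self, features):
    # features["inputs"] arrives as a 4-D tensor; a dense layer over the
    # last (channel) dimension keeps it 4-D, as T2T expects from body().
    return tf.layers.dense(features["inputs"], self.hparams.hidden_size,
                           activation=tf.nn.relu, name="tiny_body")


@registry.register_hparams
def my_tiny_net_base():
  """Hyperparameters registered under the function name."""
  hparams = common_hparams.basic_params1()
  hparams.hidden_size = 128
  return hparams
```

Once the module is imported in `tensor2tensor/models/__init__.py`, the model should be selectable on `t2t-trainer` with `--model=my_tiny_net --hparams_set=my_tiny_net_base`.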
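Further up, `video_conditional_fvd.evaluate_model` is only an interface stub, but the two namedtuples already fix how callers are expected to plug in data and a model. A sketch of that wiring, with random NumPy arrays as hypothetical stand-ins for a real dataset and a trained model:

```python
# Hypothetical wiring of the conditional-FVD interface; the batch and apply
# functions below return random frames and stand in for real data / a model.
import numpy as np
from tensor2tensor.metrics import video_conditional_fvd


def get_video_batch_fn(batch_size):
  # "Real" videos: [batch, n_input_frames + n_output_frames, H, W, C].
  return np.random.randint(0, 256, size=(batch_size, 4 + 10, 64, 64, 3))


def apply_fn(input_frames):
  # Predicts n_output_frames from the conditioning frames (random here).
  return np.random.randint(0, 256,
                           size=(input_frames.shape[0], 10, 64, 64, 3))


dataset = video_conditional_fvd.VideoEvaluationDataset(
    n_input_frames=4, n_output_frames=10,
    get_video_batch_fn=get_video_batch_fn)
model = video_conditional_fvd.Model(apply_fn=apply_fn,
                                    load_fn=lambda session: None)
# The stub currently discards its arguments and returns None.
video_conditional_fvd.evaluate_model(dataset, model,
                                     num_batches=10, batch_size=16)
```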
diff --git a/tensor2tensor/models/__init__.py b/tensor2tensor/models/__init__.py index 27d533abc..62d059134 100644 --- a/tensor2tensor/models/__init__.py +++ b/tensor2tensor/models/__init__.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,3 +13,87 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""Models defined in T2T. Imports here force registration.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import six + +# pylint: disable=unused-import + +from tensor2tensor.layers import modalities # pylint: disable=g-import-not-at-top +from tensor2tensor.models import basic +from tensor2tensor.models import bytenet +from tensor2tensor.models import distillation +from tensor2tensor.models import evolved_transformer +from tensor2tensor.models import image_transformer +from tensor2tensor.models import image_transformer_2d +from tensor2tensor.models import lstm +from tensor2tensor.models import neural_assistant +from tensor2tensor.models import neural_gpu +from tensor2tensor.models import resnet +from tensor2tensor.models import revnet +from tensor2tensor.models import shake_shake +from tensor2tensor.models import slicenet +from tensor2tensor.models import text_cnn +from tensor2tensor.models import transformer +from tensor2tensor.models import vanilla_gan +from tensor2tensor.models import xception +from tensor2tensor.models.neural_architecture_search import nas_model +from tensor2tensor.models.research import adafactor_experiments +from tensor2tensor.models.research import aligned +from tensor2tensor.models.research import autoencoders +from tensor2tensor.models.research import cycle_gan +from tensor2tensor.models.research import gene_expression +from tensor2tensor.models.research import neural_stack +from tensor2tensor.models.research import residual_shuffle_exchange +from tensor2tensor.models.research import rl +from tensor2tensor.models.research import shuffle_network +from tensor2tensor.models.research import similarity_transformer +from tensor2tensor.models.research import super_lm +from tensor2tensor.models.research import transformer_moe +from tensor2tensor.models.research import transformer_nat +from tensor2tensor.models.research import transformer_parallel +from tensor2tensor.models.research import transformer_revnet +from tensor2tensor.models.research import transformer_seq2edits +from tensor2tensor.models.research import transformer_sketch +from tensor2tensor.models.research import transformer_symshard +from tensor2tensor.models.research import transformer_vae +from tensor2tensor.models.research import universal_transformer +from tensor2tensor.models.video import basic_deterministic +from tensor2tensor.models.video import basic_recurrent +from tensor2tensor.models.video import basic_stochastic +from tensor2tensor.models.video import emily +from tensor2tensor.models.video import savp +from tensor2tensor.models.video import sv2p +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry + +# The following models can't be imported under TF2 +if not contrib.is_tf2: + # pylint: disable=g-import-not-at-top + from tensor2tensor.models.research import attention_lm + from tensor2tensor.models.research import attention_lm_moe + from 
tensor2tensor.models.research import glow + from tensor2tensor.models.research import lm_experiments + from tensor2tensor.models.research import moe_experiments + from tensor2tensor.models.research import multiquery_paper + from tensor2tensor.models import mtf_image_transformer + from tensor2tensor.models import mtf_resnet + from tensor2tensor.models import mtf_transformer + from tensor2tensor.models import mtf_transformer2 + from tensor2tensor.models.research import vqa_attention + from tensor2tensor.models.research import vqa_recurrent_self_attention + from tensor2tensor.models.research import vqa_self_attention + from tensor2tensor.models.video import epva + from tensor2tensor.models.video import next_frame_glow + # pylint: enable=g-import-not-at-top + +# pylint: disable=unused-import + +# pylint: enable=unused-import + + +def model(name): + return registry.model(name) diff --git a/tensor2tensor/models/attention_lm.py b/tensor2tensor/models/attention_lm.py deleted file mode 100644 index 581cd767f..000000000 --- a/tensor2tensor/models/attention_lm.py +++ /dev/null @@ -1,169 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Self-attention based language model. - -Like transformer.py, but no encoder - -decoder: [Self-Attention, Feed-forward] x n - -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import copy - -# Dependency imports - -from six.moves import xrange # pylint: disable=redefined-builtin - -from tensor2tensor.models import common_attention -from tensor2tensor.models import common_hparams -from tensor2tensor.models import common_layers -from tensor2tensor.utils import registry -from tensor2tensor.utils import t2t_model - -import tensorflow as tf - - -@registry.register_model -class AttentionLM(t2t_model.T2TModel): - """Attention net. See file docstring.""" - - def model_fn_body(self, features, train): - # Remove dropout if not training - hparams = copy.copy(self._hparams) - if not train: - hparams.attention_dropout = 0. - hparams.relu_dropout = 0. - hparams.residual_dropout = 0. - targets = features["targets"] - targets = tf.squeeze(targets, 2) - - (decoder_input, decoder_self_attention_bias) = attention_lm_prepare_decoder( - targets, hparams) - - def residual_fn(x, y): - return common_layers.layer_norm(x + tf.nn.dropout( - y, 1.0 - hparams.residual_dropout)) - - decoder_input = tf.nn.dropout(decoder_input, 1.0 - hparams.residual_dropout) - decoder_output = attention_lm_decoder( - decoder_input, residual_fn, decoder_self_attention_bias, hparams) - decoder_output = tf.expand_dims(decoder_output, 2) - - return decoder_output - - -def attention_lm_prepare_decoder(targets, hparams): - """Prepare one shard of the model for the decoder. - - Args: - targets: a Tensor. 
- hparams: run hyperparameters - - Returns: - decoder_input: a Tensor, bottom of decoder stack - decoder_self_attention_bias: a Tensor, containing large negative values - to implement masked attention and possibly baises for diagonal alignments - """ - decoder_self_attention_bias = ( - common_attention.attention_bias_lower_triangle(tf.shape(targets)[1])) - decoder_input = common_layers.shift_left_3d(targets) - if hparams.pos == "timing": - decoder_input = common_attention.add_timing_signal_1d(decoder_input) - return (decoder_input, decoder_self_attention_bias) - - -def attention_lm_decoder(decoder_input, - residual_fn, - decoder_self_attention_bias, - hparams, - name="decoder"): - """A stack of attention_lm layers. - - Args: - decoder_input: a Tensor - residual_fn: a function from (layer_input, layer_output) -> combined_output - decoder_self_attention_bias: bias Tensor for self-attention - (see common_attention.attention_bias()) - hparams: hyperparameters for model - name: a string - - Returns: - y: a Tensors - """ - x = decoder_input - # Summaries don't work in multi-problem setting yet. - summaries = "problems" not in hparams.values() or len(hparams.problems) == 1 - with tf.variable_scope(name): - for layer in xrange(hparams.num_hidden_layers): - with tf.variable_scope("layer_%d" % layer): - x = residual_fn( - x, - common_attention.multihead_attention( - x, - None, - decoder_self_attention_bias, - hparams.attention_key_channels or hparams.hidden_size, - hparams.attention_value_channels or hparams.hidden_size, - hparams.hidden_size, - hparams.num_heads, - hparams.attention_dropout, - summaries=summaries, - name="decoder_self_attention")) - x = residual_fn(x, - common_layers.conv_hidden_relu( - x, - hparams.filter_size, - hparams.hidden_size, - dropout=hparams.relu_dropout)) - return x - - -@registry.register_hparams -def attention_lm_base(): - """Set of hyperparameters.""" - hparams = common_hparams.basic_params1() - hparams.hidden_size = 1024 - hparams.batch_size = 8192 - hparams.max_length = 256 - hparams.dropout = 0.0 - hparams.clip_grad_norm = 0. # i.e. no gradient clipping - hparams.optimizer_adam_epsilon = 1e-9 - hparams.learning_rate_decay_scheme = "noam" - hparams.learning_rate = 1.0 - hparams.learning_rate_warmup_steps = 1000 - hparams.initializer_gain = 1.0 - hparams.num_hidden_layers = 6 - hparams.initializer = "uniform_unit_scaling" - hparams.weight_decay = 0.0 - hparams.optimizer_adam_beta1 = 0.9 - hparams.optimizer_adam_beta2 = 0.98 - hparams.num_sampled_classes = 0 - hparams.label_smoothing = 0.1 - hparams.shared_embedding_and_softmax_weights = int(False) - - hparams.add_hparam("filter_size", 4096) # Add new ones like this. - # attention-related flags - hparams.add_hparam("num_heads", 8) - hparams.add_hparam("attention_key_channels", 0) - hparams.add_hparam("attention_value_channels", 0) - hparams.add_hparam("attention_dropout", 0.0) - hparams.add_hparam("relu_dropout", 0.0) - hparams.add_hparam("pos", "timing") # timing, none - hparams.add_hparam("residual_dropout", 0.1) - return hparams diff --git a/tensor2tensor/models/baseline.py b/tensor2tensor/models/baseline.py deleted file mode 100644 index 78f79eed0..000000000 --- a/tensor2tensor/models/baseline.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Baseline models.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -from tensor2tensor.models import common_layers -from tensor2tensor.utils import registry -from tensor2tensor.utils import t2t_model - -import tensorflow as tf - - -def lstm(inputs, hparams, train, name, initial_state=None): - """Run LSTM cell on inputs, assuming they are [batch x time x size].""" - - def dropout_lstm_cell(): - return tf.contrib.rnn.DropoutWrapper( - tf.contrib.rnn.BasicLSTMCell(hparams.hidden_size), - input_keep_prob=1.0 - hparams.dropout * tf.to_float(train)) - - layers = [dropout_lstm_cell() for _ in range(hparams.num_hidden_layers)] - with tf.variable_scope(name): - return tf.nn.dynamic_rnn( - tf.contrib.rnn.MultiRNNCell(layers), - inputs, - initial_state=initial_state, - dtype=tf.float32, - time_major=False) - - -def lstm_seq2seq_internal(inputs, targets, hparams, train): - """The basic LSTM seq2seq model, main step used for training.""" - with tf.variable_scope("lstm_seq2seq"): - # Flatten inputs. - inputs = common_layers.flatten4d3d(inputs) - # LSTM encoder. - _, final_encoder_state = lstm( - tf.reverse(inputs, axis=[1]), hparams, train, "encoder") - # LSTM decoder. - shifted_targets = common_layers.shift_left(targets) - decoder_outputs, _ = lstm( - common_layers.flatten4d3d(shifted_targets), - hparams, - train, - "decoder", - initial_state=final_encoder_state) - return tf.expand_dims(decoder_outputs, axis=2) - - -@registry.register_model("baseline_lstm_seq2seq") -class LSTMSeq2Seq(t2t_model.T2TModel): - - def model_fn_body(self, features, train): - return lstm_seq2seq_internal(features["inputs"], features["targets"], - self._hparams, train) diff --git a/tensor2tensor/models/baseline_test.py b/tensor2tensor/models/baseline_test.py deleted file mode 100644 index 25e191d6f..000000000 --- a/tensor2tensor/models/baseline_test.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Baseline models tests.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -import numpy as np - -from tensor2tensor.data_generators import problem_hparams -from tensor2tensor.models import baseline -from tensor2tensor.models import common_hparams - -import tensorflow as tf - - -class BaselineTest(tf.test.TestCase): - - def testLSTMSeq2Seq(self): - vocab_size = 9 - x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1)) - y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 6, 1, 1)) - hparams = common_hparams.basic_params1() - p_hparams = problem_hparams.test_problem_hparams(hparams, vocab_size, - vocab_size) - with self.test_session() as session: - features = { - "inputs": tf.constant(x, dtype=tf.int32), - "targets": tf.constant(y, dtype=tf.int32), - } - model = baseline.LSTMSeq2Seq(hparams, p_hparams) - sharded_logits, _, _ = model.model_fn(features, True) - logits = tf.concat(sharded_logits, 0) - session.run(tf.global_variables_initializer()) - res = session.run(logits) - self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size)) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensor2tensor/models/basic.py b/tensor2tensor/models/basic.py new file mode 100644 index 000000000..4a3209022 --- /dev/null +++ b/tensor2tensor/models/basic.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Basic models for testing simple tasks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf + + +@registry.register_model +class BasicFcRelu(t2t_model.T2TModel): + """Basic fully-connected + ReLU model.""" + + def body(self, features): + hparams = self.hparams + x = features["inputs"] + shape = common_layers.shape_list(x) + x = tf.reshape(x, [-1, shape[1] * shape[2] * shape[3]]) + for i in range(hparams.num_hidden_layers): + x = tf.layers.dense(x, hparams.hidden_size, name="layer_%d" % i) + x = tf.nn.dropout(x, keep_prob=1.0 - hparams.dropout) + x = tf.nn.relu(x) + return tf.expand_dims(tf.expand_dims(x, axis=1), axis=1) # 4D For T2T. 
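For orientation, here is a shape-only sketch of what `body` does to an MNIST-sized batch, with NumPy matrices standing in for the dense layers and dropout omitted; the sizes follow `basic_fc_small` below but are otherwise arbitrary.

```python
# NumPy stand-in tracing the shapes through BasicFcRelu.body
# (hidden_size=256, num_hidden_layers=2 as in basic_fc_small; dropout omitted).
import numpy as np

batch, height, width, channels = 8, 28, 28, 1
hidden_size, num_hidden_layers = 256, 2

x = np.random.rand(batch, height, width, channels)
x = x.reshape(batch, height * width * channels)   # flatten: (8, 784)
for _ in range(num_hidden_layers):
  w = np.random.randn(x.shape[-1], hidden_size) * 0.01
  x = np.maximum(x @ w, 0.0)                       # dense + ReLU -> (8, 256)
x = x[:, None, None, :]                            # back to 4-D: (8, 1, 1, 256)
print(x.shape)
```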
+ + +@registry.register_hparams +def basic_fc_small(): + """Small fully connected model.""" + hparams = common_hparams.basic_params1() + hparams.learning_rate = 0.1 + hparams.batch_size = 128 + hparams.hidden_size = 256 + hparams.num_hidden_layers = 2 + hparams.initializer = "uniform_unit_scaling" + hparams.initializer_gain = 1.0 + hparams.weight_decay = 0.0 + hparams.dropout = 0.0 + return hparams diff --git a/tensor2tensor/models/multimodel_test.py b/tensor2tensor/models/basic_test.py similarity index 53% rename from tensor2tensor/models/multimodel_test.py rename to tensor2tensor/models/basic_test.py index 8df682c5c..3f6b4affd 100644 --- a/tensor2tensor/models/multimodel_test.py +++ b/tensor2tensor/models/basic_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,43 +13,38 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Tests for Xnet.""" +"""Basic nets tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function - -# Dependency imports - import numpy as np -from tensor2tensor.data_generators import problem_hparams -from tensor2tensor.models import multimodel -from tensor2tensor.models import slicenet +from tensor2tensor.data_generators import mnist # pylint: disable=unused-import +from tensor2tensor.models import basic +from tensor2tensor.utils import trainer_lib -import tensorflow as tf +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator -class MultiModelTest(tf.test.TestCase): +class BasicTest(tf.test.TestCase): - def testMultiModel(self): - x = np.random.random_integers(0, high=255, size=(3, 5, 4, 3)) - y = np.random.random_integers(0, high=9, size=(3, 5, 1, 1)) - hparams = slicenet.slicenet_params1_tiny() - p_hparams = problem_hparams.image_cifar10(hparams) - hparams.problems = [p_hparams] + def testBasicFcRelu(self): + x = np.random.randint(256, size=(1, 28, 28, 1)) + y = np.random.randint(10, size=(1, 1)) + hparams = trainer_lib.create_hparams( + "basic_fc_small", problem_name="image_mnist", data_dir=".") with self.test_session() as session: features = { "inputs": tf.constant(x, dtype=tf.int32), "targets": tf.constant(y, dtype=tf.int32), - "target_space_id": tf.constant(1, dtype=tf.int32), } - model = multimodel.MultiModel(hparams, p_hparams) - sharded_logits, _, _ = model.model_fn(features, True) - logits = tf.concat(sharded_logits, 0) + model = basic.BasicFcRelu(hparams, tf_estimator.ModeKeys.TRAIN) + logits, _ = model(features) session.run(tf.global_variables_initializer()) res = session.run(logits) - self.assertEqual(res.shape, (3, 1, 1, 1, 10)) + self.assertEqual(res.shape, (1, 1, 1, 1, 10)) if __name__ == "__main__": diff --git a/tensor2tensor/models/bytenet.py b/tensor2tensor/models/bytenet.py index 42db05700..84594f36a 100644 --- a/tensor2tensor/models/bytenet.py +++ b/tensor2tensor/models/bytenet.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,39 +18,36 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +from six.moves import range # pylint: disable=redefined-builtin -# Dependency imports - -from six.moves import xrange # pylint: disable=redefined-builtin - -from tensor2tensor.models import common_hparams -from tensor2tensor.models import common_layers +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers from tensor2tensor.utils import registry from tensor2tensor.utils import t2t_model -import tensorflow as tf +import tensorflow.compat.v1 as tf -def residual_dilated_conv(x, repeat, padding, name, hparams, train): +def residual_dilated_conv(x, repeat, padding, name, hparams): """A stack of convolution blocks with residual connections.""" with tf.variable_scope(name): k = (hparams.kernel_height, hparams.kernel_width) dilations_and_kernels = [((2**i, 1), k) - for i in xrange(hparams.num_hidden_layers)] - for i in xrange(repeat): + for i in range(hparams.num_hidden_layers)] + for i in range(repeat): with tf.variable_scope("repeat_%d" % i): y = common_layers.conv_block( - x, + common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"), hparams.hidden_size, dilations_and_kernels, padding=padding, name="residual_conv") - x = common_layers.layer_norm(x + y, hparams.hidden_size, name="lnorm") - x = tf.nn.dropout(x, 1.0 - hparams.dropout * tf.to_float(train)) + y = tf.nn.dropout(y, 1.0 - hparams.dropout) + x += y return x -def bytenet_internal(inputs, targets, hparams, train): +def bytenet_internal(inputs, targets, hparams): """ByteNet, main step used for training.""" with tf.variable_scope("bytenet"): # Flatten inputs and extend length by 50%. @@ -62,27 +60,26 @@ def bytenet_internal(inputs, targets, hparams, train): # Pad inputs and targets to be the same length, divisible by 50. 
inputs, targets = common_layers.pad_to_same_length( inputs, targets, final_length_divisible_by=50) - final_encoder = residual_dilated_conv( - inputs, hparams.num_block_repeat, "SAME", "encoder", hparams, train) + final_encoder = residual_dilated_conv(inputs, hparams.num_block_repeat, + "SAME", "encoder", hparams) - shifted_targets = common_layers.shift_left(targets) + shifted_targets = common_layers.shift_right(targets) kernel = (hparams.kernel_height, hparams.kernel_width) decoder_start = common_layers.conv_block( tf.concat([final_encoder, shifted_targets], axis=3), hparams.hidden_size, [((1, 1), kernel)], padding="LEFT") - return residual_dilated_conv( - decoder_start, hparams.num_block_repeat, - "LEFT", "decoder", hparams, train) + return residual_dilated_conv(decoder_start, hparams.num_block_repeat, + "LEFT", "decoder", hparams) @registry.register_model class ByteNet(t2t_model.T2TModel): - def model_fn_body(self, features, train): + def body(self, features): return bytenet_internal(features["inputs"], features["targets"], - self._hparams, train) + self._hparams) @registry.register_hparams @@ -98,7 +95,7 @@ def bytenet_base(): hparams.num_hidden_layers = 4 hparams.kernel_height = 3 hparams.kernel_width = 1 - hparams.learning_rate_decay_scheme = "exp50k" + hparams.learning_rate_decay_scheme = "exp" hparams.learning_rate = 0.05 hparams.learning_rate_warmup_steps = 3000 hparams.initializer_gain = 1.0 diff --git a/tensor2tensor/models/bytenet_test.py b/tensor2tensor/models/bytenet_test.py index 676220cc8..204d54bc1 100644 --- a/tensor2tensor/models/bytenet_test.py +++ b/tensor2tensor/models/bytenet_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
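In the `bytenet.py` hunk above, `residual_dilated_conv` switches from normalizing the residual sum (post-norm) to normalizing the block input (pre-norm), with dropout now applied only to the convolutional branch. A toy NumPy sketch of the two orderings, with `layer_norm` and the dilated convolution replaced by simplified stand-ins:

```python
# Toy contrast of the residual orderings in residual_dilated_conv; layer_norm
# and conv below are simplified stand-ins, not the T2T ops (dropout omitted).
import numpy as np

def layer_norm(x, epsilon=1e-6):
  mean = x.mean(axis=-1, keepdims=True)
  variance = x.var(axis=-1, keepdims=True)
  return (x - mean) / np.sqrt(variance + epsilon)

def conv(x):
  return 0.5 * x  # stand-in for the dilated conv block

def old_block(x):
  return layer_norm(x + conv(x))   # post-norm: normalize the residual sum

def new_block(x):
  return x + conv(layer_norm(x))   # pre-norm: normalize only the branch input

x = np.random.rand(2, 5, 8)
print(old_block(x).shape, new_block(x).shape)  # both (2, 5, 8)
```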
@@ -17,34 +18,33 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function - -# Dependency imports - import numpy as np from tensor2tensor.data_generators import problem_hparams from tensor2tensor.models import bytenet -import tensorflow as tf +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator class ByteNetTest(tf.test.TestCase): def testByteNet(self): vocab_size = 9 - x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1)) - y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 6, 1, 1)) + x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1)) + y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1)) hparams = bytenet.bytenet_base() - p_hparams = problem_hparams.test_problem_hparams(hparams, vocab_size, - vocab_size) + p_hparams = problem_hparams.test_problem_hparams(vocab_size, + vocab_size, + hparams) with self.test_session() as session: features = { "inputs": tf.constant(x, dtype=tf.int32), "targets": tf.constant(y, dtype=tf.int32), } - model = bytenet.ByteNet(hparams, p_hparams) - sharded_logits, _, _ = model.model_fn(features, True) - logits = tf.concat(sharded_logits, 0) + model = bytenet.ByteNet( + hparams, tf_estimator.ModeKeys.TRAIN, p_hparams) + logits, _ = model(features) session.run(tf.global_variables_initializer()) res = session.run(logits) self.assertEqual(res.shape, (3, 50, 1, 1, vocab_size)) diff --git a/tensor2tensor/models/common_attention.py b/tensor2tensor/models/common_attention.py deleted file mode 100644 index ccf288a09..000000000 --- a/tensor2tensor/models/common_attention.py +++ /dev/null @@ -1,344 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities for attention.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math - -# Dependency imports - -from tensor2tensor.models import common_layers - -import tensorflow as tf - - -def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4): - """Adds a bunch of sinusoids of different frequencies to a Tensor. - - Each channel of the input Tensor is incremented by a sinusoid of a different - frequency and phase. - - This allows attention to learn to use absolute and relative positions. - Timing signals should be added to some precursors of both the query and the - memory inputs to attention. - - The use of relative position is possible because sin(x+y) and cos(x+y) can be - experessed in terms of y, sin(x) and cos(x). - - In particular, we use a geometric sequence of timescales starting with - min_timescale and ending with max_timescale. The number of different - timescales is equal to channels / 2. For each timescale, we - generate the two sinusoidal signals sin(timestep/timescale) and - cos(timestep/timescale). All of these sinusoids are concatenated in - the channels dimension. 
- - Args: - x: a Tensor with shape [batch, length, channels] - min_timescale: a float - max_timescale: a float - - Returns: - a Tensor the same shape as x. - """ - length = tf.shape(x)[1] - channels = tf.shape(x)[2] - position = tf.to_float(tf.range(length)) - num_timescales = channels // 2 - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (tf.to_float(num_timescales) - 1)) - inv_timescales = min_timescale * tf.exp( - tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) - scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0) - signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) - signal = tf.pad(signal, [[0, 0], [0, tf.mod(channels, 2)]]) - signal = tf.reshape(signal, [1, length, channels]) - return x + signal - - -def add_timing_signal_nd(x, min_timescale=1.0, max_timescale=1.0e4): - """Adds a bunch of sinusoids of different frequencies to a Tensor. - - Each channel of the input Tensor is incremented by a sinusoid of a different - frequency and phase in one of the positional dimensions. - - This allows attention to learn to use absolute and relative positions. - Timing signals should be added to some precursors of both the query and the - memory inputs to attention. - - The use of relative position is possible because sin(a+b) and cos(a+b) can be - experessed in terms of b, sin(a) and cos(a). - - x is a Tensor with n "positional" dimensions, e.g. one dimension for a - sequence or two dimensions for an image - - We use a geometric sequence of timescales starting with - min_timescale and ending with max_timescale. The number of different - timescales is equal to channels // (n * 2). For each timescale, we - generate the two sinusoidal signals sin(timestep/timescale) and - cos(timestep/timescale). All of these sinusoids are concatenated in - the channels dimension. - - Args: - x: a Tensor with shape [batch, d1 ... dn, channels] - min_timescale: a float - max_timescale: a float - - Returns: - a Tensor the same shape as x. - """ - static_shape = x.get_shape().as_list() - num_dims = len(static_shape) - 2 - channels = tf.shape(x)[-1] - num_timescales = channels // (num_dims * 2) - log_timescale_increment = ( - math.log(float(max_timescale) / float(min_timescale)) / - (tf.to_float(num_timescales) - 1)) - inv_timescales = min_timescale * tf.exp( - tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) - for dim in xrange(num_dims): - length = tf.shape(x)[dim + 1] - position = tf.to_float(tf.range(length)) - scaled_time = tf.expand_dims(position, 1) * tf.expand_dims( - inv_timescales, 0) - signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) - prepad = dim * 2 * num_timescales - postpad = channels - (dim + 1) * 2 * num_timescales - signal = tf.pad(signal, [[0, 0], [prepad, postpad]]) - for _ in xrange(1 + dim): - signal = tf.expand_dims(signal, 0) - for _ in xrange(num_dims - 1 - dim): - signal = tf.expand_dims(signal, -2) - x += signal - return x - - -def embedding_to_padding(emb): - """Input embeddings -> is_padding. - - We have hacked symbol_modality to return all-zero embeddings for padding. - - Args: - emb: a Tensor with shape [..., depth]. - Returns: - a boolean Tensor with shape [...]. - """ - emb_sum = tf.reduce_sum(tf.abs(emb), axis=-1) - return tf.equal(emb_sum, 0.0) - - -def attention_bias_lower_triangle(length): - """Create an bias tensor to be added to attention logits. - - Args: - length: a Scalar. - - Returns: - a `Tensor` with shape [1, 1, length, length]. 
- """ - lower_triangle = tf.matrix_band_part(tf.ones([length, length]), -1, 0) - ret = -1e9 * (1.0 - lower_triangle) - return tf.reshape(ret, [1, 1, length, length]) - - -def attention_bias_ignore_padding(memory_padding): - """Create an bias tensor to be added to attention logits. - - Args: - memory_padding: a boolean `Tensor` with shape [batch, memory_length]. - - Returns: - a `Tensor` with shape [batch, 1, 1, memory_length]. - """ - ret = tf.to_float(memory_padding) * -1e9 - return tf.expand_dims(tf.expand_dims(ret, 1), 1) - - -def split_last_dimension(x, n): - """Reshape x so that the last dimension becomes two dimensions. - - The first of these two dimensions is n. - - Args: - x: a Tensor with shape [..., m] - n: an integer. - - Returns: - a Tensor with shape [..., n, m/n] - """ - old_shape = x.get_shape().dims - last = old_shape[-1] - new_shape = old_shape[:-1] + [n] + [last // n if last else None] - ret = tf.reshape(x, tf.concat([tf.shape(x)[:-1], [n, -1]], 0)) - ret.set_shape(new_shape) - return ret - - -def combine_last_two_dimensions(x): - """Reshape x so that the last two dimension become one. - - Args: - x: a Tensor with shape [..., a, b] - - Returns: - a Tensor with shape [..., ab] - """ - old_shape = x.get_shape().dims - a, b = old_shape[-2:] - new_shape = old_shape[:-2] + [a * b if a and b else None] - ret = tf.reshape(x, tf.concat([tf.shape(x)[:-2], [-1]], 0)) - ret.set_shape(new_shape) - return ret - - -def split_heads(x, num_heads): - """Split channels (dimension 3) into multiple heads (becomes dimension 1). - - Args: - x: a Tensor with shape [batch, length, channels] - num_heads: an integer - - Returns: - a Tensor with shape [batch, num_heads, length, channels / num_heads] - """ - return tf.transpose(split_last_dimension(x, num_heads), [0, 2, 1, 3]) - - -def combine_heads(x): - """Inverse of split_heads. - - Args: - x: a Tensor with shape [batch, num_heads, length, channels / num_heads] - - Returns: - a Tensor with shape [batch, length, channels] - """ - return combine_last_two_dimensions(tf.transpose(x, [0, 2, 1, 3])) - - -def attention_image_summary(attn): - """Compute color image summary. - - Args: - attn: a Tensor with shape [batch, num_heads, query_length, memory_length] - """ - num_heads = attn.get_shape().as_list()[1] - # [batch, query_length, memory_length, num_heads] - image = tf.transpose(attn, [0, 2, 3, 1]) - image = tf.pow(image, 0.2) # for high-dynamic-range - # Each head will correspond to one of RGB. - # pad the heads to be a multiple of 3 - image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, -num_heads % 3]]) - image = split_last_dimension(image, 3) - image = tf.reduce_max(image, 4) - tf.summary.image("attention", image, max_outputs=1) - - -def dot_product_attention(q, - k, - v, - bias, - dropout_rate=0.0, - summaries=False, - name=None): - """dot-product attention. - - Args: - q: a Tensor with shape [batch, heads, length_q, depth_k] - k: a Tensor with shape [batch, heads, length_kv, depth_k] - v: a Tensor with shape [batch, heads, length_kv, depth_v] - bias: bias Tensor (see attention_bias()) - dropout_rate: a floating point number - summaries: a boolean - name: an optional string - - Returns: - A Tensor. 
- """ - with tf.variable_scope( - name, default_name="dot_product_attention", values=[q, k, v]): - # [batch, num_heads, query_length, memory_length] - logits = tf.matmul(q, k, transpose_b=True) - if bias is not None: - logits += bias - weights = tf.nn.softmax(logits, name="attention_weights") - # dropping out the attention links for each of the heads - weights = tf.nn.dropout(weights, 1.0 - dropout_rate) - if summaries and not tf.get_variable_scope().reuse: - attention_image_summary(weights) - return tf.matmul(weights, v) - - -def multihead_attention(query_antecedent, - memory_antecedent, - bias, - total_key_depth, - total_value_depth, - output_depth, - num_heads, - dropout_rate, - summaries=False, - name=None): - """Multihead scaled-dot-product attention with input/output transformations. - - Args: - query_antecedent: a Tensor with shape [batch, length_q, channels] - memory_antecedent: a Tensor with shape [batch, length_m, channels] - bias: bias Tensor (see attention_bias()) - total_key_depth: an integer - total_value_depth: an integer - output_depth: an integer - num_heads: an integer dividing total_key_depth and total_value_depth - dropout_rate: a floating point number - summaries: a boolean - name: an optional string - - Returns: - A Tensor. - """ - with tf.variable_scope( - name, - default_name="multihead_attention", - values=[query_antecedent, memory_antecedent]): - if memory_antecedent is None: - # self attention - combined = common_layers.conv1d( - query_antecedent, - total_key_depth * 2 + total_value_depth, - 1, - name="qkv_transform") - q, k, v = tf.split( - combined, [total_key_depth, total_key_depth, total_value_depth], - axis=2) - else: - q = common_layers.conv1d( - query_antecedent, total_key_depth, 1, name="q_transform") - combined = common_layers.conv1d( - memory_antecedent, - total_key_depth + total_value_depth, - 1, - name="kv_transform") - k, v = tf.split(combined, [total_key_depth, total_value_depth], axis=2) - q = split_heads(q, num_heads) - k = split_heads(k, num_heads) - v = split_heads(v, num_heads) - key_depth_per_head = total_key_depth // num_heads - q *= key_depth_per_head**-0.5 - x = dot_product_attention(q, k, v, bias, dropout_rate, summaries) - x = combine_heads(x) - x = common_layers.conv1d(x, output_depth, 1, name="output_transform") - return x diff --git a/tensor2tensor/models/common_hparams.py b/tensor2tensor/models/common_hparams.py deleted file mode 100644 index 81c41dcc5..000000000 --- a/tensor2tensor/models/common_hparams.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Hyperparameters and ranges common to multiple models.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -import six -from six.moves import zip # pylint: disable=redefined-builtin -from tensor2tensor.utils import registry - -import tensorflow as tf - - -@registry.register_hparams("basic1") -def basic_params1(): - """A set of basic hyperparameters.""" - return tf.contrib.training.HParams( - batch_size=4096, # in tokens per batch per gpu - # This flag controls the number of length buckets in the data reader. - # Too many buckets slows down data reading - this needs fixing. - # Too few buckets mean lots of wasted padding. - # If this value is 1, we have buckets with maximum lengths: - # [8, 12, 16, 24, 32, 48 ... (max_length or batch_size)] - # If this value is 2, we have buckets with maximum lengths: - # [8, 10, 12, 14, 16, 20, 24 ... (max_length or batch_size)] - batching_mantissa_bits=1, - num_hidden_layers=4, - kernel_height=3, - kernel_width=1, - hidden_size=64, - compress_steps=0, - dropout=0.2, - clip_grad_norm=2.0, - initializer="orthogonal", - initializer_gain=1.5, - label_smoothing=0.1, - optimizer="Adam", - optimizer_adam_epsilon=1e-6, - optimizer_adam_beta1=0.85, - optimizer_adam_beta2=0.997, - optimizer_momentum_momentum=0.9, - weight_decay=0.1, - weight_noise=0.0, - learning_rate_decay_scheme="none", - learning_rate_warmup_steps=100, - learning_rate=0.1, - sampling_method="argmax", # "argmax" or "random" - problem_choice="adaptive", # "uniform", "adaptive", "distributed" - multiply_embedding_mode="sqrt_depth", - symbol_modality_num_shards=16, - # setting the max length in a minibatch. 0 means default behavior, - # max_length = hparams.batch_size * length_multiplier - max_length=0, - # in SymbolModality, share the output embeddings and the softmax - # variables. - # You can also share the input embeddings with the output embeddings - # by using a problem_hparams that uses the same modality object for - # the input_modality and target_modality. 
- shared_embedding_and_softmax_weights=int(False),) - - -class RangedHParams(object): - """Defines parameter ranges for tuning.""" - - # From ParameterConfig proto - LINEAR_SCALE = 1 - LOG_SCALE = 2 - REVERSE_LOG_SCALE = 3 - - def __init__(self): - self._categorical_params = {} - self._discrete_params = {} - self._float_params = {} - self._int_params = {} - - def _check_reset_and_type_change(self, name, orig_ctr): - """Check if name is in orig_ctr or in one of the other type containers.""" - # Resetting a hyperparameter - if name in orig_ctr: - tf.logging.warning("Overwriting hparam %s", name) - - ctr_names = [(self._categorical_params, - "categorical"), (self._discrete_params, "discrete"), - (self._float_params, "float"), (self._int_params, "int")] - ctrs, names = list(zip(*ctr_names)) - orig_name = names[ctrs.index(orig_ctr)] - - for ctr, ctr_name in ctr_names: - if ctr is orig_ctr: - continue - - # Using a different type for the same hyperparameter name - if name in ctr: - raise ValueError("Setting hyperparameter %s as type %s, but a " - "hyperparemeter of the same name was originally " - "registered as type %s" % (name, ctr_name, orig_name)) - - def set_categorical(self, name, categories, length=None): - self._check_reset_and_type_change(name, self._categorical_params) - self._categorical_params[name] = (name, categories, length) - - def set_discrete(self, name, feasible_points, scale=None, length=None): - self._check_reset_and_type_change(name, self._discrete_params) - self._discrete_params[name] = (name, feasible_points, scale, length) - - def set_float(self, name, min_val, max_val, scale=None, length=None): - self._check_reset_and_type_change(name, self._float_params) - self._float_params[name] = (name, min_val, max_val, scale, length) - - def set_int(self, name, min_val, max_val, scale=None, length=None): - self._check_reset_and_type_change(name, self._int_params) - self._int_params[name] = (name, min_val, max_val, scale, length) - - -def fill_ranged_hparams_from_hparams(hparams, ranged_hparams): - """Fill ranged_hparams with singleton values from hparams. - - HParams are placed in RangedHParams with the following functions, according to - type: - * int: set_discrete - * float: set_float - * str: set_categorical - - Args: - hparams: tf.contrib.training.HParams; contains the hyperparameters to copy - over to ranged_hparams. - ranged_hparams: RangedHParams; will have hparams values copied to it. - - Raises: - ValueError: if hparams contains a hyperparameter not of type - {int, float, str, bool}. - """ - for name, (hp_type, is_multivalent) in six.iteritems(hparams._hparam_types): # pylint: disable=protected-access - - if is_multivalent: - raise ValueError("Multivalent hparams not supported in RangedHParams. " - "Hyperparameter %s is multivalent." 
% name) - val = getattr(hparams, name) - if hp_type == int: - ranged_hparams.set_discrete(name, [val]) - elif hp_type == float: - ranged_hparams.set_float(name, val, val) - elif hp_type == str: - ranged_hparams.set_categorical(name, [val]) - else: - raise ValueError("Unsupported type %s for param %s" % (hp_type, name)) - - -@registry.register_ranged_hparams("basic1") -def basic_range1(ranged_hparams): - """A basic range of hyperparameters.""" - rhp = ranged_hparams - - hparams = basic_params1() - fill_ranged_hparams_from_hparams(hparams, rhp) - - rhp.set_discrete("batch_size", [1024, 2048, 4096]) - rhp.set_discrete("num_hidden_layers", [1, 2, 3, 4, 5, 6]) - rhp.set_discrete("hidden_size", [32, 64, 128, 256, 512], scale=rhp.LOG_SCALE) - rhp.set_discrete("kernel_height", [1, 3, 5, 7]) - rhp.set_discrete("kernel_width", [1, 3, 5, 7]) - rhp.set_discrete("compress_steps", [0, 1, 2]) - rhp.set_float("dropout", 0.0, 0.5) - rhp.set_float("weight_decay", 1e-4, 10.0, scale=rhp.LOG_SCALE) - rhp.set_float("label_smoothing", 0.0, 0.2) - rhp.set_float("clip_grad_norm", 0.01, 50.0, scale=rhp.LOG_SCALE) - rhp.set_float("learning_rate", 0.005, 2.0, scale=rhp.LOG_SCALE) - rhp.set_categorical("initializer", - ["uniform", "orthogonal", "uniform_unit_scaling"]) - rhp.set_float("initializer_gain", 0.5, 3.5) - rhp.set_categorical("learning_rate_decay_scheme", - ["none", "sqrt", "noam", "exp10k"]) - rhp.set_float("optimizer_adam_epsilon", 1e-7, 1e-2, scale=rhp.LOG_SCALE) - rhp.set_float("optimizer_adam_beta1", 0.8, 0.9) - rhp.set_float("optimizer_adam_beta2", 0.995, 0.999) - rhp.set_categorical("optimizer", - ["Adam", "Adagrad", "Momentum", "RMSProp", "SGD"]) diff --git a/tensor2tensor/models/common_layers.py b/tensor2tensor/models/common_layers.py deleted file mode 100644 index ef6559f9e..000000000 --- a/tensor2tensor/models/common_layers.py +++ /dev/null @@ -1,1340 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Layers common to multiple models.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math - -# Dependency imports - -import numpy as np -from six.moves import xrange # pylint: disable=redefined-builtin -from tensor2tensor.utils import expert_utils as eu - -import tensorflow as tf - -from tensorflow.python.framework import function - -# This is a global setting. When turned off, no @function.Defun is used. 
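For concreteness, this is roughly how a tuning range would be declared against the `RangedHParams` interface above, following the pattern of `basic_range1`. The name `tiny_range` and the specific values are made up for illustration; the sketch assumes the `RangedHParams` setters and `registry.register_ranged_hparams` defined in that file.

```
from tensor2tensor.utils import registry

@registry.register_ranged_hparams("tiny_range")
def tiny_range(ranged_hparams):
  rhp = ranged_hparams  # a RangedHParams instance
  rhp.set_discrete("hidden_size", [128, 256, 512], scale=rhp.LOG_SCALE)
  rhp.set_int("num_hidden_layers", 2, 6)
  rhp.set_float("learning_rate", 0.01, 1.0, scale=rhp.LOG_SCALE)
  rhp.set_float("dropout", 0.0, 0.5)
  rhp.set_categorical("optimizer", ["Adam", "SGD"])
```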
-allow_defun = True - - -def saturating_sigmoid(x): - """Saturating sigmoid: 1.2 * sigmoid(x) - 0.1 cut to [0, 1].""" - with tf.name_scope("saturating_sigmoid", [x]): - y = tf.sigmoid(x) - return tf.minimum(1.0, tf.maximum(0.0, 1.2 * y - 0.1)) - - -def hard_sigmoid(x, saturation_limit=0.9): - saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit)) - x_shifted = 0.5 * x + 0.5 - return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost - - -def hard_tanh(x, saturation_limit=0.9): - saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit)) - return tf.minimum(1.0, tf.maximum(x, -1.0)), saturation_cost - - -def inverse_exp_decay(max_step, min_value=0.01): - """Inverse-decay exponentially from 0.01 to 1.0 reached at max_step.""" - inv_base = tf.exp(tf.log(min_value) / float(max_step)) - step = tf.to_float(tf.contrib.framework.get_global_step()) - return inv_base**tf.maximum(float(max_step) - step, 0.0) - - -def standardize_images(x): - """Image standardization on batches (tf.image.per_image_standardization).""" - with tf.name_scope("standardize_images", [x]): - x = tf.to_float(x) - x_mean = tf.reduce_mean(x, axis=[1, 2, 3], keep_dims=True) - x_variance = tf.reduce_mean( - tf.square(x - x_mean), axis=[1, 2, 3], keep_dims=True) - num_pixels = tf.to_float(tf.shape(x)[1] * tf.shape(x)[2] * 3) - x = (x - x_mean) / tf.maximum(tf.sqrt(x_variance), tf.rsqrt(num_pixels)) - # TODO(lukaszkaiser): remove hack below, needed for greedy decoding for now. - if x.shape and len(x.shape) == 4 and x.shape[3] == 1: - x = tf.concat([x, x, x], axis=3) # Not used, just a dead tf.cond branch. - x.set_shape([None, None, None, 3]) - return x - - -def image_augmentation(images, do_colors=False): - """Image augmentation: cropping, flipping, and color transforms.""" - images = tf.random_crop(images, [299, 299, 3]) - images = tf.image.random_flip_left_right(images) - if do_colors: # More augmentation, but might be slow. - images = tf.image.random_brightness(images, max_delta=32. / 255.) - images = tf.image.random_saturation(images, lower=0.5, upper=1.5) - images = tf.image.random_hue(images, max_delta=0.2) - images = tf.image.random_contrast(images, lower=0.5, upper=1.5) - return images - - -def flatten4d3d(x): - """Flatten a 4d-tensor into a 3d-tensor by joining width and height.""" - xshape = tf.shape(x) - result = tf.reshape(x, [xshape[0], xshape[1] * xshape[2], xshape[3]]) - # Preserve static shapes when available. - xshape_static = x.get_shape() - result.set_shape([xshape_static[0], None, xshape_static[3]]) - return result - - -def embedding(x, vocab_size, dense_size, name=None, reuse=None, multiplier=1.0): - """Embed x of type int64 into dense vectors, reducing to max 4 dimensions.""" - with tf.variable_scope( - name, default_name="embedding", values=[x], reuse=reuse): - embedding_var = tf.get_variable("kernel", [vocab_size, dense_size]) - # On the backwards pass, we want to convert the gradient from - # an indexed-slices to a regular tensor before sending it back to the - # parameter server. This avoids excess computation on the parameter server. - embedding_var = eu.ConvertGradientToTensor(embedding_var) - emb_x = tf.gather(embedding_var, x) - if multiplier != 1.0: - emb_x *= multiplier - shape, static_shape = tf.shape(emb_x), emb_x.shape.as_list() - if not static_shape or len(static_shape) < 5: - return emb_x - # If we had extra channel dimensions, assume it's 1, i.e. shape[3] == 1. 
- assert len(static_shape) == 5 - return tf.reshape(emb_x, [shape[0], shape[1], shape[2], static_shape[4]]) - - -def shift_left(x, pad_value=None): - """Shift the second dimension of x right by one.""" - if pad_value is None: - shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0], [0, 0]])[:, :-1, :, :] - else: - shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :, :] - return shifted_targets - - -def shift_left_3d(x, pad_value=None): - """Shift the second dimension of x right by one.""" - if pad_value is None: - shifted_targets = tf.pad(x, [[0, 0], [1, 0], [0, 0]])[:, :-1, :] - else: - shifted_targets = tf.concat([pad_value, x], axis=1)[:, :-1, :] - return shifted_targets - - -def conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None): - """Use a strided convolution to downsample x by 2, `nbr_steps` times. - - We use stride and filter size 2 to avoid the checkerboard problem of deconvs. - As detailed in http://distill.pub/2016/deconv-checkerboard/. - - Args: - x: a `Tensor` with shape `[batch, spatial, depth]` or - `[batch, spatial_1, spatial_2, depth]` - nbr_steps: number of halving downsample rounds to apply - output_filters: an int specifying the filter count for the convolutions - name: a string - reuse: a boolean - - Returns: - a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or - `[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps), - output_filters]` - """ - with tf.variable_scope( - name, default_name="conv_stride2_multistep", values=[x], reuse=reuse): - if nbr_steps == 0: - out = conv(x, output_filters, (1, 1)) - return out, [out] - hidden_layers = [x] - for i in xrange(nbr_steps): - hidden_layers.append( - conv( - hidden_layers[-1], - output_filters, (2, 2), - strides=2, - activation=tf.nn.relu, - name="conv" + str(i))) - return hidden_layers[-1], hidden_layers - - -def deconv_stride2_multistep(x, - nbr_steps, - output_filters, - name=None, - reuse=None): - """Use a deconvolution to upsample x by 2**`nbr_steps`. - - Args: - x: a `Tensor` with shape `[batch, spatial, depth]` or - `[batch, spatial_1, spatial_2, depth]` - nbr_steps: an int specifying the number of doubling upsample rounds to - apply. 
- output_filters: an int specifying the filter count for the deconvolutions - name: a string - reuse: a boolean - - Returns: - a `Tensor` with shape `[batch, spatial * (2**nbr_steps), output_filters]` or - `[batch, spatial_1 * (2**nbr_steps), spatial_2 * (2**nbr_steps), - output_filters]` - """ - with tf.variable_scope( - name, default_name="deconv_stride2_multistep", values=[x], reuse=reuse): - - def deconv1d(cur, i): - cur_shape = tf.shape(cur) - thicker = conv( - cur, - output_filters * 2, (1, 1), - padding="SAME", - activation=tf.nn.relu, - name="deconv1d" + str(i)) - return tf.reshape(thicker, - [cur_shape[0], cur_shape[1] * 2, 1, output_filters]) - - def deconv2d(cur, i): - thicker = conv( - cur, - output_filters * 4, (1, 1), - padding="SAME", - activation=tf.nn.relu, - name="deconv2d" + str(i)) - return tf.depth_to_space(thicker, 2) - - cur = x - for i in xrange(nbr_steps): - if cur.get_shape()[2] == 1: - cur = deconv1d(cur, i) - else: - cur = tf.cond( - tf.equal(tf.shape(cur)[2], 1), - lambda idx=i: deconv1d(cur, idx), - lambda idx=i: deconv2d(cur, idx)) - return cur - - -def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs): - """Conditional conv_fn making kernel 1d or 2d depending on inputs shape.""" - static_shape = inputs.get_shape() - if not static_shape or len(static_shape) != 4: - raise ValueError("Inputs to conv must have statically known rank 4.") - inputs.set_shape([static_shape[0], None, None, static_shape[3]]) - # Add support for left padding. - if "padding" in kwargs and kwargs["padding"] == "LEFT": - dilation_rate = (1, 1) - if "dilation_rate" in kwargs: - dilation_rate = kwargs["dilation_rate"] - assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1 - height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0] - cond_padding = tf.cond( - tf.equal(tf.shape(inputs)[2], 1), lambda: tf.constant(0), - lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1])) - width_padding = 0 if static_shape[2] == 1 else cond_padding - padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]] - inputs = tf.pad(inputs, padding) - kwargs["padding"] = "VALID" - force2d = False # Special argument we use to force 2d kernels (see below). - if "force2d" in kwargs: - force2d = kwargs["force2d"] - - def conv2d_kernel(kernel_size_arg, name_suffix): - """Call conv2d but add suffix to name.""" - if "name" in kwargs: - original_name = kwargs["name"] - name = kwargs.pop("name") + "_" + name_suffix - else: - original_name = None - name = "conv_" + name_suffix - original_force2d = None - if "force2d" in kwargs: - original_force2d = kwargs.pop("force2d") - result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs) - if original_name is not None: - kwargs["name"] = original_name # Restore for other calls. - if original_force2d is not None: - kwargs["force2d"] = original_force2d - return result - - # Manually setting the shape to be unknown in the middle two dimensions so - # that the `tf.cond` below won't throw an error based on the convolution - # kernels being too large for the data. - inputs._shape = tf.TensorShape([static_shape[0], None, None, static_shape[3]]) # pylint: disable=protected-access - if kernel_size[1] == 1 or force2d: - # Avoiding the cond below can speed up graph and gradient construction. 
- return conv2d_kernel(kernel_size, "single") - return tf.cond( - tf.equal(tf.shape(inputs)[2], - 1), lambda: conv2d_kernel((kernel_size[0], 1), "small"), - lambda: conv2d_kernel(kernel_size, "std")) - - -def conv(inputs, filters, kernel_size, **kwargs): - return conv_internal(tf.layers.conv2d, inputs, filters, kernel_size, **kwargs) - - -def conv1d(inputs, filters, kernel_size, **kwargs): - return tf.squeeze( - conv(tf.expand_dims(inputs, 2), filters, (kernel_size, 1), **kwargs), 2) - - -def separable_conv(inputs, filters, kernel_size, **kwargs): - return conv_internal(tf.layers.separable_conv2d, inputs, filters, kernel_size, - **kwargs) - - -def subseparable_conv(inputs, filters, kernel_size, **kwargs): - """Sub-separable convolution. If separability == 0 it's a separable_conv.""" - - def conv_fn(inputs, filters, kernel_size, **kwargs): - """Sub-separable convolution, splits into separability-many blocks.""" - separability = None - if "separability" in kwargs: - separability = kwargs.pop("separability") - if separability: - parts = [] - abs_sep = separability if separability > 0 else -1 * separability - for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)): - with tf.variable_scope("part_%d" % split_idx): - if separability > 0: - parts.append( - tf.layers.conv2d(split, filters // separability, kernel_size, ** - kwargs)) - else: - parts.append( - tf.layers.separable_conv2d(split, filters // abs_sep, - kernel_size, **kwargs)) - if separability > 1: - result = tf.layers.conv2d(tf.concat(parts, axis=3), filters, (1, 1)) - elif abs_sep == 1: # If we have just one block, return it. - assert len(parts) == 1 - result = parts[0] - else: - result = tf.concat(parts, axis=3) - else: - result = tf.layers.separable_conv2d(inputs, filters, kernel_size, - **kwargs) - if separability is not None: - kwargs["separability"] = separability - return result - - return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs) - - -def layer_norm_compute_python(x, epsilon, scale, bias): - """Layer norm raw computation.""" - mean = tf.reduce_mean(x, axis=[-1], keep_dims=True) - variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keep_dims=True) - norm_x = (x - mean) * tf.rsqrt(variance + epsilon) - return norm_x * scale + bias - - -@function.Defun(compiled=True) -def layer_norm_compute_grad(x, epsilon, scale, bias, dy): - y = layer_norm_compute_python(x, epsilon, scale, bias) - dx = tf.gradients(ys=[y], xs=[x, epsilon, scale, bias], grad_ys=[dy]) - return dx - - -@function.Defun( - compiled=True, - separate_compiled_gradients=True, - grad_func=layer_norm_compute_grad) -def layer_norm_compute(x, epsilon, scale, bias): - return layer_norm_compute_python(x, epsilon, scale, bias) - - -def layer_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None): - """Layer normalize the tensor x, averaging over the last dimension.""" - if filters is None: - filters = x.get_shape()[-1] - with tf.variable_scope( - name, default_name="layer_norm", values=[x], reuse=reuse): - scale = tf.get_variable( - "layer_norm_scale", [filters], initializer=tf.ones_initializer()) - bias = tf.get_variable( - "layer_norm_bias", [filters], initializer=tf.zeros_initializer()) - if allow_defun: - result = layer_norm_compute(x, tf.constant(epsilon), scale, bias) - result.set_shape(x.get_shape()) - else: - result = layer_norm_compute_python(x, epsilon, scale, bias) - return result - - -def noam_norm(x, name=None): - """One version of layer normalization.""" - with tf.name_scope(name, default_name="noam_norm", values=[x]): - 
shape = x.get_shape() - ndims = len(shape) - return (tf.nn.l2_normalize(x, ndims - 1, epsilon=1.0) * - tf.sqrt(tf.to_float(shape[-1]))) - - -def residual_function(hparams): - """Returns a function for combining layer input and layer output. - - The returned function on x (layer input) and y (layer output) computes: - norm_function(x + t - - Args: - hparams: model hyperparameters - - Returns: - a function from x= and y= to computed output - """ - - def residual_fn(x, y): - return hparams.norm_function(x + tf.nn.dropout( - y, 1.0 - hparams.residual_dropout)) - - return residual_fn - - -def conv_block_internal(conv_fn, - inputs, - filters, - dilation_rates_and_kernel_sizes, - first_relu=True, - use_elu=False, - separabilities=None, - **kwargs): - """A block of convolutions. - - Args: - conv_fn: convolution function, e.g. conv or separable_conv. - inputs: a Tensor - filters: an Integer - dilation_rates_and_kernel_sizes: a list of tuples (dilation, (k_w, k_h)) - first_relu: whether to do a relu at start (defaults to True) - use_elu: whether to use ELUs instead of ReLUs (defaults to False) - separabilities: list of separability factors (per-layer). - **kwargs: additional arguments (e.g., pooling) - - Returns: - a Tensor. - """ - name = kwargs.pop("name") if "name" in kwargs else None - mask = kwargs.pop("mask") if "mask" in kwargs else None - norm = kwargs.pop("normalizer_fn") if "normalizer_fn" in kwargs else None - if norm is None and "normalizer_fn" not in kwargs: - norm = lambda x, name: layer_norm(x, filters, name=name) - with tf.variable_scope(name, "conv_block", [inputs]): - cur, counter = inputs, -1 - for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes: - counter += 1 - if first_relu or counter > 0: - cur = tf.nn.elu(cur) if use_elu else tf.nn.relu(cur) - if mask is not None: - cur *= mask - if separabilities: - cur = conv_fn( - cur, - filters, - kernel_size, - dilation_rate=dilation_rate, - name="conv_block_%d" % counter, - use_bias=norm is None, - separability=separabilities[counter], - **kwargs) - else: - cur = conv_fn( - cur, - filters, - kernel_size, - dilation_rate=dilation_rate, - name="conv_block_%d" % counter, - use_bias=norm is None, - **kwargs) - if norm is not None: - cur = norm(cur, name="conv_block_norm_%d" % counter) - return cur - - -def conv_block(inputs, filters, dilation_rates_and_kernel_sizes, **kwargs): - """A block of standard convolutions.""" - return conv_block_internal(conv, inputs, filters, - dilation_rates_and_kernel_sizes, **kwargs) - - -def separable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes, - **kwargs): - """A block of separable convolutions.""" - return conv_block_internal(separable_conv, inputs, filters, - dilation_rates_and_kernel_sizes, **kwargs) - - -def subseparable_conv_block(inputs, filters, dilation_rates_and_kernel_sizes, - **kwargs): - """A block of separable convolutions.""" - return conv_block_internal(subseparable_conv, inputs, filters, - dilation_rates_and_kernel_sizes, **kwargs) - - -def pool(inputs, window_size, pooling_type, padding, strides=(1, 1)): - """Pooling (supports "LEFT").""" - with tf.name_scope("pool", [inputs]): - static_shape = inputs.get_shape() - if not static_shape or len(static_shape) != 4: - raise ValueError("Inputs to conv must have statically known rank 4.") - # Add support for left padding. 
- if padding == "LEFT": - assert window_size[0] % 2 == 1 and window_size[1] % 2 == 1 - if len(static_shape) == 3: - width_padding = 2 * (window_size[1] // 2) - padding_ = [[0, 0], [width_padding, 0], [0, 0]] - else: - height_padding = 2 * (window_size[0] // 2) - cond_padding = tf.cond( - tf.equal(tf.shape(inputs)[2], 1), lambda: tf.constant(0), - lambda: tf.constant(2 * (window_size[1] // 2))) - width_padding = 0 if static_shape[2] == 1 else cond_padding - padding_ = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]] - inputs = tf.pad(inputs, padding_) - inputs.set_shape([static_shape[0], None, None, static_shape[3]]) - padding = "VALID" - window_size_small = (window_size[0], 1) - strides_small = (strides[0], 1) - # Manually setting the shape to be unknown in the middle two dimensions so - # that the `tf.cond` below won't throw an error based on the convolution - # kernels being too large for the data. - inputs._shape = tf.TensorShape( # pylint: disable=protected-access - [static_shape[0], None, None, static_shape[3]]) - return tf.cond( - tf.equal(tf.shape(inputs)[2], 1), - lambda: tf.nn.pool( # pylint: disable=g-long-lambda - inputs, window_size_small, pooling_type, padding, - strides=strides_small), - lambda: tf.nn.pool( # pylint: disable=g-long-lambda - inputs, window_size, pooling_type, padding, strides=strides)) - - -def conv_block_downsample(x, - kernel, - strides, - padding, - separability=0, - name=None, - reuse=None): - """Implements a downwards-striding conv block, like Xception exit flow.""" - with tf.variable_scope( - name, default_name="conv_block_downsample", values=[x], reuse=reuse): - hidden_size = int(x.get_shape()[-1]) - res = conv_block( - x, - int(1.25 * hidden_size), [((1, 1), kernel)], - padding=padding, - strides=strides, - name="res_conv") - - x = subseparable_conv_block( - x, - hidden_size, [((1, 1), kernel)], - padding=padding, - separability=separability, - name="conv0") - x = subseparable_conv_block( - x, - int(1.25 * hidden_size), [((1, 1), kernel)], - padding=padding, - separability=separability, - name="conv1") - x = pool(x, kernel, "MAX", padding, strides=strides) - - x += res - - x = subseparable_conv_block( - x, - 2 * hidden_size, [((1, 1), kernel)], - first_relu=False, - padding=padding, - separability=separability, - name="conv2") - x = subseparable_conv_block( - x, - int(2.5 * hidden_size), [((1, 1), kernel)], - padding=padding, - separability=separability, - name="conv3") - return x - - -def decompress_seqcnn(x, - targets, - targets_vocab_size, - dilations_and_kernels, - block_size, - is_2d=False, - embedding_var=None, - name=None, - reuse=None): - """Decompress x into targets size using a Sequence CNN at every element.""" - with tf.variable_scope( - name, - default_name="decompress_batch_seqcnn", - values=[x, targets], - reuse=reuse): - # We assume targets are [batch x block_size * N x block_size * N x C] if - # is_2d=True or [batch, block_size * N, 1, C] otherwise, and C is static. - # Let's shift targets to depth and embed. 
- targets_shape, targets_shape_static = tf.shape(targets), targets.get_shape() - channels = int(targets_shape_static[-1]) - hidden_size = int(x.get_shape()[-1]) - if is_2d: - depth_targets = tf.space_to_depth(targets, block_size) - factor = channels * block_size * block_size - else: - depth_targets = tf.reshape(targets, [ - targets_shape[0], targets_shape[1] // block_size, 1, - channels * block_size - ]) - factor = channels * block_size - if embedding_var is None: - embedding_var = tf.get_variable("targets_embedding", - [targets_vocab_size, hidden_size]) - targets_emb = tf.gather(embedding_var, depth_targets) - # Flatten x and embedded targets. Flat targets are factor* larger on axis=1. - flat_x = tf.reshape(x, [-1, 1, 1, hidden_size]) - flat_targets = tf.reshape(targets_emb, [-1, factor, 1, hidden_size]) - shifted_targets = shift_left(flat_targets) - # Run a SeqCNN large-batch to produce factor outputs out of every target. - flat_x += tf.zeros_like(shifted_targets) # Broadcast on axis=1. - flat_outputs = conv_block( - tf.concat([flat_x, shifted_targets], axis=3), - hidden_size, - dilations_and_kernels, - padding="LEFT") - # Reshape back to embedded targets shape. - outputs = tf.reshape(flat_outputs, [ - tf.shape(targets_emb)[0], - tf.shape(targets_emb)[1], - tf.shape(targets_emb)[2], factor * hidden_size - ]) - # Move depth back to target space. - if is_2d: - outputs = tf.depth_to_space(outputs, 2) - else: - outputs = tf.reshape(outputs, [ - tf.shape(outputs)[0], block_size * tf.shape(outputs)[1], 1, - hidden_size - ]) - # Final reshape before prediction to ensure target size. - outputs = tf.reshape(outputs, [ - targets_shape[0], targets_shape[1], targets_shape[2], channels, - hidden_size - ]) - return tf.layers.dense(outputs, targets_vocab_size) - - -def moe_layer(data_parallelism, - ps_devices, - xs, - train, - model_hidden_size, - expert_hidden_size, - n1, - n2, - loss_coef, - autoscale=True, - name=None): - """A mixture of experts layer. - - Args: - data_parallelism: a expert_utils.Parallelism object. - ps_devices: a list of strings - xs: a list of input tensors. - train: a boolean scalar. - model_hidden_size: an integer (input/output size for this layer) - expert_hidden_size: an integer (size of each expert's hidden layer) - n1: an integer - number of experts (or # of groups for hierarchical MoE) - n2: optional integer - size of each group of experts for hierarchical MoE - loss_coef: a scalar - multiplier on load-balancing losses - autoscale: a boolean - name: a string - - Returns: - ys: a list of tensors: - extra_training_loss: a scalar - """ - dp = data_parallelism - with tf.variable_scope(name, default_name="moe"): - # Set up the hyperparameters for the gating networks. - primary_gating_hp = eu.NoisyTopKGatingParams() - primary_gating_hp.num_experts = n1 - if n2: - # hierarchical MoE containing moe_n1 groups of moe_n2 experts. - assert n2 > 1 - secondary_gating_hp = eu.NoisyTopKGatingParams() - secondary_gating_hp.num_experts = n2 - else: - # flat mixture of moe_n1 experts. - secondary_gating_hp = None - # Set up the hyperparameters for the expert networks. - # Each expert contains a hidden RELU layer of size filter_size - expert_hp = eu.FeedForwardExpertParams() - expert_hp.autoscale = autoscale - expert_hp.hidden_layer_sizes = [expert_hidden_size] - # Create the mixture of experts. - moe = eu.DistributedMixtureOfExperts(primary_gating_hp, secondary_gating_hp, - expert_hp, model_hidden_size, - model_hidden_size, ps_devices, "moe") - # MoE expects input tensors to be 2d. 
- # Flatten out spatial dimensions. - xs_2d = dp(tf.reshape, xs, [[-1, model_hidden_size]] * dp.n) - # Call the MoE - moe_out_2d, importance, load, _, _ = moe.Eval( - dp.devices, xs_2d, train, identifiers=None, summaries=True) - # Reshape the output to the original shape. - moe_out = dp(tf.reshape, moe_out_2d, dp(tf.shape, xs)) - # These losses encourage equal load on the different experts. - loss = loss_coef * (eu.CVSquared(importance) + eu.CVSquared(load)) - return moe_out, loss - - -def simple_attention(target, source, bias=None, summaries=True): - """A simple attention function. - - Args: - target: a `Tensor` with shape `[batch, target_timesteps, depth]` or - `[batch, target_timesteps_1, target_timesteps_2, depth]` - source: a `Tensor` with shape `[batch, source_timesteps, depth]` or - `[batch, source_timesteps_1, source_timesteps_2, depth]` - bias: an optional `Tensor` with shape `[batch, timesteps, 1, 1]` used - to mask the attention to not attend to padding of input. - summaries: Boolean, whether to output summaries. - - Returns: - a `Tensor` with same shape as `target` - """ - with tf.name_scope("simple_attention", [target, source]): - target_shape = tf.shape(target) - source_shape = tf.shape(source) - target = tf.reshape(target, [ - target_shape[0], target_shape[1] * target_shape[2], target_shape[3] - ]) - source = tf.reshape(source, [ - source_shape[0], source_shape[1] * source_shape[2], source_shape[3] - ]) - attention = tf.matmul(target, source, transpose_b=True) - attention *= tf.rsqrt(tf.to_float(tf.shape(target)[2])) - if bias is not None: - attention += tf.expand_dims(tf.squeeze(bias, axis=[2, 3]), axis=1) - attention = tf.nn.softmax(attention) - if summaries and not tf.get_variable_scope().reuse: - tf.summary.image("attention", tf.expand_dims(attention, 3), max_outputs=5) - attended = tf.matmul(attention, source) - return tf.reshape(attended, target_shape) - - -def multiscale_conv_sum(inputs, output_size, dilation_rates_and_kernel_sizes, - pooling_type, **kwargs): - """Sum of several dilated convolutions. - - For all convolutions with dilation_rate > 1, we first pool the input with - width dilation_rate. - - Args: - inputs: a Tensor - output_size: an Integer - dilation_rates_and_kernel_sizes: a list of pairs (dilation, kernel_size) - pooling_type: "AVG" or "MAX" - **kwargs: additional - - Returns: - a Tensor. - """ - name = kwargs.pop("name") if "name" in kwargs else None - with tf.variable_scope(name, "multiscale_conv_sum", [inputs]): - padding = kwargs["padding"] - results, counter = [], -1 - for dilation_rate, kernel_size in dilation_rates_and_kernel_sizes: - counter += 1 - if dilation_rate > 1: - pooled = pool(inputs, kernel_size, pooling_type, padding) - else: - pooled = inputs - results.append( - conv( - pooled, - output_size, - kernel_size, - dilation_rate=dilation_rate, - name="conv_layer%d" % counter, - **kwargs)) - return tf.add_n(results) * (len(results)**-0.5) - - -def multiscale_conv_and_attention(x, - padding, - hparams, - source=None, - summaries=True): - """A common part of t2t layers. - - First, do a linear multiscale convolution - Second, do attention (if source is not None) - - Applies residuals and normalization on both steps. - - Args: - x: a Tensor. - padding: a padding type - hparams: hyperparameters for model - source: optional source tensor for attention. (encoder output) - summaries: Boolean, whether to output summaries. - - Returns: - a Tensor. - """ - # TODO(noam): The number of different scales should be a hyperparameter. 
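The load-balancing term in `moe_layer` above penalizes the squared coefficient of variation (variance divided by squared mean) of the per-expert importance and load, so an even split over experts contributes nothing and a skewed split is pushed back toward uniform. A minimal NumPy sketch of that quantity (illustrative only; `eu.CVSquared` is the library's own implementation):

```
import numpy as np

def cv_squared(x, eps=1e-10):
  # Squared coefficient of variation: variance / mean**2 (0 when all equal).
  x = np.asarray(x, dtype=np.float64)
  return x.var() / (x.mean() ** 2 + eps)

balanced = np.array([10., 10., 10., 10.])  # every expert gets equal load
skewed = np.array([37., 1., 1., 1.])       # one expert dominates

print(cv_squared(balanced))  # 0.0  -> no auxiliary loss
print(cv_squared(skewed))    # 2.43 -> larger auxiliary loss
```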
- conv_sum = multiscale_conv_sum( - x, - hparams.hidden_size, [((hparams.kernel_height**i, hparams.kernel_width** - i), (hparams.kernel_height, hparams.kernel_width)) - for i in xrange(3)], - "AVG", - padding=padding) - # For residuals a rescale if necessary if channels differ. - if x.get_shape().as_list()[-1] != conv_sum.get_shape().as_list()[-1]: - x = conv(x, hparams.hidden_size, (1, 1)) - x = noam_norm(x + conv_sum) - if source is not None: - x = noam_norm(x + simple_attention(x, source, summaries=summaries)) - return x - - -def conv_with_pools(inputs, output_size, kernel_size, pool_sizes, pooling_type, - **kwargs): - """Convolution plus 1x1 convolution applied to specified pools. - - For example we might do a regular convolution with kernel size (3, 1), - and pools of sizes [(9, 1), (27, 1)]. - - Args: - inputs: a Tensor - output_size: an Integer - kernel_size: a tuple of integers - pool_sizes: a list of tuples of integers. - pooling_type: "AVG" or "MAX" - **kwargs: additional keyword args for conv - - Returns: - a Tensor. - """ - name = kwargs.pop("name") if "name" in kwargs else None - with tf.variable_scope(name, "conv_with_pools", [inputs]): - padding = kwargs["padding"] - results = [] - results.append(conv(inputs, output_size, kernel_size, **kwargs)) - for i, pool_size in enumerate(pool_sizes): - pooled = pool(inputs, pool_size, pooling_type, padding) - results.append( - conv(pooled, output_size, (1, 1), name="pool_%d" % i, **kwargs)) - return tf.add_n(results) * (len(results)**-0.5) - - -def conv_with_pools_and_attention(x, - padding, - hparams, - source=None, - summaries=True): - """A common part of t2t layers. - - First, do conv_with_pools - Second, do attention (if source is not None) - - Applies residuals and normalization on both steps. - - Args: - x: a Tensor. - padding: a padding type - hparams: hyperparameters for model - source: optional source tensor for attention. (encoder output) - summaries: Boolean, whether to output summaries. - - Returns: - a Tensor. - """ - conv_sum = conv_with_pools( - x, - hparams.hidden_size, (hparams.kernel_height, hparams.kernel_width), - hparams.pool_sizes, - "AVG", - padding=padding) - if x.get_shape().as_list()[-1] == conv_sum.get_shape().as_list()[-1]: - conv_sum += x - x = noam_norm(conv_sum) - if source is not None: - x = noam_norm(x + simple_attention(x, source, summaries=summaries)) - return x - - -def get_timing_signal(length, - min_timescale=1, - max_timescale=1e4, - num_timescales=16): - """Create Tensor of sinusoids of different frequencies. - - Args: - length: Length of the Tensor to create, i.e. Number of steps. - min_timescale: a float - max_timescale: a float - num_timescales: an int - - Returns: - Tensor of shape (length, 2*num_timescales) - """ - positions = tf.to_float(tf.range(length)) - log_timescale_increment = (math.log(max_timescale / min_timescale) / - (num_timescales - 1)) - inv_timescales = min_timescale * tf.exp( - tf.to_float(tf.range(num_timescales)) * -log_timescale_increment) - scaled_time = tf.expand_dims(positions, 1) * tf.expand_dims(inv_timescales, 0) - return tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1) - - -def add_timing_signal(x, min_timescale=1, max_timescale=1e4, num_timescales=16): - """Adds a bunch of sinusoids of different frequencies to a Tensor. - - This allows attention to learn to use absolute and relative positions. - The timing signal should be added to some precursor of both the source - and the target of the attention. 
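For intuition, a standalone NumPy version of the sinusoidal timing signal constructed by `get_timing_signal` above (same geometric sequence of timescales; the names and sample call are illustrative, not library code):

```
import math
import numpy as np

def get_timing_signal_np(length, min_timescale=1.0, max_timescale=1e4,
                         num_timescales=16):
  # Returns [length, 2 * num_timescales]: sin/cos at geometric timescales.
  positions = np.arange(length, dtype=np.float64)
  log_timescale_increment = (math.log(max_timescale / min_timescale) /
                             (num_timescales - 1))
  inv_timescales = min_timescale * np.exp(
      np.arange(num_timescales, dtype=np.float64) * -log_timescale_increment)
  scaled_time = positions[:, None] * inv_timescales[None, :]
  return np.concatenate([np.sin(scaled_time), np.cos(scaled_time)], axis=1)

signal = get_timing_signal_np(length=7, num_timescales=10)
print(signal.shape)  # (7, 20)
```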
- - The use of relative position is possible because sin(x+y) and cos(x+y) can be - experessed in terms of y, sin(x) and cos(x). - - In particular, we use a geometric sequence of timescales starting with - min_timescale and ending with max_timescale. For each timescale, we - generate the two sinusoidal signals sin(timestep/timescale) and - cos(timestep/timescale). All of these sinusoids are concatenated in - the depth dimension, padded with zeros to be the same depth as the input, - and added into input. - - Args: - x: a Tensor with shape [?, length, ?, depth] - min_timescale: a float - max_timescale: a float - num_timescales: an int <= depth/2 - - Returns: - a Tensor the same shape as x. - """ - length = tf.shape(x)[1] - depth = tf.shape(x)[3] - signal = get_timing_signal(length, min_timescale, max_timescale, - num_timescales) - padded_signal = tf.pad(signal, [[0, 0], [0, depth - 2 * num_timescales]]) - return x + tf.reshape(padded_signal, [1, length, 1, depth]) - - -def mask_from_embedding(emb): - """Input embeddings -> padding mask. - - We have hacked symbol_modality to return all-zero embeddings for padding. - Returns a mask with 0.0 in the padding positions and 1.0 elsewhere. - - Args: - emb: a Tensor with shape [batch, width, height, depth]. - Returns: - a 0.0/1.0 Tensor with shape [batch, width, height, 1]. - """ - return weights_nonzero(tf.reduce_sum(tf.abs(emb), axis=3, keep_dims=True)) - - -def mask_leq(target_length, source_length): - """A mask with 1.0 wherever source_pos <= target_pos and 0.0 elsewhere. - - Args: - target_length: an integer - source_length: an integer - Returns: - a Tensor with shape [1, target_length, source_length] - """ - return tf.expand_dims( - tf.matrix_band_part(tf.ones([target_length, source_length]), -1, 0), 0) - - -def attention_1d_v0(source, - target, - attention_size, - output_size, - num_heads, - mask=None, - transform_source=True, - transform_target=True, - transform_output=True, - summaries=True, - name=None): - """multi-headed attention. - - TODO(noam): this could probably be extended to 2d. - - Args: - source: a Tensor of shape [batch, source_length, source_depth] - target: a Tensor of shape [batch, target_length, target_depth] - attention_size: an integer - output_size: an integer - num_heads: an integer divisor of attention_size - mask: a float32 Tensor of shape [batch, target_length, source_length] - 1.0 means can-see; 0.0 means can't-see. - Any dimension can be 1 (supports broadcasting). 
- transform_source: a boolean - transform_target: a boolean - transform_output: a boolean - summaries: a boolean - name: an optional string - - Returns: - a Tensor of shape [batch, length, output_size] - """ - with tf.variable_scope(name, default_name="attention", values=[target]): - source_length = tf.shape(source)[1] - target_length = tf.shape(target)[1] - batch = tf.shape(source)[0] - - def _maybe_transform(t, size, should_transform, name): - if should_transform: - return conv1d(t, size, 1, name=name) - else: - assert t.get_shape()[-1] == size - return t - - source_attention = _maybe_transform(source, attention_size, - transform_source, "source_attention") - target_attention = _maybe_transform(target, attention_size, - transform_target, "target_attention") - assert attention_size % num_heads == 0 - size_per_head = attention_size // num_heads - source_attention = tf.reshape( - source_attention, [batch, source_length, num_heads, size_per_head]) - target_attention = tf.reshape( - target_attention, [batch, target_length, num_heads, size_per_head]) - # [batch, num_heads, length, size_per_head] - source_attention = tf.transpose(source_attention, [0, 2, 1, 3]) - target_attention = tf.transpose(target_attention, [0, 2, 1, 3]) - - # [batch, num_heads, target_length, source_length] - attention = tf.matmul(target_attention, source_attention, transpose_b=True) - attention *= size_per_head**-0.5 - - if mask is not None: - mask = tf.expand_dims(mask, 1) - mask = (1.0 - mask) * -1e9 - attention += mask - attention = tf.nn.softmax(attention) - if summaries and not tf.get_variable_scope().reuse: - # Compute a color image summary. - image = tf.reshape(attention, - [batch, num_heads, target_length, source_length]) - image = tf.transpose(image, [0, 2, 3, 1]) - image = tf.pow(image, 0.2) # for high-dynamic-range - # Each head will correspond to one of RGB. - # pad the heads to be a multiple of 3 - extra_heads = -num_heads % 3 - image = tf.pad(image, [[0, 0], [0, 0], [0, 0], [0, -num_heads % 3]]) - image = tf.reshape(image, [ - batch, target_length, source_length, 3, (num_heads + extra_heads) // 3 - ]) - image = tf.reduce_max(image, 4) - tf.summary.image("local_attention", image, max_outputs=1) - # output: [batch, num_heads, target_length, size_per_head] - output = tf.matmul(attention, source_attention) - output = tf.transpose(output, [0, 2, 1, 3]) - output = tf.reshape(output, [batch, target_length, attention_size]) - output = _maybe_transform(output, output_size, transform_output, - "attention_output") - return output - - -def relu_density_logit(x, reduce_dims): - """logit(density(x)). - - Useful for histograms. 
- - Args: - x: a Tensor, typilcally the output of tf.relu - reduce_dims: a list of dimensions - - Returns: - a Tensor - """ - frac = tf.reduce_mean(tf.to_float(x > 0.0), reduce_dims) - scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10)) - return scaled - - -def conv_hidden_relu(inputs, - hidden_size, - output_size, - kernel_size=(1, 1), - summaries=True, - dropout=0.0, - **kwargs): - """Hidden layer with RELU activation followed by linear projection.""" - name = kwargs.pop("name") if "name" in kwargs else None - with tf.variable_scope(name, "conv_hidden_relu", [inputs]): - if inputs.get_shape().ndims == 3: - is_3d = True - inputs = tf.expand_dims(inputs, 2) - else: - is_3d = False - h = conv( - inputs, - hidden_size, - kernel_size, - activation=tf.nn.relu, - name="conv1", - **kwargs) - if dropout != 0.0: - h = tf.nn.dropout(h, 1.0 - dropout) - if summaries and not tf.get_variable_scope().reuse: - tf.summary.histogram("hidden_density_logit", - relu_density_logit( - h, list(range(inputs.shape.ndims - 1)))) - ret = conv(h, output_size, (1, 1), name="conv2", **kwargs) - if is_3d: - ret = tf.squeeze(ret, 2) - return ret - - -def conv_gru(x, - kernel_size, - filters, - padding="SAME", - dilation_rate=(1, 1), - name=None, - reuse=None): - """Convolutional GRU in 1 dimension.""" - - # Let's make a shorthand for conv call first. - def do_conv(args, name, bias_start, padding): - return conv( - args, - filters, - kernel_size, - padding=padding, - dilation_rate=dilation_rate, - bias_initializer=tf.constant_initializer(bias_start), - name=name) - - # Here comes the GRU gate. - with tf.variable_scope( - name, default_name="conv_gru", values=[x], reuse=reuse): - reset = saturating_sigmoid(do_conv(x, "reset", 1.0, padding)) - gate = saturating_sigmoid(do_conv(x, "gate", 1.0, padding)) - candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0, padding)) - return gate * x + (1 - gate) * candidate - - -def conv_lstm(x, - kernel_size, - filters, - padding="SAME", - dilation_rate=(1, 1), - name=None, - reuse=None): - """Convolutional LSTM in 1 dimension.""" - with tf.variable_scope( - name, default_name="conv_lstm", values=[x], reuse=reuse): - gates = conv( - x, - 4 * filters, - kernel_size, - padding=padding, - dilation_rate=dilation_rate) - g = tf.split(layer_norm(gates, 4 * filters), 4, axis=3) - new_cell = tf.sigmoid(g[0]) * x + tf.sigmoid(g[1]) * tf.tanh(g[3]) - return tf.sigmoid(g[2]) * tf.tanh(new_cell) - - -def diagonal_conv_gru(x, - kernel_size, - filters, - train, - dropout=0.0, - name=None, - reuse=None): - """Diagonal Convolutional GRU as in https://arxiv.org/abs/1702.08727.""" - - # Let's make a shorthand for conv call first. - def do_conv(args, name, bias_start): - return conv( - args, - filters, - kernel_size, - padding="SAME", - bias_initializer=tf.constant_initializer(bias_start), - name=name) - - # Here comes the GRU gate. - with tf.variable_scope( - name, default_name="diagonal_conv_gru", values=[x], reuse=reuse): - reset, reset_cost = hard_sigmoid(do_conv(x, "reset", 0.5)) - gate, gate_cost = hard_sigmoid(do_conv(x, "gate", 0.7)) - candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0)) - - # Dropout if training. - if dropout > 0.0 and train: - candidate = tf.nn.dropout(candidate, 1.0 - dropout) - - # Diagonal shift. 
- shift_filters = filters // 3 - base_filter = ([[0, 1, 0]] * (filters - 2 * shift_filters) + - [[1, 0, 0]] * shift_filters + [[0, 0, 1]] * shift_filters) - shift_filter = tf.constant(np.transpose(base_filter), dtype=tf.float32) - shift_filter = tf.expand_dims(tf.expand_dims(shift_filter, 0), 3) - x_shifted = tf.nn.depthwise_conv2d( - x, shift_filter, [1, 1, 1, 1], padding="SAME") - - # Return the gated result and cost. - total_cost_avg = 0.5 * (reset_cost + gate_cost) - return gate * x_shifted + (1 - gate) * candidate, total_cost_avg - - -def pad_to_same_length(x, y, final_length_divisible_by=1, axis=1): - """Pad tensors x and y on axis 1 so that they have the same length.""" - if axis not in [1, 2]: - raise ValueError("Only axis=1 and axis=2 supported for now.") - with tf.name_scope("pad_to_same_length", [x, y]): - x_length = tf.shape(x)[axis] - y_length = tf.shape(y)[axis] - max_length = tf.maximum(x_length, y_length) - if final_length_divisible_by > 1: - # Find the nearest larger-or-equal integer divisible by given number. - max_length += final_length_divisible_by - 1 - max_length //= final_length_divisible_by - max_length *= final_length_divisible_by - length_diff1 = max_length - x_length - length_diff2 = max_length - y_length - - def padding_list(length_diff, arg): - if axis == 1: - return [[[0, 0], [0, length_diff]], - tf.zeros([tf.rank(arg) - 2, 2], dtype=tf.int32)] - return [[[0, 0], [0, 0], [0, length_diff]], - tf.zeros([tf.rank(arg) - 3, 2], dtype=tf.int32)] - - paddings1 = tf.concat(padding_list(length_diff1, x), axis=0) - paddings2 = tf.concat(padding_list(length_diff2, y), axis=0) - res_x = tf.pad(x, paddings1) - res_y = tf.pad(y, paddings2) - # Static shapes are the same except for axis=1. - x_shape = x.shape.as_list() - x_shape[axis] = None - res_x.set_shape(x_shape) - y_shape = y.shape.as_list() - y_shape[axis] = None - res_y.set_shape(y_shape) - return res_x, res_y - - -def pad_with_zeros(logits, labels): - """Pad labels on the length dimension to match logits length.""" - with tf.name_scope("pad_with_zeros", [logits, labels]): - logits, labels = pad_to_same_length(logits, labels) - if len(labels.shape.as_list()) == 3: # 2-d labels. - logits, labels = pad_to_same_length(logits, labels, axis=2) - return labels - - -def weights_nonzero(labels): - """Assign weight 1.0 to all labels except for padding (id=0).""" - return tf.to_float(tf.not_equal(labels, 0)) - - -def weights_all(labels): - """Assign weight 1.0 to all labels.""" - return tf.ones_like(labels, dtype=tf.float32) - - -def weights_concatenated(labels): - """Assign weight 1.0 to the "target" part of the concatenated labels. - - The labels look like: - source English I love you . ID1 target French Je t'aime . ID1 source - English the cat ID1 target French le chat ID1 source English ... - - We want to assign weight 1.0 to all words in the target text (including the - ID1 end symbol), but not to the source text or the boilerplate. In the - above example, the target words that get positive weight are: - Je t'aime . ID1 le chat ID1 - - Args: - labels: a Tensor - Returns: - a Tensor - """ - eos_mask = tf.to_int32(tf.equal(labels, 1)) - sentence_num = tf.cumsum(eos_mask, axis=1, exclusive=True) - in_target = tf.equal(tf.mod(sentence_num, 2), 1) - # first two tokens of each sentence are boilerplate. 
- sentence_num_plus_one = sentence_num + 1 - shifted = tf.pad(sentence_num_plus_one, [[0, 0], [2, 0], [0, 0], - [0, 0]])[:, :-2, :, :] - nonboilerplate = tf.equal(sentence_num_plus_one, shifted) - ret = tf.to_float(tf.logical_and(nonboilerplate, in_target)) - return ret - - -def padded_cross_entropy(logits, - labels, - label_smoothing, - weights_fn=weights_nonzero, - reduce_sum=True): - """Compute cross-entropy assuming 0s are padding. - - Computes a loss numerator (the sum of losses), and loss denominator - (the number of non-padding tokens). - - Args: - logits: a `Tensor` with shape `[batch, timesteps, vocab_size]`. - labels: an integer `Tensor` with shape `[batch, timesteps]`. - label_smoothing: a floating point `Scalar`. - weights_fn: A function from labels to weights. - reduce_sum: a Boolean, whether to sum at the end or not. - - Returns: - loss_numerator: a `Scalar`. Sum of losses. - loss_denominator: a `Scalar. The number of non-padding target tokens. - """ - confidence = 1.0 - label_smoothing - vocab_size = tf.shape(logits)[-1] - with tf.name_scope("padded_cross_entropy", [logits, labels]): - pad_labels = pad_with_zeros(logits, labels) - xent = smoothing_cross_entropy(logits, pad_labels, vocab_size, confidence) - weights = weights_fn(pad_labels) - if not reduce_sum: - return xent * weights, weights - return tf.reduce_sum(xent * weights), tf.reduce_sum(weights) - - -def smoothing_cross_entropy(logits, labels, vocab_size, confidence): - """Cross entropy with label smoothing to limit over-confidence.""" - with tf.name_scope("smoothing_cross_entropy", [logits, labels]): - # Low confidence is given to all non-true labels, uniformly. - low_confidence = (1.0 - confidence) / tf.to_float(vocab_size - 1) - # Normalizing constant is the best cross-entropy value with soft targets. - # We subtract it just for readability, makes no difference on learning. - normalizing = -(confidence * tf.log(confidence) + tf.to_float( - vocab_size - 1) * low_confidence * tf.log(low_confidence + 1e-20)) - # Soft targets. - soft_targets = tf.one_hot( - tf.cast(labels, tf.int32), - depth=vocab_size, - on_value=confidence, - off_value=low_confidence) - xentropy = tf.nn.softmax_cross_entropy_with_logits( - logits=logits, labels=soft_targets) - return xentropy - normalizing diff --git a/tensor2tensor/models/common_layers_test.py b/tensor2tensor/models/common_layers_test.py deleted file mode 100644 index 2bd6a53ad..000000000 --- a/tensor2tensor/models/common_layers_test.py +++ /dev/null @@ -1,290 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
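To make `smoothing_cross_entropy` above concrete: the true class keeps probability `1 - label_smoothing` and the remaining mass is spread uniformly over the other `vocab_size - 1` classes, so a soft target row looks like this (minimal NumPy sketch with illustrative values):

```
import numpy as np

def smoothed_targets(label, vocab_size, label_smoothing=0.1):
  # One-hot target softened as in smoothing_cross_entropy above.
  confidence = 1.0 - label_smoothing
  low_confidence = (1.0 - confidence) / (vocab_size - 1)
  targets = np.full(vocab_size, low_confidence)
  targets[label] = confidence
  return targets

print(smoothed_targets(label=2, vocab_size=5))
# [0.025 0.025 0.9   0.025 0.025]  -- rows still sum to 1.0
```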
- -"""Tests for common layers.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -import numpy as np -from tensor2tensor.models import common_layers - -import tensorflow as tf - - -class CommonLayersTest(tf.test.TestCase): - - def testStandardizeImages(self): - x = np.random.rand(5, 7, 7, 3) - with self.test_session() as session: - y = common_layers.standardize_images(tf.constant(x)) - res = session.run(y) - self.assertEqual(res.shape, (5, 7, 7, 3)) - - def testImageAugmentation(self): - x = np.random.rand(500, 500, 3) - with self.test_session() as session: - y = common_layers.image_augmentation(tf.constant(x)) - res = session.run(y) - self.assertEqual(res.shape, (299, 299, 3)) - - def testSaturatingSigmoid(self): - x = np.array([-120.0, -100.0, 0.0, 100.0, 120.0], dtype=np.float32) - with self.test_session() as session: - y = common_layers.saturating_sigmoid(tf.constant(x)) - res = session.run(y) - self.assertAllClose(res, [0.0, 0.0, 0.5, 1.0, 1.0]) - - def testFlatten4D3D(self): - x = np.random.random_integers(1, high=8, size=(3, 5, 2)) - with self.test_session() as session: - y = common_layers.flatten4d3d(common_layers.embedding(x, 10, 7)) - session.run(tf.global_variables_initializer()) - res = session.run(y) - self.assertEqual(res.shape, (3, 5 * 2, 7)) - - def testEmbedding(self): - x = np.random.random_integers(1, high=8, size=(3, 5)) - with self.test_session() as session: - y = common_layers.embedding(x, 10, 16) - session.run(tf.global_variables_initializer()) - res = session.run(y) - self.assertEqual(res.shape, (3, 5, 16)) - - def testConv(self): - x = np.random.rand(5, 7, 1, 11) - with self.test_session() as session: - y = common_layers.conv(tf.constant(x, dtype=tf.float32), 13, (3, 3)) - session.run(tf.global_variables_initializer()) - res = session.run(y) - self.assertEqual(res.shape, (5, 5, 1, 13)) - - def testSeparableConv(self): - x = np.random.rand(5, 7, 1, 11) - with self.test_session() as session: - y = common_layers.separable_conv( - tf.constant(x, dtype=tf.float32), 13, (3, 3)) - session.run(tf.global_variables_initializer()) - res = session.run(y) - self.assertEqual(res.shape, (5, 5, 1, 13)) - - def testSubSeparableConv(self): - for sep in [0, 1, 2, 4]: - x = np.random.rand(5, 7, 1, 12) - with self.test_session() as session: - with tf.variable_scope("sep_%d" % sep): - y = common_layers.subseparable_conv( - tf.constant(x, dtype=tf.float32), 16, (3, 3), separability=sep) - session.run(tf.global_variables_initializer()) - res = session.run(y) - self.assertEqual(res.shape, (5, 5, 1, 16)) - - def testConvBlock(self): - x = np.random.rand(5, 7, 1, 11) - with self.test_session() as session: - y = common_layers.conv_block( - tf.constant(x, dtype=tf.float32), - 13, [(1, (3, 3)), (1, (3, 3))], - padding="SAME", - normalizer_fn=common_layers.noam_norm) - session.run(tf.global_variables_initializer()) - res = session.run(y) - self.assertEqual(res.shape, (5, 7, 1, 13)) - - def testSeparableConvBlock(self): - x = np.random.rand(5, 7, 1, 11) - with self.test_session() as session: - y = common_layers.separable_conv_block( - tf.constant(x, dtype=tf.float32), - 13, [(1, (3, 3)), (1, (3, 3))], - padding="SAME") - session.run(tf.global_variables_initializer()) - res = session.run(y) - self.assertEqual(res.shape, (5, 7, 1, 13)) - - def testSubSeparableConvBlock(self): - for sep in [0, 1, 2, 4]: - x = np.random.rand(5, 7, 1, 12) - with self.test_session() as session: - with tf.variable_scope("sep_%d" 
% sep): - y = common_layers.subseparable_conv_block( - tf.constant(x, dtype=tf.float32), - 16, [(1, (3, 3)), (1, (3, 3))], - padding="SAME", - separability=sep) - session.run(tf.global_variables_initializer()) - res = session.run(y) - self.assertEqual(res.shape, (5, 7, 1, 16)) - - def testPool(self): - x = np.random.rand(5, 8, 1, 11) - with self.test_session() as session: - y = common_layers.pool( - tf.constant(x, dtype=tf.float32), (2, 2), "AVG", "SAME") - session.run(tf.global_variables_initializer()) - res = session.run(y) - self.assertEqual(res.shape, (5, 8, 1, 11)) - - def testConvBlockDownsample(self): - x = np.random.rand(5, 7, 1, 11) - with self.test_session() as session: - y = common_layers.conv_block_downsample( - tf.constant(x, dtype=tf.float32), (3, 1), (2, 1), "SAME") - session.run(tf.global_variables_initializer()) - res = session.run(y) - self.assertEqual(res.shape, (5, 4, 1, 27)) - - def testSimpleAttention(self): - x = np.random.rand(5, 7, 1, 11) - y = np.random.rand(5, 9, 1, 11) - with self.test_session() as session: - a = common_layers.simple_attention( - tf.constant(x, dtype=tf.float32), tf.constant(y, dtype=tf.float32)) - session.run(tf.global_variables_initializer()) - res = session.run(a) - self.assertEqual(res.shape, (5, 7, 1, 11)) - - def testGetTimingSignal(self): - length = 7 - num_timescales = 10 - with self.test_session() as session: - a = common_layers.get_timing_signal(length, num_timescales=num_timescales) - session.run(tf.global_variables_initializer()) - res = session.run(a) - self.assertEqual(res.shape, (length, 2 * num_timescales)) - - def testAddTimingSignal(self): - batch = 5 - length = 7 - height = 3 - depth = 35 - x = np.random.rand(batch, length, height, depth) - with self.test_session() as session: - a = common_layers.add_timing_signal(tf.constant(x, dtype=tf.float32)) - session.run(tf.global_variables_initializer()) - res = session.run(a) - self.assertEqual(res.shape, (batch, length, height, depth)) - - def testAttention1D(self): - batch = 5 - target_length = 7 - source_length = 13 - source_depth = 9 - target_depth = 11 - attention_size = 21 - output_size = 15 - num_heads = 7 - source = np.random.rand(batch, source_length, source_depth) - target = np.random.rand(batch, target_length, target_depth) - mask = np.random.rand(batch, target_length, source_length) - with self.test_session() as session: - a = common_layers.attention_1d_v0( - tf.constant(source, dtype=tf.float32), - tf.constant(target, dtype=tf.float32), attention_size, output_size, - num_heads, tf.constant(mask, dtype=tf.float32)) - session.run(tf.global_variables_initializer()) - res = session.run(a) - self.assertEqual(res.shape, (batch, target_length, output_size)) - - def testMultiscaleConvSum(self): - x = np.random.rand(5, 9, 1, 11) - with self.test_session() as session: - y = common_layers.multiscale_conv_sum( - tf.constant(x, dtype=tf.float32), - 13, [((1, 1), (5, 5)), ((2, 2), (3, 3))], - "AVG", - padding="SAME") - session.run(tf.global_variables_initializer()) - res = session.run(y) - self.assertEqual(res.shape, (5, 9, 1, 13)) - - def testConvGRU(self): - x = np.random.rand(5, 7, 3, 11) - with self.test_session() as session: - y = common_layers.conv_gru(tf.constant(x, dtype=tf.float32), (1, 3), 11) - z = common_layers.conv_gru( - tf.constant(x, dtype=tf.float32), (1, 3), 11, padding="LEFT") - session.run(tf.global_variables_initializer()) - res1 = session.run(y) - res2 = session.run(z) - self.assertEqual(res1.shape, (5, 7, 3, 11)) - self.assertEqual(res2.shape, (5, 7, 3, 11)) - - 
def testLayerNorm(self): - x = np.random.rand(5, 7, 11) - with self.test_session() as session: - y = common_layers.layer_norm(tf.constant(x, dtype=tf.float32), 11) - session.run(tf.global_variables_initializer()) - res = session.run(y) - self.assertEqual(res.shape, (5, 7, 11)) - - def testConvLSTM(self): - x = np.random.rand(5, 7, 11, 13) - with self.test_session() as session: - y = common_layers.conv_lstm(tf.constant(x, dtype=tf.float32), (1, 3), 13) - session.run(tf.global_variables_initializer()) - res = session.run(y) - self.assertEqual(res.shape, (5, 7, 11, 13)) - - def testPadToSameLength(self): - x1 = np.random.rand(5, 7, 11) - x2 = np.random.rand(5, 9, 11) - with self.test_session() as session: - a, b = common_layers.pad_to_same_length( - tf.constant(x1, dtype=tf.float32), tf.constant(x2, dtype=tf.float32)) - c, d = common_layers.pad_to_same_length( - tf.constant(x1, dtype=tf.float32), - tf.constant(x2, dtype=tf.float32), - final_length_divisible_by=4) - res1, res2 = session.run([a, b]) - res1a, res2a = session.run([c, d]) - self.assertEqual(res1.shape, (5, 9, 11)) - self.assertEqual(res2.shape, (5, 9, 11)) - self.assertEqual(res1a.shape, (5, 12, 11)) - self.assertEqual(res2a.shape, (5, 12, 11)) - - def testShiftLeft(self): - x1 = np.zeros((5, 7, 1, 11)) - x1[:, 0, :] = np.ones_like(x1[:, 0, :]) - expected = np.zeros((5, 7, 1, 11)) - expected[:, 1, :] = np.ones_like(expected[:, 1, :]) - with self.test_session() as session: - a = common_layers.shift_left(tf.constant(x1, dtype=tf.float32)) - actual = session.run(a) - self.assertAllEqual(actual, expected) - - def testConvStride2MultiStep(self): - x1 = np.random.rand(5, 32, 1, 11) - with self.test_session() as session: - a = common_layers.conv_stride2_multistep( - tf.constant(x1, dtype=tf.float32), 4, 16) - session.run(tf.global_variables_initializer()) - actual = session.run(a[0]) - self.assertEqual(actual.shape, (5, 2, 1, 16)) - - def testDeconvStride2MultiStep(self): - x1 = np.random.rand(5, 2, 1, 11) - with self.test_session() as session: - a = common_layers.deconv_stride2_multistep( - tf.constant(x1, dtype=tf.float32), 4, 16) - session.run(tf.global_variables_initializer()) - actual = session.run(a) - self.assertEqual(actual.shape, (5, 32, 1, 16)) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensor2tensor/models/distillation.py b/tensor2tensor/models/distillation.py new file mode 100644 index 000000000..9d8ccb849 --- /dev/null +++ b/tensor2tensor/models/distillation.py @@ -0,0 +1,199 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Traditional Student-Teacher Distillation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.layers import common_hparams +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +@registry.register_model +class Distillation(t2t_model.T2TModel): + """Distillation from a teacher to student network. + + First, a teacher is trained on a task; Second, a student is trained to perform + the task while matching the teacher's softened outputs. For more details, see + the paper below. + + In the hparams passed to this model include the desired + {teacher/student}_model and {teacher/student}_hparams to be used. Also, + specify the distillation temperature and task-distillation balance. + + Distilling the Knowledge in a Neural Network + Hinton, Vinyals and Dean + https://arxiv.org/abs/1503.02531 + """ + + def __init__(self, + hparams, + mode=tf_estimator.ModeKeys.TRAIN, + problem_hparams=None, + data_parallelism=None, + decode_hparams=None, + **kwargs): + assert hparams.distill_phase in ["train", "distill"] + + if hparams.distill_phase == "train" and hparams.teacher_learning_rate: + hparams.learning_rate = hparams.teacher_learning_rate + elif hparams.distill_phase == "distill" and hparams.student_learning_rate: + hparams.learning_rate = hparams.student_learning_rate + + self.teacher_hparams = registry.hparams(hparams.teacher_hparams) + self.teacher_model = registry.model( + hparams.teacher_model)(self.teacher_hparams, mode, problem_hparams, + data_parallelism, decode_hparams) + self.student_hparams = registry.hparams(hparams.student_hparams) + self.student_model = registry.model( + hparams.student_model)(self.student_hparams, mode, problem_hparams, + data_parallelism, decode_hparams) + super(Distillation, + self).__init__(hparams, mode, problem_hparams, data_parallelism, + decode_hparams, **kwargs) + + def body(self, features): + hp = self.hparams + is_distill = hp.distill_phase == "distill" + + targets = features["targets_raw"] + targets = tf.squeeze(targets, [1, 2, 3]) + one_hot_targets = tf.one_hot(targets, hp.num_classes, dtype=tf.float32) + + # Teacher Network + with tf.variable_scope("teacher"): + teacher_outputs = self.teacher_model.body(features) + tf.logging.info("teacher output shape: %s" % teacher_outputs.get_shape()) + teacher_outputs = tf.reduce_mean(teacher_outputs, axis=[1, 2]) + teacher_logits = tf.layers.dense(teacher_outputs, hp.num_classes) + + teacher_task_xent = tf.nn.softmax_cross_entropy_with_logits_v2( + labels=one_hot_targets, logits=teacher_logits) + outputs = teacher_logits + + if is_distill: + # Load teacher weights + tf.train.init_from_checkpoint(hp.teacher_dir, {"teacher/": "teacher/"}) + # Do not train the teacher + trainable_vars = tf.get_collection_ref(tf.GraphKeys.TRAINABLE_VARIABLES) + del trainable_vars[:] + + # Student Network + if is_distill: + with tf.variable_scope("student"): + student_outputs = self.student_model.body(features) + tf.logging.info( + "student output shape: %s" % student_outputs.get_shape()) + student_outputs = tf.reduce_mean(student_outputs, axis=[1, 2]) + student_logits = tf.layers.dense(student_outputs, hp.num_classes) + + student_task_xent = tf.nn.softmax_cross_entropy_with_logits_v2( + labels=one_hot_targets, logits=student_logits) + teacher_targets = tf.nn.softmax(teacher_logits / hp.distill_temperature) + 
student_distill_xent = tf.nn.softmax_cross_entropy_with_logits_v2( + labels=tf.stop_gradient(teacher_targets), + logits=student_logits / hp.distill_temperature) + # scale soft target obj. to match hard target obj. scale + student_distill_xent *= hp.distill_temperature**2 + + outputs = student_logits + + # Summaries + tf.summary.scalar("distill_xent", student_distill_xent) + + if not is_distill: + phase_loss = teacher_task_xent + else: + phase_loss = hp.task_balance * student_task_xent + phase_loss += (1 - hp.task_balance) * student_distill_xent + + losses = {"training": phase_loss} + outputs = tf.reshape(outputs, [-1, 1, 1, 1, outputs.shape[1]]) + + return outputs, losses + + def top(self, body_output, features): + return body_output + + +def distill_base(): + """Set of hyperparameters.""" + # Base + hparams = common_hparams.basic_params1() + + # teacher/student parameters + hparams.add_hparam("teacher_model", "") + hparams.add_hparam("teacher_hparams", "") + hparams.add_hparam("student_model", "") + hparams.add_hparam("student_hparams", "") + + # Distillation parameters + # WARNING: distill_phase hparam will be overwritten in /bin/t2t_distill.py + hparams.add_hparam("distill_phase", None) + hparams.add_hparam("task_balance", 1.0) + hparams.add_hparam("distill_temperature", 1.0) + hparams.add_hparam("num_classes", 10) + + # Optional Phase-specific hyperparameters + hparams.add_hparam("teacher_learning_rate", None) + hparams.add_hparam("student_learning_rate", None) + + # Training parameters (stolen from ResNet) + hparams.batch_size = 128 + hparams.optimizer = "Momentum" + hparams.optimizer_momentum_momentum = 0.9 + hparams.optimizer_momentum_nesterov = True + hparams.weight_decay = 1e-4 + hparams.clip_grad_norm = 0.0 + # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.) + hparams.learning_rate = 0.4 + hparams.learning_rate_decay_scheme = "cosine" + # For image_imagenet224, 120k training steps, which effectively makes this a + # cosine decay (i.e. no cycles). + hparams.learning_rate_cosine_cycle_steps = 120000 + hparams.initializer = "normal_unit_scaling" + hparams.initializer_gain = 2. + + return hparams + + +@registry.register_hparams +def distill_resnet_32_to_15_cifar20x5(): + """Set of hyperparameters.""" + hparams = distill_base() + hparams.teacher_model = "resnet" + hparams.teacher_hparams = "resnet_cifar_32" + hparams.student_model = "resnet" + hparams.student_hparams = "resnet_cifar_15" + + hparams.optimizer_momentum_nesterov = True + # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.) + hparams.teacher_learning_rate = 0.25 * 128. * 8. / 256. + hparams.student_learning_rate = 0.2 * 128. * 8. / 256. + hparams.learning_rate_decay_scheme = "piecewise" + hparams.add_hparam("learning_rate_boundaries", [40000, 60000, 80000]) + hparams.add_hparam("learning_rate_multiples", [0.1, 0.01, 0.001]) + + hparams.task_balance = 0.28 + hparams.distill_temperature = 2.0 + + hparams.num_classes = 20 + + return hparams diff --git a/tensor2tensor/models/evolved_transformer.py b/tensor2tensor/models/evolved_transformer.py new file mode 100644 index 000000000..bac01a3cf --- /dev/null +++ b/tensor2tensor/models/evolved_transformer.py @@ -0,0 +1,833 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Evolved Transformer model. + +This implements the model described in arxiv.org/abs/1901.11117 . +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.models import transformer +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf + +# pylint: disable=g-direct-tensorflow-import +from tensorflow.python.ops import inplace_ops +# pylint: enable=g-direct-tensorflow-import + +_CONV_BRANCHES_NAME = "conv_branches" +_CONV_BRANCHES_FIRST_LAYER_NAME = _CONV_BRANCHES_NAME + "_first" +_CONV_BRANCHES_SECOND_LAYER_NAME = _CONV_BRANCHES_NAME + "_second" +_FIRST_ATTEND_TO_ENCODER_NAME = "first_attend_to_encoder" +_SECOND_ATTEND_TO_ENCODER_NAME = "second_attend_to_encoder" +_SIXTEEN_HEAD_ATTENTION_NAME = "16_head_self_attention" +_VANILLA_ATTENTION_NAME = "self_attention" + +_DECODER_LEFT_CONV_PADDING = 10 +_DECODER_RIGHT_CONV_PADDING = 6 +_DECODER_FINAL_CONV_PADDING = 6 + + +def _capped_double_heads(num_heads, cap=16): + """Calculate the number of heads for the attention layers with more heads. + + The number of heads will be twice the normal amount (num_heads), until it + reaches |cap| heads. + + Args: + num_heads: the num_heads hparam for the model. + cap: the maximum number of heads |num_heads| will be doubled to. + + Returns: + The number of heads for the attention layers that have more heads. + """ + return max(min(num_heads * 2, cap), num_heads) + + +@registry.register_model +class EvolvedTransformer(transformer.Transformer): + """The Evolved Transformer from arxiv.org/abs/1901.11117 .""" + + def __init__(self, *args, **kwargs): + super(EvolvedTransformer, self).__init__(*args, **kwargs) + self._encoder_function = evolved_transformer_encoder + self._decoder_function = evolved_transformer_decoder + self._init_cache_fn = init_evolved_transformer_cache + + # -1 means train all weights. + if self.hparams.get("num_trainable_top_decoder_layers", -1) < 0: + t2t_model.log_info( + "num_trainable_top_decoder_layers is negative so training all weights." + ) + elif self.hparams.shared_embedding_and_softmax_weights: + t2t_model.log_info( + "Setting hparams.shared_embedding_and_softmax_weights to False, " + "because hparam.num_trainable_top_decoder_layers is being used.") + + # When hparam.num_trainable_top_decoder_layers is set to N >= 0 we will + # freeze (not train) every variable except the N top decoder layers and + # the (pre-)softmax matrix. For any N >= 0 we will freeze the encoder and + # input/target embeddings. This also means we will not share the + # (pre-)softmax matrix with input/target embeddings otherwise they will be + # trained as well. + self.hparams.shared_embedding_and_softmax_weights = False + + # If hparams.shared_embedding_and_softmax_weights was previously True, + # then input and target embeddings were being shared. 
+      # To make sure the embeddings continue to be shared, we need to set
+      # hparams.shared_embedding to True.
+      self.hparams.shared_embedding = True
+      self._init_cache_fn = init_evolved_transformer_cache
+
+
+def evolved_transformer_encoder(encoder_input,
+                                encoder_self_attention_bias,
+                                hparams,
+                                name="encoder",
+                                nonpadding=None,
+                                save_weights_to=None,
+                                make_image_summary=True,
+                                losses=None,
+                                attn_bias_for_padding=None):
+  """Evolved Transformer encoder. See arxiv.org/abs/1901.11117 for more details.
+
+  Note: Pad remover is not supported.
+
+  Args:
+    encoder_input: a Tensor.
+    encoder_self_attention_bias: bias Tensor for self-attention (see
+      common_attention.attention_bias()).
+    hparams: hyperparameters for model.
+    name: a string.
+    nonpadding: optional Tensor with shape [batch_size, encoder_length]
+      indicating what positions are not padding. This must either be passed in,
+      which we do for "packed" datasets, or inferred from
+      encoder_self_attention_bias. The knowledge about padding is used for
+      pad_remover (efficiency) and to mask out padding in convolutional layers.
+    save_weights_to: an optional dictionary to capture attention weights for
+      visualization; the weights tensor will be appended there under a string
+      key created from the variable scope (including name).
+    make_image_summary: Whether to make an attention image summary.
+    losses: Not used.
+    attn_bias_for_padding: Padded attention bias in case a unidirectional
+      encoder is being used where future attention is masked.
+
+  Returns:
+    Tensor encoder output.
+  """
+  del losses
+
+  hidden_state = encoder_input
+  attention_dropout_broadcast_dims = (
+      common_layers.comma_separated_string_to_integer_list(
+          getattr(hparams, "attention_dropout_broadcast_dims", "")))
+
+  with tf.variable_scope(name):
+    if nonpadding is not None:
+      padding = 1.0 - nonpadding
+    else:
+      attention_bias = encoder_self_attention_bias
+      if attn_bias_for_padding is not None:
+        attention_bias = attn_bias_for_padding
+      # Only bfloat16 and float32 supported.
+      float_type = hparams.get("activation_dtype", "float32")
+      if float_type == "bfloat16":
+        cast_fn = tf.to_bfloat16
+      else:
+        assert float_type == "float32"
+        cast_fn = tf.to_float
+      padding = common_attention.attention_bias_to_padding(
+          attention_bias, cast_fn)
+      nonpadding = 1.0 - padding
+
+    for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
+      with tf.variable_scope("layer_%d" % layer):
+
+        with tf.variable_scope("gated_linear_unit"):
+
+          residual_state = hidden_state
+          hidden_state = common_layers.layer_preprocess(hidden_state, hparams)
+
+          values = common_layers.layers().Dense(
+              hparams.hidden_size)(hidden_state)
+          gates = common_layers.layers().Dense(
+              hparams.hidden_size, activation=tf.nn.sigmoid)(hidden_state)
+          hidden_state = values * gates
+
+          hidden_state = common_layers.layer_postprocess(
+              residual_state, hidden_state, hparams)
+
+        with tf.variable_scope("conv_branches"):
+
+          residual_state = hidden_state
+          hidden_state = common_layers.layer_preprocess(hidden_state, hparams)
+          # Mask padding from conv layers.
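+          # nonpadding has shape [batch_size, length]; broadcast it across the
+          # hidden dimension so padded positions are zeroed out before the
+          # convolutions below.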
+ mask = tf.tile( + tf.expand_dims(nonpadding, 2), [1, 1, hparams.hidden_size]) + hidden_state *= mask + + left_output_dim = int(hparams.hidden_size * 4) + left_state = common_layers.layers().Dense( + left_output_dim, activation=tf.nn.relu)(hidden_state) + left_state = tf.nn.dropout(left_state, + 1 - hparams.layer_prepostprocess_dropout) + + right_output_dim = int(hparams.hidden_size / 2) + right_state = common_layers.layers().Conv1D( + right_output_dim, + 3, + padding="SAME", + name="standard_conv_3x1", + activation=tf.nn.relu)(hidden_state) + right_state = tf.nn.dropout(right_state, + 1 - hparams.layer_prepostprocess_dropout) + + right_state = tf.pad( + right_state, + [[0, 0], [0, 0], [0, left_output_dim - right_output_dim]], + constant_values=0) + hidden_state = left_state + right_state + + hidden_state = common_layers.layer_preprocess(hidden_state, hparams) + # Mask padding from conv layer. + mask = tf.tile(tf.expand_dims(nonpadding, 2), [1, 1, left_output_dim]) + hidden_state *= mask + + separable_conv_9x1 = common_layers.layers().SeparableConv1D( + right_output_dim, 9, padding="SAME", name="separable_conv_9x1") + hidden_state = separable_conv_9x1(hidden_state) + hidden_state = tf.pad( + hidden_state, + [[0, 0], [0, 0], [0, hparams.hidden_size - right_output_dim]], + constant_values=0) + + hidden_state = common_layers.layer_postprocess( + residual_state, hidden_state, hparams) + + if hparams.get("et_encoder_self_attention", True): + with tf.variable_scope("self_attention"): + residual_state = hidden_state + hidden_state = common_layers.layer_preprocess(hidden_state, hparams) + + hidden_state = common_attention.multihead_attention( + hidden_state, + None, + encoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + max_relative_position=hparams.max_relative_position, + heads_share_relative_embedding=( + hparams.heads_share_relative_embedding), + add_relative_to_values=hparams.add_relative_to_values, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + max_length=hparams.get("max_length"), + vars_3d=hparams.get("attention_variables_3d"), + activation_dtype=hparams.get("activation_dtype", "float32"), + weight_dtype=hparams.get("weight_dtype", "float32")) + + hidden_state = common_layers.layer_postprocess( + residual_state, hidden_state, hparams) + + with tf.variable_scope("dense_layers"): + residual_state = hidden_state + hidden_state = common_layers.layer_preprocess(hidden_state, hparams) + + hidden_state = common_layers.layers().Dense( + int(hparams.hidden_size * 4), activation=tf.nn.relu)(hidden_state) + hidden_state = tf.nn.dropout(hidden_state, + 1 - hparams.layer_prepostprocess_dropout) + + hidden_state = common_layers.layers().Dense( + hparams.hidden_size)(hidden_state) + hidden_state = common_layers.layer_postprocess( + residual_state, hidden_state, hparams) + + # If normalization is done in layer_preprocess, then it should also be done + # on the output, since the output can grow very large, being the sum of + # a whole stack of unnormalized layer outputs. 
+ return common_layers.layer_preprocess(hidden_state, hparams) + + +def evolved_transformer_decoder(decoder_input, + encoder_output, + decoder_self_attention_bias, + encoder_decoder_attention_bias, + hparams, + cache=None, + decode_loop_step=None, + name="decoder", + nonpadding=None, + save_weights_to=None, + make_image_summary=True, + losses=None): + """Evolved Transformer decoder. See arxiv.org/abs/1901.11117 for more details. + + Args: + decoder_input: a Tensor. + encoder_output: a Tensor. + decoder_self_attention_bias: bias Tensor for self-attention (see + common_attention.attention_bias()). + encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention + (see common_attention.attention_bias()). + hparams: hyperparameters for model. + cache: dict, containing tensors which are the results of previous + layers, used for fast decoding. + decode_loop_step: An integer, step number of the decoding loop. Only used + for inference on TPU. + name: a string. + nonpadding: optional Tensor with shape [batch_size, encoder_length] + indicating what positions are not padding. This is used to mask out + padding in convolutional layers. We generally only need this mask for + "packed" datasets, because for ordinary datasets, no padding is ever + followed by nonpadding. + save_weights_to: an optional dictionary to capture attention weights for + visualization; the weights tensor will be appended there under a string + key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. + losses: Not supported. + + Returns: + Decoder output tensor. + """ + del losses + + num_trainable_top_decoder_layers = hparams.get( + "num_trainable_top_decoder_layers", -1) # -1 means train all weights. + + if num_trainable_top_decoder_layers >= 0: + encoder_output = tf.stop_gradient(encoder_output) + + attention_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "attention_dropout_broadcast_dims", ""))) + + with tf.variable_scope(name): + hidden_state = decoder_input + + num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers + for layer in range(num_layers): + if num_trainable_top_decoder_layers == num_layers - layer: + hidden_state = tf.stop_gradient(hidden_state) + layer_name = "layer_%d" % layer + layer_cache = cache[layer_name] if cache is not None else None + with tf.variable_scope(layer_name): + + with tf.variable_scope(_SIXTEEN_HEAD_ATTENTION_NAME): + residual_state = hidden_state + hidden_state = common_layers.layer_preprocess(hidden_state, hparams) + + attention_cache = layer_cache[ + _SIXTEEN_HEAD_ATTENTION_NAME] if layer_cache is not None else None + left_state = common_attention.multihead_attention( + hidden_state, + None, + decoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + _capped_double_heads(hparams.num_heads), + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + max_relative_position=hparams.max_relative_position, + heads_share_relative_embedding=( + hparams.heads_share_relative_embedding), + add_relative_to_values=hparams.add_relative_to_values, + save_weights_to=save_weights_to, + cache=attention_cache, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + max_length=hparams.get("max_length"), + decode_loop_step=decode_loop_step, + vars_3d=hparams.get("attention_variables_3d"), + 
activation_dtype=hparams.get("activation_dtype", "float32"), + weight_dtype=hparams.get("weight_dtype", "float32")) + + if encoder_output is not None: + with tf.variable_scope(_FIRST_ATTEND_TO_ENCODER_NAME): + attention_cache = ( + layer_cache[_FIRST_ATTEND_TO_ENCODER_NAME] + if layer_cache is not None else None) + right_state = common_attention.multihead_attention( + hidden_state, + encoder_output, + encoder_decoder_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + max_relative_position=hparams.max_relative_position, + heads_share_relative_embedding=( + hparams.heads_share_relative_embedding), + add_relative_to_values=hparams.add_relative_to_values, + save_weights_to=save_weights_to, + cache=attention_cache, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + max_length=hparams.get("max_length"), + vars_3d=hparams.get("attention_variables_3d"), + activation_dtype=hparams.get("activation_dtype", "float32"), + weight_dtype=hparams.get("weight_dtype", "float32")) + + left_state = tf.nn.dropout(left_state, + 1 - hparams.layer_prepostprocess_dropout) + right_state = tf.nn.dropout( + right_state, 1 - hparams.layer_prepostprocess_dropout) + + hidden_state = residual_state + left_state + right_state + + else: + hidden_state = common_layers.layer_postprocess( + residual_state, left_state, hparams) + + with tf.variable_scope(_CONV_BRANCHES_NAME): + residual_state = hidden_state + hidden_state = common_layers.layer_preprocess(hidden_state, hparams) + + if nonpadding is not None: + # Mask padding from conv layers. + mask = tf.tile( + tf.expand_dims(nonpadding, 2), [1, 1, hparams.hidden_size]) + hidden_state *= mask + + if layer_cache: + if decode_loop_step is None: + hidden_state = layer_cache[ + _CONV_BRANCHES_FIRST_LAYER_NAME] = tf.concat( + [ + layer_cache[_CONV_BRANCHES_FIRST_LAYER_NAME], + hidden_state + ], + axis=1)[:, -1 * _DECODER_LEFT_CONV_PADDING - 1:, :] + left_state = hidden_state + right_state = hidden_state[:, _DECODER_LEFT_CONV_PADDING - + _DECODER_RIGHT_CONV_PADDING:, :] + + else: + # Inplace update is required for inference on TPU. + # Inplace_ops only supports inplace_update on the first dimension. + tmp = tf.transpose( + layer_cache[_CONV_BRANCHES_FIRST_LAYER_NAME], perm=[1, 0, 2]) + tmp = tf.expand_dims(tmp, axis=1) + tmp = inplace_ops.alias_inplace_update( + tmp, + decode_loop_step * tf.shape(hidden_state)[1] + + _DECODER_LEFT_CONV_PADDING, + tf.transpose(hidden_state, perm=[1, 0, 2])) + tmp = tf.squeeze(tmp, axis=1) + hidden_state = layer_cache[ + _CONV_BRANCHES_FIRST_LAYER_NAME] = tf.transpose( + tmp, perm=[1, 0, 2]) + + batch_size = hidden_state.shape.as_list()[0] + left_state = tf.slice(hidden_state, [0, decode_loop_step, 0], [ + batch_size, _DECODER_LEFT_CONV_PADDING + 1, + hparams.hidden_size + ]) + right_state = tf.slice(hidden_state, [ + 0, decode_loop_step + _DECODER_LEFT_CONV_PADDING - + _DECODER_RIGHT_CONV_PADDING, 0 + ], [ + batch_size, _DECODER_RIGHT_CONV_PADDING + 1, + hparams.hidden_size + ]) + + else: # No caching. 
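+            # No cache: left-pad the sequence so the "VALID" separable
+            # convolutions below only see current and earlier positions
+            # (i.e. they stay causal).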
+ left_state = tf.pad( + hidden_state, + paddings=[[0, 0], [_DECODER_LEFT_CONV_PADDING, 0], [0, 0]]) + right_state = tf.pad( + hidden_state, + paddings=[[0, 0], [_DECODER_RIGHT_CONV_PADDING, 0], [0, 0]]) + + left_output_dim = int(hparams.hidden_size * 2) + separable_conv_11x1 = tf.layers.SeparableConv1D( + left_output_dim, + 11, + padding="VALID", + name="separable_conv11x1", + activation=tf.nn.relu) + left_state = separable_conv_11x1.apply(left_state) + left_state = tf.nn.dropout(left_state, + 1 - hparams.layer_prepostprocess_dropout) + + right_output_dim = int(hparams.hidden_size / 2) + separable_conv_7x1_1 = tf.layers.SeparableConv1D( + right_output_dim, 7, padding="VALID", name="separable_conv_7x1_1") + right_state = separable_conv_7x1_1.apply(right_state) + right_state = tf.nn.dropout(right_state, + 1 - hparams.layer_prepostprocess_dropout) + right_state = tf.pad( + right_state, + [[0, 0], [0, 0], [0, left_output_dim - right_output_dim]], + constant_values=0) + + hidden_state = left_state + right_state + + hidden_state = common_layers.layer_preprocess(hidden_state, hparams) + if nonpadding is not None: + # Mask padding from conv layers. + mask = tf.tile( + tf.expand_dims(nonpadding, 2), [1, 1, hparams.hidden_size * 2]) + hidden_state *= mask + + if layer_cache: + if decode_loop_step is None: + hidden_state = layer_cache[ + _CONV_BRANCHES_SECOND_LAYER_NAME] = tf.concat( + [ + layer_cache[_CONV_BRANCHES_SECOND_LAYER_NAME], + hidden_state + ], + axis=1)[:, -1 * _DECODER_FINAL_CONV_PADDING - 1:, :] + + else: + # Inplace update is required for inference on TPU. + # Inplace_ops only supports inplace_update on the first dimension. + tmp = tf.transpose( + layer_cache[_CONV_BRANCHES_SECOND_LAYER_NAME], perm=[1, 0, 2]) + tmp = tf.expand_dims(tmp, axis=1) + tmp = inplace_ops.alias_inplace_update( + tmp, (decode_loop_step + _DECODER_FINAL_CONV_PADDING) * + tf.shape(hidden_state)[1], + tf.transpose(hidden_state, perm=[1, 0, 2])) + tmp = tf.squeeze(tmp, axis=1) + hidden_state = layer_cache[ + _CONV_BRANCHES_SECOND_LAYER_NAME] = tf.transpose( + tmp, perm=[1, 0, 2]) + + batch_size = hidden_state.shape.as_list()[0] + hidden_state = tf.slice(hidden_state, [0, decode_loop_step, 0], [ + batch_size, _DECODER_FINAL_CONV_PADDING + 1, + hparams.hidden_size * 2 + ]) + else: + hidden_state = tf.pad( + hidden_state, + paddings=[[0, 0], [_DECODER_FINAL_CONV_PADDING, 0], [0, 0]]) + + separable_conv_7x1_2 = tf.layers.SeparableConv1D( + hparams.hidden_size, + 7, + padding="VALID", + name="separable_conv_7x1_2") + hidden_state = separable_conv_7x1_2.apply(hidden_state) + + hidden_state = common_layers.layer_postprocess( + residual_state, hidden_state, hparams) + + with tf.variable_scope(_VANILLA_ATTENTION_NAME): + residual_state = hidden_state + hidden_state = common_layers.layer_preprocess(hidden_state, hparams) + + attention_cache = layer_cache[ + _VANILLA_ATTENTION_NAME] if layer_cache is not None else None + hidden_state = common_attention.multihead_attention( + hidden_state, + None, + decoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + max_relative_position=hparams.max_relative_position, + heads_share_relative_embedding=( + hparams.heads_share_relative_embedding), + add_relative_to_values=hparams.add_relative_to_values, + save_weights_to=save_weights_to, + cache=attention_cache, + 
make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + max_length=hparams.get("max_length"), + decode_loop_step=decode_loop_step, + vars_3d=hparams.get("attention_variables_3d"), + activation_dtype=hparams.get("activation_dtype", "float32"), + weight_dtype=hparams.get("weight_dtype", "float32")) + hidden_state = common_layers.layer_postprocess( + residual_state, hidden_state, hparams) + + if encoder_output is not None: + with tf.variable_scope(_SECOND_ATTEND_TO_ENCODER_NAME): + residual_state = hidden_state + hidden_state = common_layers.layer_preprocess(hidden_state, hparams) + + attention_cache = ( + layer_cache[_SECOND_ATTEND_TO_ENCODER_NAME] + if layer_cache is not None else None) + hidden_state = common_attention.multihead_attention( + hidden_state, + encoder_output, + encoder_decoder_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + max_relative_position=hparams.max_relative_position, + heads_share_relative_embedding=( + hparams.heads_share_relative_embedding), + add_relative_to_values=hparams.add_relative_to_values, + save_weights_to=save_weights_to, + cache=attention_cache, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + max_length=hparams.get("max_length"), + vars_3d=hparams.get("attention_variables_3d"), + activation_dtype=hparams.get("activation_dtype", "float32"), + weight_dtype=hparams.get("weight_dtype", "float32")) + hidden_state = common_layers.layer_postprocess( + residual_state, hidden_state, hparams) + + with tf.variable_scope("dense_layers"): + residual_state = hidden_state + hidden_state = common_layers.layer_preprocess(hidden_state, hparams) + + hidden_state = tf.layers.dense( + hidden_state, + int(hparams.hidden_size * 4), + activation=tf.nn.swish) + hidden_state = tf.nn.dropout(hidden_state, + 1 - hparams.layer_prepostprocess_dropout) + + hidden_state = common_layers.layer_preprocess(hidden_state, hparams) + + hidden_state = tf.layers.dense(hidden_state, hparams.hidden_size) + hidden_state = common_layers.layer_postprocess( + residual_state, hidden_state, hparams) + + decoder_output = common_layers.layer_preprocess(hidden_state, hparams) + if num_trainable_top_decoder_layers == 0: + decoder_output = tf.stop_gradient(decoder_output) + return decoder_output + + +def _add_attend_to_encoder_cache(cache, attention_name, hparams, num_layers, + key_channels, value_channels, + vars_3d_num_heads, scope_prefix, + encoder_output): + """Add attend-to-encoder layers to cache.""" + for layer in range(num_layers): + layer_name = "layer_%d" % layer + with tf.variable_scope("%sdecoder/%s/%s/multihead_attention" % + (scope_prefix, layer_name, attention_name)): + k_encdec = common_attention.compute_attention_component( + encoder_output, + key_channels, + name="k", + vars_3d_num_heads=vars_3d_num_heads) + k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads) + v_encdec = common_attention.compute_attention_component( + encoder_output, + value_channels, + name="v", + vars_3d_num_heads=vars_3d_num_heads) + v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads) + cache[layer_name][attention_name] = { + "k_encdec": k_encdec, + "v_encdec": v_encdec + } + return cache + + +def init_evolved_transformer_cache(cache, hparams, batch_size, + attention_init_length, encoder_output, + encoder_decoder_attention_bias, + 
scope_prefix): + """Create the initial cache for Evolved Transformer fast decoding.""" + key_channels = hparams.attention_key_channels or hparams.hidden_size + value_channels = hparams.attention_value_channels or hparams.hidden_size + num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers + vars_3d_num_heads = ( + hparams.num_heads if hparams.get("attention_variables_3d") else 0) + + # Add self-attentions. + if cache is None: + cache = {} + cache.update({ + "layer_%d" % layer: { # pylint: disable=g-complex-comprehension + _SIXTEEN_HEAD_ATTENTION_NAME: { + "k": + common_attention.split_heads( + tf.zeros( + [batch_size, attention_init_length, key_channels]), + _capped_double_heads(hparams.num_heads)), + "v": + common_attention.split_heads( + tf.zeros( + [batch_size, attention_init_length, value_channels]), + _capped_double_heads(hparams.num_heads)), + }, + _VANILLA_ATTENTION_NAME: { + "k": + common_attention.split_heads( + tf.zeros( + [batch_size, attention_init_length, key_channels]), + hparams.num_heads), + "v": + common_attention.split_heads( + tf.zeros( + [batch_size, attention_init_length, value_channels]), + hparams.num_heads), + } + } for layer in range(num_layers) + }) + + # Add branched layers. Pad with additional zeros for causal convolution. + for layer in range(num_layers): + cache["layer_%d" % layer][_CONV_BRANCHES_FIRST_LAYER_NAME] = tf.zeros([ + batch_size, attention_init_length + _DECODER_LEFT_CONV_PADDING, + hparams.hidden_size + ]) + cache["layer_%d" % layer][_CONV_BRANCHES_SECOND_LAYER_NAME] = tf.zeros([ + batch_size, attention_init_length + _DECODER_FINAL_CONV_PADDING, + hparams.hidden_size * 2 + ]) + + # Add encoder embedding attentions. + if encoder_output is not None: + cache = _add_attend_to_encoder_cache( + cache=cache, + attention_name=_FIRST_ATTEND_TO_ENCODER_NAME, + hparams=hparams, + num_layers=num_layers, + key_channels=key_channels, + value_channels=value_channels, + vars_3d_num_heads=vars_3d_num_heads, + scope_prefix=scope_prefix, + encoder_output=encoder_output) + cache = _add_attend_to_encoder_cache( + cache=cache, + attention_name=_SECOND_ATTEND_TO_ENCODER_NAME, + hparams=hparams, + num_layers=num_layers, + key_channels=key_channels, + value_channels=value_channels, + vars_3d_num_heads=vars_3d_num_heads, + scope_prefix=scope_prefix, + encoder_output=encoder_output) + + cache["encoder_output"] = encoder_output + cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias + + return cache + + +# TODO(davidso): Update optimizer, learning rate, and decay to match paper. +def add_evolved_transformer_hparams(hparams): + """Add Evolved Transformer hparams. + + Note: These are for the Adam optimizer, not the Adafactor optimizer used in + the paper. + + Args: + hparams: Current hparams. + + Returns: + hparams updated with Evolved Transformer values. + """ + # Evolved Transformer "layers" are twice as deep as Transformer, so roughly + # halve the number that we use. These numbers are taken from + # arxiv.org/abs/1901.11117 . + hparams.num_encoder_layers = 3 + hparams.num_decoder_layers = 4 + + # Learning rate and decay scheme that mimics the transformer Adam config, + # but with cosine decay instead of rsqrt. 
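+  # Dividing by sqrt(warmup_steps) keeps the peak learning rate (reached at
+  # the end of the linear warmup) roughly where the rsqrt schedule it replaces
+  # would have been at that step.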
+ hparams.learning_rate_constant /= hparams.learning_rate_warmup_steps ** 0.5 + hparams.learning_rate_schedule = ( + "constant*linear_warmup*single_cycle_cos_decay*rsqrt_hidden_size") + return hparams + + +@registry.register_hparams +def evolved_transformer_tiny(): + """Base parameters for Evolved Transformer model.""" + hparams = add_evolved_transformer_hparams(transformer.transformer_tiny()) + hparams.learning_rate_schedule = ( + "constant*single_cycle_cos_decay") + return hparams + + +@registry.register_hparams +def evolved_transformer_base(): + """Base parameters for Evolved Transformer model.""" + return add_evolved_transformer_hparams(transformer.transformer_base()) + + +@registry.register_hparams +def evolved_transformer_big(): + """Big parameters for Evolved Transformer model on WMT.""" + return add_evolved_transformer_hparams(transformer.transformer_big()) + + +@registry.register_hparams +def evolved_transformer_deep(): + """Deep parameters for Evolved Transformer model on WMT.""" + hparams = add_evolved_transformer_hparams(transformer.transformer_big()) + hparams.num_encoder_layers = 9 + hparams.num_decoder_layers = 10 + hparams.hidden_size = 640 + return hparams + + +@registry.register_hparams +def evolved_transformer_base_tpu(): + """Base parameters for Evolved Transformer model on TPU.""" + hparams = add_evolved_transformer_hparams(transformer.transformer_tpu()) + hparams.learning_rate_constant = 1 / hparams.learning_rate_warmup_steps ** 0.5 + hparams.learning_rate_schedule = ( + "constant*single_cycle_cos_decay") + return hparams + + +@registry.register_hparams +def evolved_transformer_big_tpu(): + """Big parameters for Evolved Transformer model on TPU.""" + hparams = add_evolved_transformer_hparams(transformer.transformer_big_tpu()) + hparams.learning_rate_constant = 1 / hparams.learning_rate_warmup_steps ** 0.5 + hparams.learning_rate_schedule = ( + "constant*single_cycle_cos_decay") + return hparams + + +@registry.register_hparams +def evolved_transformer_tpu_basic(): + """Basic Seq2Seq TPU hyper-parameters.""" + hparams = transformer.transformer_big_tpu() + hparams.add_hparam("print_vars", False) + hparams.batch_size = 8192 + hparams.max_length = 256 + + # N < 0 means all weights in the model are trainable. + # N >= 0 means all weights are frozen except N top decoder layers + + # (pre-)softmax matrix (that projects from hidden size to vocab size). + hparams.add_hparam("num_trainable_top_decoder_layers", -1) + + return hparams diff --git a/tensor2tensor/models/evolved_transformer_test.py b/tensor2tensor/models/evolved_transformer_test.py new file mode 100644 index 000000000..388769918 --- /dev/null +++ b/tensor2tensor/models/evolved_transformer_test.py @@ -0,0 +1,756 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for the Evolved Transformer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.models import evolved_transformer +from tensor2tensor.models import transformer + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +BATCH_SIZE = 3 +INPUT_LENGTH = 5 +TARGET_LENGTH = 7 +VOCAB_SIZE = 10 +DECODE_LENGTH = 3 + + +def print_vars(all_vars=None): + """Print info about a list of variables.""" + if not all_vars: + all_vars = tf.trainable_variables() + tf.logging.info("Format: , , <(soft) device placement>") + for var in all_vars: + tf.logging.info(" %s, %s, %s" % + (var.name, str(var.get_shape()), var.op.device)) + + +def get_var(name): + """Get trainable variable by name.""" + variables = [var for var in tf.trainable_variables() if var.name == name] + if len(variables) == 1: + return variables[0] + raise ValueError("`name` must match exactly one variable. '%s' matched %d" % + (name, len(variables))) + + +def get_vars(names): + """Get trainable variables by name.""" + return [get_var(name) for name in names] + + +def assert_with_message(assert_method, a, b, message): + try: + assert_method(a, b) + except AssertionError as e: + tf.logging.error(message) + raise e + + +def get_model(hparams, has_input=True, num_decoder_layers=1): + hparams.layer_prepostprocess_dropout = 0.0 + hparams.hidden_size = 4 + hparams.num_heads = 1 + hparams.num_encoder_layers = 1 + hparams.num_decoder_layers = num_decoder_layers + + p_hparams = problem_hparams.test_problem_hparams(VOCAB_SIZE, VOCAB_SIZE, + hparams) + if not has_input: + del p_hparams.modality["inputs"] + hparams.problem_hparams = p_hparams + + inputs = np.random.randint(VOCAB_SIZE, size=(BATCH_SIZE, INPUT_LENGTH, 1, 1)) + targets = np.random.randint( + VOCAB_SIZE, size=(BATCH_SIZE, TARGET_LENGTH, 1, 1)) + features = { + "targets": tf.constant(targets, dtype=tf.int32, name="targets"), + "target_space_id": tf.constant(1, dtype=tf.int32), + } + if has_input: + features["inputs"] = tf.constant(inputs, dtype=tf.int32, name="inputs") + + return (evolved_transformer.EvolvedTransformer(hparams, + tf_estimator.ModeKeys.TRAIN, + p_hparams), features) + + +class EvolvedTransformerTest(tf.test.TestCase): + + def testEvolvedTransformer(self): + model, features = get_model(hparams=transformer.transformer_tiny()) + logits, _ = model(features) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + res = session.run(logits) + self.assertEqual(res.shape, (BATCH_SIZE, TARGET_LENGTH, 1, 1, VOCAB_SIZE)) + + def testSlowVsFast(self): + tf.set_random_seed(1234) + model, features = get_model(transformer.transformer_tiny()) + + decode_length = DECODE_LENGTH + + out_logits, _ = model(features) + out_logits = tf.squeeze(out_logits, axis=[2, 3]) + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]), + labels=tf.reshape(features["targets"], [-1])) + loss = tf.reduce_mean(loss) + apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss) + + with self.test_session(): + tf.global_variables_initializer().run() + for _ in range(10): + apply_grad.run() + + model.set_mode(tf_estimator.ModeKeys.PREDICT) + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + greedy_result = model._slow_greedy_infer(features, + decode_length)["outputs"] + greedy_result = tf.squeeze(greedy_result, 
axis=[2, 3]) + + fast_result = model._greedy_infer(features, decode_length)["outputs"] + + with self.test_session(): + greedy_res = greedy_result.eval() + fast_res = fast_result.eval() + + self.assertEqual(fast_res.shape, (BATCH_SIZE, INPUT_LENGTH + decode_length)) + self.assertAllClose(greedy_res, fast_res) + + def testSlowVsFastNoInput(self): + model, features = get_model(transformer.transformer_tiny(), has_input=False) + + decode_length = DECODE_LENGTH + + out_logits, _ = model(features) + out_logits = tf.squeeze(out_logits, axis=[2, 3]) + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]), + labels=tf.reshape(features["targets"], [-1])) + loss = tf.reduce_mean(loss) + apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss) + + with self.test_session(): + tf.global_variables_initializer().run() + for _ in range(10): + apply_grad.run() + + model.set_mode(tf_estimator.ModeKeys.PREDICT) + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + slow_result = model._slow_greedy_infer(features, decode_length)["outputs"] + slow_result = tf.squeeze(slow_result, axis=[2, 3]) + + fast_result = model._greedy_infer(features, decode_length)["outputs"] + + with self.test_session(): + slow_res = slow_result.eval() + fast_res = fast_result.eval() + + self.assertEqual(slow_res.shape, (BATCH_SIZE, decode_length)) + self.assertAllClose(slow_res, fast_res) + + def testBeamVsFast(self): + model, features = get_model(transformer.transformer_tiny()) + + decode_length = DECODE_LENGTH + + out_logits, _ = model(features) + out_logits = tf.squeeze(out_logits, axis=[2, 3]) + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]), + labels=tf.reshape(features["targets"], [-1])) + loss = tf.reduce_mean(loss) + apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss) + + with self.test_session(): + tf.global_variables_initializer().run() + for _ in range(10): + apply_grad.run() + + model.set_mode(tf_estimator.ModeKeys.PREDICT) + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + beam_result = model._beam_decode_slow( + features, decode_length, beam_size=4, top_beams=1, + alpha=1.0)["outputs"] + + fast_result = model._beam_decode( + features, decode_length, beam_size=4, top_beams=1, + alpha=1.0)["outputs"] + + with self.test_session(): + beam_res = beam_result.eval() + fast_res = fast_result.eval() + + self.assertAllClose(beam_res, fast_res) + + def _create_greedy_infer_model(self): + """Creates model for greedy inference testing. + + Returns: + model: A t2t model. + features: An map of string to tensor. 
+ """ + model, features = get_model(transformer.transformer_tiny()) + + out_logits, _ = model(features) + out_logits = tf.squeeze(out_logits, axis=[2, 3]) + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]), + labels=tf.reshape(features["targets"], [-1])) + loss = tf.reduce_mean(loss) + apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss) + + with self.test_session(): + tf.global_variables_initializer().run() + for _ in range(10): + apply_grad.run() + + model.set_mode(tf_estimator.ModeKeys.PREDICT) + + return model, features + + def testGreedySlowTPUVsNonTPU(self): + decode_length = DECODE_LENGTH + + model, features = self._create_greedy_infer_model() + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + slow_result_non_tpu = model._slow_greedy_infer(features, + decode_length)["outputs"] + slow_result_non_tpu = tf.squeeze(slow_result_non_tpu, axis=[2, 3]) + + slow_result_tpu = model._slow_greedy_infer_tpu(features, + decode_length)["outputs"] + slow_result_tpu = tf.squeeze(slow_result_tpu, axis=[2, 3]) + + with self.test_session(): + slow_non_tpu_res = slow_result_non_tpu.eval() + slow_tpu_res = slow_result_tpu.eval() + + self.assertEqual(slow_tpu_res.shape, + (BATCH_SIZE, INPUT_LENGTH + decode_length)) + self.assertAllClose(slow_tpu_res, slow_non_tpu_res) + + def testGreedyFastTPUVsNonTPU(self): + tf.set_random_seed(1234) + decode_length = DECODE_LENGTH + + model, features = self._create_greedy_infer_model() + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + fast_result_non_tpu = model._greedy_infer( + features, decode_length, use_tpu=False)["outputs"] + + fast_result_tpu = model._greedy_infer( + features, decode_length, use_tpu=True)["outputs"] + + with self.test_session(): + fast_non_tpu_res = fast_result_non_tpu.eval() + fast_tpu_res = fast_result_tpu.eval() + + self.assertEqual(fast_tpu_res.shape, + (BATCH_SIZE, INPUT_LENGTH + decode_length)) + self.assertAllClose(fast_tpu_res, fast_non_tpu_res) + + def testGreedyTPUSlowVsFast(self): + tf.set_random_seed(1234) + decode_length = DECODE_LENGTH + + model, features = self._create_greedy_infer_model() + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + slow_result = model._slow_greedy_infer_tpu(features, + decode_length)["outputs"] + slow_result = tf.squeeze(slow_result, axis=[2, 3]) + + fast_result = model._greedy_infer( + features, decode_length, use_tpu=True)["outputs"] + + with self.test_session(): + slow_res = slow_result.eval() + fast_res = fast_result.eval() + + self.assertEqual(fast_res.shape, (BATCH_SIZE, INPUT_LENGTH + decode_length)) + self.assertAllClose(fast_res, slow_res) + + def testFrozenWeightsUnchangedByTraining(self): + # Arrange. 
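+    # Freeze all but the top decoder layer (1 of 3) and check that the frozen
+    # variables are left untouched by a training step.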
+ hparams = transformer.transformer_tiny() + hparams.add_hparam("num_trainable_top_decoder_layers", 1) + model, features = get_model(hparams, num_decoder_layers=3) + out_logits, _ = model(features) + out_logits = tf.squeeze(out_logits, axis=[2, 3]) + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]), + labels=tf.reshape(features["targets"], [-1])) + loss = tf.reduce_mean(loss) + apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss) + frozen_names = [ + "evolved_transformer/symbol_modality_10_4/shared/weights_0:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_1:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_2:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_3:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_4:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_5:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_6:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_7:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_8:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_9:0", + "evolved_transformer/body/target_space_embedding/kernel:0", + "evolved_transformer/body/encoder/layer_0/gated_linear_unit/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/encoder/layer_0/gated_linear_unit/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/encoder/layer_0/gated_linear_unit/dense/kernel:0", + "evolved_transformer/body/encoder/layer_0/gated_linear_unit/dense/bias:0", + "evolved_transformer/body/encoder/layer_0/gated_linear_unit/dense_1/kernel:0", + "evolved_transformer/body/encoder/layer_0/gated_linear_unit/dense_1/bias:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/dense/kernel:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/dense/bias:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/standard_conv_3x1/kernel:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/standard_conv_3x1/bias:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/separable_conv_9x1/depthwise_kernel:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/separable_conv_9x1/pointwise_kernel:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/separable_conv_9x1/bias:0", + "evolved_transformer/body/encoder/layer_0/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/encoder/layer_0/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/encoder/layer_0/self_attention/multihead_attention/q/kernel:0", + "evolved_transformer/body/encoder/layer_0/self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/encoder/layer_0/self_attention/multihead_attention/v/kernel:0", + "evolved_transformer/body/encoder/layer_0/self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/encoder/layer_0/dense_layers/layer_prepostprocess/layer_norm/layer_norm_scale:0", + 
"evolved_transformer/body/encoder/layer_0/dense_layers/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/encoder/layer_0/dense_layers/dense/kernel:0", + "evolved_transformer/body/encoder/layer_0/dense_layers/dense/bias:0", + "evolved_transformer/body/encoder/layer_0/dense_layers/dense_1/kernel:0", + "evolved_transformer/body/encoder/layer_0/dense_layers/dense_1/bias:0", + "evolved_transformer/body/encoder/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/encoder/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/16_head_self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/16_head_self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/16_head_self_attention/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_0/16_head_self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_0/16_head_self_attention/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_0/16_head_self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_0/first_attend_to_encoder/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_0/first_attend_to_encoder/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_0/first_attend_to_encoder/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_0/first_attend_to_encoder/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv11x1/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv11x1/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv11x1/bias:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv_7x1_1/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv_7x1_1/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv_7x1_1/bias:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv_7x1_2/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv_7x1_2/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv_7x1_2/bias:0", + "evolved_transformer/body/decoder/layer_0/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/self_attention/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_0/self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_0/self_attention/multihead_attention/v/kernel:0", + 
"evolved_transformer/body/decoder/layer_0/self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_0/second_attend_to_encoder/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/second_attend_to_encoder/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/second_attend_to_encoder/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_0/second_attend_to_encoder/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_0/second_attend_to_encoder/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_0/second_attend_to_encoder/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/dense/kernel:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/dense/bias:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/dense_1/kernel:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/dense_1/bias:0", + "evolved_transformer/body/decoder/layer_1/16_head_self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_1/16_head_self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/16_head_self_attention/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_1/16_head_self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_1/16_head_self_attention/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_1/16_head_self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_1/first_attend_to_encoder/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_1/first_attend_to_encoder/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_1/first_attend_to_encoder/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_1/first_attend_to_encoder/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv11x1/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv11x1/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv11x1/bias:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv_7x1_1/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv_7x1_1/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv_7x1_1/bias:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + 
"evolved_transformer/body/decoder/layer_1/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv_7x1_2/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv_7x1_2/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv_7x1_2/bias:0", + "evolved_transformer/body/decoder/layer_1/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_1/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/self_attention/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_1/self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_1/self_attention/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_1/self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_1/second_attend_to_encoder/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_1/second_attend_to_encoder/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/second_attend_to_encoder/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_1/second_attend_to_encoder/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_1/second_attend_to_encoder/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_1/second_attend_to_encoder/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/dense/kernel:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/dense/bias:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/dense_1/kernel:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/dense_1/bias:0", + ] + train_names = [ + "evolved_transformer/body/decoder/layer_2/16_head_self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/16_head_self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/16_head_self_attention/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_2/16_head_self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_2/16_head_self_attention/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_2/16_head_self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_2/first_attend_to_encoder/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_2/first_attend_to_encoder/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_2/first_attend_to_encoder/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_2/first_attend_to_encoder/multihead_attention/output_transform/kernel:0", + 
"evolved_transformer/body/decoder/layer_2/conv_branches/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv11x1/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv11x1/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv11x1/bias:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv_7x1_1/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv_7x1_1/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv_7x1_1/bias:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv_7x1_2/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv_7x1_2/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv_7x1_2/bias:0", + "evolved_transformer/body/decoder/layer_2/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/self_attention/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_2/self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_2/self_attention/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_2/self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_2/second_attend_to_encoder/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/second_attend_to_encoder/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/second_attend_to_encoder/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_2/second_attend_to_encoder/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_2/second_attend_to_encoder/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_2/second_attend_to_encoder/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/dense/kernel:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/dense/bias:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/dense_1/kernel:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/dense_1/bias:0", + "evolved_transformer/body/decoder/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/symbol_modality_10_4/softmax/weights_1:0", 
+ "evolved_transformer/symbol_modality_10_4/softmax/weights_2:0", + "evolved_transformer/symbol_modality_10_4/softmax/weights_3:0", + "evolved_transformer/symbol_modality_10_4/softmax/weights_4:0", + "evolved_transformer/symbol_modality_10_4/softmax/weights_5:0", + "evolved_transformer/symbol_modality_10_4/softmax/weights_6:0", + "evolved_transformer/symbol_modality_10_4/softmax/weights_7:0", + "evolved_transformer/symbol_modality_10_4/softmax/weights_8:0", + "evolved_transformer/symbol_modality_10_4/softmax/weights_9:0", + ] + frozen_vars = get_vars(frozen_names) + train_vars = get_vars(train_names) + print_vars() + + # Act. + with self.test_session() as session: + tf.global_variables_initializer().run() + frozen_values_before = session.run(frozen_vars) + train_values_before = session.run(train_vars) + for _ in range(10): # Arbitrary number of training steps. + apply_grad.run() + frozen_values_after = session.run(frozen_vars) + train_values_after = session.run(train_vars) + + # Assert. + self.assertTrue( + model._original_hparams.shared_embedding_and_softmax_weights) + self.assertFalse(model.hparams.shared_embedding_and_softmax_weights) + self.assertTrue(model.hparams.shared_embedding) + for name, before, after in zip(frozen_names, frozen_values_before, + frozen_values_after): + assert_with_message( + self.assertAllClose, before, after, + "%s should be frozen, but changed after training." % name) + for name, before, after in zip(train_names, train_values_before, + train_values_after): + assert_with_message( + self.assertNotAllClose, before, after, + "%s should be trainable, but did not change after training." % name) + + def testAllWeightsTrainableByDefault(self): + # Arrange. + model, features = get_model( + transformer.transformer_tiny(), num_decoder_layers=3) + out_logits, _ = model(features) + out_logits = tf.squeeze(out_logits, axis=[2, 3]) + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]), + labels=tf.reshape(features["targets"], [-1])) + loss = tf.reduce_mean(loss) + apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss) + var_names = [ + "evolved_transformer/symbol_modality_10_4/shared/weights_0:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_1:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_2:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_3:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_4:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_5:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_6:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_7:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_8:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_9:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_10:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_11:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_12:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_13:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_14:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_15:0", + "evolved_transformer/body/target_space_embedding/kernel:0", + "evolved_transformer/body/encoder/layer_0/gated_linear_unit/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/encoder/layer_0/gated_linear_unit/layer_prepostprocess/layer_norm/layer_norm_bias:0", + 
"evolved_transformer/body/encoder/layer_0/gated_linear_unit/dense/kernel:0", + "evolved_transformer/body/encoder/layer_0/gated_linear_unit/dense/bias:0", + "evolved_transformer/body/encoder/layer_0/gated_linear_unit/dense_1/kernel:0", + "evolved_transformer/body/encoder/layer_0/gated_linear_unit/dense_1/bias:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/dense/kernel:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/dense/bias:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/standard_conv_3x1/kernel:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/standard_conv_3x1/bias:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/separable_conv_9x1/depthwise_kernel:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/separable_conv_9x1/pointwise_kernel:0", + "evolved_transformer/body/encoder/layer_0/conv_branches/separable_conv_9x1/bias:0", + "evolved_transformer/body/encoder/layer_0/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/encoder/layer_0/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/encoder/layer_0/self_attention/multihead_attention/q/kernel:0", + "evolved_transformer/body/encoder/layer_0/self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/encoder/layer_0/self_attention/multihead_attention/v/kernel:0", + "evolved_transformer/body/encoder/layer_0/self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/encoder/layer_0/dense_layers/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/encoder/layer_0/dense_layers/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/encoder/layer_0/dense_layers/dense/kernel:0", + "evolved_transformer/body/encoder/layer_0/dense_layers/dense/bias:0", + "evolved_transformer/body/encoder/layer_0/dense_layers/dense_1/kernel:0", + "evolved_transformer/body/encoder/layer_0/dense_layers/dense_1/bias:0", + "evolved_transformer/body/encoder/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/encoder/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/16_head_self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/16_head_self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/16_head_self_attention/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_0/16_head_self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_0/16_head_self_attention/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_0/16_head_self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_0/first_attend_to_encoder/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_0/first_attend_to_encoder/multihead_attention/k/kernel:0", + 
"evolved_transformer/body/decoder/layer_0/first_attend_to_encoder/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_0/first_attend_to_encoder/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv11x1/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv11x1/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv11x1/bias:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv_7x1_1/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv_7x1_1/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv_7x1_1/bias:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv_7x1_2/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv_7x1_2/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_0/conv_branches/separable_conv_7x1_2/bias:0", + "evolved_transformer/body/decoder/layer_0/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/self_attention/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_0/self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_0/self_attention/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_0/self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_0/second_attend_to_encoder/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/second_attend_to_encoder/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/second_attend_to_encoder/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_0/second_attend_to_encoder/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_0/second_attend_to_encoder/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_0/second_attend_to_encoder/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/dense/kernel:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/dense/bias:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/dense_1/kernel:0", + "evolved_transformer/body/decoder/layer_0/dense_layers/dense_1/bias:0", + 
"evolved_transformer/body/decoder/layer_1/16_head_self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_1/16_head_self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/16_head_self_attention/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_1/16_head_self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_1/16_head_self_attention/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_1/16_head_self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_1/first_attend_to_encoder/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_1/first_attend_to_encoder/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_1/first_attend_to_encoder/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_1/first_attend_to_encoder/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv11x1/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv11x1/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv11x1/bias:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv_7x1_1/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv_7x1_1/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv_7x1_1/bias:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv_7x1_2/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv_7x1_2/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_1/conv_branches/separable_conv_7x1_2/bias:0", + "evolved_transformer/body/decoder/layer_1/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_1/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/self_attention/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_1/self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_1/self_attention/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_1/self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_1/second_attend_to_encoder/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_1/second_attend_to_encoder/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/second_attend_to_encoder/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_1/second_attend_to_encoder/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_1/second_attend_to_encoder/multihead_attention/v/kernel:0", + 
"evolved_transformer/body/decoder/layer_1/second_attend_to_encoder/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/dense/kernel:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/dense/bias:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/dense_1/kernel:0", + "evolved_transformer/body/decoder/layer_1/dense_layers/dense_1/bias:0", + "evolved_transformer/body/decoder/layer_2/16_head_self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/16_head_self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/16_head_self_attention/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_2/16_head_self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_2/16_head_self_attention/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_2/16_head_self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_2/first_attend_to_encoder/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_2/first_attend_to_encoder/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_2/first_attend_to_encoder/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_2/first_attend_to_encoder/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv11x1/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv11x1/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv11x1/bias:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv_7x1_1/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv_7x1_1/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv_7x1_1/bias:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv_7x1_2/depthwise_kernel:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv_7x1_2/pointwise_kernel:0", + "evolved_transformer/body/decoder/layer_2/conv_branches/separable_conv_7x1_2/bias:0", + "evolved_transformer/body/decoder/layer_2/self_attention/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/self_attention/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/self_attention/multihead_attention/q/kernel:0", + 
"evolved_transformer/body/decoder/layer_2/self_attention/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_2/self_attention/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_2/self_attention/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_2/second_attend_to_encoder/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/second_attend_to_encoder/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/second_attend_to_encoder/multihead_attention/q/kernel:0", + "evolved_transformer/body/decoder/layer_2/second_attend_to_encoder/multihead_attention/k/kernel:0", + "evolved_transformer/body/decoder/layer_2/second_attend_to_encoder/multihead_attention/v/kernel:0", + "evolved_transformer/body/decoder/layer_2/second_attend_to_encoder/multihead_attention/output_transform/kernel:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/layer_prepostprocess/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/dense/kernel:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/dense/bias:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/layer_prepostprocess_1/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/layer_prepostprocess_1/layer_norm/layer_norm_bias:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/dense_1/kernel:0", + "evolved_transformer/body/decoder/layer_2/dense_layers/dense_1/bias:0", + "evolved_transformer/body/decoder/layer_prepostprocess/layer_norm/layer_norm_scale:0", + "evolved_transformer/body/decoder/layer_prepostprocess/layer_norm/layer_norm_bias:0", + ] + variables = get_vars(var_names) + print_vars() + + # Act. + with self.test_session() as session: + tf.global_variables_initializer().run() + values_before = session.run(variables) + for _ in range(10): # Arbitrary number of training steps. + apply_grad.run() + values_after = session.run(variables) + + # Assert. + self.assertTrue( + model._original_hparams.shared_embedding_and_softmax_weights) + self.assertTrue(model.hparams.shared_embedding_and_softmax_weights) + self.assertFalse(model.hparams.shared_embedding) + self.assertSameElements(var_names, + [var.name for var in tf.trainable_variables()]) + empty_vars = { + "evolved_transformer/symbol_modality_10_4/shared/weights_10:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_11:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_12:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_13:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_14:0", + "evolved_transformer/symbol_modality_10_4/shared/weights_15:0" + } + for name, before, after in zip(var_names, values_before, values_after): + if name in empty_vars: + self.assertEqual(before.size, after.size) + self.assertEqual(before.size, 0) + else: + assert_with_message( + self.assertNotAllClose, before, after, + "%s should be trainable, but did not change after training." % name) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/image_transformer.py b/tensor2tensor/models/image_transformer.py new file mode 100644 index 000000000..dd7c2d882 --- /dev/null +++ b/tensor2tensor/models/image_transformer.py @@ -0,0 +1,1158 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""image generation with transformer (attention). + +encoder: [Self-Attention, Feed-forward] x n +decoder: [Self-Attention, Source-Target-Attention, Feed-forward] x n + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_image_attention as cia +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import modalities +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +@registry.register_model +class Imagetransformer(t2t_model.T2TModel): + """Conditional image generation with attention. See file docstring. + + The model admits either a Categorical or discretized mixture of logistic + distributions (DMOL) as the likelihood. When using DMOL for training, double + check that the evaluation metrics also use it. + """ + + def body(self, features): + hparams = copy.copy(self._hparams) + targets = features["targets"] + if (hparams.likelihood == cia.DistributionType.DMOL and + hparams.num_channels != 1): + raise ValueError("When using DMOL for the likelihood, bottom function " + " must be identity and num_channels must be 1.") + if (not tf.get_variable_scope().reuse and + hparams.mode != tf_estimator.ModeKeys.PREDICT): + tf.summary.image("targets", tf.to_float(targets), max_outputs=1) + + # Extra losses list if we want to use moe. + losses = [] + # Prepare decoder inputs and bias. + decoder_input, rows, cols = cia.prepare_decoder(targets, hparams) + # Add class label to decoder input. + if not hparams.unconditional: + inputs = features["inputs"] + decoder_input += tf.reshape( + inputs, + [common_layers.shape_list(targets)[0], 1, 1, hparams.hidden_size]) + decoder_output = cia.transformer_decoder_layers( + decoder_input, + None, + hparams.num_decoder_layers or hparams.num_hidden_layers, + hparams, + attention_type=hparams.dec_attention_type, + losses=losses, + name="decoder") + output = cia.create_output(decoder_output, rows, cols, targets, hparams) + + if losses: + return output, {"extra_loss": tf.add_n(losses)} + else: + return output + + def loss(self, logits, features): + if self._hparams.likelihood == cia.DistributionType.DMOL: + return common_layers.dml_loss(logits, features["targets"]) + + return super(Imagetransformer, self).loss(logits, features) + + def sample(self, features): + """Run the model and extract samples. + + Args: + features: an map of string to `Tensor`. + + Returns: + samples: an integer `Tensor`. + logits: a list of `Tensor`s, one per datashard. + losses: a dictionary: {loss-name (string): floating point `Scalar`}. 
+ """ + if self._hparams.likelihood == cia.DistributionType.DMOL: + logits, losses = self(features) # pylint: disable=not-callable + samples = common_layers.sample_from_discretized_mix_logistic( + logits, seed=None) + return samples, logits, losses + + return super(Imagetransformer, self).sample(features) + + def _slow_greedy_infer(self, features, decode_length): + """A slow greedy inference method. + + Quadratic time in decode_length. + + Args: + features: an map of string to `Tensor` + decode_length: an integer. How many additional timesteps to decode. + + Returns: + samples: an integer `Tensor`. + logits: `Tensor` of shape [batch_size, time, 1, 1, vocab_size]. + losses: a dictionary: {loss-name (string): floating point `Scalar`} + """ + if self._hparams.likelihood == cia.DistributionType.DMOL: + raise NotImplementedError("Decoding is not currently available for DMOL.") + return super(Imagetransformer, self)._slow_greedy_infer(features, + decode_length) + + +@registry.register_model +class ImagetransformerMoe(t2t_model.T2TModel): + """Conditional image generation with attention and MoE.""" + + @staticmethod + def use_body_sharded(): + return True + + def body_sharded(self, sharded_features): + dp = self._data_parallelism + hparams = copy.copy(self._hparams) + inputs = sharded_features["inputs"] + targets = sharded_features["targets"] + + # Determine attention type and padding from hparams. + q_padding, kv_padding = "VALID", "VALID" + if hparams.q_filter_width > 1: + q_padding = "LEFT" + if hparams.kv_filter_width > 1: + kv_padding = "LEFT" + + # Prepare decoder inputs and bias. + decoder_input, rows, cols = dp(cia.prepare_decoder_inputs, + inputs, targets, hparams) + + # Run decoder. + # TODO(nikip): Use q_padding and kv_padding + del q_padding, kv_padding + decoder_output, extra_loss = cia.transformer_layers_sharded( + dp, + self._ps_devices, + decoder_input, + hparams.num_hidden_layers, + hparams, + self_attention_bias=None, + enc_output=None, + attention_type=hparams.dec_attention_type, + name="decoder") + + output = dp(cia.create_output, decoder_output, rows, cols, targets, hparams) + return output, extra_loss + + +@registry.register_hparams +def image_transformer_base(): + """Set of hyperparameters.""" + hparams = common_hparams.basic_params1() + hparams.hidden_size = 512 + hparams.batch_size = 4 + hparams.max_length = 3075 + hparams.dropout = 0.0 + hparams.clip_grad_norm = 0. # i.e. no gradient clipping + hparams.optimizer_adam_epsilon = 1e-9 + hparams.learning_rate_decay_scheme = "noam" + hparams.learning_rate = 0.1 + hparams.learning_rate_warmup_steps = 4000 + hparams.initializer_gain = 0.2 + hparams.num_hidden_layers = 6 + hparams.initializer = "uniform_unit_scaling" + hparams.weight_decay = 0.0 + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.98 + hparams.label_smoothing = 0.0 + hparams.bottom["targets"] = modalities.image_channel_embeddings_bottom + hparams.top["targets"] = modalities.identity_top + hparams.norm_type = "layer" + hparams.layer_prepostprocess_dropout = 0.0 + hparams.add_hparam("filter_size", 512) # Add new ones like this. + + # attention-related flags + hparams.add_hparam("num_heads", 8) + hparams.add_hparam("attention_key_channels", 0) + hparams.add_hparam("attention_value_channels", 0) + hparams.add_hparam("ffn_layer", "conv_hidden_relu") + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. 
+ hparams.add_hparam("attention_dropout", 0.0) + hparams.add_hparam("relu_dropout", 0.0) + hparams.add_hparam("pos", "timing") # timing, none + hparams.add_hparam("nbr_decoder_problems", 1) + hparams.add_hparam("num_output_layers", 3) + hparams.add_hparam("block_size", 1) + + # dilated attention based flags + hparams.add_hparam("gap_sizes", [2, 4, 8, 16, 32, 64, 2, 4, 8, 16, 32, 64]) + + # image size related flags + # assuming that the image has same height and width + hparams.add_hparam("img_len", 32) + hparams.add_hparam("num_channels", 3) + # Local attention params + hparams.add_hparam("local_and_global_att", False) + hparams.add_hparam("block_length", 256) + hparams.add_hparam("block_width", 128) + hparams.add_hparam("num_encoder_layers", 4) + hparams.add_hparam("num_decoder_layers", 12) + hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_1D) + hparams.add_hparam("block_raster_scan", False) + + # multipos attention params + hparams.add_hparam("q_filter_width", 1) + hparams.add_hparam("kv_filter_width", 1) + + hparams.add_hparam("likelihood", cia.DistributionType.CAT) + hparams.add_hparam("unconditional", False) # unconditional generation + + # parameters of discretized mixture of logistics loss from pixel cnn++ + hparams.add_hparam("num_mixtures", 10) + + # These parameters are only used when ffn_layer=="local_moe_tpu" + hparams.add_hparam("moe_overhead_train", 1.0) + hparams.add_hparam("moe_overhead_eval", 2.0) + hparams.moe_num_experts = 8 + hparams.moe_loss_coef = 1e-3 + + # These parameters are for relative attention + hparams.add_hparam("shared_rel", False) # share relative embeddings + return hparams + + +@registry.register_hparams +def imagetransformer_base(): + hparams = image_transformer_base() + return hparams + + +@registry.register_hparams +def imagetransformer_cifar10_base(): + """Best config for 2.90 bits/dim on CIFAR10 using cross entropy.""" + hparams = image_transformer_base() + hparams.batch_size = 4 + hparams.num_heads = 4 + hparams.num_decoder_layers = 12 + hparams.block_length = 256 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.learning_rate = 0.5 + hparams.learning_rate_warmup_steps = 4000 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.layer_prepostprocess_dropout = 0.3 + hparams.unconditional = True + return hparams + + +@registry.register_hparams +def imagetransformer_cifar10_base_dmol(): + """Best config for 2.90 bits/dim on CIFAR10 using DMOL.""" + hparams = image_transformer_base() + hparams.likelihood = cia.DistributionType.DMOL + hparams.num_channels = 1 + hparams.bottom["targets"] = modalities.image_channel_compress_targets_bottom + hparams.top["targets"] = modalities.identity_top + hparams.num_heads = 8 + hparams.batch_size = 8 + hparams.sampling_method = "random" + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + hparams.summarize_grads = True + hparams.hidden_size = 256 + hparams.filter_size = 512 + hparams.attention_key_channels = 512 + hparams.attention_value_channels = 512 + hparams.num_decoder_layers = 12 + hparams.layer_prepostprocess_dropout = 0.1 + hparams.learning_rate = 0.1 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.pos = "emb" + hparams.unconditional = True + return hparams + + +@registry.register_hparams +def imagetransformer_base_tpu(): + """Transformer base params for cifar-10.""" + hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() + 
update_hparams_for_tpu(hparams) + hparams.batch_size = 4 + hparams.num_heads = 4 # heads are expensive on tpu + hparams.num_decoder_layers = 12 + hparams.block_length = 128 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.learning_rate = 0.2 + hparams.learning_rate_warmup_steps = 6000 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.layer_prepostprocess_dropout = 0.3 + return hparams + + +@registry.register_hparams +def imagetransformer_base_imagenet_tpu(): + """Transformer base params for cifar-10.""" + hparams = imagetransformer_base_tpu() + hparams.batch_size = 4 + hparams.num_heads = 4 # heads are expensive on tpu + hparams.num_decoder_layers = 12 + hparams.block_length = 128 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def imagetransformer_imagenet32_base(): + """Best config for ImageNet-32 with 3.77 bits/dim using cross entropy.""" + hparams = imagetransformer_cifar10_base() + hparams.batch_size = 4 + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def imagetransformer_base_rel(): + """Base with relative attention.""" + hparams = imagetransformer_base() + hparams.dec_attention_type = cia.AttentionType.RELATIVE_LOCAL_1D + return hparams + + +@registry.register_hparams +def imagetransformer_sep_channels(): + """separate rgb embeddings.""" + hparams = imagetransformer_base() + hparams.num_heads = 4 + hparams.attention_key_channels = hparams.attention_value_channels = 0 + hparams.hidden_size = 256 + hparams.filter_size = 512 + hparams.num_hidden_layers = 6 + return hparams + + +@registry.register_hparams +def imagetransformer_sep_channels_8l(): + """separate rgb embeddings.""" + hparams = imagetransformer_base() + hparams.num_heads = 4 + hparams.attention_key_channels = hparams.attention_value_channels = 0 + hparams.hidden_size = 256 + hparams.filter_size = 256 + hparams.num_hidden_layers = 8 + hparams.sampling_method = "random" + return hparams + + +@registry.register_hparams +def imagetransformer_sep_channels_8l_multipos3(): + """separate rgb embeddings.""" + hparams = imagetransformer_sep_channels_8l() + hparams.q_filter_width = 3 + hparams.kv_filter_width = 3 + return hparams + + +@registry.register_hparams +def imagetransformer_base_8l_8h_big_cond_dr03_dan(): + """big 1d model for conditional image generation.2.99 on cifar10.""" + hparams = imagetransformer_sep_channels_8l() + hparams.block_width = 256 + hparams.block_length = 256 + hparams.hidden_size = 512 + hparams.num_heads = 8 + hparams.filter_size = 2048 + hparams.batch_size = 4 + hparams.max_length = 3075 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.num_decoder_layers = 8 + hparams.layer_prepostprocess_dropout = 0.3 + return hparams + + +@registry.register_hparams +def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64(): + """big 1d model for unconditional generation on imagenet.""" + hparams = imagetransformer_base_10l_8h_big_cond_dr03_dan() + hparams.unconditional = True + hparams.max_length = 14000 + hparams.batch_size = 1 + hparams.img_len = 64 + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def imagetransformerpp_sep_channels_8l_8h(): + """separate rgb embeddings.""" + hparams = imagetransformer_base() + hparams.likelihood = cia.DistributionType.DMOL + 
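 # As in imagetransformer_cifar10_base_dmol above, the DMOL likelihood is +  # paired with num_channels=1, the channel-compress bottom and the identity +  # top, set just below. + 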
hparams.num_channels = 1 + hparams.bottom["targets"] = modalities.image_channel_compress_targets_bottom + hparams.top["targets"] = modalities.identity_top + hparams.num_heads = 8 + hparams.batch_size = 4 + hparams.attention_key_channels = hparams.attention_value_channels = 0 + hparams.hidden_size = 512 + hparams.filter_size = 512 + hparams.num_hidden_layers = 8 + hparams.sampling_method = "random" + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + hparams.summarize_grads = True + hparams.learning_rate = 0.1 + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_8l_8h_big_cond_dr03_dan(): + """big 1d model for conditional image generation.2.99 on cifar10.""" + hparams = imagetransformerpp_sep_channels_8l_8h() + hparams.hidden_size = 512 + hparams.num_heads = 8 + hparams.filter_size = 2048 + hparams.batch_size = 4 + hparams.max_length = 3075 + hparams.layer_prepostprocess_dropout = 0.3 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.summarize_grads = True + hparams.learning_rate = 0.01 + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_8l_8h_big_cond_dr03_dan_a(): + hparams = imagetransformerpp_base_8l_8h_big_cond_dr03_dan() + hparams.learning_rate = 0.1 + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan(): + hparams = imagetransformerpp_base_8l_8h_big_cond_dr03_dan_a() + hparams.unconditional = True + hparams.num_decoder_layers = 10 + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_a(): + hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan() + hparams.learning_rate = 0.01 + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_b(): + hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan() + hparams.learning_rate = 0.1 + hparams.hidden_size = 256 + hparams.attention_key_channels = 512 + hparams.attention_value_channels = 512 + hparams.filter_size = 1024 + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g(): + hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_b() + hparams.filter_size = 512 + hparams.layer_prepostprocess_dropout = 0.1 + hparams.learning_rate = 0.1 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.pos = "emb" + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_k(): + hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g() + hparams.num_decoder_layers = 12 + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l(): + hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g() + hparams.num_decoder_layers = 12 + hparams.clip_grad_norm = 40. 
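+  # Unlike image_transformer_base (clip_grad_norm = 0., i.e. no clipping), +  # this variant clips gradients at norm 40.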
+ return hparams + + +@registry.register_hparams +def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m(): + hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_k() + hparams.batch_size = 8 + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_rel(): + hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_k() + hparams.batch_size = 8 + hparams.dec_attention_type = cia.AttentionType.RELATIVE_LOCAL_1D + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_relsh(): + hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_rel() + hparams.shared_rel = True + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p(): + """Gets to 2.92 in just under 4 days on 8 p100s.""" + hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l() + hparams.num_decoder_layers = 14 + hparams.batch_size = 8 + hparams.layer_prepostprocess_dropout = 0.2 + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m_bs1(): + """For 128x128.""" + # TODO(trandustin): why are these running? max_length and img_len not set + # 256x256 was also training without setting max_length + hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_m() + hparams.batch_size = 1 + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p_bs1(): + """For 128x128.""" + hparams = imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_p() + hparams.batch_size = 1 + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1(): + """For 256x256.""" + hparams = imagetransformerpp_base_10l_8h_big_uncond_dr03_dan_g() + # TODO(trandustin): I forgot to set this in the runs! Maybe it's not used in + # image transformer training implementation? + # hparams.img_len = 256 + hparams.max_length = 66000 # allow for 256x256 + hparams.batch_size = 1 + hparams.num_decoder_layers = 5 + hparams.hidden_size = 128 + hparams.filter_size = 128 + hparams.attention_key_channels = 64 + hparams.attention_value_channels = 64 + hparams.layer_prepostprocess_dropout = 0.0 + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_5l_8h_dr00_dan_g_bs1_adafactor(): + """For 256x256.""" + hparams = imagetransformerpp_base_5l_8h_big_uncond_dr00_dan_g_bs1() + # Use Adafactor which uses less memory than Adam, and its recommendations. 
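+  # (Adafactor saves memory by keeping factored second-moment statistics +  # instead of full per-parameter Adam accumulators; rsqrt_decay is the +  # learning-rate schedule recommended alongside it.)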
+ hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "rsqrt_decay" + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_6l_8h_dr00_dan_g_bs1_adafactor(): + """For 256x256.""" + hparams = imagetransformerpp_base_5l_8h_dr00_dan_g_bs1_adafactor() + hparams.num_decoder_layers = 6 + return hparams + + +@registry.register_hparams +def imagetransformerpp_base_14l_8h_big_uncond_dr03_dan_eval(): + """Gets to 2.92 in just under 4 days on 8 p100s.""" + hparams = imagetransformerpp_base_12l_8h_big_uncond_dr03_dan_l() + hparams.num_decoder_layers = 14 + hparams.batch_size = 8 + # hparams.layer_prepostprocess_dropout = 0.2 + return hparams + + +@registry.register_hparams +def imagetransformer_base_8l_8h_big_cond_dr03_dan_128(): + hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan() + hparams.block_width = 128 + hparams.block_length = 128 + return hparams + + +@registry.register_hparams +def imagetransformer_base_10l_8h_big_cond_dr03_dan(): + """Best conditional Cifar10 gen param.""" + hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan() + hparams.num_decoder_layers = 10 + return hparams + + +@registry.register_hparams +def imagetransformer_base_10l_8h_big_uncond_dr03_dan(): + """Best unconditional Cifar10 gen param.""" + hparams = imagetransformer_base_10l_8h_big_cond_dr03_dan() + hparams.num_decoder_layers = 10 + return hparams + + +@registry.register_hparams +def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated(): + """Dilated hparams.""" + hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan() + hparams.gap_sizes = [0, 16, 64, 0, 16, 64, 128, 0] + hparams.dec_attention_type = cia.AttentionType.DILATED + hparams.block_length = 128 + hparams.block_width = 128 + hparams.add_hparam("num_memory_blocks", 1) + return hparams + + +@registry.register_hparams +def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_b(): + """Dilated hparams.""" + hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated() + hparams.block_width = 64 + hparams.num_memory_blocks = 2 + return hparams + + +@registry.register_hparams +def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_c(): + """Dilated hparams.""" + hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated() + hparams.block_width = 32 + hparams.num_memory_blocks = 4 + return hparams + + +@registry.register_hparams +def imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated_d(): + """Dilated hparams.""" + hparams = imagetransformer_base_8l_8h_big_cond_dr03_dan_dilated() + hparams.gap_sizes = [0, 16, 64, 16, 64, 128, 256, 0] + return hparams + + +@registry.register_hparams +def imagetransformer_base_12l_8h_big(): + """big 1d model for conditional image generation.""" + hparams = imagetransformer_sep_channels_8l_8h() + hparams.filter_size = 1024 + hparams.num_decoder_layers = 12 + hparams.batch_size = 1 + hparams.hidden_size = 512 + hparams.learning_rate_warmup_steps = 4000 + hparams.sampling_method = "random" + hparams.beam_size = 1 + hparams.block_width = 256 + return hparams + + +@registry.register_hparams +def imagetransformer1d_base_8l_64by64(): + """hparams fo 12 layer big 1d model for imagenet 64x64.""" + hparams = image_transformer_base() + hparams.num_heads = 8 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.num_decoder_layers = 8 + hparams.batch_size = 1 + hparams.block_length = 512 + hparams.block_width = 768 + hparams.layer_prepostprocess_dropout = 0.1 + hparams.max_length = 14000 + hparams.unconditional = int(False) + return hparams + + 
+@registry.register_hparams +def imagetransformer1d_base_12l_64by64(): + """hparams fo 12 layer big 1d model for imagenet 64x64.""" + hparams = image_transformer_base() + hparams.num_heads = 8 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.num_decoder_layers = 12 + hparams.batch_size = 1 + hparams.block_length = 512 + hparams.block_width = 768 + hparams.layer_prepostprocess_dropout = 0.1 + hparams.max_length = 14000 + hparams.unconditional = int(False) + return hparams + + +@registry.register_hparams +def imagetransformer_base_14l_8h_big(): + """big 1d model for conditional image generation.""" + hparams = imagetransformer_base_12l_8h_big() + hparams.num_decoder_layers = 14 + return hparams + + +@registry.register_hparams +def imagetransformer_base_14l_8h_big_dr01(): + """big 1d model for conditional image generation.""" + hparams = imagetransformer_base_14l_8h_big() + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def imagetransformer_base_12l_8h_big_uncond(): + """big 1d model for conditional image generation.""" + hparams = imagetransformer_base_12l_8h_big() + hparams.unconditional = True + return hparams + + +@registry.register_hparams +def imagetransformer_base_14l_8h_big_uncond(): + """big 1d model for conditional image generation.""" + hparams = imagetransformer_base_12l_8h_big_uncond() + hparams.num_decoder_layers = 14 + return hparams + + +@registry.register_hparams +def imagetransformer_sep_channels_12l_16h_imagenet_large(): + """separate rgb embeddings.""" + hparams = imagetransformer_sep_channels_8l_8h() + hparams.num_hidden_layers = 12 + hparams.batch_size = 1 + hparams.filter_size = 2048 + hparams.num_heads = 16 + hparams.learning_rate_warmup_steps = 16000 + hparams.sampling_method = "random" + hparams.learning_rate = 0.1 + return hparams + + +@registry.register_hparams +def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc(): + """separate rgb embeddings.""" + hparams = imagetransformer_sep_channels_12l_16h_imagenet_large() + hparams.num_hidden_layers = 16 + hparams.local_attention = True + hparams.batch_size = 1 + hparams.block_length = 256 + return hparams + + +@registry.register_hparams +def imagetransformer_sep_channels_16l_16h_imgnet_lrg_loc_128(): + """separate rgb embeddings.""" + hparams = imagetransformer_sep_channels_12l_16h_imagenet_large() + hparams.num_hidden_layers = 16 + hparams.local_attention = True + hparams.batch_size = 1 + hparams.block_length = 128 + return hparams + + +@registry.register_hparams +def imagetransformer_sep_output_channels_8l_local_and_global_att(): + """separate rgb embeddings.""" + hparams = imagetransformer_sep_channels_8l() + hparams.sampling_method = "random" + hparams.local_and_global_att = True + return hparams + + +@registry.register_hparams +def imagetransformer_base_10l_16h_big_uncond_dr01_imgnet(): + """big 1d model for conditional image generation.""" + hparams = imagetransformer_base_14l_8h_big_dr01() + # num_hidden_layers + hparams.num_decoder_layers = 10 + hparams.num_heads = 16 + hparams.hidden_size = 1024 + hparams.filter_size = 4096 + hparams.batch_size = 1 + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def imagetransformer_base_10l_16h_big_dr01_imgnet(): + """big 1d model for conditional image generation.""" + hparams = imagetransformer_base_14l_8h_big_dr01() + # num_hidden_layers + hparams.num_decoder_layers = 10 + hparams.num_heads = 16 + hparams.hidden_size = 1024 + hparams.filter_size = 4096 + 
hparams.batch_size = 1 + hparams.unconditional = False + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def imagetransformer_sep_channels_8l_8h(): + """separate rgb embeddings.""" + hparams = imagetransformer_base() + hparams.num_heads = 8 + hparams.batch_size = 1 + hparams.attention_key_channels = hparams.attention_value_channels = 0 + hparams.hidden_size = 512 + hparams.filter_size = 512 + hparams.num_hidden_layers = 8 + hparams.sampling_method = "random" + return hparams + + +@registry.register_hparams +def imagetransformer_sep_channels_8l_8h_local_and_global_att(): + """separate rgb embeddings.""" + hparams = imagetransformer_sep_channels_8l_8h() + hparams.num_heads = 8 + hparams.batch_size = 1 + hparams.attention_key_channels = hparams.attention_value_channels = 0 + hparams.hidden_size = 256 + hparams.filter_size = 256 + hparams.num_hidden_layers = 4 + hparams.sampling_method = "random" + hparams.local_and_global_att = True + return hparams + + +@registry.register_hparams +def imagetransformer_bas8l_8h_big_uncond_dr03_imgnet(): + """big 1d model for conditional image generation.""" + hparams = imagetransformer_base_14l_8h_big_dr01() + # num_hidden_layers + hparams.num_decoder_layers = 8 + hparams.num_heads = 8 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.layer_prepostprocess_dropout = 0.3 + return hparams + + +@registry.register_hparams +def imagetransformer_tiny(): + hparams = imagetransformer_base() + hparams.num_decoder_layers = 2 + hparams.hidden_size = 64 + hparams.batch_size = 1 + hparams.unconditional = True + hparams.max_length = 66000 # allow for 256x256 + return hparams + + +@registry.register_hparams +def imagetransformerpp_tiny(): + hparams = imagetransformer_tiny() + hparams.likelihood = cia.DistributionType.DMOL + hparams.num_channels = 1 + hparams.bottom["targets"] = modalities.image_channel_compress_targets_bottom + hparams.top["targets"] = modalities.identity_top + return hparams + + +@registry.register_hparams +def imagetransformer_tiny_tpu(): + hparams = imagetransformer_tiny() + update_hparams_for_tpu(hparams) + hparams.num_hidden_layers = 2 + hparams.hidden_size = 16 + hparams.batch_size = 2 + hparams.num_heads = 2 + return hparams + + +@registry.register_hparams +def imagetransformer_base_10l_16h_big_dr01_moe_imgnet(): + """big 1d model for conditional image generation.""" + hparams = imagetransformer_base_10l_16h_big_dr01_imgnet() + hparams.initializer = "orthogonal" + hparams.learning_rate_warmup_steps = 16000 + hparams.add_hparam("moe_layers_decoder", "2,7") # Which layer is MoE. + hparams.moe_hidden_sizes = "4096" # Hidden layer sizes (comma-separated). + hparams.moe_num_experts = 64 # Number of experts in each MoE layer. + hparams.moe_k = 4 # How many experts to use per batch element (try 2 or 4). + hparams.moe_loss_coef = 3e-2 # MoE loss coefficient (1e-2 is usually ok). + hparams.scheduled_sampling_prob = 0.1 + hparams.scheduled_sampling_warmup_steps = 200000 + return hparams + + +@registry.register_hparams +def imagetransformer_moe_tiny(): + """Set of hyperparameters for a very small imagetransformer with MoE.""" + hparams = imagetransformer_tiny() + hparams.hidden_size = 64 + hparams.batch_size = 1 + hparams.num_hidden_layers = 3 + hparams.dec_attention_type = cia.AttentionType.MOE_LOCAL_1D + hparams.add_hparam("moe_layers_decoder", "1") # Which layer is MoE. + hparams.moe_hidden_sizes = "1024" # Hidden layer sizes (comma-separated). 
+ hparams.moe_num_experts = 16 # Number of experts in each MoE layer. + hparams.moe_k = 2 # How many experts to use per batch element (try 2 or 4). + hparams.moe_loss_coef = 1e-2 # MoE loss coefficient (1e-2 is usually ok). + return hparams + + +def update_hparams_for_tpu(hparams): + hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.learning_rate_warmup_steps = 6000 + hparams.batch_size = 4 + + +@registry.register_hparams +def imagetransformer_sep_channels_8l_tpu(): + """Hparams for training imagetransformer on tpu.""" + hparams = imagetransformer_sep_channels_8l() + update_hparams_for_tpu(hparams) + hparams.batch_size = 4 + hparams.num_heads = 4 # heads are expensive on tpu + hparams.shared_embedding_and_softmax_weights = False + return hparams + + +@registry.register_hparams +def imagetransformer_b10l_4h_big_uncond_dr03_tpu(): + """Small model for tpu cifar 10.""" + hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() + update_hparams_for_tpu(hparams) + hparams.batch_size = 4 + hparams.num_heads = 4 # heads are expensive on tpu + hparams.num_decoder_layers = 10 + hparams.block_length = 128 + hparams.hidden_size = 512 + hparams.filter_size = 1024 + hparams.learning_rate = 0.2 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + return hparams + + +@registry.register_hparams +def imagetransformer_b10l_dr03_moe_tpu(): + """Moe tpu params.""" + hparams = imagetransformer_b10l_4h_big_uncond_dr03_tpu() + update_hparams_for_tpu(hparams) + hparams.batch_size = 4 + hparams.num_heads = 4 # heads are expensive on tpu + hparams.num_decoder_layers = 10 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.ffn_layer = "local_moe_tpu" + return hparams + + +@registry.register_hparams +def imagetransformer_b10l_4h_big_uncond_dr03_lr025_tpu(): + """TPU related small model.""" + hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() + update_hparams_for_tpu(hparams) + hparams.batch_size = 4 + hparams.num_heads = 4 # heads are expensive on tpu + hparams.num_decoder_layers = 10 + hparams.learning_rate = 0.25 + hparams.learning_rate_warmup_steps = 8000 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + # hparams.unconditional = True + return hparams + + +@registry.register_hparams +def imagetransformer_b12l_4h_big_uncond_dr03_tpu(): + """TPU 12 layer model.""" + hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() + update_hparams_for_tpu(hparams) + hparams.batch_size = 4 + hparams.num_heads = 4 # heads are expensive on tpu + hparams.num_decoder_layers = 12 + hparams.block_length = 128 + hparams.hidden_size = 512 + hparams.filter_size = 1024 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.layer_prepostprocess_dropout = 0.3 + return hparams + + +@registry.register_hparams +def imagetransformer_b12l_4h_big_uncond_dr03_lr025_tpu(): + hparams = imagetransformer_b12l_4h_big_uncond_dr03_tpu() + update_hparams_for_tpu(hparams) + hparams.learning_rate = 0.25 + hparams.learning_rate_warmup_steps = 5000 + return hparams + + +@registry.register_hparams +def imagetransformer_b12l_4h_b256_uncond_dr03_tpu(): + """works very well on 4x4.""" + hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() + update_hparams_for_tpu(hparams) + hparams.batch_size = 4 + hparams.num_heads = 4 # heads are expensive on tpu + hparams.num_decoder_layers = 12 + hparams.block_length = 256 + 
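+  # "4x4" in the docstring presumably refers to a 4x4 TPU topology; the
+  # "none"/"dan" layer pre/postprocess pair set below matches the other TPU
+  # configs in this file.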
hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.learning_rate = 0.5 + hparams.learning_rate_warmup_steps = 4000 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.layer_prepostprocess_dropout = 0.3 + hparams.unconditional = True + return hparams + + +@registry.register_hparams +def imagetransformer_b12l_4h_b256_uncond_dr03_rel_tpu(): + """works very well on 4x4.""" + hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu() + hparams.shared_rel = True + hparams.dec_attention_type = cia.AttentionType.RELATIVE_LOCAL_1D + return hparams + + +@registry.register_ranged_hparams +def imagetransformer_cifar_tpu_range(rhp): + """Range of hyperparameters for vizier.""" + # After starting from base, set intervals for some parameters. + rhp.set_float("learning_rate", 0.01, 1.0, scale=rhp.LOG_SCALE) + rhp.set_discrete("num_decoder_layers", [8, 10, 12, 14, 16]) + rhp.set_discrete("hidden_size", [256, 512, 1024]) + rhp.set_discrete("block_length", [128, 256, 512]) + rhp.set_categorical("dec_attention_type", [ + cia.AttentionType.RELATIVE_LOCAL_1D, cia.AttentionType.LOCAL_1D]) + + +@registry.register_hparams +def imagetransformer_b12l_4h_b128_h512_uncond_dr03_tpu(): + """TPU related big model.""" + hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() + update_hparams_for_tpu(hparams) + hparams.batch_size = 4 + hparams.num_heads = 4 # heads are expensive on tpu + hparams.num_decoder_layers = 12 + hparams.block_length = 128 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.learning_rate = 0.2 + hparams.learning_rate_warmup_steps = 6000 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.layer_prepostprocess_dropout = 0.3 + return hparams + + +@registry.register_hparams +def imagetransformer_b12l_4h_b128_h512_uncond_dr01_im(): + """TPU related imagenet model.""" + hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu() + update_hparams_for_tpu(hparams) + hparams.batch_size = 4 + hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.learning_rate_warmup_steps = 6000 + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def imagetransformer_b12l_4h_uncond_dr03_tpu(): + """TPU related small model.""" + hparams = imagetransformer_b12l_4h_b256_uncond_dr03_tpu() + hparams.learning_rate = 0.2 + hparams.learning_rate_warmup_steps = 4000 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.layer_prepostprocess_dropout = 0.3 + return hparams + + +@registry.register_hparams +def imagetransformer_b12l_4h_b128_uncond_dr03_tpu(): + """TPU config for cifar 10.""" + hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() + update_hparams_for_tpu(hparams) + hparams.batch_size = 2 + hparams.num_heads = 4 # heads are expensive on tpu + hparams.num_decoder_layers = 12 + hparams.block_length = 128 + hparams.hidden_size = 256 + hparams.filter_size = 2048 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.layer_prepostprocess_dropout = 0.1 + hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.learning_rate_warmup_steps = 10000 + return hparams + + +@registry.register_hparams +def imagetransformer_b12l_8h_b256_uncond_dr03_tpu(): + """TPU related 12 layer 8 heads model.""" + hparams = imagetransformer_bas8l_8h_big_uncond_dr03_imgnet() + update_hparams_for_tpu(hparams) + 
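+  # Keeps 8 heads (most TPU configs here drop to 4); the batch size is halved
+  # to 2 below, presumably to fit the larger attention cost.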
hparams.batch_size = 2 + hparams.num_heads = 8 # heads are expensive on tpu + hparams.num_decoder_layers = 12 + hparams.block_length = 256 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.layer_prepostprocess_dropout = 0.3 + return hparams + + +@registry.register_hparams +def imagetransformer_b10l_4h_big_uncond_dr01_tpu(): + """big 1d model for conditional image generation.""" + hparams = imagetransformer_b12l_4h_big_uncond_dr03_tpu() + # num_hidden_layers + hparams.num_decoder_layers = 10 + hparams.num_heads = 4 + hparams.hidden_size = 1024 + hparams.filter_size = 4096 + hparams.batch_size = 1 + hparams.layer_prepostprocess_dropout = 0.1 + return hparams diff --git a/tensor2tensor/models/image_transformer_2d.py b/tensor2tensor/models/image_transformer_2d.py new file mode 100644 index 000000000..32c4aa59a --- /dev/null +++ b/tensor2tensor/models/image_transformer_2d.py @@ -0,0 +1,908 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""image generation with transformer (attention). + +encoder: [Self-Attention, Feed-forward] x n +decoder: [Self-Attention, Source-Target-Attention, Feed-forward] x n + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import numpy as np +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_image_attention as cia +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import modalities +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +@registry.register_model +class Imagetransformer2d(t2t_model.T2TModel): + """Conditional image generation with attention. See file docstring.""" + + def body(self, features): + hparams = copy.copy(self._hparams) + inputs = features["inputs"] + targets = features["targets"] + targets_shape = common_layers.shape_list(targets) + if not (tf.get_variable_scope().reuse or + hparams.mode == tf_estimator.ModeKeys.PREDICT): + tf.summary.image("targets", targets, max_outputs=1) + + decoder_input, rows, cols = cia.prepare_decoder( + targets, hparams) + # Add class label to decoder input. 
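+    # In the conditional case "inputs" carries the class embedding; it is
+    # reshaped to [batch, 1, 1, hidden_size] and broadcast-added to every
+    # decoder position below.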
+ if not hparams.unconditional: + decoder_input += tf.reshape(inputs, + [targets_shape[0], 1, 1, hparams.hidden_size]) + + decoder_output = cia.transformer_decoder_layers( + decoder_input, None, + hparams.num_decoder_layers, + hparams, + attention_type=hparams.dec_attention_type, + name="decoder") + + output = cia.create_output(decoder_output, rows, cols, targets, hparams) + return output + + +@registry.register_model +class Img2imgTransformer(t2t_model.T2TModel): + """Image 2 Image transformer net.""" + + def body(self, features): + hparams = copy.copy(self._hparams) + targets = features["targets"] + inputs = features["inputs"] + if not (tf.get_variable_scope().reuse or + hparams.mode == tf_estimator.ModeKeys.PREDICT): + tf.summary.image("inputs", inputs, max_outputs=1) + tf.summary.image("targets", targets, max_outputs=1) + + encoder_input = cia.prepare_encoder(inputs, hparams) + encoder_output = cia.transformer_encoder_layers( + encoder_input, + hparams.num_encoder_layers, + hparams, + attention_type=hparams.enc_attention_type, + name="encoder") + decoder_input, rows, cols = cia.prepare_decoder( + targets, hparams) + decoder_output = cia.transformer_decoder_layers( + decoder_input, + encoder_output, + hparams.num_decoder_layers, + hparams, + attention_type=hparams.dec_attention_type, + name="decoder") + output = cia.create_output(decoder_output, rows, cols, targets, hparams) + return output + + +@registry.register_model +class Img2imgTransformerBlockParallel(t2t_model.T2TModel): + """Image-to-image transformer predicting blocks of the output in parallel.""" + + def body(self, features): + assert self._hparams.block_size > 0 + assert not common_layers.is_xla_compiled() + + hparams = copy.copy(self._hparams) + targets = features["targets"] + inputs = features["inputs"] + if not (tf.get_variable_scope().reuse or + hparams.mode == tf_estimator.ModeKeys.PREDICT): + tf.summary.image("inputs", inputs, max_outputs=1) + tf.summary.image("targets", targets, max_outputs=1) + + encoder_input = cia.prepare_encoder(inputs, hparams) + encoder_output = cia.transformer_encoder_layers( + encoder_input, + hparams.num_encoder_layers, + hparams, + attention_type=hparams.enc_attention_type, + name="encoder") + decoder_input, rows, cols = cia.prepare_decoder( + targets, hparams) + decoder_output = cia.transformer_decoder_layers( + decoder_input, + encoder_output, + hparams.num_decoder_layers, + hparams, + attention_type=hparams.dec_attention_type, + name="decoder") + + assert not isinstance(decoder_output, tuple) + assert len(decoder_output.shape) == 4 + + relu_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(self._hparams, "relu_dropout_broadcast_dims", ""))) + + with tf.variable_scope("block_size_%d" % self._hparams.block_size): + tf.logging.info("Using block_size %d", self._hparams.block_size) + block_output = common_layers.dense_relu_dense( + decoder_output, + self._hparams.block_size * self._hparams.filter_size, + self._hparams.block_size * self._hparams.hidden_size, + dropout=self._hparams.relu_dropout, + dropout_broadcast_dims=relu_dropout_broadcast_dims) + + batch_size, rows, cols = common_layers.shape_list(decoder_output)[:3] + decoder_output = tf.reshape(decoder_output, [ + batch_size, + rows, + cols, + 1, + self._hparams.hidden_size + ]) + block_output = tf.reshape(block_output, [ + batch_size, + rows, + cols, + self._hparams.block_size, + self._hparams.hidden_size + ]) + + block_output = common_layers.layer_postprocess( + decoder_output, block_output, 
self._hparams) + + return block_output + + def top(self, body_output, features): + assert self._hparams.block_size > 0 + + train_or_eval = ( + self._hparams.mode == tf_estimator.ModeKeys.TRAIN or + self._hparams.mode == tf_estimator.ModeKeys.EVAL) + + if train_or_eval: + if self._hparams.mode == tf_estimator.ModeKeys.TRAIN: + features["block_index"] = tf.random_uniform( + shape=[], minval=0, maxval=self._hparams.block_size, dtype=tf.int64) + else: + features["block_index"] = 0 + body_output = body_output[:, :, :, features["block_index"], :] + + decoded_image = tf.layers.dense( + body_output, 256, use_bias=True, activation=None, name="output_conv") + + assert len(features["targets"].shape) == 4 + targets_shape = common_layers.shape_list(features["targets"]) + + if train_or_eval: + output = tf.reshape(decoded_image, targets_shape + [256]) + else: + output = tf.reshape(decoded_image, [ + targets_shape[0], -1, self._hparams.block_size, 1, 256]) + output = output[:, :targets_shape[1], :, :, :] + + return output + + def loss(self, logits, features): + assert self._hparams.block_size > 0 + + if self._hparams.mode == tf_estimator.ModeKeys.PREDICT: + return 0.0 + + def shift_left_2d(x, k): + return tf.pad(x, [[0, 0], [0, k]])[:, k:] + + def shift_left_4d_raster_scan(x, k): + batch_size = common_layers.shape_list(x)[0] + return tf.reshape( + shift_left_2d(tf.reshape(x, [batch_size, -1]), k), tf.shape(x)) + + targets = features["targets"] + assert len(targets.shape) == 4 + + targets = tf.stack([ + shift_left_4d_raster_scan(targets, i) + for i in range(self._hparams.block_size) + ], axis=4) + + if (self._hparams.mode == tf_estimator.ModeKeys.TRAIN or + self._hparams.mode == tf_estimator.ModeKeys.EVAL): + assert "block_index" in features + targets = targets[:, :, :, :, features["block_index"]] + + features["targets"] = targets + + loss = super(Img2imgTransformerBlockParallel, self).loss(logits, features) + + if self._hparams.mode == tf_estimator.ModeKeys.TRAIN: + k = features["block_index"] + loss_num, loss_den = loss + loss_val = loss_num / loss_den + for i in range(self._hparams.block_size): + # Hack: if you report a loss of NaN, TensorBoard will plot a point at + # the previous value without a connecting line. This is used here to + # separate out the training losses by block index. 
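+        # Only the block index sampled for this step gets a finite value; all
+        # other per-block summaries are logged as NaN so their curves stay
+        # separate in TensorBoard.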
+ one_or_nan = tf.cond(tf.equal(k, i), lambda: 1.0, lambda: float("nan")) + tf.summary.scalar( + "block_index_%d" % i, one_or_nan * loss_val, family="losses") + + return loss + + def _greedy_infer(self, features, decode_length, use_tpu=False): + assert not use_tpu + return self._slow_greedy_infer_guess_and_check(features, decode_length) + + def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha): + raise NotImplementedError + + def _slow_greedy_infer_guess_and_check(self, features, decode_length): + assert self._hparams.block_size > 0 + assert self._hparams.force_full_predict + assert self._hparams.sampling_method == "argmax" + assert self._decode_hparams.batch_size == 1 + assert self._decode_hparams.block_size > 0 + assert self._decode_hparams.block_size <= self._hparams.block_size + assert ( + (self._decode_hparams.guess_and_check_top_k > 0) + + (self._decode_hparams.guess_and_check_epsilon >= 0) == 1) + + inputs_old = features["inputs"] + assert "targets" not in features + + assert len(features["inputs"].shape) in [3, 4] + if len(features["inputs"].shape) < 4: + features["inputs"] = tf.expand_dims(features["inputs"], 2) + + block_size = self._decode_hparams.block_size + decode_length += tf.shape(features["inputs"])[1] + + def while_exit_cond(result, length): # pylint: disable=unused-argument + return length < decode_length + + def infer_step(result, length): + """Inference step.""" + + def print_info(samples, result, length, new_length): + tf.logging.info( + "length=%s new_length=%s length_diff=%s samples-result=%s", + length, + new_length, + new_length - length, + np.array_str( + samples[0, -block_size-1:-1, 0, 0] - + result[0, -block_size:, 0, 0] + ).replace("\n", ""), + ) + + features["targets"] = tf.pad(result, [[0, 0], [0, 1], [0, 0], [0, 0]]) + samples, logits, losses = self.sample(features) # pylint: disable=unused-variable + + _, top_k_indices = tf.nn.top_k( + logits[:, :-1, :1, :, :], + k=self._decode_hparams.guess_and_check_top_k) + in_top_k = tf.reduce_any( + tf.equal(tf.to_int64(top_k_indices), tf.expand_dims(result, 4)), + axis=4) + + within_epsilon = tf.less_equal( + tf.abs(result - samples[:, :-1, :1, :]), + self._decode_hparams.guess_and_check_epsilon) + + if self._decode_hparams.guess_and_check_top_k: + tf.logging.info( + "Using guess_and_check_top_k=%s", + self._decode_hparams.guess_and_check_top_k) + correct = in_top_k + else: + tf.logging.info( + "Using guess_and_check_epsilon=%s", + self._decode_hparams.guess_and_check_epsilon) + correct = within_epsilon + + correct_cumsum = tf.cumsum(tf.to_int32(correct), axis=1) + perfect_cumsum = 1 + tf.range(tf.shape(correct)[1]) + for axis in [0, 2, 3]: + perfect_cumsum = tf.expand_dims(perfect_cumsum, axis=axis) + + new_length = tf.reduce_sum( + tf.to_int32(tf.equal(correct_cumsum, perfect_cumsum)), axis=1) + new_length = tf.squeeze(new_length, axis=[0, 1, 2]) + new_length = tf.minimum(new_length, decode_length) + + new_result = tf.concat([ + result[:, :new_length, :, :], + tf.reshape( + samples[:, new_length, :block_size, :], [1, block_size, 1, 1]) + ], axis=1) + + with tf.control_dependencies([ + tf.py_func(print_info, [samples, result, length, new_length], []) + ]): + new_result = tf.identity(new_result) + + return new_result, new_length + + result = tf.zeros((1, 0, 1, 1), dtype=tf.int64) + length = tf.squeeze(tf.zeros(1, dtype=tf.int32)) + + result, length = tf.while_loop( + while_exit_cond, + infer_step, + [result, length], + shape_invariants=[ + tf.TensorShape([1, None, 1, 1]), + tf.TensorShape([]), 
+ ], + back_prop=False, + parallel_iterations=1) + + result = result[:, :length, :, :] + + features["inputs"] = inputs_old + + return { + "outputs": result, + "scores": None, + } + + +@registry.register_hparams +def image_transformer2d_base(): + """Set of hyperparameters.""" + hparams = common_hparams.basic_params1() + hparams.hidden_size = 512 + hparams.batch_size = 1 + hparams.max_length = 256 + hparams.dropout = 0.0 + hparams.clip_grad_norm = 0. # i.e. no gradient clipping + hparams.optimizer_adam_epsilon = 1e-9 + hparams.learning_rate_decay_scheme = "noam" + hparams.learning_rate = 0.1 + hparams.learning_rate_warmup_steps = 4000 + hparams.initializer_gain = 0.2 + hparams.initializer = "uniform_unit_scaling" + hparams.weight_decay = 0.0 + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.98 + hparams.label_smoothing = 0.0 + hparams.bottom["targets"] = modalities.make_targets_bottom( + modalities.image_channel_embeddings_bottom) + hparams.top["targets"] = modalities.identity_top + hparams.norm_type = "layer" + hparams.layer_prepostprocess_dropout = 0.0 + hparams.add_hparam("filter_size", 512) # Add new ones like this. + + # attention-related flags + hparams.add_hparam("num_heads", 8) + hparams.add_hparam("attention_key_channels", 0) + hparams.add_hparam("attention_value_channels", 0) + hparams.add_hparam("ffn_layer", "conv_hidden_relu") + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. + hparams.add_hparam("attention_dropout", 0.0) + hparams.add_hparam("relu_dropout", 0.0) + hparams.add_hparam("pos", "timing") # timing, none + hparams.add_hparam("nbr_decoder_problems", 1) + hparams.add_hparam("num_output_layers", 3) + hparams.add_hparam("block_size", 1) + + # image size related flags + # assuming that the image has same height and width + hparams.add_hparam("img_len", 32) + hparams.add_hparam("num_channels", 3) + # Local attention params + hparams.add_hparam("local_and_global_att", False) + hparams.add_hparam("block_length", 256) + hparams.add_hparam("block_width", 128) + # Local 2D attention params + hparams.add_hparam("query_shape", (16, 16)) + hparams.add_hparam("memory_flange", (16, 32)) + hparams.add_hparam("num_encoder_layers", 4) + hparams.add_hparam("num_decoder_layers", 8) + # attention type related params + hparams.add_hparam("enc_attention_type", cia.AttentionType.GLOBAL) + hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_2D) + hparams.add_hparam("block_raster_scan", False) + + # multipos attention params + hparams.add_hparam("q_filter_width", 1) + hparams.add_hparam("kv_filter_width", 1) + + hparams.add_hparam("unconditional", False) # unconditional generation + + # relative embedding hparams + hparams.add_hparam("shared_rel", False) + return hparams + + +@registry.register_hparams +def imagetransformer2d_base(): + hparams = image_transformer2d_base() + hparams.dec_attention_type = cia.AttentionType.LOCAL_2D + hparams.block_raster_scan = True + return hparams + + +@registry.register_hparams +def imagetransformer2d_base_8l_8_16(): + hparams = image_transformer2d_base() + hparams.num_decoder_layers = 8 + hparams.batch_size = 1 + hparams.memory_flange = (8, 16) + return hparams + + +@registry.register_hparams +def imagetransformer2d_base_8l_8_16_ls(): + hparams = image_transformer2d_base() + hparams.num_decoder_layers = 8 + hparams.label_smoothing = 0.05 + hparams.batch_size = 1 + hparams.memory_flange = (8, 16) + return hparams + + +@registry.register_hparams +def 
imagetransformer2d_base_8l_8_16_big(): + hparams = image_transformer2d_base() + hparams.filter_size = 1024 + hparams.num_decoder_layers = 8 + hparams.batch_size = 1 + hparams.memory_flange = (8, 16) + return hparams + + +@registry.register_hparams +def imagetransformer2d_base_12l_8_16_big(): + hparams = image_transformer2d_base() + hparams.filter_size = 1024 + hparams.num_decoder_layers = 12 + hparams.batch_size = 1 + hparams.memory_flange = (8, 16) + hparams.sampling_method = "random" + hparams.beam_size = 1 + return hparams + + +@registry.register_hparams +def imagetransformer2d_base_8l_8_32_big(): + """hparams fo 8 layer big 2d model for cifar 10.""" + hparams = image_transformer2d_base() + hparams.num_heads = 16 + hparams.hidden_size = 1024 + hparams.filter_size = 2048 + hparams.num_decoder_layers = 8 + hparams.batch_size = 1 + hparams.layer_prepostprocess_dropout = 0.3 + hparams.query_shape = (8, 16) + hparams.memory_flange = (0, 32) + hparams.unconditional = int(False) + return hparams + + +@registry.register_hparams +def imagetransformer_base_10l_8h_big_uncond_dr03_dan_64_2d(): + """big 1d model for unconditional generation on imagenet.""" + hparams = image_transformer2d_base() + hparams.unconditional = True + hparams.hidden_size = 512 + hparams.batch_size = 1 + hparams.img_len = 64 + hparams.num_heads = 8 + hparams.filter_size = 2048 + hparams.batch_size = 1 + hparams.max_length = 3075 + hparams.max_length = 14000 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.layer_prepostprocess_dropout = 0.1 + hparams.dec_attention_type = cia.AttentionType.LOCAL_2D + hparams.query_shape = (16, 16) + hparams.memory_flange = (8, 8) + return hparams + + +@registry.register_hparams +def imagetransformer2d_base_8l_8_64_64by64(): + """hparams fo 12 layer big 2d model for imagenet 64x64.""" + hparams = image_transformer2d_base() + hparams.num_heads = 8 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.num_decoder_layers = 8 + hparams.batch_size = 1 + hparams.layer_prepostprocess_dropout = 0.1 + hparams.query_shape = (8, 64) + hparams.memory_flange = (4, 32) + hparams.unconditional = int(False) + hparams.max_length = 14000 + return hparams + + +@registry.register_hparams +def imagetransformer2d_base_12l_8_64_64by64(): + """hparams fo 12 layer big 2d model for imagenet 64x64.""" + hparams = image_transformer2d_base() + hparams.num_heads = 8 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.num_decoder_layers = 12 + hparams.batch_size = 1 + hparams.layer_prepostprocess_dropout = 0.1 + hparams.query_shape = (8, 64) + hparams.memory_flange = (4, 32) + hparams.unconditional = int(False) + hparams.max_length = 14000 + return hparams + + +@registry.register_hparams +def imagetransformer2d_base_14l_8_16_big(): + hparams = image_transformer2d_base() + hparams.filter_size = 1024 + hparams.num_decoder_layers = 14 + hparams.batch_size = 1 + hparams.memory_flange = (8, 16) + return hparams + + +@registry.register_hparams +def imagetransformer2d_base_14l_8_16_big_uncond(): + hparams = imagetransformer2d_base_14l_8_16_big() + hparams.unconditional = True + return hparams + + +@registry.register_hparams +def imagetransformer2d_base_8l_8_16_big_16k(): + hparams = image_transformer2d_base() + hparams.filter_size = 1024 + hparams.num_decoder_layers = 8 + hparams.batch_size = 1 + hparams.memory_flange = (8, 16) + hparams.learning_rate_warmup_steps = 16000 + return hparams + + +@registry.register_hparams +def img2img_transformer2d_base(): 
+ """Base params for img2img 2d attention.""" + hparams = image_transformer2d_base() + # learning related flags + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + # This version seems to benefit from a higher learning rate. + hparams.learning_rate = 0.2 + hparams.layer_prepostprocess_dropout = 0.1 + hparams.learning_rate_warmup_steps = 12000 + hparams.filter_size = 2048 + hparams.num_encoder_layers = 4 + hparams.num_decoder_layers = 8 + hparams.bottom["inputs"] = modalities.image_channel_embeddings_bottom + hparams.dec_attention_type = cia.AttentionType.LOCAL_2D + hparams.block_raster_scan = True + return hparams + + +@registry.register_hparams +def img2img_transformer2d_q1(): + hparams = img2img_transformer2d_base() + hparams.batch_size = 2 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.query_shape = (16, 16) + hparams.memory_flange = (16, 64) + return hparams + + +@registry.register_hparams +def img2img_transformer2d_q2(): + hparams = img2img_transformer2d_q1() + hparams.batch_size = 2 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.query_shape = (16, 16) + hparams.memory_flange = (16, 32) + return hparams + + +@registry.register_hparams +def img2img_transformer2d_q3(): + """Current best hparams for local 2d.""" + hparams = img2img_transformer2d_q1() + hparams.batch_size = 2 + hparams.query_shape = (8, 16) + hparams.memory_flange = (8, 32) + return hparams + + +@registry.register_hparams +def img2img_transformer_base(): + """Base params for local1d attention.""" + hparams = image_transformer2d_base() + # learning related flags + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + # This version seems to benefit from a higher learning rate. 
+ hparams.learning_rate = 0.2 + hparams.layer_prepostprocess_dropout = 0.1 + hparams.learning_rate_warmup_steps = 12000 + hparams.filter_size = 2048 + hparams.num_encoder_layers = 4 + hparams.num_decoder_layers = 8 + hparams.block_length = 256 + hparams.block_width = 256 + hparams.dec_attention_type = cia.AttentionType.LOCAL_1D + hparams.block_raster_scan = False + return hparams + + +@registry.register_hparams +def img2img_transformer_b1(): + hparams = img2img_transformer_base() + hparams.batch_size = 2 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.block_length = 512 + return hparams + + +@registry.register_hparams +def img2img_transformer_b2(): + hparams = img2img_transformer_base() + hparams.batch_size = 2 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.block_length = 256 + return hparams + + +@registry.register_hparams +def img2img_transformer_b3(): + """Current best hparams for local 1d.""" + hparams = img2img_transformer_base() + hparams.batch_size = 2 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.block_length = 128 + hparams.sampling_temp = 0.9 + return hparams + + +@registry.register_hparams +def img2img_transformer_b3_bs1(): + hparams = img2img_transformer_b3() + hparams.block_size = 1 + return hparams + + +@registry.register_hparams +def img2img_transformer_b3_bs2(): + hparams = img2img_transformer_b3() + hparams.block_size = 2 + return hparams + + +@registry.register_hparams +def img2img_transformer_b3_bs3(): + hparams = img2img_transformer_b3() + hparams.block_size = 3 + return hparams + + +@registry.register_hparams +def img2img_transformer_b3_bs4(): + hparams = img2img_transformer_b3() + hparams.block_size = 4 + return hparams + + +@registry.register_hparams +def img2img_transformer_b3_bs5(): + hparams = img2img_transformer_b3() + hparams.block_size = 5 + return hparams + + +@registry.register_hparams +def img2img_transformer_b3_bs6(): + hparams = img2img_transformer_b3() + hparams.block_size = 6 + return hparams + + +@registry.register_hparams +def img2img_transformer_b3_bs7(): + hparams = img2img_transformer_b3() + hparams.block_size = 7 + return hparams + + +@registry.register_hparams +def img2img_transformer_b3_bs8(): + hparams = img2img_transformer_b3() + hparams.block_size = 8 + return hparams + + +@registry.register_hparams +def img2img_transformer_b3_bs9(): + hparams = img2img_transformer_b3() + hparams.block_size = 9 + return hparams + + +@registry.register_hparams +def img2img_transformer_b3_bs10(): + hparams = img2img_transformer_b3() + hparams.block_size = 10 + return hparams + + +@registry.register_hparams +def img2img_transformer_dilated(): + """Try dilated.""" + hparams = img2img_transformer_base() + hparams.add_hparam("num_memory_blocks", 1) + hparams.num_heads = 8 + hparams.attention_key_channels = hparams.attention_value_channels = 0 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.num_decoder_layers = 8 + hparams.sampling_method = "random" + hparams.gap_sizes = [0, 16, 64, 0, 16, 64, 128, 0] + hparams.dec_attention_type = cia.AttentionType.DILATED + hparams.img_len = 64 + hparams.block_length = 128 + hparams.block_width = 128 + return hparams + + +@registry.register_hparams +def imagetransformer2d_tiny(): + hparams = imagetransformer2d_base() + hparams.num_decoder_layers = 2 + hparams.hidden_size = 64 + hparams.batch_size = 1 + return hparams + + +def 
update_hparams_for_tpu(hparams): + hparams.use_pad_remover = False # where op not supported + hparams.optimizer = "true_adam" + hparams.batch_size = 4 + + +@registry.register_hparams +def img2img_transformer_base_tpu(): + """Hparams for training img2img_transformer on tpu.""" + hparams = img2img_transformer_base() + update_hparams_for_tpu(hparams) + hparams.batch_size = 2 + hparams.num_heads = 4 # heads are expensive on tpu + hparams.num_decoder_layers = 8 + hparams.num_encoder_layers = 4 + hparams.shared_embedding_and_softmax_weights = False + return hparams + + +@registry.register_hparams +def img2img_transformer_tiny_tpu(): + hparams = img2img_transformer_base_tpu() + hparams.num_hidden_layers = 2 + hparams.hidden_size = 16 + hparams.batch_size = 2 + hparams.num_heads = 2 + return hparams + + +@registry.register_hparams +def img2img_transformer2d_n3(): + hparams = img2img_transformer2d_base() + hparams.batch_size = 1 + hparams.num_encoder_layers = 4 + hparams.num_decoder_layers = 12 + hparams.query_shape = (16, 32) + hparams.memory_flange = (16, 16) + hparams.layer_prepostprocess_dropout = 0.0 + return hparams + + +@registry.register_hparams +def img2img_transformer2d_n31(): + """Set of hyperparameters.""" + hparams = img2img_transformer2d_base() + hparams.batch_size = 1 + hparams.num_encoder_layers = 6 + hparams.num_decoder_layers = 12 + hparams.num_heads = 8 + hparams.query_shape = (16, 32) + hparams.memory_flange = (16, 32) + return hparams + + +@registry.register_hparams +def img2img_transformer2d_n24(): + """Set of hyperparameters.""" + hparams = img2img_transformer2d_base() + hparams.batch_size = 1 + hparams.hidden_size = 1024 + hparams.filter_size = 2048 + hparams.layer_prepostprocess_dropout = 0.2 + hparams.num_decoder_layers = 8 + hparams.query_shape = (8, 16) + hparams.memory_flange = (8, 32) + return hparams + + +@registry.register_hparams +def img2img_transformer2d_n44(): + hparams = img2img_transformer2d_base() + hparams.batch_size = 1 + hparams.num_decoder_layers = 8 + hparams.query_shape = (8, 16) + hparams.memory_flange = (8, 32) + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def img2img_transformer2d_n103(): + """Best config for img2img.""" + hparams = img2img_transformer2d_base() + hparams.batch_size = 1 + hparams.num_decoder_layers = 12 + hparams.num_encoder_layers = 6 + hparams.query_shape = (8, 32) + hparams.memory_flange = (8, 64) + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def img2img_transformer2d_tiny(): + """Tiny params.""" + hparams = img2img_transformer2d_base() + hparams.num_decoder_layers = 2 + hparams.hidden_size = 128 + hparams.batch_size = 4 + hparams.max_length = 128 + hparams.attention_key_channels = hparams.attention_value_channels = 0 + hparams.filter_size = 128 + hparams.num_heads = 4 + hparams.pos = "timing" + hparams.img_len = 32 + return hparams + + +@registry.register_hparams +def img2img_transformer_tiny(): + """Tiny params.""" + hparams = img2img_transformer2d_base() + hparams.num_hidden_layers = 2 + hparams.hidden_size = 128 + hparams.batch_size = 4 + hparams.max_length = 128 + hparams.attention_key_channels = hparams.attention_value_channels = 0 + hparams.filter_size = 128 + hparams.num_heads = 1 + hparams.pos = "timing" + return hparams diff --git a/tensor2tensor/models/image_transformer_2d_test.py b/tensor2tensor/models/image_transformer_2d_test.py new file mode 100644 index 000000000..de3e73837 --- /dev/null +++ 
b/tensor2tensor/models/image_transformer_2d_test.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for Transformer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np + +from tensor2tensor.data_generators import celeba # pylint: disable=unused-import +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.models import image_transformer_2d +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class Img2imgTransformerTest(tf.test.TestCase): + + def _test_img2img_transformer(self, net): + batch_size = 3 + hparams = image_transformer_2d.img2img_transformer2d_tiny() + hparams.data_dir = "" + p_hparams = registry.problem("image_celeba").get_hparams(hparams) + inputs = np.random.randint(256, size=(batch_size, 4, 4, 3)) + targets = np.random.randint(256, size=(batch_size, 8, 8, 3)) + with self.test_session() as session: + features = { + "inputs": tf.constant(inputs, dtype=tf.int32), + "targets": tf.constant(targets, dtype=tf.int32), + "target_space_id": tf.constant(1, dtype=tf.int32), + } + model = net(hparams, tf_estimator.ModeKeys.TRAIN, p_hparams) + logits, _ = model(features) + session.run(tf.global_variables_initializer()) + res = session.run(logits) + self.assertEqual(res.shape, (batch_size, 8, 8, 3, 256)) + + def testImg2imgTransformer(self): + self._test_img2img_transformer(image_transformer_2d.Img2imgTransformer) + + +class Imagetransformer2dTest(tf.test.TestCase): + + def _test_imagetransformer_2d(self, net): + batch_size = 3 + size = 7 + vocab_size = 256 + hparams = image_transformer_2d.imagetransformer2d_tiny() + p_hparams = problem_hparams.test_problem_hparams(vocab_size, + vocab_size, + hparams) + inputs = np.random.randint( + vocab_size, size=(batch_size, 1, 1, 1)) + targets = np.random.randint( + vocab_size, size=(batch_size, size, size, 3)) + with self.test_session() as session: + features = { + "inputs": tf.constant(inputs, dtype=tf.int32), + "targets": tf.constant(targets, dtype=tf.int32), + "target_space_id": tf.constant(1, dtype=tf.int32), + } + model = net(hparams, tf_estimator.ModeKeys.TRAIN, p_hparams) + logits, _ = model(features) + session.run(tf.global_variables_initializer()) + res = session.run(logits) + self.assertEqual(res.shape, (batch_size, size, size, 3, vocab_size)) + + def testImagetransformer2d(self): + self._test_imagetransformer_2d(image_transformer_2d.Imagetransformer2d) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/image_transformer_test.py b/tensor2tensor/models/image_transformer_test.py new file mode 100644 index 000000000..6dde81d5e --- /dev/null +++ b/tensor2tensor/models/image_transformer_test.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for Transformer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +import numpy as np + +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.layers import common_image_attention +from tensor2tensor.models import image_transformer + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class ImagetransformerTest(parameterized.TestCase, tf.test.TestCase): + + @parameterized.named_parameters( + ("ImageTransformerCat", + image_transformer.Imagetransformer, + image_transformer.imagetransformer_tiny()), + ("ImageTransformerDmol", + image_transformer.Imagetransformer, + image_transformer.imagetransformerpp_tiny()), + ) + def testImagetransformer(self, net, hparams): + batch_size = 3 + size = 7 + vocab_size = 256 + p_hparams = problem_hparams.test_problem_hparams(vocab_size, + vocab_size, + hparams) + inputs = np.random.randint( + vocab_size, size=(batch_size, 1, 1, 1)) + targets = np.random.randint( + vocab_size, size=(batch_size, size, size, 3)) + with self.test_session() as session: + features = { + "inputs": tf.constant(inputs, dtype=tf.int32), + "targets": tf.constant(targets, dtype=tf.int32), + "target_space_id": tf.constant(1, dtype=tf.int32), + } + model = net(hparams, tf_estimator.ModeKeys.TRAIN, p_hparams) + logits, _ = model(features) + session.run(tf.global_variables_initializer()) + res = session.run(logits) + if hparams.likelihood == common_image_attention.DistributionType.CAT: + expected = (batch_size, size, size, 3, vocab_size) + else: + expected = (batch_size, size, size, hparams.num_mixtures * 10) + self.assertEqual(res.shape, expected) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/lstm.py b/tensor2tensor/models/lstm.py new file mode 100644 index 000000000..f59dabb19 --- /dev/null +++ b/tensor2tensor/models/lstm.py @@ -0,0 +1,524 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""RNN LSTM models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +from tensor2tensor.layers import area_attention +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +def _dropout_lstm_cell(hparams, train): + return tf.nn.rnn_cell.DropoutWrapper( + tf.nn.rnn_cell.LSTMCell(hparams.hidden_size), + input_keep_prob=1.0 - hparams.dropout * tf.to_float(train)) + + +def lstm(inputs, sequence_length, hparams, train, name, initial_state=None): + """Adds a stack of LSTM layers on top of input. + + Args: + inputs: The input `Tensor`, shaped `[batch_size, time_steps, hidden_size]`. + sequence_length: Lengths of the actual input sequence, excluding padding; a + `Tensor` shaped `[batch_size]`. + hparams: HParams; hyperparameters. + train: bool; `True` when constructing training graph to enable dropout. + name: string; Create variable names under this scope. + initial_state: tuple of `LSTMStateTuple`s; the initial state of each layer. + + Returns: + A tuple (outputs, states), where: + outputs: The output `Tensor`, shaped `[batch_size, time_steps, + hidden_size]`. + states: A tuple of `LSTMStateTuple`s; the final state of each layer. + Bidirectional LSTM returns a concatenation of last forward and backward + state, reduced to the original dimensionality. + """ + layers = [_dropout_lstm_cell(hparams, train) + for _ in range(hparams.num_hidden_layers)] + with tf.variable_scope(name): + return tf.nn.dynamic_rnn( + tf.nn.rnn_cell.MultiRNNCell(layers), + inputs, + sequence_length, + initial_state=initial_state, + dtype=tf.float32, + time_major=False) + + +def lstm_attention_decoder(inputs, hparams, train, name, initial_state, + encoder_outputs, encoder_output_length, + decoder_input_length): + """Run LSTM cell with attention on inputs of shape [batch x time x size]. + + Args: + inputs: The decoder input `Tensor`, shaped `[batch_size, decoder_steps, + hidden_size]`. + hparams: HParams; hyperparameters. + train: bool; `True` when constructing training graph to enable dropout. + name: string; Create variable names under this scope. + initial_state: Tuple of `LSTMStateTuple`s; the initial state of each layer. + encoder_outputs: Encoder outputs; a `Tensor` shaped `[batch_size, + encoder_steps, hidden_size]`. + encoder_output_length: Lengths of the actual encoder outputs, excluding + padding; a `Tensor` shaped `[batch_size]`. + decoder_input_length: Lengths of the actual decoder inputs, excluding + padding; a `Tensor` shaped `[batch_size]`. + + Raises: + ValueError: If the hparams.attention_mechanism is anything other than + luong or bahdanau. + + Returns: + The decoder output `Tensor`, shaped `[batch_size, decoder_steps, + hidden_size]`. + """ + layers = [_dropout_lstm_cell(hparams, train) + for _ in range(hparams.num_hidden_layers)] + if hparams.attention_mechanism == "luong": + attention_mechanism_class = contrib.seq2seq().LuongAttention + elif hparams.attention_mechanism == "bahdanau": + attention_mechanism_class = contrib.seq2seq().BahdanauAttention + else: + raise ValueError("Unknown hparams.attention_mechanism = %s, must be " + "luong or bahdanau." 
% hparams.attention_mechanism) + if hparams.get("max_area_width", 1) > 1: + def _area_key_value_fn(keys, values): + """Custom fn for computing area keys and values.""" + tf.logging.info("max_area_width=%d, area_key_mode=%s, area_value_mode=%s", + hparams.get("max_area_width", 1), + hparams.get("area_key_mode", "none"), + hparams.get("area_value_mode", "none")) + keys = area_attention.compute_area_key( + keys, max_area_width=hparams.get("max_area_width", 1), + mode=hparams.get("area_key_mode", "none"), name="decoder_encoder", + training=(hparams.mode == tf_estimator.ModeKeys.TRAIN)) + if hparams.get("area_value_mode", "none") == "sum": + _, _, values, _, _ = area_attention.compute_area_features( + values, max_area_width=hparams.get("max_area_width", 1)) + elif hparams.get("area_value_mode", "none") == "mean": + values, _, _, _, _ = area_attention.compute_area_features( + values, max_area_width=hparams.get("max_area_width", 1)) + else: + raise ValueError( + "Unsupported area_value_mode: %s" % hparams.get( + "area_value_mode", "none")) + return keys, values + area_mask = area_attention.lengths_to_area_mask( + feature_length=encoder_output_length, + length=common_layers.shape_list(encoder_outputs)[1], + max_area_size=hparams.get("max_area_width", "1")) + def _area_prob_fn(score): + alignments = tf.nn.softmax(score) + alignments = tf.where(area_mask, alignments, tf.zeros_like(alignments)) + alignments = tf.div(alignments, tf.reduce_sum( + alignments, axis=-1, keepdims=True)) + return alignments + attention_mechanism = attention_mechanism_class( + hparams.hidden_size, encoder_outputs, + memory_sequence_length=None, + probability_fn=_area_prob_fn, + custom_key_value_fn=_area_key_value_fn) + else: + attention_mechanism = attention_mechanism_class(hparams.hidden_size, + encoder_outputs) + cell = contrib.seq2seq().AttentionWrapper( + tf.nn.rnn_cell.MultiRNNCell(layers), + [attention_mechanism] * hparams.num_heads, + attention_layer_size=[hparams.attention_layer_size] * hparams.num_heads, + output_attention=(hparams.output_attention == 1)) + + batch_size = common_layers.shape_list(inputs)[0] + + initial_state = cell.zero_state(batch_size, tf.float32).clone( + cell_state=initial_state) + + with tf.variable_scope(name): + output, _ = tf.nn.dynamic_rnn( + cell, + inputs, + decoder_input_length, + initial_state=initial_state, + dtype=tf.float32, + time_major=False) + # output is [batch_size, decoder_steps, attention_size], where + # attention_size is either hparams.hidden_size (when + # hparams.output_attention is 0) or hparams.attention_layer_size (when + # hparams.output_attention is 1) times the number of attention heads. + # + # For multi-head attention project output back to hidden size. + if hparams.output_attention == 1 and hparams.num_heads > 1: + output = tf.layers.dense(output, hparams.hidden_size) + + return output + + +def lstm_seq2seq_internal(inputs, targets, hparams, train): + """The basic LSTM seq2seq model, main step used for training.""" + with tf.variable_scope("lstm_seq2seq"): + if inputs is not None: + inputs_length = common_layers.length_from_embedding(inputs) + # Flatten inputs. + inputs = common_layers.flatten4d3d(inputs) + + # LSTM encoder. + inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1) + _, final_encoder_state = lstm(inputs, inputs_length, hparams, train, + "encoder") + else: + final_encoder_state = None + + # LSTM decoder. 
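+    # Teacher forcing: the decoder consumes right-shifted targets and is
+    # initialized from the final encoder state.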
+ shifted_targets = common_layers.shift_right(targets) + # Add 1 to account for the padding added to the left from shift_right + targets_length = common_layers.length_from_embedding(shifted_targets) + 1 + decoder_outputs, _ = lstm( + common_layers.flatten4d3d(shifted_targets), + targets_length, + hparams, + train, + "decoder", + initial_state=final_encoder_state) + return tf.expand_dims(decoder_outputs, axis=2) + + +def lstm_seq2seq_internal_attention(inputs, targets, hparams, train, + inputs_length, targets_length): + """LSTM seq2seq model with attention, main step used for training.""" + with tf.variable_scope("lstm_seq2seq_attention"): + # Flatten inputs. + inputs = common_layers.flatten4d3d(inputs) + + # LSTM encoder. + inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1) + encoder_outputs, final_encoder_state = lstm( + inputs, inputs_length, hparams, train, "encoder") + + # LSTM decoder with attention. + shifted_targets = common_layers.shift_right(targets) + # Add 1 to account for the padding added to the left from shift_right + targets_length = targets_length + 1 + decoder_outputs = lstm_attention_decoder( + common_layers.flatten4d3d(shifted_targets), hparams, train, "decoder", + final_encoder_state, encoder_outputs, inputs_length, targets_length) + return tf.expand_dims(decoder_outputs, axis=2) + + +def lstm_bid_encoder(inputs, sequence_length, hparams, train, name): + """Bidirectional LSTM for encoding inputs that are [batch x time x size].""" + + with tf.variable_scope(name): + cell_fw = tf.nn.rnn_cell.MultiRNNCell( + [_dropout_lstm_cell(hparams, train) + for _ in range(hparams.num_hidden_layers)]) + + cell_bw = tf.nn.rnn_cell.MultiRNNCell( + [_dropout_lstm_cell(hparams, train) + for _ in range(hparams.num_hidden_layers)]) + + ((encoder_fw_outputs, encoder_bw_outputs), + (encoder_fw_state, encoder_bw_state)) = tf.nn.bidirectional_dynamic_rnn( + cell_fw, + cell_bw, + inputs, + sequence_length, + dtype=tf.float32, + time_major=False) + + encoder_outputs = tf.concat((encoder_fw_outputs, encoder_bw_outputs), 2) + encoder_states = [] + + for i in range(hparams.num_hidden_layers): + if isinstance(encoder_fw_state[i], tf.nn.rnn_cell.LSTMStateTuple): + encoder_state_c = tf.concat( + values=(encoder_fw_state[i].c, encoder_bw_state[i].c), + axis=1, + name="encoder_fw_state_c") + encoder_state_h = tf.concat( + values=(encoder_fw_state[i].h, encoder_bw_state[i].h), + axis=1, + name="encoder_fw_state_h") + encoder_state = tf.nn.rnn_cell.LSTMStateTuple( + c=encoder_state_c, h=encoder_state_h) + elif isinstance(encoder_fw_state[i], tf.Tensor): + encoder_state = tf.concat( + values=(encoder_fw_state[i], encoder_bw_state[i]), + axis=1, + name="bidirectional_concat") + + encoder_states.append(encoder_state) + + encoder_states = tuple(encoder_states) + return encoder_outputs, encoder_states + + +def lstm_seq2seq_internal_bid_encoder(inputs, targets, hparams, train): + """The basic LSTM seq2seq model with bidirectional encoder.""" + with tf.variable_scope("lstm_seq2seq_bid_encoder"): + if inputs is not None: + inputs_length = common_layers.length_from_embedding(inputs) + # Flatten inputs. + inputs = common_layers.flatten4d3d(inputs) + # LSTM encoder. + _, final_encoder_state = lstm_bid_encoder( + inputs, inputs_length, hparams, train, "encoder") + else: + inputs_length = None + final_encoder_state = None + # LSTM decoder. 
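+    # The bidirectional encoder concatenates forward and backward states, so
+    # the decoder below is run with twice the hidden size to match.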
+ shifted_targets = common_layers.shift_right(targets) + # Add 1 to account for the padding added to the left from shift_right + targets_length = common_layers.length_from_embedding(shifted_targets) + 1 + hparams_decoder = copy.copy(hparams) + hparams_decoder.hidden_size = 2 * hparams.hidden_size + decoder_outputs, _ = lstm( + common_layers.flatten4d3d(shifted_targets), + targets_length, + hparams_decoder, + train, + "decoder", + initial_state=final_encoder_state) + return tf.expand_dims(decoder_outputs, axis=2) + + +def lstm_seq2seq_internal_attention_bid_encoder(inputs, targets, hparams, + train): + """LSTM seq2seq model with attention, main step used for training.""" + with tf.variable_scope("lstm_seq2seq_attention_bid_encoder"): + inputs_length = common_layers.length_from_embedding(inputs) + # Flatten inputs. + inputs = common_layers.flatten4d3d(inputs) + # LSTM encoder. + encoder_outputs, final_encoder_state = lstm_bid_encoder( + inputs, inputs_length, hparams, train, "encoder") + # LSTM decoder with attention + shifted_targets = common_layers.shift_right(targets) + # Add 1 to account for the padding added to the left from shift_right + targets_length = common_layers.length_from_embedding(shifted_targets) + 1 + hparams_decoder = copy.copy(hparams) + hparams_decoder.hidden_size = 2 * hparams.hidden_size + decoder_outputs = lstm_attention_decoder( + common_layers.flatten4d3d(shifted_targets), hparams_decoder, train, + "decoder", final_encoder_state, encoder_outputs, + inputs_length, targets_length) + return tf.expand_dims(decoder_outputs, axis=2) + + +@registry.register_model +class LSTMEncoder(t2t_model.T2TModel): + """LSTM encoder only.""" + + def body(self, features): + if self._hparams.initializer == "orthogonal": + raise ValueError("LSTM models fail with orthogonal initializer.") + train = self._hparams.mode == tf_estimator.ModeKeys.TRAIN + inputs = features.get("inputs") + inputs_length = common_layers.length_from_embedding(inputs) + # Flatten inputs. + inputs = common_layers.flatten4d3d(inputs) + # LSTM encoder. + inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1) + encoder_output, _ = lstm(inputs, inputs_length, self._hparams, train, + "encoder") + return tf.expand_dims(encoder_output, axis=2) + + +@registry.register_model +class LSTMSeq2seq(t2t_model.T2TModel): + + def body(self, features): + # TODO(lukaszkaiser): investigate this issue and repair. + if self._hparams.initializer == "orthogonal": + raise ValueError("LSTM models fail with orthogonal initializer.") + train = self._hparams.mode == tf_estimator.ModeKeys.TRAIN + return lstm_seq2seq_internal(features.get("inputs"), features["targets"], + self._hparams, train) + + +@registry.register_model +class LSTMSeq2seqAttention(t2t_model.T2TModel): + """Seq to seq LSTM with attention.""" + + def body(self, features): + # TODO(lukaszkaiser): investigate this issue and repair. + if self._hparams.initializer == "orthogonal": + raise ValueError("LSTM models fail with orthogonal initializer.") + train = self._hparams.mode == tf_estimator.ModeKeys.TRAIN + # This is a temporary fix for varying-length sequences within in a batch. + # A more complete fix should pass a length tensor from outside so that + # all the lstm variants can use it. 
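+    # Sequence lengths are recovered from the raw token ids: clipping ids to
+    # {0, 1} and summing counts the non-padding positions (assumes padding
+    # id 0, as elsewhere in T2T).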
+ input_shape = common_layers.shape_list(features["inputs_raw"]) + flat_input = tf.reshape(features["inputs_raw"], + [input_shape[0], input_shape[1]]) + inputs_length = tf.reduce_sum(tf.minimum(flat_input, 1), -1) + target_shape = common_layers.shape_list(features["targets_raw"]) + flat_target = tf.reshape(features["targets_raw"], + [target_shape[0], target_shape[1]]) + targets_length = tf.reduce_sum(tf.minimum(flat_target, 1), -1) + tf.logging.info(self._hparams) + return lstm_seq2seq_internal_attention( + features["inputs"], features["targets"], self._hparams, train, + inputs_length, targets_length) + + +@registry.register_model +class LSTMSeq2seqBidirectionalEncoder(t2t_model.T2TModel): + + def body(self, features): + # TODO(lukaszkaiser): investigate this issue and repair. + if self._hparams.initializer == "orthogonal": + raise ValueError("LSTM models fail with orthogonal initializer.") + train = self._hparams.mode == tf_estimator.ModeKeys.TRAIN + return lstm_seq2seq_internal_bid_encoder( + features.get("inputs"), features["targets"], self._hparams, train) + + +@registry.register_model +class LSTMSeq2seqAttentionBidirectionalEncoder(t2t_model.T2TModel): + + def body(self, features): + # TODO(lukaszkaiser): investigate this issue and repair. + if self._hparams.initializer == "orthogonal": + raise ValueError("LSTM models fail with orthogonal initializer.") + train = self._hparams.mode == tf_estimator.ModeKeys.TRAIN + return lstm_seq2seq_internal_attention_bid_encoder( + features.get("inputs"), features["targets"], self._hparams, train) + + +@registry.register_hparams +def lstm_seq2seq(): + """hparams for LSTM.""" + hparams = common_hparams.basic_params1() + hparams.daisy_chain_variables = False + hparams.batch_size = 1024 + hparams.hidden_size = 128 + hparams.num_hidden_layers = 2 + hparams.initializer = "uniform_unit_scaling" + hparams.initializer_gain = 1.0 + hparams.weight_decay = 0.0 + return hparams + + +def lstm_attention_base(): + """Base attention params.""" + hparams = lstm_seq2seq() + hparams.add_hparam("attention_layer_size", hparams.hidden_size) + hparams.add_hparam("output_attention", True) + hparams.add_hparam("num_heads", 1) + return hparams + + +@registry.register_hparams +def lstm_bahdanau_attention(): + """Hparams for LSTM with bahdanau attention.""" + hparams = lstm_attention_base() + hparams.add_hparam("attention_mechanism", "bahdanau") + return hparams + + +@registry.register_hparams +def lstm_luong_attention(): + """Hparams for LSTM with luong attention.""" + hparams = lstm_attention_base() + hparams.add_hparam("attention_mechanism", "luong") + return hparams + + +@registry.register_hparams +def lstm_attention(): + """For backwards compatibility, defaults to bahdanau.""" + return lstm_bahdanau_attention() + + +@registry.register_hparams +def lstm_bahdanau_attention_multi(): + """Multi-head Bahdanau attention.""" + hparams = lstm_bahdanau_attention() + hparams.num_heads = 4 + return hparams + + +@registry.register_hparams +def lstm_luong_attention_multi(): + """Multi-head Luong attention.""" + hparams = lstm_luong_attention() + hparams.num_heads = 4 + return hparams + + +@registry.register_hparams +def lstm_asr_v1(): + """Basic LSTM Params.""" + hparams = lstm_bahdanau_attention() + hparams.num_hidden_layers = 2 + hparams.hidden_size = 256 + hparams.batch_size = 36 + hparams.max_input_seq_length = 600000 + hparams.max_target_seq_length = 350 + hparams.max_length = hparams.max_input_seq_length + hparams.min_length_bucket = hparams.max_input_seq_length // 2 + 
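+  # ASR inputs are long frame sequences, hence the very large input-length cap
+  # relative to the 350-step target cap above. Illustrative usage (flag names
+  # as in the t2t-trainer CLI; the problem name is only an example):
+  #   t2t-trainer --model=lstm_seq2seq_attention --hparams_set=lstm_asr_v1 \
+  #     --problem=librispeech --data_dir=$DATA_DIR --output_dir=$OUT_DIR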
hparams.learning_rate = 0.05 + return hparams + + +@registry.register_hparams +def lstm_area_attention_base(): + """Hparams for LSTM with area attention.""" + hparams = lstm_luong_attention() + hparams.batch_size = 16384 + hparams.num_hidden_layers = 2 + hparams.hidden_size = 1024 + hparams.num_heads = 4 + hparams.dropout = 0.2 + hparams.learning_rate = 0.1 + hparams.max_area_width = 2 + hparams.area_key_mode = "mean" + hparams.area_value_mode = "sum" + return hparams + + +@registry.register_hparams +def lstm_area_attention_enfr(): + """Hparams for LSTM with area attention.""" + hparams = lstm_area_attention_base() + hparams.dropout = 0.1 + return hparams + + +@registry.register_hparams +def lstm_area_attention_char(): + """Hparams for LSTM with area attention.""" + hparams = lstm_area_attention_base() + hparams.batch_size = 20480 + return hparams + + +@registry.register_hparams +def lstm_area_attention_char_enfr(): + """Hparams for LSTM with area attention.""" + hparams = lstm_area_attention_char() + hparams.dropout = 0.1 + return hparams diff --git a/tensor2tensor/models/lstm_test.py b/tensor2tensor/models/lstm_test.py new file mode 100644 index 000000000..4723998db --- /dev/null +++ b/tensor2tensor/models/lstm_test.py @@ -0,0 +1,120 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""LSTMSeq2Seq models tests.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np + +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.models import lstm + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class LSTMTest(tf.test.TestCase): + + def testLSTMSeq2Seq(self): + vocab_size = 9 + x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1)) + y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1)) + hparams = lstm.lstm_seq2seq() + p_hparams = problem_hparams.test_problem_hparams(vocab_size, + vocab_size, + hparams) + with self.test_session() as session: + features = { + "inputs": tf.constant(x, dtype=tf.int32), + "targets": tf.constant(y, dtype=tf.int32), + } + model = lstm.LSTMSeq2seq(hparams, tf_estimator.ModeKeys.TRAIN, + p_hparams) + logits, _ = model(features) + session.run(tf.global_variables_initializer()) + res = session.run(logits) + self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size)) + + def testLSTMSeq2SeqAttention(self): + vocab_size = 9 + x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1)) + y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1)) + hparams = lstm.lstm_attention() + + p_hparams = problem_hparams.test_problem_hparams(vocab_size, + vocab_size, + hparams) + x = tf.constant(x, dtype=tf.int32) + x = tf.placeholder_with_default(x, shape=[None, None, 1, 1]) + + with self.test_session() as session: + features = { + "inputs": x, + "targets": tf.constant(y, dtype=tf.int32), + } + model = lstm.LSTMSeq2seqAttention( + hparams, tf_estimator.ModeKeys.TRAIN, p_hparams) + logits, _ = model(features) + session.run(tf.global_variables_initializer()) + res = session.run(logits) + self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size)) + + def testLSTMSeq2seqBidirectionalEncoder(self): + vocab_size = 9 + x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1)) + y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1)) + hparams = lstm.lstm_seq2seq() + p_hparams = problem_hparams.test_problem_hparams(vocab_size, + vocab_size, + hparams) + with self.test_session() as session: + features = { + "inputs": tf.constant(x, dtype=tf.int32), + "targets": tf.constant(y, dtype=tf.int32), + } + model = lstm.LSTMSeq2seqBidirectionalEncoder( + hparams, tf_estimator.ModeKeys.TRAIN, p_hparams) + logits, _ = model(features) + session.run(tf.global_variables_initializer()) + res = session.run(logits) + self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size)) + + def testLSTMSeq2seqAttentionBidirectionalEncoder(self): + vocab_size = 9 + x = np.random.randint(1, high=vocab_size, size=(3, 5, 1, 1)) + y = np.random.randint(1, high=vocab_size, size=(3, 6, 1, 1)) + hparams = lstm.lstm_attention() + + p_hparams = problem_hparams.test_problem_hparams(vocab_size, vocab_size) + x = tf.constant(x, dtype=tf.int32) + x = tf.placeholder_with_default(x, shape=[None, None, 1, 1]) + + with self.test_session() as session: + features = { + "inputs": x, + "targets": tf.constant(y, dtype=tf.int32), + } + model = lstm.LSTMSeq2seqAttentionBidirectionalEncoder( + hparams, tf_estimator.ModeKeys.TRAIN, p_hparams) + logits, _ = model(features) + session.run(tf.global_variables_initializer()) + res = session.run(logits) + self.assertEqual(res.shape, (3, 6, 1, 1, vocab_size)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/mtf_image_transformer.py b/tensor2tensor/models/mtf_image_transformer.py new 
file mode 100644 index 000000000..dffe8c66b --- /dev/null +++ b/tensor2tensor/models/mtf_image_transformer.py @@ -0,0 +1,637 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Image Transformer model with model and data parallelism using MTF. + +Integration of Mesh tensorflow with Image Transformer to do model parallelism. +Currently, this supports unconditional image generation. Specify a particular +architecture layout in the hparams that specifies how different dimensions are +split or replicated along the mesh dimensions. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import mesh_tensorflow as mtf + +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import mtf_model +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +@registry.register_model +class MtfImageTransformer(mtf_model.MtfModel): + """Image Transformer in mesh_tensorflow.""" + + @property + def inputs_vocab_dim(self): + assert self.has_input + return mtf.Dimension("inputs_vocab", self._hparams.num_classes) + + @property + def targets_vocab_dim(self): + vocab_size = self._problem_hparams.vocab_size["targets"] + if hasattr(self._hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % self._hparams.vocab_divisor + return mtf.Dimension("vocab", vocab_size) + + @property + def outputs_vocab_dim(self): + return mtf.Dimension("output_vocab", 256) + + @property + def pos_dim(self): + return mtf.Dimension("pos", self._hparams.img_len) + + @property + def rows_dim(self): + return mtf.Dimension("rows", self._hparams.img_len) + + @property + def cols_dim(self): + return mtf.Dimension( + "cols", self._hparams.img_len*self._hparams.num_channels) + + @property + def orig_cols_dim(self): + return mtf.Dimension("orig_cols", self._hparams.img_len) + + @property + def channels_dim(self): + return mtf.Dimension("channels", self._hparams.num_channels) + + @property + def model_dim(self): + return mtf.Dimension("d_model", self._hparams.hidden_size) + + @property + def max_length_dim(self): + return mtf.Dimension( + "max_length", + self._hparams.img_len*self._hparams.img_len*self._hparams.num_channels) + + @property + def length_dim(self): + return mtf.Dimension( + "length", + self._hparams.img_len*self._hparams.img_len*self._hparams.num_channels) + + @property + def heads_dim(self): + return mtf.Dimension("heads", self._hparams.num_heads) + + @property + def kv_dim(self): + return mtf.Dimension("d_kv", self._hparams.d_kv) + + @property + def feedforward_dim(self): + return mtf.Dimension("d_ff", self._hparams.d_ff) + + @property + def activation_type(self): + hparams = self._hparams + if hparams.activation_dtype == "float32": + activation_dtype = tf.float32 + elif hparams.activation_dtype == "float16": + activation_dtype = tf.float16 + 
elif hparams.activation_dtype == "bfloat16": + activation_dtype = tf.bfloat16 + else: + raise ValueError( + "unknown hparams.activation_dtype %s" % hparams.activation_dtype) + return activation_dtype + + def create_positional_emb_2d(self, targets): + """Learned 2d positional embedding for images.""" + mesh = targets.mesh + + positional_emb_rows_var = mtf.get_variable( + mesh, "positional_emb_rows", + mtf.Shape([self.pos_dim, self.model_dim]), + initializer=tf.random_normal_initializer(), + activation_dtype=self.activation_type) + positional_emb_cols_var = mtf.get_variable( + mesh, "positional_emb_cols", + mtf.Shape([self.pos_dim, self.model_dim]), + initializer=tf.random_normal_initializer(), + activation_dtype=self.activation_type) + + targets_position_x = mtf.range(mesh, self.rows_dim, dtype=tf.int32) + targets_position_y = mtf.range(mesh, self.cols_dim, dtype=tf.int32) + position_x = mtf.broadcast( + mtf.gather(positional_emb_rows_var, targets_position_x, + self.pos_dim), + mtf.Shape([self.rows_dim, self.cols_dim, self.model_dim])) + + position_y = mtf.broadcast( + mtf.gather(positional_emb_cols_var, targets_position_y, + self.pos_dim), + mtf.Shape([self.rows_dim, self.cols_dim, self.model_dim])) + return position_x + position_y + + def mtf_model_fn(self, features, mesh): + features = copy.copy(features) + tf.logging.info("features = %s" % features) + hparams = self._hparams + activation_dtype = self.activation_type + + # We assume fixed vocab size for targets + targets = tf.to_int32(features["targets"]) + + # Image preprocessing, reshape into a 1D sequence and shift right. + length = hparams.img_len*hparams.img_len*hparams.num_channels + targets = tf.reshape(targets, [hparams.batch_size, length]) + shifted_targets = common_layers.shift_right_2d(targets) + + # Declare all the dimensions + batch_dim = mtf.Dimension("batch", hparams.batch_size) + + def import_to_batch_by_length(x, name): + return mtf.import_tf_tensor( + mesh, x, mtf.Shape([batch_dim, self.length_dim]), name=name) + + targets = import_to_batch_by_length(targets, "targets") + shifted_targets = import_to_batch_by_length( + shifted_targets, "shifted_targets") + + extra_losses = [] + + # Create targets content and position embeddings. + # Create embedding var for targets and positions and do a gather. + targets_embedding_var = mtf.get_variable( + mesh, "targets_embedding", + mtf.Shape([self.targets_vocab_dim, self.model_dim]), + initializer=tf.random_normal_initializer(), + activation_dtype=activation_dtype) + + x = mtf.gather(targets_embedding_var, + shifted_targets, self.targets_vocab_dim) + + # Add positional embeddings + x += mtf.reshape(self.create_positional_emb_2d(targets), + [self.length_dim, self.model_dim]) + + # If conditional and input is given, add the input embedding to the target. + # TODO(nikip): Verify conditional. 
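+ # When conditioning is enabled, the "inputs" ids are embedded with a separate num_classes-sized table and the resulting embedding is added to the shifted-target embeddings.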
+ if self.has_input and not hparams.unconditional: + inputs = tf.squeeze(tf.to_int32(features["inputs"]), [2, 3]) + inputs = import_to_batch_by_length(inputs, "inputs") + + # Input embeddings + inputs_embedding_var = mtf.layers.embedding( + mesh, "input_embedding", + mtf.Shape([self.inputs_vocab_dim, self.model_dim]), + activation_dtype=activation_dtype) + inputs_emb = mtf.gather( + inputs_embedding_var, inputs, self.inputs_vocab_dim) + x += inputs_emb + + # Image Transformer Decoder + # [ self attention - ffn - residual + dropout] x n + if hparams.attention_type == "local1d_spatial": + decoder_output = local_attention1d_spatial_decoder( + x, self.kv_dim, self.heads_dim, self.feedforward_dim, hparams) + elif hparams.attention_type == "local2d_spatial": + decoder_output = local_attention2d_spatial_decoder( + x, self.kv_dim, self.heads_dim, self.feedforward_dim, hparams) + elif hparams.attention_type == "local1d": + decoder_output = local_attention1d_masked_decoder( + x, self.kv_dim, self.heads_dim, self.feedforward_dim, hparams) + else: + raise ValueError("Invalid attention type.") + + # Calculate the logits and loss. + logits = mtf.layers.dense( + decoder_output, self.outputs_vocab_dim, name="logits") + # Need a reshape for logits + logits = mtf.reshape( + logits, mtf.Shape([batch_dim, self.length_dim, self.outputs_vocab_dim])) + soft_targets = mtf.one_hot( + targets, self.outputs_vocab_dim, dtype=activation_dtype) + loss = mtf.layers.softmax_cross_entropy_with_logits( + logits, soft_targets, self.outputs_vocab_dim) + loss = mtf.reduce_mean(loss) + for l in extra_losses: + loss += l + + # Reshape logits to original target shape. + logits = mtf.reshape( + logits, + mtf.Shape([batch_dim, self.rows_dim, self.orig_cols_dim, + self.channels_dim, self.outputs_vocab_dim])) + + return logits, loss + + +def layer_prepostprocess_dropout(x, hparams): + batch_dim = x.shape.dims[0] + model_dim = x.shape.dims[-1] + mode = getattr(hparams, "mode", tf_estimator.ModeKeys.TRAIN) + is_training = mode == tf_estimator.ModeKeys.TRAIN + return mtf.dropout( + x, is_training, + keep_prob=1.0 - hparams.layer_prepostprocess_dropout, + noise_shape=mtf.Shape([batch_dim, model_dim])) + + +def local_attention1d_spatial_decoder(x, kv_dim, heads_dim, + feedforward_dim, hparams): + """Image Transformer decoder with local1D spatial layers.""" + batch_dim, length_dim, model_dim = x.shape.dims + blocks_w_dim = mtf.Dimension("blocksw", hparams.block_length) + num_w_blocks_dim = mtf.Dimension("num_wblocks", + length_dim.size // blocks_w_dim.size) + x = mtf.reshape( + x, mtf.Shape([batch_dim, num_w_blocks_dim, blocks_w_dim, model_dim])) + # [ self attention - ffn - residual + dropout] x n + mode = getattr(hparams, "mode", tf_estimator.ModeKeys.TRAIN) + is_training = mode == tf_estimator.ModeKeys.TRAIN + for layer in range(hparams.num_decoder_layers): + layer_name = "decoder_layer_%d" % layer + with tf.variable_scope(layer_name): + # Self attention layer + x += layer_prepostprocess_dropout( + mtf.layers.local_self_attention_spatial_blocks( + mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"), + kv_dim, + heads_dim, + is_training, + memory_w_dim=blocks_w_dim, + mask_right=True, + name="self_att"), hparams) + # ffn layer + x += layer_prepostprocess_dropout( + mtf.layers.dense_relu_dense( + mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"), + feedforward_dim, + is_training, + hparams.dropout, + dropout_broadcast_dims=[length_dim]), hparams) + + output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm") + 
return output + + +def local_attention2d_spatial_decoder(x, kv_dim, heads_dim, + feedforward_dim, hparams): + """Image Transformer decoder with local2D spatial layers.""" + batch_dim, length_dim, model_dim = x.shape.dims + blocks_h_dim = mtf.Dimension("blocksh", hparams.block_height) + blocks_w_dim = mtf.Dimension("blocksw", hparams.block_width) + num_h_blocks_dim = mtf.Dimension("num_h_blocks", + hparams.img_len // hparams.block_height) + num_w_blocks_dim = mtf.Dimension( + "num_w_blocks", + hparams.img_len * hparams.num_channels // hparams.block_width) + x = mtf.transpose( + mtf.reshape( + x, + mtf.Shape([ + batch_dim, num_h_blocks_dim, blocks_h_dim, + num_w_blocks_dim, blocks_w_dim, model_dim + ])), + mtf.Shape([ + batch_dim, num_h_blocks_dim, num_w_blocks_dim, + blocks_h_dim, blocks_w_dim, model_dim + ])) + mode = getattr(hparams, "mode", tf_estimator.ModeKeys.TRAIN) + is_training = mode == tf_estimator.ModeKeys.TRAIN + # Image Transformer Decoder + # [ self attention - ffn - residual + dropout] x n + for layer in range(hparams.num_decoder_layers): + layer_name = "decoder_layer_%d" % layer + with tf.variable_scope(layer_name): + # Self attention layer + x += layer_prepostprocess_dropout( + mtf.layers.local_2d_self_attention_spatial_blocks( + mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"), + kv_dim, + heads_dim, + is_training, + memory_h_dim=num_h_blocks_dim, + memory_w_dim=num_w_blocks_dim, + name="self_att"), hparams) + # ffn layer + x += layer_prepostprocess_dropout( + mtf.layers.dense_relu_dense( + mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"), + feedforward_dim, + hparams.dropout, + dropout_broadcast_dims=[length_dim]), hparams) + + output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm") + return output + + +def local_attention1d_masked_decoder(x, kv_dim, heads_dim, + feedforward_dim, hparams): + """Image Transformer decoder with local1D masked layers.""" + print(x) + _, length_dim, model_dim = x.shape.dims + mode = getattr(hparams, "mode", tf_estimator.ModeKeys.TRAIN) + is_training = mode == tf_estimator.ModeKeys.TRAIN + for layer in range(hparams.num_decoder_layers): + layer_name = "decoder_layer_%d" % layer + with tf.variable_scope(layer_name): + # Self attention layer + length_per_split = mtf.tensor_dim_to_size_per_split( + hparams.layout, hparams.mesh_shape, length_dim) + x += layer_prepostprocess_dropout( + mtf.layers.masked_local_attention_1d( + mtf.layers.layer_norm(x, model_dim, name="layer_norm_att"), + kv_dim, + heads_dim, + is_training, + window_size=hparams.block_length, + length_per_split=length_per_split, + name="self_att"), hparams) + # ffn layer + x += layer_prepostprocess_dropout( + mtf.layers.dense_relu_dense( + mtf.layers.layer_norm(x, model_dim, name="layer_norm_ffn"), + feedforward_dim, + hparams.dropout, + dropout_broadcast_dims=[length_dim]), hparams) + + output = mtf.layers.layer_norm(x, model_dim, name="final_layer_norm") + return output + + +@registry.register_hparams +def mtf_image_transformer_base(): + """Set of hyperparameters.""" + hparams = common_hparams.basic_params1() + hparams.no_data_parallelism = True + hparams.use_fixed_batch_size = True + hparams.batch_size = 1 + hparams.max_length = 3072 + hparams.hidden_size = 256 + hparams.label_smoothing = 0.0 + # 8-way model-parallelism + hparams.add_hparam("mesh_shape", "batch:8") + hparams.add_hparam("layout", "batch:batch") + hparams.add_hparam("mtf_mode", True) + hparams.add_hparam("num_heads", 8) + hparams.add_hparam("filter_size", 1024) + 
hparams.add_hparam("num_encoder_layers", 0) + hparams.add_hparam("num_decoder_layers", 6) + hparams.add_hparam("attention_key_size", 256) + hparams.add_hparam("attention_value_size", 256) + # Share weights between input and target embeddings + hparams.shared_embedding = True + + # mixture of experts hparams + hparams.add_hparam("ffn_layer", "dense_relu_dense") + hparams.add_hparam("moe_overhead_train", 1.0) + hparams.add_hparam("moe_overhead_eval", 2.0) + hparams.moe_num_experts = 16 + hparams.moe_loss_coef = 1e-3 + + hparams.shared_embedding_and_softmax_weights = True + hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.learning_rate_warmup_steps = 10000 + hparams.add_hparam("d_kv", 64) + hparams.add_hparam("d_ff", 2048) + + # Image related hparams + hparams.add_hparam("img_len", 32) + hparams.add_hparam("num_channels", 3) + hparams.add_hparam("unconditional", True) + + # Local Attention related params + hparams.add_hparam("block_length", 128) + hparams.add_hparam("block_height", 16) + hparams.add_hparam("block_width", 16) + hparams.add_hparam("attention_type", "local1d") + return hparams + + +@registry.register_hparams +def mtf_image_transformer_tiny(): + """Catch bugs locally...""" + hparams = mtf_image_transformer_base() + hparams.hidden_size = 128 + hparams.d_ff = 256 + hparams.batch_size = 4 + hparams.num_encoder_layers = 1 + hparams.num_decoder_layers = 4 + hparams.num_heads = 4 + hparams.attention_key_size = 128 + hparams.attention_value_size = 128 + hparams.block_length = 32 + # data parallelism and model-parallelism + hparams.mesh_shape = "batch:2" + hparams.layout = "batch:batch" + return hparams + + +@registry.register_hparams +def mtf_image_transformer_single(): + """Small single parameters.""" + hparams = mtf_image_transformer_tiny() + hparams.mesh_shape = "" + hparams.layout = "" + hparams.hidden_size = 32 + hparams.filter_size = 32 + hparams.batch_size = 1 + hparams.num_encoder_layers = 1 + hparams.num_decoder_layers = 1 + hparams.num_heads = 2 + hparams.attention_key_size = 32 + hparams.attention_value_size = 32 + hparams.block_length = 16 + return hparams + + +@registry.register_hparams +def mtf_image_transformer_base_single(): + """Small single parameters.""" + hparams = mtf_image_transformer_base() + hparams.num_decoder_layers = 6 + hparams.filter_size = 256 + hparams.block_length = 128 + hparams.mesh_shape = "" + hparams.layout = "" + return hparams + + +@registry.register_hparams +def mtf_image_transformer_tiny_spatial1d(): + """Small single parameters.""" + hparams = mtf_image_transformer_tiny() + hparams.num_decoder_layers = 6 + hparams.filter_size = 128 + hparams.block_height = 8 + hparams.block_width = 8 + hparams.attention_type = "local1d_spatial" + hparams.mesh_shape = "" + hparams.layout = "" + return hparams + + +@registry.register_hparams +def mtf_image_transformer_tiny_spatial2d(): + """Small single parameters.""" + hparams = mtf_image_transformer_tiny() + hparams.num_decoder_layers = 6 + hparams.filter_size = 128 + hparams.block_height = 8 + hparams.block_width = 8 + hparams.attention_type = "local2d_spatial" + hparams.mesh_shape = "b1:2,b2:2" + hparams.layout = "num_h_blocks:b1,num_wblocks:b2" + return hparams + + +@registry.register_hparams +def mtf_image_transformer_base_cifar(): + """Data parallel CIFAR parameters.""" + hparams = mtf_image_transformer_base() + hparams.mesh_shape = "batch:8" + hparams.layout = "batch:batch" + hparams.learning_rate_decay_steps = 13600 # one epoch + hparams.batch_size = 32 + 
hparams.num_heads = 4 + hparams.num_decoder_layers = 12 + hparams.block_length = 256 + hparams.hidden_size = 512 + hparams.d_ff = 2048 + hparams.learning_rate = 0.5 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.layer_prepostprocess_dropout = 0.3 + hparams.unconditional = True + return hparams + + +@registry.register_hparams +def mtf_image_transformer_cifar_4x(): + """Data parallel CIFAR parameters.""" + hparams = mtf_image_transformer_base_cifar() + hparams.mesh_shape = "batch:32" + hparams.layout = "batch:batch" + hparams.batch_size = 128 + return hparams + + +@registry.register_hparams +def mtf_image_transformer_cifar_mp_4x(): + """Data parallel CIFAR parameters.""" + hparams = mtf_image_transformer_base_cifar() + hparams.mesh_shape = "model:4;batch:8" + hparams.layout = "batch:batch;d_ff:model;heads:model" + hparams.batch_size = 32 + hparams.num_heads = 8 + hparams.d_ff = 8192 + return hparams + + +@registry.register_hparams +def mtf_image_transformer_base_imagenet(): + """Data parallel CIFAR parameters.""" + hparams = mtf_image_transformer_base_cifar() + hparams.mesh_shape = "batch:32" + hparams.layout = "batch:batch" + hparams.batch_size = 128 + hparams.d_ff = 2048 + hparams.hidden_size = 512 + hparams.num_decoder_layers = 12 + hparams.learning_rate = 0.5 + hparams.learning_rate_warmup_steps = 31250 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.layer_prepostprocess_dropout = 0.1 + hparams.unconditional = True + return hparams + + +@registry.register_hparams +def mtf_image_transformer_base_imagenet_mp(): + """Model parallel ImageNet parameters.""" + hparams = mtf_image_transformer_base_imagenet() + hparams.mesh_shape = "model:4;batch:8" + hparams.layout = "batch:batch;d_ff:model;heads:model" + hparams.batch_size = 32 + hparams.num_heads = 8 + hparams.d_ff = 8192 + hparams.learning_rate_warmup_steps = 31250 + hparams.unconditional = True + return hparams + + +@registry.register_hparams +def mtf_image_transformer_base_imagenet_mp128(): + """Model parallel ImageNet parameters.""" + hparams = mtf_image_transformer_base_imagenet() + hparams.mesh_shape = "model:8;batch:4" + hparams.layout = "batch:batch;d_ff:model;heads:model" + hparams.batch_size = 8 + hparams.img_len = 128 + hparams.block_length = 128 + hparams.num_heads = 8 + hparams.num_decoder_layers = 4 + hparams.d_ff = 4096 + hparams.learning_rate_warmup_steps = 31250 + hparams.unconditional = True + hparams.max_length = 256*256*3 + return hparams + + +@registry.register_hparams +def mtf_image_transformer_base_imagenet_mp_sp(): + """Model parallel ImageNet parameters.""" + hparams = mtf_image_transformer_base_imagenet_mp128() + hparams.mesh_shape = "model:8;batch:4" + hparams.layout = "batch:batch;d_ff:model;num_wblocks:model" + hparams.batch_size = 8 + hparams.img_len = 128 + hparams.block_length = 128 + hparams.attention_type = "local1d_spatial" + return hparams + + +@registry.register_hparams +def mtf_image_transformer_base_imagenet_mp64(): + """Model parallel ImageNet parameters.""" + hparams = mtf_image_transformer_base_imagenet() + hparams.mesh_shape = "model:8;batch:4" + hparams.layout = "batch:batch;d_ff:model;heads:model" + hparams.batch_size = 8 + hparams.img_len = 64 + hparams.num_decoder_layers = 8 + return hparams + + +@registry.register_hparams +def mtf_image_transformer_tiny_8gpu(): + hparams = mtf_image_transformer_tiny() + hparams.mesh_shape = "all:8" + hparams.layout = "vocab:all;filter_size:all;heads:all" + 
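+ # Each "tensor_dim:mesh_dim" entry in the layout shards that tensor dimension across the named mesh dimension; here the vocab, filter_size and heads dimensions are all split over the 8-way "all" mesh axis.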
return hparams + + +@registry.register_hparams +def mtf_image_transformer_length_sharded(): + hparams = mtf_image_transformer_tiny() + hparams.mesh_shape = "all:2" + hparams.layout = "length:all" + return hparams diff --git a/tensor2tensor/models/mtf_image_transformer_test.py b/tensor2tensor/models/mtf_image_transformer_test.py new file mode 100644 index 000000000..4737d16ea --- /dev/null +++ b/tensor2tensor/models/mtf_image_transformer_test.py @@ -0,0 +1,142 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for Image Transformer on Mesh TensorFlow.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import mesh_tensorflow as mtf + +import numpy as np +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.models import mtf_image_transformer + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +# Constants shared between all functions. +BATCH_SIZE = 8 +IMG_LENGTH = 8 +VOCAB_SIZE = 256 + + +def get_model(hparams=None, + mode=tf_estimator.ModeKeys.TRAIN, + model_cls=mtf_image_transformer.MtfImageTransformer): + if hparams is None: + hparams = mtf_image_transformer.mtf_image_transformer_single() + hparams.max_length = IMG_LENGTH*IMG_LENGTH + hparams.batch_size = BATCH_SIZE + hparams.img_len = IMG_LENGTH + hparams.num_channels = 1 + + p_hparams = problem_hparams.test_problem_hparams(VOCAB_SIZE, + VOCAB_SIZE, + hparams) + del p_hparams.modality["inputs"] + hparams.problem_hparams = p_hparams + + targets = np.random.randint( + VOCAB_SIZE, size=(BATCH_SIZE, IMG_LENGTH, IMG_LENGTH, 1, 1)) + features = { + "targets": tf.constant(targets, dtype=tf.int32, name="targets"), + } + + return model_cls(hparams, mode, p_hparams), features, hparams + + +def get_placement_mesh(hparams): + graph = mtf.Graph() + mesh = mtf.Mesh(graph, "my_mesh") + mesh_shape = mtf.convert_to_shape(hparams.mesh_shape) + + mesh_devices = [""] * mesh_shape.size + mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl( + mesh_shape, hparams.layout, mesh_devices) + return mesh, mesh_impl + + +class MtfImageTransformerTest(tf.test.TestCase): + + def testMtfImageTransformer(self): + hparams = mtf_image_transformer.mtf_image_transformer_single() + + # need to know layout ahead of time for local attention. 
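+ # An empty mesh_shape/layout gives a single-device mesh of size 1, i.e. no tensor dimension is split across devices.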
+ hparams.mesh_shape = "" + hparams.layout = "" + model, features, hparams = get_model(hparams) + mesh, mesh_impl = get_placement_mesh(hparams) + + logits, _ = model.mtf_model_fn(features, mesh) + lowering = mtf.Lowering(mesh.graph, {mesh: mesh_impl}) + tf_group = lowering.copy_masters_to_slices() + tf_logits = lowering.export_to_tf_tensor(logits) + + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + session.run(tf_group) + res = session.run(tf_logits) + self.assertEqual(res.shape, + (BATCH_SIZE, IMG_LENGTH, IMG_LENGTH, + hparams.num_channels, VOCAB_SIZE)) + + def testMtfImageTransformerDataParallel(self): + hparams = mtf_image_transformer.mtf_image_transformer_single() + + # need to know layout ahead of time for local attention. + hparams.mesh_shape = "all:2" + hparams.layout = "batch:all" + model, features, hparams = get_model(hparams) + mesh, mesh_impl = get_placement_mesh(hparams) + + logits, _ = model.mtf_model_fn(features, mesh) + lowering = mtf.Lowering(mesh.graph, {mesh: mesh_impl}) + tf_group = lowering.copy_masters_to_slices() + tf_logits = lowering.export_to_tf_tensor(logits) + + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + session.run(tf_group) + res = session.run(tf_logits) + self.assertEqual(res.shape, + (BATCH_SIZE, IMG_LENGTH, IMG_LENGTH, + hparams.num_channels, VOCAB_SIZE)) + + def testMtfImageTransformerModelParallel(self): + hparams = mtf_image_transformer.mtf_image_transformer_single() + + # need to know layout ahead of time for local attention. + hparams.mesh_shape = "all:2" + hparams.layout = "length:all" + model, features, hparams = get_model(hparams) + mesh, mesh_impl = get_placement_mesh(hparams) + + logits, _ = model.mtf_model_fn(features, mesh) + lowering = mtf.Lowering(mesh.graph, {mesh: mesh_impl}) + tf_group = lowering.copy_masters_to_slices() + tf_logits = lowering.export_to_tf_tensor(logits) + + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + session.run(tf_group) + res = session.run(tf_logits) + self.assertEqual( + res.shape, + (BATCH_SIZE, IMG_LENGTH, IMG_LENGTH, hparams.num_channels, VOCAB_SIZE)) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/mtf_resnet.py b/tensor2tensor/models/mtf_resnet.py new file mode 100644 index 000000000..4ad14ee63 --- /dev/null +++ b/tensor2tensor/models/mtf_resnet.py @@ -0,0 +1,426 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""ResNet model with model and data parallelism using MTF. + +Integration of Mesh tensorflow with ResNet to do model parallelism. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import mesh_tensorflow as mtf + +from tensor2tensor.layers import common_hparams +from tensor2tensor.utils import mtf_model +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +BATCH_NORM_DECAY = 0.9 +BATCH_NORM_EPSILON = 1e-5 + + +def batch_norm_relu(inputs, is_training, relu=True): + """Block of batch norm and relu.""" + inputs = mtf.layers.batch_norm( + inputs, + is_training, + BATCH_NORM_DECAY, + epsilon=BATCH_NORM_EPSILON, + init_zero=(not relu)) + if relu: + inputs = mtf.relu(inputs) + return inputs + + +def bottleneck_block(inputs, + filters, + is_training, + strides, + projection_shortcut=None, + row_blocks_dim=None, + col_blocks_dim=None): + """Bottleneck block variant for residual networks with BN after convolutions. + + Args: + inputs: a `mtf.Tensor` of shape + `[batch_dim, row_blocks, col_blocks, rows, cols, in_channels]`. + filters: `int` number of filters for the first two convolutions. Note + that the third and final convolution will use 4 times as many filters. + is_training: `bool` for whether the model is in training mode. + strides: `int` block stride. If greater than 1, this block will ultimately + downsample the input. + projection_shortcut: `function` to use for projection shortcuts (typically + a 1x1 convolution to match the filter dimensions). If None, no + projection is used and the input is passed as unchanged through the + shortcut connection. + row_blocks_dim: a mtf.Dimension, row dimension which is + spatially partitioned along mesh axis + col_blocks_dim: a mtf.Dimension, row dimension which is + spatially partitioned along mesh axis + + Returns: + The output `Tensor` of the block. + """ + shortcut = inputs + + if projection_shortcut is not None: + filters_dim = mtf.Dimension("filtersp", filters) + shortcut = projection_shortcut(inputs, filters_dim) + + # First conv block + inputs = mtf.layers.conv2d_with_blocks( + inputs, + mtf.Dimension("filters1", filters), + filter_size=[1, 1], + strides=[1, 1], + padding="SAME", + h_blocks_dim=None, w_blocks_dim=col_blocks_dim, + name="conv0") + + # TODO(nikip): Add Dropout? + inputs = batch_norm_relu(inputs, is_training) + + # Second conv block + inputs = mtf.layers.conv2d_with_blocks( + inputs, + mtf.Dimension("filters2", 4 * filters), + filter_size=[3, 3], + strides=[1, 1], + padding="SAME", + h_blocks_dim=row_blocks_dim, w_blocks_dim=col_blocks_dim, + name="conv1") + + inputs = batch_norm_relu(inputs, is_training) + + # Third wide conv filter block + inputs = mtf.layers.conv2d_with_blocks( + inputs, + mtf.Dimension("filters3", filters), + filter_size=[1, 1], + strides=strides, + padding="SAME", + h_blocks_dim=None, w_blocks_dim=col_blocks_dim, + name="conv2") + + # TODO(nikip): Althought the original resnet code has this batch norm, in our + # setup this is causing no gradients to be passed. Investigate further. + # inputs = batch_norm_relu(inputs, is_training, relu=True) + + # TODO(nikip): Maybe add residual with a projection? + return mtf.relu( + shortcut + mtf.rename_dimension( + inputs, inputs.shape.dims[-1].name, shortcut.shape.dims[-1].name)) + + +def block_layer(inputs, + filters, + blocks, + strides, + is_training, + name, + row_blocks_dim=None, + col_blocks_dim=None): + """Creates one layer of blocks for the ResNet model. 
+ + Args: + inputs: `Tensor` of size `[batch, channels, height, width]`. + filters: `int` number of filters for the first convolution of the layer. + blocks: `int` number of blocks contained in the layer. + strides: `int` stride to use for the first convolution of the layer. If + greater than 1, this layer will downsample the input. + is_training: `bool` for whether the model is training. + name: `str`name for the Tensor output of the block layer. + row_blocks_dim: a mtf.Dimension, row dimension which is + spatially partitioned along mesh axis + col_blocks_dim: a mtf.Dimension, row dimension which is + spatially partitioned along mesh axis + + Returns: + The output `Tensor` of the block layer. + """ + with tf.variable_scope(name, default_name="block_layer"): + # Only the first block per block_layer uses projection_shortcut and strides + def projection_shortcut(inputs, output_dim): + """Project identity branch.""" + inputs = mtf.layers.conv2d_with_blocks( + inputs, + output_dim, + filter_size=[1, 1], + strides=strides, + padding="SAME", + h_blocks_dim=None, w_blocks_dim=col_blocks_dim, + name="shortcut0") + return batch_norm_relu( + inputs, is_training, relu=False) + + inputs = bottleneck_block( + inputs, + filters, + is_training, + strides=strides, + projection_shortcut=projection_shortcut, + row_blocks_dim=row_blocks_dim, + col_blocks_dim=col_blocks_dim) + + for i in range(1, blocks): + with tf.variable_scope("bottleneck_%d" % i): + inputs = bottleneck_block( + inputs, + filters, + is_training, + strides=[1, 1, 1, 1], + projection_shortcut=None, + row_blocks_dim=row_blocks_dim, + col_blocks_dim=col_blocks_dim) + + return inputs + + +@registry.register_model +class MtfResNet(mtf_model.MtfModel): + """ResNet in mesh_tensorflow.""" + + def set_activation_type(self): + hparams = self._hparams + if hparams.activation_dtype == "float32": + activation_dtype = tf.float32 + elif hparams.activation_dtype == "float16": + activation_dtype = tf.float16 + elif hparams.activation_dtype == "bfloat16": + activation_dtype = tf.bfloat16 + else: + raise ValueError( + "unknown hparams.activation_dtype %s" % hparams.activation_dtype) + return activation_dtype + + def mtf_model_fn(self, features, mesh): + features = copy.copy(features) + tf.logging.info("features = %s" % features) + hparams = self._hparams + activation_dtype = self.set_activation_type() + is_training = hparams.mode == tf_estimator.ModeKeys.TRAIN + + # Declare all the dimensions + batch_dim = mtf.Dimension("batch", hparams.batch_size) + hidden_dim = mtf.Dimension("hidden", hparams.hidden_size) + filter_dim = mtf.Dimension("filters", hparams.filter_sizes[0]) + rows_dim = mtf.Dimension("rows_size", hparams.rows_size) + cols_dim = mtf.Dimension("cols_size", hparams.cols_size) + row_blocks_dim = mtf.Dimension("row_blocks", hparams.row_blocks) + col_blocks_dim = mtf.Dimension("col_blocks", hparams.col_blocks) + classes_dim = mtf.Dimension("classes", 10) + channels_dim = mtf.Dimension("channels", 3) + one_channel_dim = mtf.Dimension("one_channel", 1) + + inputs = features["inputs"] + x = mtf.import_tf_tensor( + mesh, tf.reshape(inputs, [ + hparams.batch_size, + hparams.row_blocks, + hparams.rows_size // hparams.row_blocks, + hparams.col_blocks, + hparams.num_channels*hparams.cols_size // hparams.col_blocks, + hparams.num_channels]), + mtf.Shape( + [batch_dim, row_blocks_dim, rows_dim, + col_blocks_dim, cols_dim, channels_dim])) + x = mtf.transpose(x, [batch_dim, row_blocks_dim, col_blocks_dim, + rows_dim, cols_dim, channels_dim]) + + x = 
mtf.to_float(x) + x = mtf.layers.conv2d_with_blocks( + x, + filter_dim, + filter_size=[3, 3], + strides=[1, 1], + padding="SAME", + h_blocks_dim=None, w_blocks_dim=col_blocks_dim, + name="initial_filter") + + x = batch_norm_relu(x, is_training) + + # Conv blocks + # [block - strided block layer - strided block layer] x n + for layer in range(hparams.num_layers): + layer_name = "block_layer_%d" % layer + with tf.variable_scope(layer_name): + # Residual block layer + x = block_layer( + inputs=x, + filters=hparams.filter_sizes[0], + blocks=hparams.layer_sizes[0], + strides=[1, 1], + is_training=is_training, + name="block_layer1", + row_blocks_dim=None, + col_blocks_dim=None) + x = block_layer( + inputs=x, + filters=hparams.filter_sizes[1], + blocks=hparams.layer_sizes[1], + strides=[1, 1], + is_training=is_training, + name="block_layer2", + row_blocks_dim=None, + col_blocks_dim=None) + x = block_layer( + inputs=x, + filters=hparams.filter_sizes[2], + blocks=hparams.layer_sizes[2], + strides=[1, 1], + is_training=is_training, + name="block_layer3", + row_blocks_dim=None, + col_blocks_dim=None) + + # Calculate the logits and loss. + out = x + outputs = mtf.layers.dense( + out, hidden_dim, + reduced_dims=out.shape.dims[-5:], + activation=mtf.relu, name="dense") + + # We assume fixed vocab size for targets + labels = tf.squeeze(tf.to_int32(features["targets"]), [2, 3]) + labels = mtf.import_tf_tensor( + mesh, tf.reshape(labels, [hparams.batch_size]), mtf.Shape([batch_dim])) + + logits = mtf.layers.dense(outputs, classes_dim, name="logits") + soft_targets = mtf.one_hot(labels, classes_dim, dtype=activation_dtype) + loss = mtf.layers.softmax_cross_entropy_with_logits( + logits, soft_targets, classes_dim) + + # Reshape logits so it doesn't break inside t2t. + logits = mtf.reshape( + logits, + mtf.Shape([batch_dim, one_channel_dim, classes_dim])) + loss = mtf.reduce_mean(loss) + return logits, loss + + +@registry.register_hparams +def mtf_resnet_base(): + """Set of hyperparameters.""" + hparams = common_hparams.basic_params1() + hparams.no_data_parallelism = True + hparams.use_fixed_batch_size = True + hparams.batch_size = 32 + hparams.max_length = 3072 + hparams.hidden_size = 256 + hparams.label_smoothing = 0.0 + # 8-way model-parallelism + hparams.add_hparam("mesh_shape", "batch:8") + hparams.add_hparam("layout", "batch:batch") + hparams.add_hparam("filter_size", 1024) + + hparams.add_hparam("num_layers", 6) + # Share weights between input and target embeddings + hparams.shared_embedding = True + + hparams.shared_embedding_and_softmax_weights = True + hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.learning_rate_warmup_steps = 10000 + hparams.add_hparam("d_kv", 32) + + # Image related hparams + hparams.add_hparam("img_len", 32) + hparams.add_hparam("num_channels", 3) + hparams.add_hparam("row_blocks", 1) + hparams.add_hparam("col_blocks", 1) + hparams.add_hparam("rows_size", 32) + hparams.add_hparam("cols_size", 32) + + # Model-specific parameters + hparams.add_hparam("layer_sizes", [3, 4, 6, 3]) + hparams.add_hparam("filter_sizes", [64, 64, 128, 256, 512]) + hparams.add_hparam("is_cifar", False) + + # Variable init + hparams.initializer = "normal_unit_scaling" + hparams.initializer_gain = 2. + + # TODO(nikip): Change optimization scheme? 
+ hparams.learning_rate = 0.1 + return hparams + + +@registry.register_hparams +def mtf_resnet_tiny(): + """Catch bugs locally...""" + hparams = mtf_resnet_base() + hparams.num_layers = 2 + hparams.hidden_size = 64 + hparams.filter_size = 64 + hparams.batch_size = 16 + # data parallelism and model-parallelism + hparams.col_blocks = 1 + hparams.mesh_shape = "batch:2" + hparams.layout = "batch:batch" + hparams.layer_sizes = [1, 2, 3] + hparams.filter_sizes = [64, 64, 64] + return hparams + + +@registry.register_hparams +def mtf_resnet_single(): + """Small single parameters.""" + hparams = mtf_resnet_tiny() + hparams.mesh_shape = "" + hparams.layout = "" + hparams.hidden_size = 32 + hparams.filter_size = 32 + hparams.batch_size = 1 + hparams.num_encoder_layers = 1 + hparams.num_layers = 1 + hparams.block_length = 16 + return hparams + + +@registry.register_hparams +def mtf_resnet_base_single(): + """Small single parameters.""" + hparams = mtf_resnet_base() + hparams.num_layers = 6 + hparams.filter_size = 256 + hparams.block_length = 128 + hparams.mesh_shape = "" + hparams.layout = "" + return hparams + + +@registry.register_hparams +def mtf_resnet_base_cifar(): + """Data parallel CIFAR parameters.""" + hparams = mtf_resnet_base() + hparams.mesh_shape = "batch:32" + hparams.layout = "batch:batch" + hparams.batch_size = 8 + hparams.num_layers = 12 + hparams.block_length = 256 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.learning_rate = 0.5 + hparams.learning_rate_warmup_steps = 4000 + hparams.layer_preprocess_sequence = "none" + hparams.layer_postprocess_sequence = "dan" + hparams.layer_prepostprocess_dropout = 0.3 + hparams.unconditional = True + return hparams diff --git a/tensor2tensor/models/mtf_transformer.py b/tensor2tensor/models/mtf_transformer.py new file mode 100644 index 000000000..42bb88705 --- /dev/null +++ b/tensor2tensor/models/mtf_transformer.py @@ -0,0 +1,1195 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +"""Transformer model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import mesh_tensorflow as mtf +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import modalities +from tensor2tensor.models.research import moe +from tensor2tensor.utils import mtf_model +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +@registry.register_model +class MtfTransformer(mtf_model.MtfModel): + """Transformer in mesh_tensorflow.""" + + def __init__(self, + hparams, + mode=tf_estimator.ModeKeys.TRAIN, + problem_hparams=None, + data_parallelism=None, + decode_hparams=None, + **kwargs): + """Init with assignments of hparams.encoder_layers / decoder_layers.""" + # Finalize encoder_layers, decoder_layers + hparams.encoder_layers = ( + hparams.encoder_layers * hparams.encoder_replicate_factor) + hparams.decoder_layers = ( + hparams.decoder_layers * hparams.decoder_replicate_factor) + + super(MtfTransformer, self).__init__(hparams, + mode=mode, + problem_hparams=problem_hparams, + data_parallelism=data_parallelism, + decode_hparams=decode_hparams, + **kwargs) + + @property + def batch_dims(self): + hparams = self._hparams + if hparams.outer_batch_size == 0: + return [mtf.Dimension("batch", hparams.batch_size)] + else: + if hparams.batch_size % hparams.outer_batch_size != 0: + raise ValueError( + "hparams.outer_batch_size must divide hparams.batch_size") + return [ + mtf.Dimension("outer_batch", hparams.outer_batch_size), + mtf.Dimension("inner_batch", + hparams.batch_size // hparams.outer_batch_size)] + + @property + def inputs_vocab_dim(self): + assert self.has_input + return mtf.Dimension("vocab", self._inputs_vocab_size) + + @property + def targets_vocab_dim(self): + return mtf.Dimension("vocab", self._targets_vocab_size) + + @property + def model_dim(self): + return mtf.Dimension("d_model", self._hparams.d_model) + + @property + def max_length_dim(self): + return mtf.Dimension("max_length", self._hparams.max_length) + + @property + def length_dim(self): + return mtf.Dimension("length", self._hparams.max_length) + + @property + def memory_length_dim(self): + return mtf.Dimension("memory_length", self._hparams.max_length) + + @property + def heads_dim(self): + return mtf.Dimension("heads", self._hparams.num_heads) + + @property + def kv_dim(self): + return mtf.Dimension("d_kv", self._hparams.d_kv) + + @property + def feedforward_dim(self): + return mtf.Dimension("d_ff", self._hparams.d_ff) + + @property + def master_dtype(self): + return tf.as_dtype(self._hparams.master_dtype) + + @property + def slice_dtype(self): + return tf.as_dtype(self._hparams.slice_dtype) + + @property + def activation_dtype(self): + return tf.as_dtype(self._hparams.activation_dtype) + + def _import_to_batch_by_length(self, x, name, mesh, hparams): + del hparams + mtf_shape = mtf.Shape(self.batch_dims + [self.length_dim]) + x = tf.reshape(x, mtf_shape.to_integer_list) + return mtf.import_fully_replicated(mesh, x, mtf_shape, name=name) + + def _embedding_and_softmax_vars(self, mesh): + hparams = self._hparams + if hparams.transformer_type == "encoder": + targets_embedding_var = None + else: + targets_embedding_var = mtf.get_variable( + mesh, "targets_embedding", + mtf.Shape([self.targets_vocab_dim, self.model_dim]), + initializer=tf.random_normal_initializer(), + 
master_dtype=self.master_dtype, + slice_dtype=self.slice_dtype, + activation_dtype=self.activation_dtype) + if hparams.transformer_type == "decoder": + inputs_embedding_var = None + else: + if hparams.shared_embedding and targets_embedding_var: + inputs_embedding_var = targets_embedding_var + else: + inputs_embedding_var = mtf.get_variable( + mesh, "inputs_embedding", + mtf.Shape([self.inputs_vocab_dim, self.model_dim]), + initializer=tf.random_normal_initializer(), + master_dtype=self.master_dtype, + slice_dtype=self.slice_dtype, + activation_dtype=self.activation_dtype) + if hparams.shared_embedding_and_softmax_weights: + softmax_var = (targets_embedding_var or inputs_embedding_var) * ( + self.model_dim.size ** -0.5) + else: + softmax_var = mtf.get_variable( + mesh, + "softmax", + mtf.Shape([self.targets_vocab_dim, self.model_dim]), + initializer=tf.random_normal_initializer( + stddev=self.model_dim.size**-0.5), + master_dtype=self.master_dtype, + slice_dtype=self.slice_dtype, + activation_dtype=self.activation_dtype) + positional_embedding_var = mtf.get_variable( + mesh, "positional_embedding", + mtf.Shape([self.max_length_dim, self.model_dim]), + initializer=tf.random_normal_initializer(), + activation_dtype=self.activation_dtype) + return (inputs_embedding_var, targets_embedding_var, + softmax_var, positional_embedding_var) + + def _noisy_targets_from_spec(self, targets, noising_spec, losses=None): + if noising_spec["type"] == "mask": + # Replace a randomly-chosen noising_spec["prob"] of input tokens with 0. + return targets * mtf.cast( + mtf.greater(mtf.random_uniform(targets.mesh, targets.shape), + noising_spec["prob"]), targets.dtype) + elif noising_spec["type"] == "random_zipfian": + # Replace a randomly-chosen noising_spec["prob"] of input tokens. + # Rather than drawing the replacement tokens uniformly, we sample from + # a distribution favoring lower token-ids, assuming that the ids have + # been assigned in frequency order. The probability of choosing an + # id is proportional to 1/(id+10) + logits = mtf.log(1.0 / (mtf.range( + targets.mesh, self.targets_vocab_dim, dtype=tf.float32) + 10.0)) + logits = mtf.broadcast(logits, new_shape=targets.shape + logits.shape) + r = mtf.sample_with_temperature(logits, self.targets_vocab_dim) + use_noise = mtf.less( + mtf.random_uniform(targets.mesh, targets.shape), noising_spec["prob"]) + return mtf.where(use_noise, r, targets) + elif noising_spec["type"] == "transformer": + # Train a small transformer to fill in masked out values, then + # sample from it. + hparams = self._hparams + if hparams.mode != tf_estimator.ModeKeys.TRAIN: + raise NotImplementedError("Not implemented") + noiser_hparams = copy.copy(self._hparams) + noiser_hparams.del_hparam("mode") + noiser_hparams.override_from_dict(noising_spec["overrides"]) + with tf.variable_scope("noiser"): + noiser = MtfTransformer( + noiser_hparams, + mode=hparams.mode, + problem_hparams=self._problem_hparams) + logits, loss = noiser._mtf_model_fn( # pylint: disable=protected-access + self._original_features, targets.mesh) + samples = mtf.sample_with_temperature(logits, self.targets_vocab_dim) + losses.append(loss) + return samples + else: + raise ValueError("unknown noising spec %s" % noising_spec) + + def _noisy_targets(self, targets, losses=None): + """Generate noisy targets for denoising models. 
+ + Args: + targets: a Tensor + losses: an optional list onto which to append training losses + Returns: + a Tensor with the same dtype and shape as targets + """ + hparams = self._hparams + if hparams.mode == tf_estimator.ModeKeys.TRAIN: + nt_train = self._noisy_targets_from_spec( + targets, hparams.noising_spec_train, losses=losses) + if hparams.noising_use_eval_during_train > 0: + nt_eval = self._noisy_targets_from_spec( + targets, hparams.noising_spec_eval) + use_eval_noising = mtf.less( + mtf.random_uniform(targets.mesh, targets.shape - self.length_dim), + hparams.noising_use_eval_during_train) + nt_train = mtf.where(use_eval_noising, nt_eval, nt_train) + return nt_train + else: + return self._noisy_targets_from_spec(targets, hparams.noising_spec_eval) + + def _mtf_model_fn(self, features, mesh): + self._original_features = features + features = copy.copy(features) + hparams = self._hparams + extra_losses = [] + targets = tf.to_int32(features["targets"]) + mode = getattr(hparams, "mode", tf_estimator.ModeKeys.TRAIN) + is_training = mode == tf_estimator.ModeKeys.TRAIN + if len(targets.get_shape()) > 2: + tf.logging.info("targets = %s" % targets) + targets = tf.squeeze(targets, [2, 3]) + # pad targets to max_length + def pad_to_max_length(x): + extra_length = hparams.max_length - tf.shape(x)[1] + x = tf.pad(x, [[0, 0], [0, extra_length]]) + x = tf.reshape(x, [hparams.batch_size, hparams.max_length]) + return x + targets = pad_to_max_length(targets) + targets = self._import_to_batch_by_length(targets, "targets", mesh, hparams) + for key in ["targets_segmentation", "targets_position", + "inputs_segmentation", "inputs_position"]: + if key in features: + features[key] = pad_to_max_length(features[key]) + if hparams.decoder_type == "autoregressive": + shifted_targets = mtf.shift( + targets, offset=1, dim=self.length_dim, wrap=False) + elif hparams.decoder_type == "denoising": + shifted_targets = self._noisy_targets(targets, extra_losses) + else: + raise ValueError( + "unknown hparams.decoder_type = %s" % hparams.decoder_type) + + if "targets_segmentation" in features: + # "Packed" dataset - keep the examples from seeing each other.
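+ # Segmentation ids mark which packed example each position belongs to. attention_mask_same_segment turns them into an additive bias (0 within a segment, a large negative value across segments) so attention never crosses example boundaries; for autoregressive decoding the causal mask is added on top.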
+ targets_segmentation = self._import_to_batch_by_length( + features["targets_segmentation"], "targets_segmentation", + mesh, hparams) + targets_position = self._import_to_batch_by_length( + features["targets_position"], "targets_position", + mesh, hparams) + decoder_self_attention_mask = mtf.layers.attention_mask_same_segment( + targets_segmentation, dtype=self.activation_dtype) + if hparams.decoder_type == "autoregressive": + decoder_self_attention_mask += mtf.layers.attention_mask_autoregressive( + targets_position, dtype=self.activation_dtype) + else: + targets_position = mtf.range(mesh, self.length_dim, dtype=tf.int32) + if hparams.decoder_type == "autoregressive": + decoder_self_attention_mask = mtf.layers.attention_mask_autoregressive( + targets_position, dtype=self.activation_dtype) + else: + decoder_self_attention_mask = None + + def layer_prepostprocess_dropout(x): + return mtf.dropout( + x, is_training, keep_prob=1.0 - hparams.layer_prepostprocess_dropout, + noise_shape=mtf.Shape(self.batch_dims + [self.model_dim])) + + (inputs_embedding_var, + targets_embedding_var, + softmax_var, + positional_embedding_var) = self._embedding_and_softmax_vars(mesh) + if hparams.transformer_type == "decoder": + encoder_output = None + encoder_decoder_attention_mask = None + else: + inputs = tf.squeeze(tf.to_int32(features["inputs"]), [2, 3]) + inputs = pad_to_max_length(inputs) + inputs = self._import_to_batch_by_length(inputs, "inputs", mesh, hparams) + if "inputs_segmentation" in features: + # "Packed" dataset - keep the examples from seeing each other. + inputs_segmentation = self._import_to_batch_by_length( + features["inputs_segmentation"], "inputs_segmentation", + mesh, hparams) + inputs_position = self._import_to_batch_by_length( + features["inputs_position"], "inputs_position", + mesh, hparams) + encoder_self_attention_mask = ( + mtf.layers.attention_mask_same_segment( + inputs_segmentation, dtype=self.activation_dtype)) + else: + inputs_position = mtf.range(mesh, self.length_dim, dtype=tf.int32) + encoder_self_attention_mask = ( + mtf.layers.attention_mask_ignore_padding( + inputs, dtype=self.activation_dtype)) + + x = (mtf.gather(inputs_embedding_var, inputs, self.inputs_vocab_dim) + + mtf.gather(positional_embedding_var, inputs_position, + self.max_length_dim)) + x = layer_prepostprocess_dropout(x) + with tf.variable_scope("encoder"): + x = self._layer_stack(x, + hparams.encoder_layers, + self_attention_mask=encoder_self_attention_mask, + losses=extra_losses) + + if hparams.transformer_type == "encdec": + if "inputs_segmentation" in features: + encoder_decoder_attention_mask = ( + mtf.layers.attention_mask_same_segment( + targets_segmentation, inputs_segmentation, + dtype=self.activation_dtype)) + else: + encoder_decoder_attention_mask = encoder_self_attention_mask + encoder_output = mtf.rename_dimension( + x, self.length_dim.name, self.memory_length_dim.name) + + if hparams.transformer_type != "encoder": + # DECODER + x = (mtf.gather( + targets_embedding_var, shifted_targets, self.targets_vocab_dim) + + mtf.gather( + positional_embedding_var, targets_position, self.max_length_dim)) + x = layer_prepostprocess_dropout(x) + with tf.variable_scope("decoder"): + x = self._layer_stack( + x, + hparams.decoder_layers, + encoder_output=encoder_output, + self_attention_mask=decoder_self_attention_mask, + encdec_attention_mask=encoder_decoder_attention_mask, + losses=extra_losses) + if (hparams.reshape_logits_hack and + hparams.mode == tf_estimator.ModeKeys.TRAIN): + # For some reason, the 
logits computation is extremely slow on TPU + # in some cases where the batch size per core is 1. Reshape the logits + # and the targets to double the batch size and halve the length. + # TODO(noam): file a bug. + old_dims = self.batch_dims + [self.length_dim] + new_dims = self.batch_dims[:-1] + [ + mtf.Dimension(self.batch_dims[-1].name, + self.batch_dims[-1].size * 2), + mtf.Dimension(self.length_dim.name, self.length_dim.size // 2)] + x = mtf.reshape(x, new_dims + [self.model_dim]) + targets = mtf.reshape(targets, new_dims) + + logits = mtf.matmul(x, softmax_var) + if hparams.mode == tf_estimator.ModeKeys.TRAIN: + logits = mtf.layers.multiplicative_jitter(logits, epsilon=1e-2) + off_value = hparams.label_smoothing / self._targets_vocab_size + on_value = 1.0 - hparams.label_smoothing + off_value + soft_targets = mtf.one_hot( + targets, self.targets_vocab_dim, on_value=on_value, off_value=off_value, + dtype=self.activation_dtype) + loss = mtf.layers.softmax_cross_entropy_with_logits( + logits, soft_targets, self.targets_vocab_dim) + weights = mtf.layers.weights_nonzero(targets, dtype=self.activation_dtype) + loss = mtf.reduce_mean(loss * weights) + for l in extra_losses: + loss += l + if (hparams.reshape_logits_hack and + hparams.mode == tf_estimator.ModeKeys.TRAIN): + logits = mtf.reshape(logits, old_dims + [self.targets_vocab_dim]) + logits = mtf.to_float(logits) + return logits, loss + + def mtf_model_fn(self, features, mesh): + with tf.variable_scope("transformer"): + logits, loss = self._mtf_model_fn(features, mesh) + # combine batch dims + if len(self.batch_dims) > 1: + combined_batch_dim = mtf.Dimension( + self.batch_dims[0].name, mtf.Shape(self.batch_dims).size) + logits = mtf.reshape( + logits, [combined_batch_dim] + logits.shape.dims[-2:]) + return logits, loss + + @property + def _targets_vocab_size(self): + targets_vocab_size = self._problem_hparams.vocab_size["targets"] + targets_vocab_size += (-targets_vocab_size) % self._hparams.vocab_divisor + return targets_vocab_size + + @property + def _inputs_vocab_size(self): + inputs_vocab_size = self._problem_hparams.vocab_size["inputs"] + inputs_vocab_size += (-inputs_vocab_size) % self._hparams.vocab_divisor + return inputs_vocab_size + + def _feedforward_layer(self, x, layer_type, losses=None): + """Feed-forward layer. 
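+
+    Dispatches on layer_type: "drd" (dense_relu_dense), "none" (identity),
+    "moe" (mixture of experts), or "hmoe" (hierarchical mixture of experts);
+    any auxiliary loss from the expert layers is appended to `losses`.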
+
+    Args:
+      x: a mtf.Tensor with shape [<batch_dims>, length_dim, model_dim]
+      layer_type: a string
+      losses: a list to be appended-to
+    Returns:
+      a mtf.Tensor with shape [<batch_dims>, length_dim, model_dim]
+    Raises:
+      ValueError: if hparams make no sense
+    """
+    hparams = self._hparams
+    mode = getattr(hparams, "mode", tf_estimator.ModeKeys.TRAIN)
+    is_training = mode == tf_estimator.ModeKeys.TRAIN
+    if layer_type == "drd":
+      return mtf.layers.dense_relu_dense(
+          x, self.feedforward_dim, is_training, dropout=hparams.relu_dropout,
+          dropout_broadcast_dims=[self.length_dim],
+          master_dtype=self.master_dtype,
+          slice_dtype=self.slice_dtype)
+    elif layer_type == "none":
+      return x
+    elif layer_type == "moe":
+      output, loss = moe.transformer_moe_layer_v1(
+          x,
+          self.model_dim,
+          hparams,
+          hparams.mode == tf_estimator.ModeKeys.TRAIN,
+          master_dtype=self.master_dtype,
+          slice_dtype=self.slice_dtype)
+      if losses is not None:
+        losses.append(loss)
+      return output
+    elif layer_type == "hmoe":
+      output, loss = moe.transformer_moe_layer_v2(
+          x,
+          self.model_dim,
+          hparams,
+          hparams.mode == tf_estimator.ModeKeys.TRAIN,
+          master_dtype=self.master_dtype,
+          slice_dtype=self.slice_dtype)
+      if losses is not None:
+        losses.append(loss)
+      return output
+    else:
+      raise ValueError("layer_type not recognized %s" % layer_type)
+
+  def _layer_stack(self,
+                   x,
+                   layers,
+                   encoder_output=None,
+                   self_attention_mask=None,
+                   encdec_attention_mask=None,
+                   losses=None,
+                   step_num=None,
+                   encdec_tensors=None,
+                   states=None):
+    """Encoder or decoder stack.
+
+    Args:
+      x: a mtf.Tensor with shape [<batch_dims>, length_dim, model_dim]
+      layers: a list of strings
+      encoder_output: an optional mtf.Tensor with shape
+        [<batch_dims>, encoder_length_dim, model_dim]
+      self_attention_mask: an optional mtf.Tensor with shape
+        [batch, length_dim, memory_length_dim] containing values 0 or -inf.
+      encdec_attention_mask: an optional mtf.Tensor with shape
+        [batch, length_dim, encoder_length_dim] containing values 0 or -inf.
+ losses: a list to be appended-to + step_num: an optional mtf integer Scalar (used in incrmenental mode) + encdec_tensors: an optional list of num_layers tuples, each of the form + (q_var, o_var, k, v), (used in incremental mode) + states: an optional list of Tensors (used in incremental mode) + Returns: + a mtf.Tensor with shape [, length_dim, model_dim] + Raises: + ValueError: if hparams make no sense + """ + hparams = self._hparams + is_incremental = (step_num is not None) + mode = getattr(hparams, "mode", tf_estimator.ModeKeys.TRAIN) + is_training = mode == tf_estimator.ModeKeys.TRAIN + def layer_prepostprocess_dropout(x): + if is_incremental: + return x + return mtf.dropout( + x, is_training, keep_prob=1.0 - hparams.layer_prepostprocess_dropout, + noise_shape=mtf.Shape(self.batch_dims + [self.model_dim])) + num_layers = len(layers) + num_layer_norms = num_layers + 1 + layer_norms_dim = mtf.Dimension("layer_norms", num_layer_norms) + layer_norm_combined_var = mtf.get_variable( + x.mesh, + "layer_norm_scale", + mtf.Shape([layer_norms_dim, self.model_dim]), + initializer=tf.ones_initializer(), + activation_dtype=x.dtype) + layer_norm_vars = mtf.unstack(layer_norm_combined_var, layer_norms_dim) + def normalize(x): + scale = layer_norm_vars.pop(0) + variance = mtf.reduce_mean(mtf.square(x), reduced_dim=self.model_dim) + return x * mtf.rsqrt(variance + hparams.norm_epsilon) * scale + + if is_incremental: + states = list(states) + new_states = [] + tf.logging.info("states = %s" % (states,)) + + for lnum, layer_type in enumerate(layers): + with tf.variable_scope("%s_%d" % (layer_type, lnum)): + if layer_type == "att": + # Self attention layer + if is_incremental: + y, new_k, new_v = mtf.layers.multihead_self_attention_incremental( + normalize(x), + prev_k=states.pop(0), + prev_v=states.pop(0), + step_num=step_num, + master_dtype=self.master_dtype, + slice_dtype=self.slice_dtype, + name="att") + new_states.append(new_k) + new_states.append(new_v) + x += y + else: + x += layer_prepostprocess_dropout( + mtf.layers.multihead_attention( + normalize(x), None, + self_attention_mask, self.kv_dim, self.heads_dim, + is_training, + dropout=hparams.attention_dropout, + dropout_broadcast_dims=[self.length_dim], + master_dtype=self.master_dtype, + slice_dtype=self.slice_dtype, + name="att")) + elif layer_type == "enc_att": + # Encoder-Decoder attention layer + if is_incremental: + # Encoder-Decoder attention layer + q_var, o_var, k, v = encdec_tensors[lnum] + x += mtf.layers.multihead_encdec_attention_incremental( + normalize(x), + q_var, o_var, k, v, + encdec_attention_mask, + name="enc_att") + else: + x += layer_prepostprocess_dropout( + mtf.layers.multihead_attention( + normalize(x), encoder_output, + encdec_attention_mask, self.kv_dim, self.heads_dim, + is_training, + dropout=hparams.attention_dropout, + dropout_broadcast_dims=[self.length_dim], + master_dtype=self.master_dtype, + slice_dtype=self.slice_dtype, + name="enc_att")) + elif layer_type == "local_att": + if is_incremental: + y, new_k, new_v = mtf.layers.masked_local_attention_1d_incremental( + normalize(x), + prev_k=states.pop(0), + prev_v=states.pop(0), + step_num=step_num, + master_dtype=self.master_dtype, + slice_dtype=self.slice_dtype, + name="local_att") + new_states.append(new_k) + new_states.append(new_v) + x += y + else: + x += layer_prepostprocess_dropout( + mtf.layers.masked_local_attention_1d( + normalize(x), + self.kv_dim, self.heads_dim, is_training, + window_size=hparams.local_attention_window_size, + 
master_dtype=self.master_dtype, + slice_dtype=self.slice_dtype, + length_per_split=mtf.tensor_dim_to_size_per_split( + hparams.layout, hparams.mesh_shape, + self.max_length_dim), + name="local_att")) + elif layer_type == "compressed_att": + if is_incremental: + raise ValueError("compressed_att incremental not implemented") + else: + x += layer_prepostprocess_dropout( + mtf.layers.multihead_self_attention_memory_compressed( + normalize(x), + mask_right=True, + compression_factor=hparams.compression_factor, + kv_channels=self.kv_dim, + heads=self.heads_dim, + is_training=is_training, + dropout=hparams.attention_dropout, + dropout_broadcast_dims=[self.length_dim], + master_dtype=self.master_dtype, + slice_dtype=self.slice_dtype, + name="compressed_att")) + else: + if is_incremental: + # insert length dimension. + x_shape = x.shape + shape_with_length = mtf.Shape( + x_shape.dims[:-1] + [mtf.Dimension("length", 1)] + + x_shape.dims[-1:]) + x = mtf.reshape(x, shape_with_length) + # ffn layer + x += layer_prepostprocess_dropout( + self._feedforward_layer(normalize(x), layer_type, losses=losses)) + if is_incremental: + # remove length dimension + x = mtf.reshape(x, x_shape) + + x = layer_prepostprocess_dropout(normalize(x)) + assert not layer_norm_vars + if is_incremental: + return x, new_states + else: + return x + + def sample(self, features, mesh): + with tf.variable_scope("transformer"): + return self._sample(features, mesh) + + def _sample(self, features, mesh): + hparams = self._hparams + (inputs_embedding_var, + targets_embedding_var, + softmax_var, + positional_embedding_var) = self._embedding_and_softmax_vars(mesh) + if hparams.transformer_type == "encdec": + inputs = features["inputs"] + while len(inputs.shape.as_list()) > 2: + inputs = tf.squeeze(inputs, axis=2) + actual_batch_size = tf.shape(inputs)[0] + actual_length = tf.shape(inputs)[1] + inputs = tf.pad( + inputs, [[0, hparams.batch_size - actual_batch_size], + [0, hparams.max_length - actual_length]]) + inputs = self._import_to_batch_by_length( + inputs, "inputs", mesh, hparams) + x = (mtf.gather(inputs_embedding_var, inputs, self.inputs_vocab_dim) + + mtf.reshape(positional_embedding_var, + mtf.Shape([self.length_dim, self.model_dim]))) + encoder_attention_mask = ( + mtf.layers.attention_mask_ignore_padding( + inputs, dtype=self.activation_dtype)) + with tf.variable_scope("encoder"): + x = self._layer_stack(x, + hparams.encoder_layers, + self_attention_mask=encoder_attention_mask) + encoder_output = mtf.rename_dimension( + x, self.length_dim.name, self.memory_length_dim.name) + encdec_tensors = [] + for layer_num, layer_type in enumerate(hparams.decoder_layers): + if layer_type == "enc_att": + with tf.variable_scope("decoder/enc_att_%d/enc_att" % layer_num): + q_var, k_var, v_var, o_var = mtf.layers.multihead_attention_vars( + mesh, self.heads_dim, self.model_dim, + self.kv_dim, self.master_dtype, self.slice_dtype, + self.activation_dtype) + k = mtf.einsum( + [encoder_output, k_var], + mtf.Shape( + self.batch_dims + [self.heads_dim, + self.memory_length_dim, self.kv_dim])) + v = mtf.einsum( + [encoder_output, v_var], + mtf.Shape( + self.batch_dims + [self.heads_dim, + self.memory_length_dim, self.kv_dim])) + encdec_tensors.append((q_var, o_var, k, v)) + else: + encdec_tensors.append(None) + partial_targets = None + elif hparams.transformer_type == "decoder": + encdec_tensors = None + encoder_output = None + encoder_attention_mask = None + # Prepare partial targets. + # In either features["inputs"] or features["targets"]. 
+ # We force the outputs to begin with these sequences. + partial_targets = features.get("inputs", None) + if partial_targets is None: + partial_targets = features.get("targets", None) + if partial_targets is not None: + partial_targets = common_layers.expand_squeeze_to_nd(partial_targets, 2) + partial_targets = tf.to_int32(partial_targets) + partial_targets_batch = tf.shape(partial_targets)[0] + partial_targets_length = tf.shape(partial_targets)[1] + partial_targets = tf.pad( + partial_targets, [[0, hparams.batch_size - partial_targets_batch], + [0, hparams.max_length - partial_targets_length]]) + partial_targets = self._import_to_batch_by_length( + partial_targets, "partial_targets", mesh, hparams) + else: + raise ValueError( + "hparams.model_type = %s not yet supported" + % hparams.transformer_type) + + local_attention_window = mtf.Dimension( + "local_attention_window", hparams.local_attention_window_size) + if hparams.beam_size == 1: + ids_shape = mtf.Shape(self.batch_dims + [self.length_dim]) + kv_shape = mtf.Shape(self.batch_dims + + [self.heads_dim, + self.memory_length_dim, self.kv_dim]) + local_kv_shape = mtf.Shape(self.batch_dims + + [self.heads_dim, + local_attention_window, self.kv_dim]) + else: + beam_dim = mtf.Dimension("beam", hparams.beam_size) + ids_shape = mtf.Shape(self.batch_dims + [beam_dim, self.length_dim]) + kv_shape = mtf.Shape(self.batch_dims + + [beam_dim, self.heads_dim, + self.memory_length_dim, self.kv_dim]) + local_kv_shape = mtf.Shape(self.batch_dims + + [beam_dim, self.heads_dim, + local_attention_window, self.kv_dim]) + + initial_ids = mtf.constant(mesh, 0, ids_shape, dtype=tf.int32) + initial_states = [] + for layer in hparams.decoder_layers: + if layer == "att": + initial_states.extend( + [mtf.zeros(mesh, kv_shape, dtype=self.activation_dtype)] * 2) + elif layer == "local_att": + initial_states.extend( + [mtf.zeros(mesh, local_kv_shape, dtype=self.activation_dtype)] * 2) + + def logits_fn(step_num, ids, states): + """Produce logits for this step, and new states.""" + ids_this_step = mtf.gather(ids, step_num - 1, self.length_dim) + x = (mtf.gather(targets_embedding_var, ids_this_step, + self.targets_vocab_dim) + + mtf.gather(positional_embedding_var, step_num, self.max_length_dim)) + with tf.variable_scope("decoder"): + x, new_states = self._layer_stack( + x, + hparams.decoder_layers, + encdec_attention_mask=encoder_attention_mask, + step_num=step_num, + encdec_tensors=encdec_tensors, + states=states) + logits = mtf.matmul(x, softmax_var) + return logits, new_states + + if hparams.beam_size == 1: + temperature = (0.0 if hparams.sampling_method == "argmax" + else hparams.sampling_temp) + return mtf.beam_search.greedy_decode( + logits_fn, + initial_ids, + temperature=temperature, + initial_states=initial_states, + forced_ids=partial_targets, + use_tpu=hparams.use_tpu) + else: + if hparams.transformer_type == "encdec": + input_length = mtf.reduce_sum( + mtf.to_float(mtf.cast(inputs, tf.bool)), + reduced_dim=self.length_dim) + max_input_length = mtf.reduce_max(input_length) + decode_length = mtf.cast( + max_input_length * hparams.decode_length_multiplier + + hparams.decode_length_constant, tf.int32) + else: + decode_length = None + beams, unused_scores = mtf.beam_search.beam_search( + logits_fn, + initial_ids, + hparams.alpha, + states=initial_states, + decode_length=decode_length, + use_tpu=hparams.use_tpu, + dtype=self.activation_dtype) + return mtf.gather(beams, mtf.constant(mesh, 0, dtype=tf.int32), beam_dim) + + +@registry.register_hparams +def 
mtf_transformer_base(): + """Set of hyperparameters.""" + hparams = common_hparams.basic_params1() + hparams.no_data_parallelism = True + hparams.use_fixed_batch_size = True + hparams.add_hparam("mtf_mode", True) + hparams.batch_size = 64 + hparams.max_length = 256 + hparams.add_hparam("d_model", 512) + hparams.add_hparam("d_kv", 128) + hparams.add_hparam("local_attention_window_size", 128) + hparams.label_smoothing = 0.1 + # 8-way model-parallelism + hparams.add_hparam("mesh_shape", "model:8") + hparams.add_hparam("layout", "batch:batch;vocab:model;d_ff:model;heads:model") + hparams.add_hparam("num_heads", 8) + hparams.add_hparam("d_ff", 2048) + hparams.add_hparam("encoder_replicate_factor", 1) + hparams.add_hparam("decoder_replicate_factor", 1) + hparams.add_hparam("encoder_layers", ["att", "drd"] * 6) + hparams.add_hparam("decoder_layers", ["att", "enc_att", "drd"] * 6) + hparams.add_hparam("attention_dropout", 0.1) + hparams.add_hparam("relu_dropout", 0.1) + hparams.layer_prepostprocess_dropout = 0.1 + + # Describes what model architecture: + # "encdec": encoder + autoregressive decoder + # "decoder": single-stack autoregressive sequence model. + # "encoder": single-stack non-autoregressive model + # with equal-length inputs and outputs. + hparams.add_hparam("transformer_type", "encdec") + + # What does the decoder do: + # "autoregressive": Decoder left to right + # "denoising": Fills in masked-out values simultaneously + hparams.add_hparam("decoder_type", "autoregressive") + + # Parameters describing the noising algorithm for denoising decoders + hparams.add_hparam("noising_spec_train", {"type": "mask", "prob": 0.15}) + hparams.add_hparam("noising_spec_eval", {"type": "mask", "prob": 0.15}) + # during training, we use the eval noiser with this probability + hparams.add_hparam("noising_use_eval_during_train", 0.1) + + # round up vocab sizes to be a multiple of this value + hparams.vocab_divisor = 128 + + # options are dense_relu_dense, moe, hmoe + hparams.add_hparam("feedforward_layer", "drd") + + # If True, then reuse targets_embedding_var * rsqrt(d_model) as softmax_var + # If hparams.transformer_type == "encoder", then there is no targets embedding + # so we reuse the inputs embedding instead. + hparams.shared_embedding_and_softmax_weights = True + # Reuse targets_embedding_var as inputs_embedding_var + # relevant only if hparams.transformer_type == "encdec" + hparams.shared_embedding = True + hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay" + hparams.learning_rate_warmup_steps = 10000 + hparams.add_hparam("master_dtype", "bfloat16") + hparams.add_hparam("slice_dtype", "float32") + hparams.activation_dtype = "bfloat16" + + # These parameters make Transformer model compatible with MtfTransformer + # Do not override these, as mtf_transformer does not support other options. + hparams.clip_grad_norm = 0. # i.e. no gradient clipping + hparams.bottom = { + "inputs": modalities.identity_bottom, + "targets": modalities.identity_bottom, + } + hparams.top = { + "targets": modalities.identity_top, + } + + # Parameters for computing the maximum decode length in beam search. 
+ # Maximum decode length is: + # min(max_length, + # decode_length_multiplier * input_length + decode_length_constant) + hparams.add_hparam("decode_length_multiplier", 1.5) + hparams.add_hparam("decode_length_constant", 10.0) + + # If nonzero, we split the batch across two tensor-dimensions named + # "outer_batch" and "inner_batch", allowing for splitting across two mesh + # dimensions. This is necessary for hierarchical mixture of experts. + # The two tensor dimensions have sizes hparams.outer_batch_size and + # hparams.batch_size // hparams.outer_batch_size. + hparams.add_hparam("outer_batch_size", 0) + + # TODO(noam): file a bug + hparams.add_hparam("reshape_logits_hack", False) + hparams.add_hparam("compression_factor", 4) + + return hparams + + +@registry.register_hparams +def mtf_transformer_base_lm(): + hparams = mtf_transformer_base() + hparams.decoder_layers = hparams.encoder_layers + hparams.transformer_type = "decoder" + hparams.label_smoothing = 0.0 + hparams.sampling_method = "random" + return hparams + + +@registry.register_hparams +def mtf_transformer_tiny(): + """Catch bugs locally...""" + hparams = mtf_transformer_base() + hparams.d_model = 128 + hparams.d_ff = 512 + hparams.batch_size = 8 + hparams.encoder_layers = ["att", "drd"] * 2 + hparams.decoder_layers = ["att", "enc_att", "drd"] * 2 + hparams.num_heads = 8 + # data parallelism and model-parallelism + hparams.mesh_shape = "batch:2;model:4" + hparams.activation_dtype = "float32" + return hparams + + +@registry.register_hparams +def mtf_transformer_tiny_lm(): + hparams = mtf_transformer_tiny() + hparams.decoder_layers = hparams.encoder_layers + hparams.transformer_type = "decoder" + hparams.label_smoothing = 0.0 + hparams.sampling_method = "random" + return hparams + + +@registry.register_hparams +def mtf_transformer_tiny_denoising(): + hparams = mtf_transformer_tiny_lm() + hparams.decoder_type = "denoising" + hparams.noising_spec_train = ("random_zipfian", 0.3) + hparams.noising_use_eval_during_train = 0.5 + hparams.max_length = 1024 + return hparams + + +@registry.register_hparams +def mtf_transformer_single(): + hparams = mtf_transformer_tiny() + hparams.mesh_shape = "" + return hparams + + +@registry.register_hparams +def mtf_transformer_enc_single(): + hparams = mtf_transformer_single() + hparams.transformer_type = "encoder" + return hparams + + +@registry.register_hparams +def mtf_transformer_tiny_8gpu(): + hparams = mtf_transformer_tiny() + hparams.mesh_shape = "model:8" + return hparams + + +def mtf_transformer_paper_lm(size): + """Config for language-model experiments. + + Train these on languagemodel_lm1b32k_packed for 136000 steps (10 epochs) + + The size parameter is an integer that controls the number of heads and the + size of the size of the feedforward hidden layers. Increasing size by 1 + doubles each of these. 
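+  For example, size=2 gives num_heads = 8 * 2**2 = 32 and
+  d_ff = 8192 * 2**2 = 32768.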
+ + Results: + size params/10^9 log-ppl(per-token) + -1 0.14 3.209 + 0 0.22 3.119 + 1 0.37 3.037 + 2 0.67 2.969 + 3 1.28 2.912 + 4 2.48 2.874 + 5 4.90 2.871 + + (to get word-level log-ppl, multiply by 1.1078) + + Args: + size: an integer + Returns: + a hparams object + """ + n = 2 ** size + hparams = mtf_transformer_base_lm() + hparams.batch_size = 256 + hparams.d_model = 1024 + hparams.d_ff = int(8192 * n) + hparams.d_kv = 256 + hparams.num_heads = int(8 * n) + hparams.shared_embedding_and_softmax_weights = False + # one epoch for languagemodel_lm1b32k_packed = 13600 steps + hparams.learning_rate_decay_steps = 13600 + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_lm_m1(): + hparams = mtf_transformer_paper_lm(-1) + hparams.mesh_shape = "batch:32" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_lm_0(): + hparams = mtf_transformer_paper_lm(0) + hparams.mesh_shape = "batch:32" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_lm_1(): + hparams = mtf_transformer_paper_lm(1) + hparams.mesh_shape = "model:4;batch:8" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_lm_2(): + hparams = mtf_transformer_paper_lm(2) + hparams.mesh_shape = "model:4;batch:8" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_lm_3(): + hparams = mtf_transformer_paper_lm(3) + hparams.mesh_shape = "model:8;batch:16" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_lm_4(): + hparams = mtf_transformer_paper_lm(4) + hparams.mesh_shape = "batch:16;model:32" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_lm_5(): + hparams = mtf_transformer_paper_lm(5) + hparams.mesh_shape = "batch:16;model:32" + return hparams + + +def mtf_transformer_paper_tr(size): + """Config for translation experiments. + + Train these on translate_enfr_wmt32k_packed for 154000 steps (3 epochs) + + The size parameter is an integer that controls the number of heads and the + size of the size of the feedforward hidden layers. Increasing size by 1 + doubles each of these. 
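+  For example, size=1 gives num_heads = 8 * 2 = 16 and d_ff = 4096 * 2 = 8192.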
+ + Args: + size: an integer + Returns: + a hparams object + """ + n = 2 ** size + hparams = mtf_transformer_base() + hparams.label_smoothing = 0.1 + hparams.batch_size = 128 + hparams.d_model = 1024 + hparams.d_ff = int(4096 * n) + hparams.num_heads = int(8 * n) + hparams.shared_embedding_and_softmax_weights = False + # one epoch for translate_enfr_wmt32k_packed = 51400 steps + hparams.learning_rate_decay_steps = 51400 + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_m1(): + hparams = mtf_transformer_paper_tr(-1) + hparams.mesh_shape = "batch:32" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_0(): + hparams = mtf_transformer_paper_tr(0) + hparams.mesh_shape = "batch:32" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_0_a32(): + hparams = mtf_transformer_paper_tr_0() + hparams.activation_dtype = "float32" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_0_nf(): + hparams = mtf_transformer_paper_tr_0() + hparams.optimizer_adafactor_factored = False + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_1(): + hparams = mtf_transformer_paper_tr(1) + hparams.mesh_shape = "model:4;batch:8" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_2(): + hparams = mtf_transformer_paper_tr(2) + hparams.mesh_shape = "model:4;batch:8" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_3(): + hparams = mtf_transformer_paper_tr(3) + hparams.mesh_shape = "model:8;batch:16" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_4(): + hparams = mtf_transformer_paper_tr(4) + hparams.mesh_shape = "model:8;batch:16" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_0_mesh_8(): + hparams = mtf_transformer_paper_tr(0) + hparams.mesh_shape = "batch:8" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_4_mesh_16_8(): + hparams = mtf_transformer_paper_tr(4) + hparams.mesh_shape = "batch:8;model:16" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_6_mesh_64_8(): + # Note: This mesh shape does align well with physical [16, 16, 2] topology. + hparams = mtf_transformer_paper_tr(6) + hparams.mesh_shape = "model:64;batch:8" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_0_mesh_8_v2(): + hparams = mtf_transformer_paper_tr(0) + hparams.batch_size = int(hparams.batch_size / 4) + hparams.mesh_shape = "batch:8" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_0_mesh_128(): + hparams = mtf_transformer_paper_tr(0) + hparams.batch_size = int(hparams.batch_size * 4) + hparams.mesh_shape = "batch:128" + return hparams + + +@registry.register_hparams +def mtf_transformer_paper_tr_0_mesh_512(): + hparams = mtf_transformer_paper_tr(0) + hparams.batch_size = int(hparams.batch_size * 16) + hparams.mesh_shape = "batch:512" + return hparams + + +@registry.register_hparams +def mtf_transformer_lm_baseline(): + """Small language model to run on 1 TPU. 
+ + Run this on 2x2 on languagemodel_lm1b32k_packed for 272000 steps (10 epochs) + Results: + params/10^9 log-ppl(per-token) + 0.14 3.202 + + Returns: + a hparams + """ + hparams = mtf_transformer_paper_lm(-1) + hparams.batch_size = 128 + hparams.learning_rate_decay_steps = 27200 # one epoch on lm1b + hparams.mesh_shape = "batch:8" + return hparams diff --git a/tensor2tensor/models/mtf_transformer2.py b/tensor2tensor/models/mtf_transformer2.py new file mode 100644 index 000000000..ed3ffa88d --- /dev/null +++ b/tensor2tensor/models/mtf_transformer2.py @@ -0,0 +1,868 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Transformer model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import mesh_tensorflow as mtf +from mesh_tensorflow.transformer import moe +from mesh_tensorflow.transformer import transformer +from mesh_tensorflow.transformer import transformer_layers +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import modalities +from tensor2tensor.utils import mtf_model +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_model +class MtfUnitransformer(mtf_model.MtfModel): + """Single-stack Transformer (Transformer Decoder) in mesh_tensorflow. + + Can optionally be autoregressive (language generation) or non-autoregressive + like BERT. + """ + + @property + def batch_dims(self): + hparams = self._hparams + if hparams.outer_batch_size == 0: + return [mtf.Dimension("batch", hparams.batch_size)] + else: + if hparams.batch_size % hparams.outer_batch_size != 0: + raise ValueError( + "hparams.outer_batch_size must divide hparams.batch_size") + return [ + mtf.Dimension("outer_batch", hparams.outer_batch_size), + mtf.Dimension("inner_batch", + hparams.batch_size // hparams.outer_batch_size)] + + def combine_batch_dims(self, x): + if len(self.batch_dims) <= 1: + return x + return mtf.replace_dimensions( + x, self.batch_dims, mtf.combined_dimension(self.batch_dims)) + + @property + def autoregressive(self): + return self._hparams.autoregressive + + @property + def variable_dtype(self): + return mtf.VariableDType( + tf.as_dtype(self._hparams.master_dtype), + tf.as_dtype(self._hparams.slice_dtype), + tf.as_dtype(self._hparams.activation_dtype)) + + @property + def length_dim(self): + return mtf.Dimension( + "length", self._hparams.length or self._hparams.max_length) + + def _import_to_batch_by_length(self, x, name, mesh): + mtf_shape = mtf.Shape(self.batch_dims + [self.length_dim]) + x = tf.reshape(x, mtf_shape.to_integer_list) + return mtf.import_fully_replicated(mesh, x, mtf_shape, name=name) + + def _import_feature(self, features, mesh, key): + """Import a feature from the features dictionary into a mtf.Tensor. 
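+
+    The feature is truncated and/or zero-padded to shape
+    [batch_size, length] before being fully replicated onto the mesh.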
+ + Args: + features: a features dictionary + mesh: a Mesh + key: a string + + Returns: + a mtf.Tensor with dtype int32 and shape self.batch_dims + self.length_dim + """ + if key not in features: + return None + x = tf.to_int32(features[key]) + x = common_layers.expand_squeeze_to_nd(x, 2) + batch_size = mtf.Shape(self.batch_dims).size + x = x[:, :self.length_dim.size] + extra_length = self.length_dim.size - tf.shape(x)[1] + extra_batch = batch_size - tf.shape(x)[0] + x = tf.pad(x, [[0, extra_batch], [0, extra_length]]) + mtf_shape = mtf.Shape(self.batch_dims + [self.length_dim]) + x = tf.reshape(x, mtf_shape.to_integer_list) + return mtf.import_fully_replicated(mesh, x, mtf_shape, name=key) + + def model(self): + hparams = self._hparams + if hparams.label_smoothing != 0: + raise NotImplementedError( + "Label smoothing not implemented in unitransformer." + " Do you really want it?") + layer_stack = layer_stack_from_hparams(hparams, "") + if self.autoregressive: + input_vocab_size = self._targets_vocab_size + else: + input_vocab_size = self._inputs_vocab_size + return transformer.Unitransformer( + layer_stack=layer_stack, + d_model=hparams.d_model, + input_vocab_size=input_vocab_size, + output_vocab_size=self._targets_vocab_size, + autoregressive=self.autoregressive, + max_length=hparams.max_length, + shared_embedding_and_softmax_weights=( + hparams.shared_embedding_and_softmax_weights), + z_loss=hparams.z_loss, + layout=hparams.layout, + mesh_shape=hparams.mesh_shape) + + def _mtf_model_fn(self, features, mesh): + self._original_features = features + hparams = self._hparams + def import_feature(key): + return self._import_feature(features, mesh, key) + targets = import_feature("targets") + sequence_id = import_feature("targets_segmentation") + if hparams.use_global_position_in_packed_sequence: + position = None + else: + position = import_feature("targets_position") + if self.autoregressive: + inputs = mtf.shift( + targets, offset=1, dim=self.length_dim, wrap=False) + # We should have a 0 at the beginning of each sequence rather than the + # shifted EOS (1) from the previous sequence. + inputs -= mtf.to_int32(mtf.equal(inputs, 1)) + else: + inputs = import_feature("inputs") + # TODO(noam): options for bert-style masking here? + model = self.model() + logits, loss = model.call_simple( + inputs=inputs, + targets=targets, + compute_loss=True, + mode=hparams.mode, + variable_dtype=self.variable_dtype, + sequence_id=sequence_id, + position=position) + return logits, loss + + def mtf_model_fn(self, features, mesh): + logits, loss = self._mtf_model_fn(features, mesh) + # combine batch dims + logits = self.combine_batch_dims(logits) + return logits, loss + + @property + def _targets_vocab_size(self): + targets_vocab_size = self._problem_hparams.vocab_size["targets"] + targets_vocab_size += (-targets_vocab_size) % self._hparams.vocab_divisor + return targets_vocab_size + + @property + def _inputs_vocab_size(self): + inputs_vocab_size = self._problem_hparams.vocab_size["inputs"] + inputs_vocab_size += (-inputs_vocab_size) % self._hparams.vocab_divisor + return inputs_vocab_size + + def sample(self, features, mesh): + hparams = self._hparams + model = self.model() + def import_feature(key): + return self._import_feature(features, mesh, key) + + if self.autoregressive: + # Prepare partial targets. + # In either features["inputs"] or features["targets"]. + # We force the outputs to begin with these sequences. 
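+      # (If neither feature is present, decoding starts from an all-zero
+      # sequence instead.)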
+ partial_targets = import_feature("inputs") + if partial_targets is None: + partial_targets = import_feature("targets") + if partial_targets: + partial_targets *= mtf.cast( + mtf.not_equal(partial_targets, 1), partial_targets.dtype) + else: + ids_shape = mtf.Shape(self.batch_dims + [self.length_dim]) + partial_targets = mtf.constant(mesh, 0, ids_shape, dtype=tf.int32) + if hparams.beam_size > 1: + raise NotImplementedError( + "Beam search not implemented for unitransformer.") + ret = model.sample_autoregressive( + partial_targets, + temperature=hparams.sampling_temp, + variable_dtype=self.variable_dtype) + return self.combine_batch_dims(ret) + else: + raise ValueError( + "Don't know how to sample from non-autoregressive unitransformer") + + +@registry.register_model +class MtfBitransformer(MtfUnitransformer): + """Encoder-Decoder Transformer in mesh_tensorflow.""" + + def model(self): + hparams = self._hparams + encoder_layer_stack = layer_stack_from_hparams(hparams, "encoder_") + decoder_layer_stack = layer_stack_from_hparams(hparams, "decoder_") + encoder = transformer.Unitransformer( + layer_stack=encoder_layer_stack, + d_model=hparams.d_model, + input_vocab_size=self._inputs_vocab_size, + output_vocab_size=None, + autoregressive=False, + max_length=hparams.max_length, + name="encoder", + layout=hparams.layout, + mesh_shape=hparams.mesh_shape, + ) + decoder = transformer.Unitransformer( + layer_stack=decoder_layer_stack, + d_model=hparams.d_model, + input_vocab_size=self._targets_vocab_size, + output_vocab_size=self._targets_vocab_size, + autoregressive=True, + max_length=hparams.max_length, + label_smoothing=hparams.label_smoothing, + shared_embedding_and_softmax_weights=( + hparams.shared_embedding_and_softmax_weights), + z_loss=hparams.z_loss, + name="decoder", + layout=hparams.layout, + mesh_shape=hparams.mesh_shape, + ) + return transformer.Bitransformer( + encoder, decoder, shared_embedding=hparams.shared_embedding) + + def _mtf_model_fn(self, features, mesh): + self._original_features = features + hparams = self._hparams + def import_feature(key): + return self._import_feature(features, mesh, key) + targets = import_feature("targets") + inputs = import_feature("inputs") + if not inputs: + raise ValueError("inputs feature is missing") + encoder_sequence_id = import_feature("inputs_segmentation") + if not encoder_sequence_id: + encoder_sequence_id = mtf.to_int32(mtf.not_equal(inputs, 0)) + decoder_sequence_id = import_feature("targets_segmentation") + if decoder_sequence_id is None: + decoder_sequence_id = mtf.to_int32(mtf.not_equal(targets, 0)) + if hparams.use_global_position_in_packed_sequence: + encoder_position = None + decoder_position = None + else: + encoder_position = import_feature("inputs_position") + decoder_position = import_feature("targets_position") + model = self.model() + logits, loss = model.call_simple( + inputs=inputs, + targets=targets, + compute_loss=True, + mode=hparams.mode, + variable_dtype=self.variable_dtype, + encoder_sequence_id=encoder_sequence_id, + decoder_sequence_id=decoder_sequence_id, + encoder_position=encoder_position, + decoder_position=decoder_position) + return logits, loss + + def sample(self, features, mesh): + hparams = self._hparams + model = self.model() + inputs = self._import_feature(features, mesh, "inputs") + ret = model.decode( + inputs, + self.variable_dtype, + beam_size=hparams.beam_size, + alpha=hparams.alpha, + temperature=hparams.sampling_temp if hparams.beam_size == 1 else 0, + 
decode_length_multiplier=hparams.decode_length_multiplier, + decode_length_constant=hparams.decode_length_constant) + return self.combine_batch_dims(ret) + + +layers_registry = registry.Registries.mtf_layers + + +# The following functions construct layers based on hyperparmeters +def attention_kwargs_from_hparams(hparams): + return { + "dropout_rate": hparams.attention_dropout, + "extra_logit": 0.0 if hparams.extra_logit else None, + } + + +@layers_registry.register("self_att") +def self_attention_layer(hparams, prefix): + """Create self-attention layer based on hyperparameters.""" + return transformer_layers.SelfAttention( + num_heads=hparams.get(prefix + "num_heads"), + num_memory_heads=hparams.get(prefix + "num_memory_heads"), + key_value_size=hparams.d_kv, + shared_kv=hparams.get(prefix + "shared_kv", False), + attention_kwargs=attention_kwargs_from_hparams(hparams)) + + +@layers_registry.register("local_self_att") +def local_self_attention_layer(hparams, prefix): + """Create self-attention layer based on hyperparameters.""" + return transformer_layers.LocalSelfAttention( + num_heads=hparams.get(prefix + "num_heads"), + num_memory_heads=hparams.get(prefix + "num_memory_heads"), + radius=hparams.local_attention_radius, + key_value_size=hparams.d_kv, + shared_kv=hparams.get(prefix + "shared_kv", False), + attention_kwargs=attention_kwargs_from_hparams(hparams)) + + +@layers_registry.register("enc_att") +def enc_dec_attention_layer(hparams, prefix): + return transformer_layers.EncDecAttention( + num_heads=hparams.get(prefix + "num_heads"), + num_memory_heads=hparams.get(prefix + "num_memory_heads"), + key_value_size=hparams.d_kv, + shared_kv=hparams.get(prefix + "shared_kv", False), + attention_kwargs=attention_kwargs_from_hparams(hparams)) + + +@layers_registry.register("drd") +def dense_relu_dense_layer(hparams, prefix): + del prefix + return transformer_layers.DenseReluDense( + hidden_size=hparams.d_ff, + dropout_rate=hparams.relu_dropout) + + +@layers_registry.register("moe_1d") +def moe_1d_layer(hparams, prefix): + del prefix + return moe.MoE1D(num_experts=hparams.moe_num_experts, + hidden_size=hparams.moe_hidden_size) + + +@layers_registry.register("moe_2d") +def moe_2d_layer(hparams, prefix): + del prefix + return moe.MoE2D(expert_x=hparams.moe_expert_x, + expert_y=hparams.moe_expert_y, + hidden_size=hparams.moe_hidden_size) + + +def layer_stack_from_hparams(hparams, prefix): + """Create a layer stack based on the hyperparameter values.""" + layers = hparams.get(prefix + "layers") + return transformer.LayerStack( + [layers_registry[l](hparams, prefix) for l in layers], + dropout_rate=hparams.layer_prepostprocess_dropout, + norm_epsilon=hparams.norm_epsilon) + + +def mtf_transformer2_base(): + """Hyperparameters common to both unitransformer and bitransformer.""" + hparams = common_hparams.basic_params1() + + hparams.add_hparam("d_model", 1024) + hparams.batch_size = 4 + hparams.max_length = 1024 + hparams.label_smoothing = 0.0 + # a small positive value - this seems important for stability when training + # with bfloat16 activations. + hparams.add_hparam("z_loss", 1e-4) + + # hparams applying to both encoder and decoder layer stacks. 
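+  # d_ff is the hidden size of the dense_relu_dense feed-forward layers;
+  # d_kv is the per-head key/value dimension used by the attention layers.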
+  hparams.add_hparam("d_ff", 2048)
+  hparams.add_hparam("d_kv", 128)
+  hparams.add_hparam("attention_dropout", 0.0)
+  hparams.add_hparam("relu_dropout", 0.0)
+  hparams.del_hparam("num_heads")
+  hparams.del_hparam("num_hidden_layers")
+  hparams.layer_prepostprocess_dropout = 0.0
+  hparams.add_hparam("extra_logit", False)
+  # number of experts for moe_1d
+  hparams.moe_num_experts = 32
+  # number of experts for moe_2d = moe_expert_x * moe_expert_y
+  hparams.add_hparam("moe_expert_x", 8)
+  hparams.add_hparam("moe_expert_y", 4)
+  hparams.add_hparam("moe_hidden_size", 32768)
+
+  # round up vocab sizes to be a multiple of this value
+  hparams.vocab_divisor = 128
+
+  hparams.optimizer = "Adafactor"
+  hparams.learning_rate_schedule = "rsqrt_decay*linear_decay"
+  hparams.learning_rate_warmup_steps = 10000
+  hparams.add_hparam("master_dtype", "bfloat16")
+  hparams.add_hparam("slice_dtype", "float32")
+  hparams.activation_dtype = "bfloat16"
+
+  # 8-way model-parallelism
+  hparams.add_hparam("mesh_shape", "model:8")
+  hparams.add_hparam("layout", "batch:batch;vocab:model;d_ff:model;heads:model")
+
+  # If nonzero, we split the batch across two tensor-dimensions named
+  # "outer_batch" and "inner_batch", allowing for splitting across two mesh
+  # dimensions. This is necessary for hierarchical mixture of experts.
+  # The two tensor dimensions have sizes hparams.outer_batch_size and
+  # hparams.batch_size // hparams.outer_batch_size.
+  hparams.add_hparam("outer_batch_size", 0)
+
+  hparams.shared_embedding_and_softmax_weights = False
+  # length for training or decoding - defaults to max_length
+  hparams.add_hparam("length", 0)
+
+  # These parameters make the Transformer model compatible with mtf.
+  # Do not override these.
+  hparams.no_data_parallelism = True
+  hparams.use_fixed_batch_size = True
+  hparams.add_hparam("mtf_mode", True)
+  hparams.clip_grad_norm = 0.  # i.e. no gradient clipping
+  hparams.bottom = {
+      "inputs": modalities.identity_bottom,
+      "targets": modalities.identity_bottom,
+  }
+  hparams.top = {
+      "targets": modalities.identity_top,
+  }
+  hparams.add_hparam("beam_size", 1)
+
+  # If this is True, then in a packed dataset (where examples are concatenated
+  # to form longer examples) we use the global position (within the
+  # concatenated sequence) to compute the positional embedding, instead of the
+  # position within the individual sequence. This is counterintuitive, but
+  # for some reason, it keeps the model from diverging.
+ hparams.add_hparam("use_global_position_in_packed_sequence", True) + + return hparams + + +@registry.register_hparams +def mtf_unitransformer_base(): + """Hyperparameters for single-stack Transformer.""" + hparams = mtf_transformer2_base() + hparams.add_hparam("autoregressive", True) + # HYPERPARAMETERS FOR THE SINGLE LAYER STACK + hparams.add_hparam("layers", ["self_att", "drd"] * 6) + # number of heads in multihead attention + hparams.add_hparam("num_heads", 8) + # default of 0 for standard transformer behavior + # 1 means a single set of keys and values that are read by all query heads + hparams.add_hparam("num_memory_heads", 0) + # share attention keys and values + hparams.add_hparam("shared_kv", False) + # if nonzero then use local attention + hparams.add_hparam("local_attention_radius", 128) + return hparams + + +@registry.register_hparams +def mtf_bitransformer_base(): + """Machine translation base configuration.""" + hparams = mtf_transformer2_base() + hparams.max_length = 256 + hparams.shared_embedding = True + # HYPERPARAMETERS FOR THE LAYER STACKS + hparams.add_hparam("encoder_layers", ["self_att", "drd"] * 6) + hparams.add_hparam("decoder_layers", ["self_att", "enc_att", "drd"] * 6) + hparams.add_hparam("encoder_num_layers", 6) + hparams.add_hparam("decoder_num_layers", 6) + # number of heads in multihead attention + hparams.add_hparam("encoder_num_heads", 8) + hparams.add_hparam("decoder_num_heads", 8) + hparams.add_hparam("local_attention_radius", 128) + + # default of 0 for standard transformer behavior + # 1 means a single set of keys and values that are read by all query heads + hparams.add_hparam("encoder_num_memory_heads", 0) + hparams.add_hparam("decoder_num_memory_heads", 0) + # share attention keys and values + hparams.add_hparam("encoder_shared_kv", False) + hparams.add_hparam("decoder_shared_kv", False) + + # Parameters for computing the maximum decode length in beam search. 
+ # Maximum decode length is: + # min(max_length, + # decode_length_multiplier * input_length + decode_length_constant) + hparams.add_hparam("decode_length_multiplier", 1.5) + hparams.add_hparam("decode_length_constant", 10.0) + # used during decoding + hparams.add_hparam("alpha", 0.6) + hparams.sampling_temp = 0.0 + return hparams + + +@registry.register_hparams +def mtf_unitransformer_tiny(): + hparams = mtf_unitransformer_base() + hparams.batch_size = 2 + hparams.mesh_shape = "" + hparams.d_model = 128 + hparams.layers = ["self_att", "drd"] * 2 + hparams.num_heads = 4 + hparams.d_ff = 512 + return hparams + + +@registry.register_hparams +def mtf_bitransformer_tiny(): + """Small encoder-decoder model for testing.""" + hparams = mtf_bitransformer_base() + hparams.batch_size = 2 + hparams.mesh_shape = "" + hparams.d_model = 128 + hparams.encoder_layers = ["self_att", "drd"] * 2 + hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 2 + hparams.num_heads = 4 + hparams.d_ff = 512 + return hparams + + +@registry.register_hparams +def mtf_unitransformer_all_layers_tiny(): + """Test out all the layers on local CPU.""" + hparams = mtf_unitransformer_tiny() + hparams.moe_num_experts = 4 + hparams.moe_expert_x = 4 + hparams.moe_expert_y = 4 + hparams.moe_hidden_size = 512 + hparams.layers = ["self_att", "local_self_att", "moe_1d", "moe_2d", "drd"] + return hparams + + +@registry.register_hparams +def mtf_bitransformer_all_layers_tiny(): + """Test out all the layers on local CPU.""" + hparams = mtf_bitransformer_tiny() + hparams.moe_num_experts = 4 + hparams.moe_expert_x = 4 + hparams.moe_expert_y = 4 + hparams.moe_hidden_size = 512 + hparams.encoder_layers = [ + "self_att", "local_self_att", "moe_1d", "moe_2d", "drd"] + hparams.decoder_layers = [ + "self_att", "local_self_att", "enc_att", "moe_1d", "moe_2d", "drd"] + return hparams + + +@registry.register_hparams +def mtr_lm_dense(sz): + """Series of architectures for language modeling. + + We assume infinite training data, so no dropout necessary. + + You can use languagemodel_wiki_noref_v32k_l1k. + (1 epoch = ~46000 steps). + TODO(noam): find a large enough dataset for these experiments. + + Args: + sz: an integer + + Returns: + a hparams + """ + n = 2 ** sz + hparams = mtf_unitransformer_base() + hparams.d_model = 1024 + hparams.max_length = 1024 + hparams.batch_size = 128 + # Parameters for my_layer_stack() + hparams.num_hidden_layers = 6 + hparams.d_ff = 8192 * n + hparams.d_kv = 256 + hparams.num_heads = 8 * n + hparams.learning_rate_decay_steps = 65536 + hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model" + hparams.mesh_shape = "batch:32" + return hparams + + +@registry.register_hparams +def mtr_lm_dense_0(): + return mtr_lm_dense(0) + + +@registry.register_hparams +def mtr_lm_dense_0_h1_16(): + hparams = mtr_lm_dense_0() + hparams.decoder_num_heads = 16 + hparams.decoder_num_memory_heads = 1 + return hparams + + +@registry.register_hparams +def mtr_lm_dense_1(): + return mtr_lm_dense(1) + + +@registry.register_hparams +def mtr_lm_dense_2(): + hparams = mtr_lm_dense(2) + hparams.mesh_shape = "model:4;batch:8" + return hparams + + +@registry.register_hparams +def mtr_lm_dense_3(): + hparams = mtr_lm_dense(3) + hparams.mesh_shape = "model:4;batch:8" + return hparams + + +@registry.register_hparams +def mtr_lm_v1(): + """Model incorporating mixture-of-experts, local and global attention. + + ~6B parameters + + 32 experts in 3 hierarchichal moe layers. 
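+
+  Uses a 2-d mesh ("b0:4;b1:8") with the batch split into outer and inner
+  dimensions (outer_batch_size=4) so that the 2-d mixture-of-experts layers
+  can be sharded across both mesh dimensions.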
+ + Returns: + a hparams + """ + hparams = mtr_lm_dense(0) + hparams.layers = (["local_self_att", "local_self_att", "drd", + "self_att", "drd", "local_self_att", + "local_self_att", "moe_2d"] * 4)[:-1] + hparams.d_kv = 128 + hparams.moe_expert_x = 8 + hparams.moe_expert_y = 4 + hparams.moe_hidden_size = 32768 + hparams.d_ff = 2048 + hparams.num_memory_heads = 0 + hparams.mesh_shape = "b0:4;b1:8" + hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0" + hparams.outer_batch_size = 4 + return hparams + + +@registry.register_hparams +def mtr_lm_v1_h1_8(): + """Version for fast decoding.""" + hparams = mtr_lm_v1() + hparams.num_memory_heads = 1 + return hparams + + +def mtr_tr_dense(sz): + """Series of machine translation models. + + All models are trained on sequences of 256 tokens. + + You can use the dataset translate_enfr_wmt32k_packed. + 154000 steps = 3 epochs. + + Args: + sz: an integer + + Returns: + a hparams + """ + n = 2 ** sz + hparams = mtf_bitransformer_base() + hparams.d_model = 1024 + hparams.max_length = 256 + hparams.batch_size = 128 + hparams.d_ff = int(4096 * n) + hparams.d_kv = 128 + hparams.encoder_num_heads = int(8 * n) + hparams.decoder_num_heads = int(8 * n) + # one epoch for translate_enfr_wmt32k_packed = 51400 steps + hparams.learning_rate_decay_steps = 51400 + hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model" + hparams.mesh_shape = "batch:32" + hparams.label_smoothing = 0.1 + hparams.layer_prepostprocess_dropout = 0.1 + hparams.attention_dropout = 0.1 + hparams.relu_dropout = 0.1 + return hparams + + +@registry.register_hparams +def mtr_tr_dense_0(): + return mtr_tr_dense(0) + + +@registry.register_hparams +def mtr_tr_dense_1(): + return mtr_tr_dense(1) + + +@registry.register_hparams +def mtr_tr_dense_2(): + hparams = mtr_tr_dense(2) + hparams.mesh_shape = "model:4;batch:8" + return hparams + + +@registry.register_hparams +def mtr_tr_dense_3(): + hparams = mtr_tr_dense(3) + hparams.mesh_shape = "model:4;batch:8" + return hparams + + +@registry.register_hparams +def mtr_tr_dense_3_88(): + hparams = mtr_tr_dense(3) + hparams.mesh_shape = "model:8;batch:16" + return hparams + + +@registry.register_hparams +def mtr_tr_dense_3_fast(): + hparams = mtr_tr_dense_3() + hparams.local_attention_radius = 32 + hparams.decoder_num_heads = 128 + hparams.decoder_num_memory_heads = 8 + return hparams + + +def mtr_tr_dense_local(sz): + """With local self-attention in the decoder.""" + hparams = mtr_tr_dense(sz) + hparams.decoder_layers = ["local_self_att", "enc_att", "drd"] * 6 + hparams.local_attention_radius = 32 + return hparams + + +@registry.register_hparams +def mtr_tr_dense_local_0(): + return mtr_tr_dense_local(0) + + +@registry.register_hparams +def mtr_tr_dense_local_0_w8(): + hparams = mtr_tr_dense_local_0() + hparams.local_attention_radius = 8 + return hparams + + +@registry.register_hparams +def mtr_tr_dense_local_0_h1_16(): + hparams = mtr_tr_dense_local_0() + hparams.decoder_num_heads = 16 + hparams.decoder_num_memory_heads = 1 + return hparams + + +@registry.register_hparams +def mtr_tr_dense_local_0_h1_16_shared(): + hparams = mtr_tr_dense_local_0_h1_16() + hparams.shared_embedding_and_softmax_weights = True + return hparams + + +@registry.register_hparams +def mtr_tr_dense_local_0_h1_8_kv256(): + hparams = mtr_tr_dense_local_0() + hparams.decoder_num_heads = 8 + hparams.decoder_num_memory_heads = 1 + hparams.d_kv = 256 + return hparams + + +@registry.register_hparams +def mtr_tr_dense_local_0_h1_16_shared_kv(): + hparams = 
mtr_tr_dense_local_0_h1_16() + hparams.decoder_shared_kv = True + return hparams + + +@registry.register_hparams +def mtr_tr_dense_0_h4(): + hparams = mtr_tr_dense_0() + hparams.decoder_num_heads = 4 + return hparams + + +@registry.register_hparams +def mtr_tr_dense_0_h16(): + hparams = mtr_tr_dense_0() + hparams.decoder_num_heads = 16 + return hparams + + +@registry.register_hparams +def mtr_tr_dense_0_extra_logit(): + hparams = mtr_tr_dense_0() + hparams.extra_logit = True + return hparams + + +@registry.register_hparams +def mtr_tr_dense_0_h1_8(): + hparams = mtr_tr_dense_0() + hparams.decoder_num_memory_heads = 1 + return hparams + + +@registry.register_hparams +def mtr_tr_dense_0_h1_1(): + hparams = mtr_tr_dense_0() + hparams.decoder_num_heads = 1 + return hparams + + +@registry.register_hparams +def mtr_tr_dense_0_h1_16(): + hparams = mtr_tr_dense_0() + hparams.decoder_num_heads = 16 + hparams.decoder_num_memory_heads = 1 + return hparams + + +@registry.register_hparams +def mtr_tr_dense_0_h2_16(): + hparams = mtr_tr_dense_0() + hparams.decoder_num_heads = 16 + hparams.decoder_num_memory_heads = 2 + return hparams + + +@registry.register_hparams +def mtr_tr_dense_0_shared_kv(): + hparams = mtr_tr_dense_0() + hparams.decoder_shared_kv = True + return hparams + + +@registry.register_hparams +def mtr_tr_enfr_v0(): + # good parameters for wmt-en-fr + hparams = mtr_tr_dense_local_0_h1_16() + return hparams + + +@registry.register_hparams +def mtr_tr_ende_v0(): + # good parameters for wmt-en-de + hparams = mtr_tr_dense_local_0_h1_16() + hparams.learning_rate_decay_steps = 20000 + hparams.shared_embedding_and_softmax_weights = True + hparams.layer_prepostprocess_dropout = 0.2 + return hparams + + +@registry.register_hparams +def mtr_tr_ende_deep(): + hparams = mtr_tr_ende_v0() + hparams.decoder_num_heads = 8 + hparams.encoder_num_heads = 4 + hparams.d_ff = 2048 + hparams.encoder_num_layers = 12 + hparams.decoder_num_layers = 12 + return hparams diff --git a/tensor2tensor/models/mtf_transformer_test.py b/tensor2tensor/models/mtf_transformer_test.py new file mode 100644 index 000000000..f411e078b --- /dev/null +++ b/tensor2tensor/models/mtf_transformer_test.py @@ -0,0 +1,176 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for Transformer on Mesh TensorFlow.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import mesh_tensorflow as mtf +import numpy as np + +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.models import mtf_transformer + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +# Constants shared between all functions. 
+BATCH_SIZE = 2 +INPUT_LENGTH = 6 +TARGET_LENGTH = 6 +VOCAB_SIZE = 128 + + +def get_model(hparams=None, mode=tf_estimator.ModeKeys.TRAIN, + has_input=True, model_cls=mtf_transformer.MtfTransformer): + if hparams is None: + hparams = mtf_transformer.mtf_transformer_single() + hparams.max_length = INPUT_LENGTH + hparams.batch_size = BATCH_SIZE + + p_hparams = problem_hparams.test_problem_hparams(VOCAB_SIZE, + VOCAB_SIZE, + hparams) + if not has_input: + del p_hparams.modality["inputs"] + hparams.problem_hparams = p_hparams + + inputs = np.random.randint( + VOCAB_SIZE, size=(BATCH_SIZE, INPUT_LENGTH, 1, 1)) + targets = np.random.randint( + VOCAB_SIZE, size=(BATCH_SIZE, TARGET_LENGTH, 1, 1)) + features = { + "targets": tf.constant(targets, dtype=tf.int32, name="targets"), + "target_space_id": tf.constant(1, dtype=tf.int32) + } + if has_input: + features["inputs"] = tf.constant(inputs, dtype=tf.int32, name="inputs") + + return model_cls(hparams, mode, p_hparams), features, hparams + + +def get_placement_mesh(hparams): + graph = mtf.Graph() + mesh = mtf.Mesh(graph, "my_mesh") + mesh_shape = mtf.convert_to_shape(hparams.mesh_shape) + + mesh_devices = [""] * mesh_shape.size + mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl( + mesh_shape, hparams.layout, mesh_devices) + return mesh, mesh_impl + + +class MtfTransformerTest(tf.test.TestCase): + + def testMtfTransformer(self): + hparams = mtf_transformer.mtf_transformer_single() + + model, features, hparams = get_model(hparams) + hparams.mesh_shape = "" + hparams.layout = "" + mesh, mesh_impl = get_placement_mesh(hparams) + + logits, _ = model.mtf_model_fn(features, mesh) + lowering = mtf.Lowering(mesh.graph, {mesh: mesh_impl}) + tf_group = lowering.copy_masters_to_slices() + tf_logits = lowering.export_to_tf_tensor(logits) + + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + session.run(tf_group) + res = session.run(tf_logits) + self.assertEqual(res.shape, (BATCH_SIZE, TARGET_LENGTH, VOCAB_SIZE)) + + def testMtfTransformerDataParallel(self): + hparams = mtf_transformer.mtf_transformer_single() + + model, features, hparams = get_model(hparams) + hparams.mesh_shape = "all:2" + hparams.layout = "batch:all" + mesh, mesh_impl = get_placement_mesh(hparams) + + logits, _ = model.mtf_model_fn(features, mesh) + lowering = mtf.Lowering(mesh.graph, {mesh: mesh_impl}) + tf_group = lowering.copy_masters_to_slices() + tf_logits = lowering.export_to_tf_tensor(logits) + + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + session.run(tf_group) + res = session.run(tf_logits) + self.assertEqual(res.shape, (BATCH_SIZE, TARGET_LENGTH, VOCAB_SIZE)) + + def testMtfTransformerModelParallel(self): + hparams = mtf_transformer.mtf_transformer_single() + + model, features, hparams = get_model(hparams) + hparams.mesh_shape = "all:2" + hparams.layout = "length:all" + mesh, mesh_impl = get_placement_mesh(hparams) + + logits, _ = model.mtf_model_fn(features, mesh) + lowering = mtf.Lowering(mesh.graph, {mesh: mesh_impl}) + tf_group = lowering.copy_masters_to_slices() + tf_logits = lowering.export_to_tf_tensor(logits) + + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + session.run(tf_group) + res = session.run(tf_logits) + self.assertEqual(res.shape, (BATCH_SIZE, TARGET_LENGTH, VOCAB_SIZE)) + + def testMtfTransformerDataModelParallel(self): + hparams = mtf_transformer.mtf_transformer_single() + + model, features, hparams = get_model(hparams) + hparams.mesh_shape 
= "batch:2;model:2" + hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model" + mesh, mesh_impl = get_placement_mesh(hparams) + + logits, _ = model.mtf_model_fn(features, mesh) + lowering = mtf.Lowering(mesh.graph, {mesh: mesh_impl}) + tf_group = lowering.copy_masters_to_slices() + tf_logits = lowering.export_to_tf_tensor(logits) + + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + session.run(tf_group) + res = session.run(tf_logits) + self.assertEqual(res.shape, (BATCH_SIZE, TARGET_LENGTH, VOCAB_SIZE)) + + def testMtfTransformerEncoderDataModelParallel(self): + hparams = mtf_transformer.mtf_transformer_enc_single() + + model, features, hparams = get_model(hparams) + hparams.mesh_shape = "batch:2;model:2" + hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model" + mesh, mesh_impl = get_placement_mesh(hparams) + + logits, _ = model.mtf_model_fn(features, mesh) + lowering = mtf.Lowering(mesh.graph, {mesh: mesh_impl}) + tf_group = lowering.copy_masters_to_slices() + tf_logits = lowering.export_to_tf_tensor(logits) + + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + session.run(tf_group) + res = session.run(tf_logits) + self.assertEqual(res.shape, (BATCH_SIZE, TARGET_LENGTH, VOCAB_SIZE)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/multimodel.py b/tensor2tensor/models/multimodel.py deleted file mode 100644 index bcbf16995..000000000 --- a/tensor2tensor/models/multimodel.py +++ /dev/null @@ -1,159 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""MultiModel.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -from tensor2tensor.models import common_layers -from tensor2tensor.models import slicenet -from tensor2tensor.utils import expert_utils as eu -from tensor2tensor.utils import registry -from tensor2tensor.utils import t2t_model - -import tensorflow as tf - - -def experts(xs, moe_n1, moe_n2, hidden_size, filter_size, dp, ps, train): - """Mixture-of-Experts layer.""" - # Set up the hyperparameters for the gating networks. - primary_gating_hp = eu.NoisyTopKGatingParams() - primary_gating_hp.num_experts = moe_n1 - if moe_n2: - # Hierarchical MoE containing moe_n1 groups of moe_n2 experts. - assert moe_n2 > 1 - secondary_gating_hp = eu.NoisyTopKGatingParams() - secondary_gating_hp.num_experts = moe_n2 - else: - # Flat mixture of moe_n1 experts. - secondary_gating_hp = None - # Set up the hyperparameters for the expert networks. - # Each expert contains a hidden RELU layer of size filter_size - expert_hp = eu.FeedForwardExpertParams() - expert_hp.hidden_layer_sizes = [filter_size] - # Create the mixture of experts. - moe = eu.DistributedMixtureOfExperts(primary_gating_hp, secondary_gating_hp, - expert_hp, hidden_size, hidden_size, ps, - "moe") - # MoE expects input tensors to be 2d. Flatten out spatial dimensions. 
- xs_2d = dp(tf.reshape, xs, [[-1, hidden_size]] * dp.n) - # Call the MoE - moe_out_2d, importance, load, _, _ = moe.Eval( - dp.devices, xs_2d, train, summaries=False, identifiers=None) - # Reshape the output to the original shape. - moe_out = dp(tf.reshape, moe_out_2d, dp(tf.shape, xs)) - # These losses encourage equal load on the different experts. - loss = eu.CVSquared(importance) + eu.CVSquared(load) - - # Apply residual and normalize. - def add_and_normalize(x, y): - return common_layers.layer_norm(x + y, hidden_size, name="moe_norm") - - return dp(add_and_normalize, xs, moe_out), loss - - -@registry.register_model -class MultiModel(t2t_model.T2TModel): - - def model_fn_body_sharded(self, sharded_features, train): - dp = self._data_parallelism - hparams = self._hparams - targets = sharded_features["targets"] - - def flatten(inputs): - return tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2) - - inputs = dp(flatten, sharded_features["inputs"]) - - # Encode inputs. - def encode_half(inputs, inputs_mask, hparams): - # Add timing and encode. - inputs = common_layers.add_timing_signal(inputs) - return slicenet.multi_conv_res(inputs, "SAME", "encoder1", - hparams.num_hidden_layers // 2, - hparams, train, mask=inputs_mask) - - target_space_emb = dp(slicenet.embed_target_space, - sharded_features["target_space_id"], - hparams.hidden_size) - inputs_pad = dp(slicenet.embedding_to_padding, inputs) - inputs_mask = dp(lambda x: 1.0 - x, inputs_pad) - inputs_encoded = dp(encode_half, inputs, inputs_mask, hparams) - with tf.variable_scope("experts_enc"): - inputs_encoded, expert_loss = experts( - inputs_encoded, hparams.moe_n1, hparams.moe_n2, hparams.hidden_size, - hparams.hidden_size, dp, self._ps_devices, train) - expert_loss *= hparams.moe_loss_coef - inputs_encoded = dp( - slicenet.multi_conv_res, inputs_encoded, "SAME", - "encoder2", hparams.num_hidden_layers, hparams, train, - mask=inputs_mask) - - # If we're just predicing a class, there is no use for a decoder, return. - target_modality = hparams.problems[self._problem_idx].target_modality - if "class_label_modality" in target_modality.name: - return inputs_encoded, tf.reduce_mean(expert_loss) - - # Do the middle part. - decoder_start, similarity_loss = dp( - slicenet.slicenet_middle, inputs_encoded, targets, - target_space_emb, inputs_mask, hparams, train) - - # Decode. - decoder_half = dp( - slicenet.multi_conv_res, - decoder_start, - "LEFT", - "decoder1", - hparams.num_hidden_layers // 2, - hparams, - train, - mask=inputs_mask, - source=inputs_encoded) - with tf.variable_scope("experts_dec"): - decoder_half, expert_dec_loss = experts( - decoder_half, hparams.moe_n1, hparams.moe_n2, hparams.hidden_size, - hparams.hidden_size, dp, self._ps_devices, train) - expert_loss += expert_dec_loss * hparams.moe_loss_coef - decoder_final = dp( - slicenet.multi_conv_res, - decoder_half, - "LEFT", - "decoder2", - hparams.num_hidden_layers // 2, - hparams, - train, - mask=inputs_mask, - source=inputs_encoded) - - total_loss = tf.reduce_mean(expert_loss) + tf.reduce_mean(similarity_loss) - return decoder_final, total_loss - - -@registry.register_hparams("multimodel1p8") -def multimodel_params1_p8(): - """Version for eight problem runs.""" - hparams = slicenet.slicenet_params1() - hparams.problem_choice = "distributed" - hparams.attention_type = "simple" # TODO(lukaszkaiser): add transformer. 
- hparams.hidden_size = 1536 - hparams.moe_n1 = 120 - hparams.shared_embedding_and_softmax_weights = int(False) - hparams.dropout = 0.1 - hparams.attention_dropout = 0.1 - hparams.learning_rate_decay_scheme = "exp500k" - return hparams diff --git a/tensor2tensor/models/neural_architecture_search/README.md b/tensor2tensor/models/neural_architecture_search/README.md new file mode 100644 index 000000000..c197c88ab --- /dev/null +++ b/tensor2tensor/models/neural_architecture_search/README.md @@ -0,0 +1,3 @@ +This directory contains the configurable model code used in the Evolved +Transformer paper (https://arxiv.org/abs/1901.11117). It can be used to train +models in the search space as was done in the paper. diff --git a/tensor2tensor/models/neural_architecture_search/__init__.py b/tensor2tensor/models/neural_architecture_search/__init__.py new file mode 100644 index 000000000..06080ebe9 --- /dev/null +++ b/tensor2tensor/models/neural_architecture_search/__init__.py @@ -0,0 +1,16 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + diff --git a/tensor2tensor/models/neural_architecture_search/nas_layers.py b/tensor2tensor/models/neural_architecture_search/nas_layers.py new file mode 100644 index 000000000..c89fc1f78 --- /dev/null +++ b/tensor2tensor/models/neural_architecture_search/nas_layers.py @@ -0,0 +1,686 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Bank of layers for Translation NAS searches. + +All encoder layers are registered in the global LayerRegistry ENCODER_LAYERS. +All decoder layers are registered on the global LayerRegistry DECODER_LAYERS. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import abc +import six + +from tensor2tensor.layers import common_attention + +import tensorflow.compat.v1 as tf + +# Registry layer keys. 
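+# Each key below names a TranslationLayer instance that is created and
+# registered at the bottom of this file, in ENCODER_LAYERS, DECODER_LAYERS, or
+# both.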
+ATTEND_TO_ENCODER_REGISTRY_KEY = "attend_to_encoder" +ATTENTION_32_HEADS_REGISTRY_KEY = "attention_32_heads" +ATTENTION_16_HEADS_REGISTRY_KEY = "attention_16_heads" +ATTENTION_4_HEADS_REGISTRY_KEY = "attention_4_heads" +DEPTHWISE_CONV_3X1_REGISTRY_KEY = "depthwise_conv_3x1" +DEPTHWISE_CONV_5X1_REGISTRY_KEY = "depthwise_conv_5x1" +DEPTHWISE_CONV_7X1_REGISTRY_KEY = "depthwise_conv_7x1" +DILATED_CONV_3X1_REGISTRY_KEY = "dilated_conv_3x1" +DILATED_CONV_5X1_REGISTRY_KEY = "dilated_conv_5x1" +GATED_LINEAR_UNIT_REGISTRY_KEY = "gated_linear_unit" +IDENTITY_REGISTRY_KEY = "identity" +# Lightweight convolution naming convention uses "R_X" where X is the variable +# reduction factor. +LIGHTWEIGHT_CONV_3X1_R_1_REGISTRY_KEY = "lightweight_conv_3x1_r_1" +LIGHTWEIGHT_CONV_3X1_R_4_REGISTRY_KEY = "lightweight_conv_3x1_r_4" +LIGHTWEIGHT_CONV_3X1_R_16_REGISTRY_KEY = "lightweight_conv_3x1_r_16" +LIGHTWEIGHT_CONV_5X1_R_1_REGISTRY_KEY = "lightweight_conv_5x1_r_1" +LIGHTWEIGHT_CONV_5X1_R_4_REGISTRY_KEY = "lightweight_conv_5x1_r_4" +LIGHTWEIGHT_CONV_5X1_R_16_REGISTRY_KEY = "lightweight_conv_5x1_r_16" +LIGHTWEIGHT_CONV_7X1_R_1_REGISTRY_KEY = "lightweight_conv_7x1_r_1" +LIGHTWEIGHT_CONV_7X1_R_4_REGISTRY_KEY = "lightweight_conv_7x1_r_4" +LIGHTWEIGHT_CONV_7X1_R_16_REGISTRY_KEY = "lightweight_conv_7x1_r_16" +LIGHTWEIGHT_CONV_15X1_R_1_REGISTRY_KEY = "lightweight_conv_15x1_r_1" +LIGHTWEIGHT_CONV_15X1_R_4_REGISTRY_KEY = "lightweight_conv_15x1_r_4" +LIGHTWEIGHT_CONV_15X1_R_16_REGISTRY_KEY = "lightweight_conv_15x1_r_16" +SEPARABLE_CONV_3X1_REGISTRY_KEY = "separable_conv_3x1" +SEPARABLE_CONV_5X1_REGISTRY_KEY = "separable_conv_5x1" +SEPARABLE_CONV_7X1_REGISTRY_KEY = "separable_conv_7x1" +SEPARABLE_CONV_9X1_REGISTRY_KEY = "separable_conv_9x1" +SEPARABLE_CONV_11X1_REGISTRY_KEY = "separable_conv_11x1" +SEPARABLE_CONV_13X1_REGISTRY_KEY = "separable_conv_13x1" +SEPARABLE_CONV_15X1_REGISTRY_KEY = "separable_conv_15x1" +STANDARD_CONV_1X1_REGISTRY_KEY = "standard_conv_1x1" +STANDARD_CONV_3X1_REGISTRY_KEY = "standard_conv_3x1" +STANDARD_CONV_5X1_REGISTRY_KEY = "standard_conv_5x1" +STANDARD_ATTENTION_REGISTRY_KEY = "standard_attention" + + +class TranslationLayer(object): + """Interface for the layers used in the Translation search space.""" + + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def _apply_logic(self, input_tensor, output_depth, hparams, var_scope_suffix, + nonpadding, mask_future, **kwargs): + """Applies the layer specific logic to the `input_tensor`. + + This is called by `apply_layer()` to apply the subclass specific logic to + the preprocessed `input_tensor`. + + Args: + input_tensor: [batch_size, batch time_steps, embedding_depth] tensor. + output_depth: Depth of the output tensor. + hparams: Hyperparameters for the layer. + var_scope_suffix: Suffix appended to the end of the variable scope. + nonpadding: a [batch_size, batch time_steps] tensor with 1 where each + batch member has sequence information and 0 everywhere else. This is + used to mask out the irrelevant padded portions of the input. + mask_future: Boolean. If False, information moves across the + spatial/temporal dimension freely. If True, each timestep can only + process the information that has come before it. + **kwargs: Subclass-specific arguments. + + Returns: + logic_output: [batch_size, batch time_steps, output_depth] tensor output + of the logic. 
+ """ + + def apply_layer(self, + input_tensor, + residual_tensor, + output_depth, + activation, + hparams, + var_scope_suffix, + nonpadding, + mask_future, + layer_preprocess_fn=None, + postprocess_dropout=True, + **kwargs): + """Applies the layer to the input. + + Also applies pad masking, preprocessing, postprocessing, and nonlinearity. + + Args: + input_tensor: [batch_size, batch time_steps, embedding_depth] tensor. + residual_tensor: Tensor that gets added to the output residually if + `layer_postprocess` is True. + output_depth: Depth of the output tensor. + activation: Activation to be applied to the `layer_output`. If None, no + activation will be applied. + hparams: Hyperparameters for the layer. + var_scope_suffix: Suffix appended to the end of the variable scope. + nonpadding: a [batch_size, batch time_steps] tensor with 1 where each + batch member has sequence information and 0 everywhere else. This is + used to mask out the irrelevant padded portions of the input. + mask_future: Boolean. If False, information moves across the + spatial/temporal dimension freely. If True, each timestep can only + process the information that has come before it. + layer_preprocess_fn: Preprocess function applied to the input. + postprocess_dropout: Whether or not to apply dropout. + **kwargs: Arguments used by specific TranslationLayers. + + Returns: + layer_output: The output of the layer. + """ + input_depth = input_tensor.shape.as_list()[-1] + layer_output = input_tensor + if nonpadding is not None: + nonpadding_input_tiled = tf.tile( + tf.expand_dims(nonpadding, 2), [1, 1, input_depth]) + layer_output *= nonpadding_input_tiled + + if layer_preprocess_fn: + layer_output = layer_preprocess_fn(layer_output) + if nonpadding is not None: + layer_output *= nonpadding_input_tiled + + layer_output = self._apply_logic(layer_output, output_depth, hparams, + var_scope_suffix, nonpadding, mask_future, + **kwargs) + + if activation: + layer_output = activation(layer_output) + + if postprocess_dropout: + layer_output = tf.nn.dropout(layer_output, 1 - hparams.relu_dropout) + + if residual_tensor is not None: + layer_output += residual_tensor + + # Remove the output padding items. + if nonpadding is not None: + nonpadding_output_tiled = tf.tile( + tf.expand_dims(nonpadding, 2), [1, 1, output_depth]) + layer_output *= nonpadding_output_tiled + + return layer_output + + @abc.abstractmethod + def num_params(self, input_depth, output_depth, **kwargs): + """Returns num_params in the layer for the given input and output depths. + + NOTE: This does not include layer norm parameters that appear in + layer_preprocess or layer_postprocess! + + Args: + input_depth: The depth of the input. + output_depth: The depth of the output. + **kwargs: TranslationLayer specific arguments. + """ + + +class LayerRegisteredError(Exception): + """Layer name is already used in LayerRegistry.""" + + +class LayerRegistry(object): + """Registry of TranslationLayers. + + The registry is a mapping of string names to TranslationLayers. Layers can be + added to the registry via `registry_layer()` and can be accessed via `get()`. + """ + + def __init__(self): + self._layers = {} + + def register_layer(self, name, translation_layer): + """Register a TranslationLayer under the key `name`.""" + if name in self._layers and self._layers[name] != translation_layer: + raise LayerRegisteredError( + "Already registered %s in layer registry with a different object!" 
% + name) + + self._layers[name] = translation_layer + + def get(self, name): + return self._layers[name] + + def get_layer_names(self): + return sorted(six.iterkeys(self._layers)) + + +DECODER_LAYERS = LayerRegistry() +ENCODER_LAYERS = LayerRegistry() + + +class ConvLayerBase(TranslationLayer): + """Convolution TranslationLayer base class.""" + + def __init__(self, conv_type, conv_width, dilation_rate): + self._conv_type = conv_type + self._conv_width = conv_width + self._dilation_rate = dilation_rate + + def _conv_function(self, input_tensor, output_depth, padding): + """Conv function that will be applied to the input tensor.""" + raise NotImplementedError() + + def _apply_logic(self, input_tensor, output_depth, hparams, var_scope_suffix, + nonpadding, mask_future, **unused_kwargs): + """Applies conv logic to `input_tensor`.""" + with tf.variable_scope("%s_conv_%s" % (self._conv_type, var_scope_suffix)): + if mask_future: + # Pad shift the inputs so that temporal information does not leak. This + # must be used in tandem with VALID padding. + pad_amount = int(self._conv_width - 1) * self._dilation_rate + logic_output = tf.pad( + input_tensor, paddings=[[0, 0], [pad_amount, 0], [0, 0]]) + padding = "VALID" + else: + logic_output = input_tensor + padding = "SAME" + + logic_output = tf.expand_dims(logic_output, 2) + logic_output = self._conv_function(logic_output, output_depth, padding) + + logic_output = tf.squeeze(logic_output, 2) + return logic_output + + +class SeparableConvLayer(ConvLayerBase): + """Separable convolution TranslationLayer base class.""" + + def __init__(self, conv_width): + super(SeparableConvLayer, self).__init__("separable", conv_width, 1) + + def _conv_function(self, input_tensor, output_depth, padding): + conv_output = tf.squeeze(input_tensor, 2) + separable_conv_1d = tf.layers.SeparableConv1D( + output_depth, + self._conv_width, + padding=padding, + name="separable_conv_%sx1" % self._conv_width) + conv_output = separable_conv_1d.apply(conv_output) + return tf.expand_dims(conv_output, 2) + + def num_params(self, input_depth, output_depth, **unused_kwargs): + return (self._conv_width * input_depth + input_depth * output_depth + + output_depth) + + +class StandardConvLayer(ConvLayerBase): + """Standard convolutional TranslationLayer base class.""" + + def __init__(self, conv_width): + super(StandardConvLayer, self).__init__("standard", conv_width, 1) + + def _conv_function(self, input_tensor, output_depth, padding): + return tf.layers.conv2d( + input_tensor, + output_depth, [self._conv_width, 1], + padding=padding, + name="conv_%sx1" % self._conv_width) + + def num_params(self, input_depth, output_depth, **unused_kwargs): + return self._conv_width * input_depth * output_depth + output_depth + + +def calculate_depthwise_channel_multiplier(input_depth, output_depth): + """Calculates channel multiplier for depthwise convolution.""" + # Check to see if the output_depth >= input_depth + # and output_depth % input_depth == 0. If this is the case then we + # can satify the output_depth constraint, so the channel multiplier + # will be set accordingly. 
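+ # For example, input_depth=256 with output_depth=512 gives a multiplier of 2,
+ # while input_depth=256 with output_depth=384 cannot satisfy the constraint
+ # and the multiplier falls back to 1.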
+ if output_depth >= input_depth and output_depth % input_depth == 0: + return output_depth // input_depth + return 1 + + +class DepthwiseConvLayer(ConvLayerBase): + """Depthwise convolution TranslationLayer base class.""" + + def __init__(self, conv_width): + super(DepthwiseConvLayer, self).__init__("depthwise", conv_width, 1) + + def _conv_function(self, input_tensor, output_depth, padding): + input_depth = input_tensor.shape.as_list()[-1] + if not ((output_depth >= input_depth) and + (output_depth % input_depth == 0)): + raise ValueError( + "Depthwise layer output_depth (%s) must be greater or equal to and " + "a multiple of the depth of the " + "input tensor (%s)." % (output_depth, input_depth)) + channel_multiplier = calculate_depthwise_channel_multiplier( + input_depth, output_depth) + kernel = tf.get_variable( + "kernel", [self._conv_width, 1, input_depth, channel_multiplier]) + return tf.nn.depthwise_conv2d( + input_tensor, + kernel, [1, 1, 1, 1], + padding=padding, + name="depthwise_conv_%sx1" % str(self._conv_width)) + + def num_params(self, input_depth, output_depth, **unused_kwargs): + channel_multiplier = calculate_depthwise_channel_multiplier( + input_depth, output_depth) + return self._conv_width * input_depth * channel_multiplier + + +class LightweightConvLayer(ConvLayerBase): + """Lightweight convolution TranslationLayer base class.""" + + def __init__(self, conv_width, num_repeat): + super(LightweightConvLayer, self).__init__("depthwise", conv_width, 1) + self._num_repeat = num_repeat + + def _conv_function(self, input_tensor, output_depth, padding): + input_depth = input_tensor.shape.as_list()[-1] + if not ((output_depth >= input_depth) and + (output_depth % input_depth == 0)): + raise ValueError( + "Depthwise layer output_depth (%s) must be greater or equal to and " + "a multiple of the depth of the " + "input tensor (%s)." 
% (output_depth, input_depth)) + channel_multiplier = calculate_depthwise_channel_multiplier( + input_depth, output_depth) + + num_input_variables = input_depth // self._num_repeat + kernel_base = tf.get_variable( + "kernel_base", + [self._conv_width, 1, num_input_variables, channel_multiplier]) + kernel = tf.concat([kernel_base] * self._num_repeat, axis=2) + + num_nonrepeated_variables = input_depth % self._num_repeat + if num_nonrepeated_variables: + nonrepeated_variables = tf.get_variable( + "nonrepeated_kernel_variables", + [self._conv_width, 1, num_nonrepeated_variables, channel_multiplier]) + kernel = tf.concat([kernel, nonrepeated_variables], axis=2) + + kernel = tf.nn.softmax(kernel, axis=0) + return tf.nn.depthwise_conv2d( + input_tensor, + kernel, [1, 1, 1, 1], + padding=padding, + name="lightweight_conv_%sx1_r_%s" % (str(self._conv_width), + str(self._num_repeat))) + + def num_params(self, input_depth, output_depth, **unused_kwargs): + channel_multiplier = calculate_depthwise_channel_multiplier( + input_depth, output_depth) + return self._conv_width * (input_depth // self._num_repeat + ( + input_depth % self._num_repeat)) * channel_multiplier + + +class DilatedConvLayer(ConvLayerBase): + """Dilated convolution TranslationLayer base class.""" + + def __init__(self, conv_width): + super(DilatedConvLayer, self).__init__("dilated", conv_width, 2) + + def _conv_function(self, input_tensor, output_depth, padding): + input_depth = input_tensor.shape.as_list()[-1] + kernel = tf.get_variable("kernel", + [self._conv_width, 1, input_depth, output_depth]) + return tf.nn.atrous_conv2d( + input_tensor, + kernel, + self._dilation_rate, + padding=padding, + name="dilated_conv_%sx1" % str(self._conv_width)) + + def num_params(self, input_depth, output_depth, **unused_kwargs): + return self._conv_width * input_depth * output_depth + + +class AttentionLayer(TranslationLayer): + """Attention layer base class.""" + + def __init__(self, + hidden_dim_multiplier, + project_q, + project_k, + project_v, + num_heads=None): + self._hidden_dim_multiplier = hidden_dim_multiplier + self._project_q = project_q + self._project_k = project_k + self._project_v = project_v + self._num_heads = num_heads + + def _apply_logic(self, + input_tensor, + output_depth, + hparams, + var_scope_suffix, + nonpadding, + mask_future, + decoder_self_attention_bias=None, + attention_dropout_broadcast_dims=None, + **kwargs): + """Applies attention logic to `input_tensor`.""" + with tf.variable_scope("standard_attention_layer_" + var_scope_suffix): + hidden_depth = int( + input_tensor.shape.as_list()[-1] * self._hidden_dim_multiplier) + + attention_bias = decoder_self_attention_bias + + # TODO(davidso): This dropout rate differs from the other layers. This + # should be fixed so that they all use the same dropout + # rate. + num_heads = self._num_heads + if num_heads is None: + num_heads = hparams.num_heads + logic_output = common_attention.multihead_attention( + input_tensor, + None, + attention_bias, + hidden_depth, + hidden_depth, + output_depth, + num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + max_relative_position=hparams.max_relative_position, + dropout_broadcast_dims=attention_dropout_broadcast_dims) + return logic_output + + def num_params(self, input_depth, output_depth, **unused_kwargs): + # First account for the hidden to output projection params. 
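+ # (As a worked example: with input_depth=output_depth=512, a hidden dimension
+ # multiplier of 1, and q, k, and v all projected, the total below is
+ # 3 * 512 * 512 + 512 * 512 = 4 * 512 * 512 parameters, biases excluded.)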
+ hidden_depth = input_depth * self._hidden_dim_multiplier + output_params = hidden_depth * output_depth + + # Next account for all the hidden projections. + num_projections = sum([self._project_q, self._project_k, self._project_v]) + return input_depth * hidden_depth * num_projections + output_params + + +class AttendToEncoderLayerBase(TranslationLayer): + """Attend to encoder base, with configurable encoder attend points.""" + + def _determine_encoder_cell_index(self, cell_number, num_encoder_cells): + """Determine the encoder cell index to attend to.""" + raise NotImplementedError() + + def _apply_logic(self, + input_tensor, + output_depth, + hparams, + var_scope_suffix, + nonpadding, + mask_future, + encoder_decoder_attention_bias, + encoder_cell_outputs, + cell_number, + attention_dropout_broadcast_dims=None, + **unused_kwargs): + """Applies attention logic to `input_tensor`.""" + with tf.variable_scope("attend_to_encoder_layer_" + var_scope_suffix): + hidden_depth = int(input_tensor.shape.as_list()[-1]) + num_encoder_cells = len(encoder_cell_outputs) + encoder_cell_index = self._determine_encoder_cell_index( + cell_number, num_encoder_cells) + encoder_layer = encoder_cell_outputs[encoder_cell_index] + + # TODO(davidso): This dropout rate differs from the other layers. This + # should be fixed so that they all use the same dropout + # rate. + logic_output = common_attention.multihead_attention( + input_tensor, + encoder_layer, + encoder_decoder_attention_bias, + hidden_depth, + hidden_depth, + output_depth, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + max_relative_position=hparams.max_relative_position, + dropout_broadcast_dims=attention_dropout_broadcast_dims) + + return logic_output + + # Assumes uniform encoder output depths. + def num_params(self, input_depth, output_depth, **kwargs): + try: + encoder_depth = kwargs["encoder_depth"] + except KeyError: + raise ValueError("`encoder_depth` must be in kwargs passed to " + "AttendToEncoder.num_params().") + hidden_depth = input_depth + + # The number of params is comprised of the projection from the input tensor + # to its hidden tensor, the two encoder tensor projects to its hidden + # tensors, and the projection from the hidden concatenation to the output + # tensor. + return (input_depth * hidden_depth + 2 * encoder_depth * hidden_depth + + hidden_depth * output_depth) + + +class AttendToEncoderTopDownLayer(AttendToEncoderLayerBase): + """Attend to the encoder starting with the highest layer, then moving down. + + This allows the decoder to see higher level features first and then + eventually move on to incorporate lower level information. 
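+ For example, with delay=0 and increment_step=1, decoder cell 0 attends to the
+ final encoder cell, cell 1 to the cell below it, and so on, clamped at the
+ first encoder cell.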
+ """ + + def __init__(self, delay, increment_step): + self.delay = delay + self.increment_step = increment_step + + def _determine_encoder_cell_index(self, cell_number, num_encoder_cells): + """Attend to final encoder cell output first, then move down.""" + return max( + 0, num_encoder_cells - + max(0, (cell_number - self.delay) * self.increment_step) - 1) + + +class GatedLinearUnitLayer(TranslationLayer): + """Gated Linaer Unit Layer.""" + + def __init__(self): + pass + + def _apply_logic(self, input_tensor, output_depth, hparams, var_scope_suffix, + nonpadding, mask_future, **unused_kwargs): + values = tf.layers.dense(input_tensor, output_depth) + gates = tf.layers.dense( + input_tensor, output_depth, activation=tf.nn.sigmoid) + return values * gates + + def num_params(self, input_depth, output_depth, **unused_kwargs): + return input_depth * output_depth * 2 + output_depth * 2 + + +class IdentityLayer(TranslationLayer): + """Identity TranslationLayer.""" + + def _apply_logic(self, input_tensor, output_depth, hparams, var_scope_suffix, + nonpadding, mask_future, **unused_kwargs): + input_depth = input_tensor.shape.as_list()[-1] + if output_depth != input_depth: + raise ValueError( + "Identity layer output_depth (%s) must be equal to the depth of the " + "input tensor (%s)." % (output_depth, input_depth)) + return input_tensor + + def num_params(self, input_depth, output_depth, **unused_kwargs): + return 0 + + +def register_encoder_decoder_layer(name, translation_layer): + ENCODER_LAYERS.register_layer(name, translation_layer) + DECODER_LAYERS.register_layer(name, translation_layer) + + +# Register all strictly decoder layers. +DECODER_LAYERS.register_layer( + ATTEND_TO_ENCODER_REGISTRY_KEY, + AttendToEncoderTopDownLayer(delay=0, increment_step=0)) + +# Register all encoder and decoder layers. 
+register_encoder_decoder_layer(IDENTITY_REGISTRY_KEY, IdentityLayer()) + +register_encoder_decoder_layer(SEPARABLE_CONV_3X1_REGISTRY_KEY, + SeparableConvLayer(conv_width=3)) +register_encoder_decoder_layer(SEPARABLE_CONV_5X1_REGISTRY_KEY, + SeparableConvLayer(conv_width=5)) +register_encoder_decoder_layer(SEPARABLE_CONV_7X1_REGISTRY_KEY, + SeparableConvLayer(conv_width=7)) +register_encoder_decoder_layer(SEPARABLE_CONV_9X1_REGISTRY_KEY, + SeparableConvLayer(conv_width=9)) +register_encoder_decoder_layer(SEPARABLE_CONV_11X1_REGISTRY_KEY, + SeparableConvLayer(conv_width=11)) +register_encoder_decoder_layer(SEPARABLE_CONV_13X1_REGISTRY_KEY, + SeparableConvLayer(conv_width=13)) +register_encoder_decoder_layer(SEPARABLE_CONV_15X1_REGISTRY_KEY, + SeparableConvLayer(conv_width=15)) +register_encoder_decoder_layer(STANDARD_CONV_1X1_REGISTRY_KEY, + StandardConvLayer(conv_width=1)) +register_encoder_decoder_layer(STANDARD_CONV_3X1_REGISTRY_KEY, + StandardConvLayer(conv_width=3)) +register_encoder_decoder_layer(STANDARD_CONV_5X1_REGISTRY_KEY, + StandardConvLayer(conv_width=5)) +register_encoder_decoder_layer(DEPTHWISE_CONV_3X1_REGISTRY_KEY, + DepthwiseConvLayer(conv_width=3)) +register_encoder_decoder_layer(DEPTHWISE_CONV_5X1_REGISTRY_KEY, + DepthwiseConvLayer(conv_width=5)) +register_encoder_decoder_layer(DEPTHWISE_CONV_7X1_REGISTRY_KEY, + DepthwiseConvLayer(conv_width=7)) +register_encoder_decoder_layer(DILATED_CONV_3X1_REGISTRY_KEY, + DilatedConvLayer(conv_width=3)) +register_encoder_decoder_layer(DILATED_CONV_5X1_REGISTRY_KEY, + DilatedConvLayer(conv_width=5)) + + +register_encoder_decoder_layer(LIGHTWEIGHT_CONV_3X1_R_1_REGISTRY_KEY, + LightweightConvLayer(conv_width=3, num_repeat=1)) +register_encoder_decoder_layer(LIGHTWEIGHT_CONV_3X1_R_4_REGISTRY_KEY, + LightweightConvLayer(conv_width=3, num_repeat=4)) +register_encoder_decoder_layer( + LIGHTWEIGHT_CONV_3X1_R_16_REGISTRY_KEY, + LightweightConvLayer(conv_width=3, num_repeat=16)) +register_encoder_decoder_layer(LIGHTWEIGHT_CONV_5X1_R_1_REGISTRY_KEY, + LightweightConvLayer(conv_width=5, num_repeat=1)) +register_encoder_decoder_layer(LIGHTWEIGHT_CONV_5X1_R_4_REGISTRY_KEY, + LightweightConvLayer(conv_width=5, num_repeat=4)) +register_encoder_decoder_layer( + LIGHTWEIGHT_CONV_5X1_R_16_REGISTRY_KEY, + LightweightConvLayer(conv_width=5, num_repeat=16)) +register_encoder_decoder_layer(LIGHTWEIGHT_CONV_7X1_R_1_REGISTRY_KEY, + LightweightConvLayer(conv_width=7, num_repeat=1)) +register_encoder_decoder_layer(LIGHTWEIGHT_CONV_7X1_R_4_REGISTRY_KEY, + LightweightConvLayer(conv_width=7, num_repeat=4)) +register_encoder_decoder_layer( + LIGHTWEIGHT_CONV_7X1_R_16_REGISTRY_KEY, + LightweightConvLayer(conv_width=7, num_repeat=16)) +register_encoder_decoder_layer( + LIGHTWEIGHT_CONV_15X1_R_1_REGISTRY_KEY, + LightweightConvLayer(conv_width=15, num_repeat=1)) +register_encoder_decoder_layer( + LIGHTWEIGHT_CONV_15X1_R_4_REGISTRY_KEY, + LightweightConvLayer(conv_width=15, num_repeat=4)) +register_encoder_decoder_layer( + LIGHTWEIGHT_CONV_15X1_R_16_REGISTRY_KEY, + LightweightConvLayer(conv_width=15, num_repeat=16)) + +register_encoder_decoder_layer( + GATED_LINEAR_UNIT_REGISTRY_KEY, + GatedLinearUnitLayer()) + + +register_encoder_decoder_layer( + STANDARD_ATTENTION_REGISTRY_KEY, + AttentionLayer( + hidden_dim_multiplier=1, project_q=True, project_k=True, + project_v=True)) +register_encoder_decoder_layer( + ATTENTION_16_HEADS_REGISTRY_KEY, + AttentionLayer( + hidden_dim_multiplier=1, + project_q=True, + project_k=True, + project_v=True, + num_heads=16)) 
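A minimal usage sketch of the registry API defined above, assuming a hypothetical `standard_conv_7x1` key that this file does not register itself:

from tensor2tensor.models.neural_architecture_search import nas_layers as layers

# Hypothetical key for a 7x1 standard convolution; the widths registered above
# are 1, 3, and 5.
CUSTOM_STANDARD_CONV_7X1_KEY = "standard_conv_7x1"

# register_encoder_decoder_layer() adds the layer to both ENCODER_LAYERS and
# DECODER_LAYERS; re-registering a name with a different object raises
# LayerRegisteredError.
layers.register_encoder_decoder_layer(CUSTOM_STANDARD_CONV_7X1_KEY,
                                      layers.StandardConvLayer(conv_width=7))

assert CUSTOM_STANDARD_CONV_7X1_KEY in layers.ENCODER_LAYERS.get_layer_names()

# For a standard convolution, num_params() is
# conv_width * input_depth * output_depth + output_depth.
print(layers.ENCODER_LAYERS.get(CUSTOM_STANDARD_CONV_7X1_KEY).num_params(
    input_depth=256, output_depth=256))  # 7 * 256 * 256 + 256 = 459008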
+register_encoder_decoder_layer( + ATTENTION_32_HEADS_REGISTRY_KEY, + AttentionLayer( + hidden_dim_multiplier=1, + project_q=True, + project_k=True, + project_v=True, + num_heads=32)) +register_encoder_decoder_layer( + ATTENTION_4_HEADS_REGISTRY_KEY, + AttentionLayer( + hidden_dim_multiplier=1, + project_q=True, + project_k=True, + project_v=True, + num_heads=4)) diff --git a/tensor2tensor/models/neural_architecture_search/nas_layers_test.py b/tensor2tensor/models/neural_architecture_search/nas_layers_test.py new file mode 100644 index 000000000..11b13324a --- /dev/null +++ b/tensor2tensor/models/neural_architecture_search/nas_layers_test.py @@ -0,0 +1,320 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Layers tests.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import itertools +from absl.testing import parameterized +import numpy as np + +from tensor2tensor.layers import common_attention +from tensor2tensor.models import transformer +from tensor2tensor.models.neural_architecture_search import nas_layers as layers + +import tensorflow.compat.v1 as tf + +_BATCH_SIZE = 32 +_TOTAL_SEQUENCE_LENGTH = 20 +_INPUT_DEPTH = 256 +_NUM_CELLS = 6 +_CELL_NUMBER = 3 + +# The list of prefixes for layers that will not be tested for resizing outputs. 
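+# These layers cannot freely change their output depth, so the tests below pin
+# their output_depth to _INPUT_DEPTH.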
+_RESIZE_EXEMPT_LAYER_PREFIXES = [ + "depthwise_conv", "squeeze_and_excitation", "identity", "lightweight_conv", +] + + +def _apply_encoder_layer(translation_layer, output_depth, nonpadding_list): + """Applies an encoder layer with basic arguments.""" + + input_tensor = tf.random_uniform( + [_BATCH_SIZE, _TOTAL_SEQUENCE_LENGTH, _INPUT_DEPTH]) / 4.0 + nonpadding = tf.constant(nonpadding_list) + residual_tensor = tf.random_uniform( + [_BATCH_SIZE, _TOTAL_SEQUENCE_LENGTH, output_depth]) + hparams = transformer.transformer_base() + + return translation_layer.apply_layer( + input_tensor, + residual_tensor, + output_depth, + tf.nn.relu, + hparams, + "", + mask_future=False, + nonpadding=nonpadding, + layer_preprocess_fn=None, + postprocess_dropout=True) + + +def _apply_decoder_layer(translation_layer, input_tensor, output_depth, + encoder_depth): + """Applies an decoder layer with basic arguments.""" + + residual_tensor_values = np.random.rand( + *[_BATCH_SIZE, _TOTAL_SEQUENCE_LENGTH, output_depth]) - .5 + residual_tensor = tf.constant(residual_tensor_values, dtype=tf.float32) + encoder_output_values = np.random.rand( + *[_BATCH_SIZE, _TOTAL_SEQUENCE_LENGTH, encoder_depth]) - .5 + encoder_output = tf.constant(encoder_output_values, dtype=tf.float32) + encoder_cell_outputs = [encoder_output] * _NUM_CELLS + hparams = transformer.transformer_base() + hparams.attention_dropout = 0 + decoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle(_TOTAL_SEQUENCE_LENGTH)) + + output_tensor = translation_layer.apply_layer( + input_tensor, + residual_tensor, + output_depth, + None, + hparams, + "", + nonpadding=None, + mask_future=True, + layer_preprocess_fn=None, + postprocess_dropout=False, + decoder_self_attention_bias=decoder_self_attention_bias, + encoder_decoder_attention_bias=None, + encoder_cell_outputs=encoder_cell_outputs, + cell_number=_CELL_NUMBER) + + return output_tensor + + +def _zero_after_index_copy(feed_input, zero_after_index): + """Creates a copy of `feed_input` with zeros after `zero_after_index`.""" + transformed_feed_input = copy.deepcopy(feed_input) + for i in range(_BATCH_SIZE): + for j in range(zero_after_index + 1, _TOTAL_SEQUENCE_LENGTH): + transformed_feed_input[i][j] = [0.0] * len(transformed_feed_input[i][j]) + return transformed_feed_input + + +def _get_empirical_parameters(): + """Gets the number of parameters built into the current Tensorflow graph.""" + trainable_variables_list = tf.trainable_variables() + + empirical_num_params = 0 + for variable_tensor in trainable_variables_list: + empirical_num_params += np.prod(variable_tensor.shape) + + return empirical_num_params + + +def _create_nonpadding_list(): + """Creates the `nonpadding_list` for applying the encoder layers.""" + nonpadding_list = [] + for i in range(_BATCH_SIZE): + nonpadding_list.append([1.0] * min(i + 2, _TOTAL_SEQUENCE_LENGTH) + + [0.0] * max((_TOTAL_SEQUENCE_LENGTH - i - 2), 0)) + return nonpadding_list + + +class LayersTest(parameterized.TestCase, tf.test.TestCase): + """Tests params, residual capabilities, padding leaks, and output shape.""" + + # Test that the encoder registry contains all the expected layers. 
+ def test_encoder_registry(self): + encoder_layers = [ + "separable_conv_3x1", + "separable_conv_5x1", + "separable_conv_7x1", + "separable_conv_9x1", + "separable_conv_11x1", + "separable_conv_13x1", + "separable_conv_15x1", + "standard_conv_1x1", + "standard_conv_3x1", + "standard_conv_5x1", + "depthwise_conv_3x1", + "depthwise_conv_5x1", + "depthwise_conv_7x1", + "dilated_conv_3x1", + "dilated_conv_5x1", + "standard_attention", + "identity", + "attention_4_heads", + "attention_16_heads", + "attention_32_heads", + "gated_linear_unit", + "lightweight_conv_3x1_r_1", + "lightweight_conv_3x1_r_4", + "lightweight_conv_3x1_r_16", + "lightweight_conv_5x1_r_1", + "lightweight_conv_5x1_r_4", + "lightweight_conv_5x1_r_16", + "lightweight_conv_7x1_r_1", + "lightweight_conv_7x1_r_4", + "lightweight_conv_7x1_r_16", + "lightweight_conv_15x1_r_1", + "lightweight_conv_15x1_r_4", + "lightweight_conv_15x1_r_16", + ] + self.assertSameElements(encoder_layers, + layers.ENCODER_LAYERS.get_layer_names()) + + # Test that the decoder registry contains all the expected layers. + def test_decoder_registry(self): + decoder_layers = sorted([ + "separable_conv_3x1", + "separable_conv_5x1", + "separable_conv_7x1", + "separable_conv_9x1", + "separable_conv_11x1", + "separable_conv_13x1", + "separable_conv_15x1", + "standard_conv_1x1", + "standard_conv_3x1", + "standard_conv_5x1", + "depthwise_conv_3x1", + "depthwise_conv_5x1", + "depthwise_conv_7x1", + "dilated_conv_3x1", + "dilated_conv_5x1", + "standard_attention", + "attend_to_encoder", + "identity", + "attention_4_heads", + "attention_16_heads", + "attention_32_heads", + "gated_linear_unit", + "lightweight_conv_3x1_r_1", + "lightweight_conv_3x1_r_4", + "lightweight_conv_3x1_r_16", + "lightweight_conv_5x1_r_1", + "lightweight_conv_5x1_r_4", + "lightweight_conv_5x1_r_16", + "lightweight_conv_7x1_r_1", + "lightweight_conv_7x1_r_4", + "lightweight_conv_7x1_r_16", + "lightweight_conv_15x1_r_1", + "lightweight_conv_15x1_r_4", + "lightweight_conv_15x1_r_16", + ]) + self.assertSameElements(decoder_layers, + layers.DECODER_LAYERS.get_layer_names()) + + # Test encoder layer. This includes checking that output dims are as + # expected, checking that num_params() agrees with the empirical number of + # variables produced, and that information does not leak from 0 padded + # areas of the input. + @parameterized.parameters( + itertools.product(layers.ENCODER_LAYERS.get_layer_names(), + (256, 128, 512))) + def test_encoder_layer(self, translation_layer_name, output_depth): + with self.test_session(graph=tf.Graph()) as sess: + nonpadding_list = _create_nonpadding_list() + for prefix in _RESIZE_EXEMPT_LAYER_PREFIXES: + if prefix in translation_layer_name: + output_depth = _INPUT_DEPTH + translation_layer = layers.ENCODER_LAYERS.get(translation_layer_name) + output_tensor = _apply_encoder_layer(translation_layer, output_depth, + nonpadding_list) + + # Check that the output shape is as expected. + self.assertEqual(output_tensor.shape.as_list(), + [_BATCH_SIZE, _TOTAL_SEQUENCE_LENGTH, output_depth]) + + # Check that the number of parameters is as expected. + empirical_num_params = _get_empirical_parameters() + reported_num_params = translation_layer.num_params( + _INPUT_DEPTH, output_depth) + self.assertEqual(empirical_num_params, reported_num_params) + + # Make sure padding is applied properly (no leaks). 
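+ # Every output row at a padded position (nonpadding == 0) must be exactly
+ # zero.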
+ sess.run(tf.global_variables_initializer()) + output = sess.run(output_tensor) + + for i, j in itertools.product( + range(_BATCH_SIZE), range(_TOTAL_SEQUENCE_LENGTH)): + if nonpadding_list[i][j] == 0: + self.assertAllEqual(output[i][j], np.array([0] * output_depth), + "Output row %s, column %s not zeroed out." % (i, j)) + + # Test decoder layer. This includes checking that output dims are as + # expected, checking that num_params() agrees with the empirical number of + # variables produced, and that temporal information does not leak. + @parameterized.parameters( + itertools.product(layers.DECODER_LAYERS.get_layer_names(), + (256, 128, 512))) + def test_decoder_layer(self, translation_layer_name, output_depth): + with self.test_session(graph=tf.Graph()) as sess: + + # Check that the output shape is as expected. + input_tensor = tf.placeholder( + tf.float32, [_BATCH_SIZE, _TOTAL_SEQUENCE_LENGTH, _INPUT_DEPTH]) + encoder_depth = int(_INPUT_DEPTH / 2) + for prefix in _RESIZE_EXEMPT_LAYER_PREFIXES: + if prefix in translation_layer_name: + output_depth = _INPUT_DEPTH + translation_layer = layers.DECODER_LAYERS.get(translation_layer_name) + output_tensor = _apply_decoder_layer(translation_layer, input_tensor, + output_depth, encoder_depth) + self.assertEqual(output_tensor.shape.as_list(), + [_BATCH_SIZE, _TOTAL_SEQUENCE_LENGTH, output_depth]) + + # Check that the number of parameters is as expected. + empirical_num_params = _get_empirical_parameters() + reported_num_params = translation_layer.num_params( + _INPUT_DEPTH, + output_depth, + encoder_depth=encoder_depth) + self.assertEqual(empirical_num_params, reported_num_params) + + # Check that there is no temporal information leak. Specifically, check + # that values before `test_index` remain unchanged, while the values + # after it have changed. Sums are used because two values could + # potentially be the same between the zero and non-zero portions, even + # if the masking is working correctly. Note: This assumes that the + # output at t is dependent on the input at t. + feed_input = np.random.random( + [_BATCH_SIZE, _TOTAL_SEQUENCE_LENGTH, _INPUT_DEPTH]) / 10.0 + test_index = int(_TOTAL_SEQUENCE_LENGTH / 2) + transformed_feed_input = _zero_after_index_copy(feed_input, test_index) + + # Produce the outputs for both types of input. + feed_dict = { + v: np.random.rand(*v.shape.as_list()) - .5 + for v in tf.all_variables() + } + feed_dict[input_tensor] = feed_input + control_output = sess.run(output_tensor, feed_dict) + + feed_dict[input_tensor] = transformed_feed_input + variable_output = sess.run(output_tensor, feed_dict) + + self.assertAllClose( + control_output[:, :test_index + 1], + variable_output[:, :test_index + 1], + rtol=1) + + with self.assertRaises( + AssertionError, + msg="Time-masked portion of output too close to control output."): + self.assertAllClose( + control_output[:, test_index + 1:], + variable_output[:, test_index + 1:], + rtol=1) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/neural_architecture_search/nas_model.py b/tensor2tensor/models/neural_architecture_search/nas_model.py new file mode 100644 index 000000000..7e405c870 --- /dev/null +++ b/tensor2tensor/models/neural_architecture_search/nas_model.py @@ -0,0 +1,1133 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""NasSeq2Seq class which can be configured to produce a variety of models. + +This was the class used in the Evolved Transformer paper +(https://arxiv.org/abs/1901.11117) to create configurable models. It can be used +to train models in the search space as was done in the paper. + +To use NasSeq2Seq: + - set model=nas_seq2_seq. + - set hparams_set=nas_seq2seq_base. + - use hparams to specify the configuration you want to run. See + nas_seq2seq_base() for an example. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import abc +import six +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.models import transformer +from tensor2tensor.models.neural_architecture_search import nas_layers as layers +from tensor2tensor.utils import contrib +from tensor2tensor.utils import metrics +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +# Keys for the activation map. +LEAKY_RELU_ACTIVATION_KEY = "leaky_relu" +NONE_ACTIVATION_KEY = "none" +RELU_ACTIVATION_KEY = "relu" +SIGMOID_ACTIVATION_KEY = "sigmoid" +SWISH_ACTIVATION_KEY = "swish" +SOFTMAX_ACTIVATION_KEY = "softmax" + +# Mapping from string names to activation function. +ACTIVATION_MAP = { + SWISH_ACTIVATION_KEY: tf.nn.swish, + LEAKY_RELU_ACTIVATION_KEY: tf.nn.leaky_relu, + RELU_ACTIVATION_KEY: tf.nn.relu, + NONE_ACTIVATION_KEY: None, + SIGMOID_ACTIVATION_KEY: tf.nn.sigmoid, + SOFTMAX_ACTIVATION_KEY: tf.nn.softmax +} + +# Norm strings. +LAYER_NORM_KEY = "layer_norm" +NO_NORM_KEY = "none" + +# Combiner function strings. +ADD_COMBINER_FUNC_KEY = "add" +MULTIPLY_COMBINER_FUNC_KEY = "multiply" +CONCAT_COMBINER_FUNC_KEY = "concat" + +# Layers that force the output_dim to be equal to the input_dim if +# enforce_fixed_output_sizes is True. +LAYERS_TO_FIX_OUTPUT_SIZE = [ + layers.IDENTITY_REGISTRY_KEY, +] + +# Depthwise layers that the output dimension will need to be changed for +# if channel multiplier cannot be changed to match output dimension. +DEPTHWISE_LAYERS = [ + layers.DEPTHWISE_CONV_3X1_REGISTRY_KEY, + layers.DEPTHWISE_CONV_5X1_REGISTRY_KEY, + layers.DEPTHWISE_CONV_7X1_REGISTRY_KEY +] + +DEAD_BRANCH_KEY = "dead_branch" + + +def should_alter_output_dim(layer_name, enforce_fixed_output_sizes, input_depth, + output_depth): + """Check if the output_depth for the specified layer should be changed.""" + # Check to see if output_depth should be changed if we are using + # a depthwise operation and the channel multiplier is returned as 1, + # which means that the depthwise multiplier could not be set to match + # output_depth. + change_dim_for_depthwise = ((layer_name in DEPTHWISE_LAYERS) and + (layers.calculate_depthwise_channel_multiplier( + input_depth, output_depth) == 1)) + # See if layer is in LAYERS_TO_FIX_OUTPUT_SIZE and if it is then we + # know that the output_dim must be input_dim. 
+ change_dim_for_other = layer_name in LAYERS_TO_FIX_OUTPUT_SIZE + # Must be sure enforce_fixed_output_sizes is true. + return ((change_dim_for_depthwise or change_dim_for_other) and + enforce_fixed_output_sizes) + + +def get_activation_names(): + return ACTIVATION_MAP.keys() + + +def _pad_shallow_tensors(tensors, pad_value): + """Pads the shorter tensors to be as long as the longest.""" + max_dim = 0 + for tensor in tensors: + dim = tensor.shape.as_list()[-1] + if dim > max_dim: + max_dim = dim + + output_tensors = [] + for tensor in tensors: + dim = tensor.shape.as_list()[-1] + if tensor.shape.as_list()[-1] < max_dim: + output_tensors.append( + tf.pad( + tensor, [[0, 0], [0, 0], [0, max_dim - dim]], + constant_values=pad_value)) + else: + output_tensors.append(tensor) + print(output_tensors) + + return output_tensors + + +class CombinerFunction(object): + """Interface for combiner functions.""" + + __metaclass__ = abc.ABCMeta + + @abc.abstractmethod + def combine_tensors(self, tensors): + """Combines `tensors`. + + Args: + tensors: List of tensors to combine. + + Returns: + Combined tensor. + """ + + @abc.abstractmethod + def combined_output_dim(self, output_dims): + """Determines the output dimension of the combined tensor. + + Args: + output_dims: List of output dimensions of combined tensors. + + Returns: + Output dimension of the combined tensor. + """ + + +class AddCombiner(CombinerFunction): + """Addition CombinerFunction.""" + + def combine_tensors(self, tensors): + assert tensors + + if len(tensors) == 1: + return tensors[0] + + tensors_to_combine = _pad_shallow_tensors(tensors, 0) + + output_tensor = tensors_to_combine[0] + tensors_to_combine[1] + for tensor in tensors_to_combine[2:]: + output_tensor += tensor + + return output_tensor + + def combined_output_dim(self, output_dims): + return max(output_dims) + + +class MultiplyCombiner(CombinerFunction): + """Multiply CombinerFunction.""" + + def combine_tensors(self, tensors): + assert tensors + + if len(tensors) == 1: + return tensors[0] + + tensors_to_combine = _pad_shallow_tensors(tensors, 1) + + output_tensor = tensors_to_combine[0] * tensors_to_combine[1] + for tensor in tensors_to_combine[2:]: + output_tensor *= tensor + + return output_tensor + + def combined_output_dim(self, output_dims): + return max(output_dims) + + +class ConcatCombiner(CombinerFunction): + """Concat CombinerFunction.""" + + def combine_tensors(self, tensors): + assert tensors + + if len(tensors) == 1: + return tensors[0] + + return tf.concat(tensors, 2) + + def combined_output_dim(self, output_dims): + concat_tensor_dim = 0 + for output_dim in output_dims: + concat_tensor_dim += output_dim + + return concat_tensor_dim + + +# Dict of combiner functions where each key is the function key string and each +# value is a function that takes a list of tensors and outputs the tensors' +# combination. +COMBINER_FUNCTIONS = { + ADD_COMBINER_FUNC_KEY: AddCombiner, + MULTIPLY_COMBINER_FUNC_KEY: MultiplyCombiner, + CONCAT_COMBINER_FUNC_KEY: ConcatCombiner, +} + + +@registry.register_model +class NasSeq2Seq(transformer.Transformer): + """Configurable seq2seq model used for Neural Architecture Search. + + Models are defined by 26 hparam fields. They are: + - _num_cells: The number of cells in the . + - __layers: List of layers used the + + branch. For available layers, see + the nas_layers.py file. + - _: List of inputs to the + + layers. Each index i specifies the + i_th layer's output with 0 + representing the cell input + tensor. 
+ - __output_dims: List of absolute output + dimensions for each layer. + - __activation: List of activations applied + after each layer. + ACTIVATION_MAP holds the valid + activations. + - __norms: List of norms applied before each + layer. Must be either "layer_norm" + or "none". + - _combiner_functions: List of functions used to combine + each left/right branch pair. + Options are listed in + COMBINER_FUNCTIONS. + - _final_combiner_function: Function applied to combine + all the block outputs that are + not used as inputs to other + blocks. Options are listed in + COMBINER_FUNCTIONS. + + For an example of how to set these hparams, please see nas_seq2seq_base(). + """ + __metaclass__ = abc.ABCMeta + + def encode(self, inputs, target_space, hparams, features=None, losses=None): + """Encode inputs using _encoder(). + + This performs the same way as transformer.Transformer.encode with the + encoder portion replaced with _encoder(). + + Args: + inputs: Input [batch_size, input_length, input_height, hidden_dim] tensor + which will be flattened along the two spatial dimensions. + target_space: scalar, target space ID. + hparams: Hyperparmeters for model. + features: Optionally pass the entire features dictionary as well. This is + needed now for "packed" datasets. + losses: Unused list of losses. + + Returns: + Tuple of: + encoder_output: Encoder representation. + [batch_size, input_length, hidden_dim] + encoder_decoder_attention_bias: Bias and mask weights for + encodre-decoder attention. [batch_size, input_length] + + Raises: + ValueError: If encoder type not found. + """ + inputs = common_layers.flatten4d3d(inputs) + + encoder_input, self_attention_bias, encoder_decoder_attention_bias = ( + transformer.transformer_prepare_encoder( + inputs, target_space, hparams, features=features)) + + encoder_input = tf.nn.dropout(encoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + + encoder_output = self._encoder( + encoder_input, + self_attention_bias, + hparams, + nonpadding=transformer.features_to_nonpadding(features, "inputs"), + save_weights_to=self.attention_weights) + + return encoder_output, encoder_decoder_attention_bias + + def decode(self, + decoder_input, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + cache=None, + nonpadding=None, + losses=None): + """Decode inputs using _decoder(). + + This performs the same way as transformer.Transformer.decode with the + decoder portion replaced with _decoder(). + + Args: + decoder_input: Inputs to bottom of the model. [batch_size, decoder_length, + hidden_dim] + encoder_output: Encoder representation. [batch_size, input_length, + hidden_dim] + encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder + attention. [batch_size, input_length] + decoder_self_attention_bias: Bias and mask weights for decoder + self-attention. [batch_size, decoder_length] + hparams: Hyperparmeters for model. + cache: Dict, containing tensors which are the results of previous + attentions, used for fast decoding. + nonpadding: Optional Tensor with shape [batch_size, decoder_length] + losses: Unused losses. + + Returns: + Final decoder representation. 
[batch_size, decoder_length, hidden_dim] + """ + decoder_input = tf.nn.dropout(decoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + + decoder_output = self._decoder( + decoder_input, + encoder_output, + decoder_self_attention_bias, + encoder_decoder_attention_bias, + hparams, + cache=cache, + nonpadding=nonpadding, + save_weights_to=self.attention_weights) + + if (common_layers.is_xla_compiled() and + hparams.mode == tf_estimator.ModeKeys.TRAIN): + # TPU does not react kindly to extra dimensions. + return decoder_output + + # Expand since t2t expects 4d tensors. + return tf.expand_dims(decoder_output, axis=2) + + def _encoder(self, + encoder_input, + encoder_self_attention_bias, + hparams, + nonpadding=None, + save_weights_to=None): + encoder_output, encoder_cell_outputs = nas_encoder( + encoder_input, encoder_self_attention_bias, hparams, nonpadding) + self._encoder_cell_outputs = encoder_cell_outputs + return encoder_output + + def _decoder(self, + decoder_input, + encoder_output, + decoder_self_attention_bias, + encoder_decoder_attention_bias, + hparams, + cache=None, + nonpadding=None, + save_weights_to=None): + assert self._encoder_cell_outputs + return nas_decoder(decoder_input, self._encoder_cell_outputs, + decoder_self_attention_bias, + encoder_decoder_attention_bias, hparams) + + def estimator_spec_eval(self, features, logits, labels, loss, losses_dict): + """Construct EstimatorSpec for EVAL mode.""" + if self.hparams.use_tpu: + return self._tpu_estimator_spec_eval(features, logits, labels, loss, + losses_dict) + return self._gpu_estimator_spec_eval(features, logits, labels, loss, + losses_dict) + + # This function is overridden because py_func is not supported on distributed + # training, which is necessary for NAS. This function works + # the exact same way as the original Transformer.estimator_spec_eval(), + # except only neg log perplexity is accepted as a metric. + def _gpu_estimator_spec_eval(self, features, logits, labels, loss, + losses_dict): + """Construct EstimatorSpec for GPU EVAL mode.""" + hparams = self.hparams + + if not hasattr(hparams, "problem"): + raise NotImplementedError( + "hparams is missing attribute `problem`. NasSeq2Seq must " + "be used with a problem.") + + # TPU is not supported. + eval_metrics_fns = metrics.create_evaluation_metrics([hparams.problem], + hparams) + eval_metrics = {} + for metric_name, metric_fn in six.iteritems(eval_metrics_fns): + if "rouge" not in metric_name and "bleu" not in metric_name: + eval_metrics[metric_name] = metric_fn(logits, features, + features["targets"]) + + return tf_estimator.EstimatorSpec( + tf_estimator.ModeKeys.EVAL, + predictions={"predictions": logits}, + eval_metric_ops=eval_metrics, + loss=loss) + + def _tpu_estimator_spec_eval(self, features, logits, labels, loss, + losses_dict): + """Construct EstimatorSpec for TPU EVAL mode.""" + del losses_dict + hparams = self.hparams + + if not hasattr(hparams, "problem"): + raise NotImplementedError( + "hparams is missing attribute `problem`. NasSeq2Seq must " + "be used with a problem.") + + problem = hparams.problem + t2t_model.remove_summaries() + eval_metrics_fn = t2t_model.create_tpu_eval_metrics_fn(problem, hparams) + if isinstance(logits, dict): + # For TPU, logits dict will be passed as keyword arguments to + # eval_metrics_fn. Here we add the labels to those arguments. 
+ logits.update({"labels": labels}) + return contrib.tpu().TPUEstimatorSpec( + tf_estimator.ModeKeys.EVAL, + eval_metrics=(eval_metrics_fn, logits), + loss=loss) + else: + return contrib.tpu().TPUEstimatorSpec( + tf_estimator.ModeKeys.EVAL, + eval_metrics=(eval_metrics_fn, [logits, labels]), + loss=loss) + + def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha, + use_tpu): + """Forced slow beam decode. + + Args: + features: an map of string to `Tensor`. + decode_length: an integer. How many additional timesteps to decode. + beam_size: number of beams. + top_beams: an integer. How many of the beams to return. + alpha: Float that controls the length penalty. larger the alpha, stronger + the preference for longer translations. + use_tpu: Whether or not TPU is being used. + + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length]. + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1). + } + """ + return self._beam_decode_slow(features, decode_length, beam_size, top_beams, + alpha, use_tpu) + + +def _apply_layer_norm(input_tensor, nonpadding, hparams): + """Applies Tensor2Tensor layer_norm to |input_tensor|.""" + input_depth = input_tensor.shape.as_list()[-1] + if nonpadding is not None: + nonpadding_input_tiled = tf.tile( + tf.expand_dims(nonpadding, 2), [1, 1, input_depth]) + output_tensor = input_tensor * nonpadding_input_tiled + + output_tensor = common_layers.layer_preprocess(input_tensor, hparams) + if nonpadding is not None: + output_tensor *= nonpadding_input_tiled + + return output_tensor + + +def _apply_nas_branch(norm, layer_norm_dict, hidden_states, nonpadding, hparams, + input_index, layer_name, activation_name, layer_registry, + output_dim, branch_scope_name, mask_future, + dropout_broadcast_dims, encoder_decoder_attention_bias, + encoder_cell_outputs, decoder_self_attention_bias, + cell_number): + """Applies a single NAS branch.""" + with tf.variable_scope(branch_scope_name): + # Apply layer norm to an individual layer at most one time. + if norm == LAYER_NORM_KEY: + try: + output_tensor = layer_norm_dict[input_index] + except KeyError: + output_tensor = _apply_layer_norm(hidden_states[input_index], + nonpadding, hparams) + layer_norm_dict[input_index] = output_tensor + elif norm == NO_NORM_KEY: + output_tensor = hidden_states[input_index] + else: + raise ValueError("norm must be either '%s' or '%s'. 
Got %s" % + (LAYER_NORM_KEY, NO_NORM_KEY, norm)) + + layer_class = layer_registry.get(layer_name) + activation = ACTIVATION_MAP[activation_name] + + postprocess_dropout = layer_name != layers.IDENTITY_REGISTRY_KEY + output_tensor = layer_class.apply_layer( + output_tensor, + None, + int(output_dim), + activation, + hparams, + branch_scope_name, + mask_future=mask_future, + layer_preprocess_fn=None, + postprocess_dropout=postprocess_dropout, + nonpadding=nonpadding, + attention_dropout_broadcast_dims=dropout_broadcast_dims, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + encoder_cell_outputs=encoder_cell_outputs, + cell_number=cell_number, + decoder_self_attention_bias=decoder_self_attention_bias) + + return output_tensor + + +def apply_nas_layers(input_tensor, + left_inputs, + left_layers, + left_activations, + left_output_dims, + left_norms, + right_inputs, + right_layers, + right_activations, + right_output_dims, + right_norms, + combiner_functions, + final_combiner_function, + num_cells, + nonpadding, + layer_registry, + mask_future, + hparams, + var_scope, + encoder_decoder_attention_bias=None, + encoder_cell_outputs=None, + decoder_self_attention_bias=None, + final_layer_norm=True, + enforce_fixed_output_sizes=True): + """Applies layers with NasNet search space style branching. + + Args: + input_tensor: Input [batch_size, input_length, hidden_dim] sequence tensor. + left_inputs: Int list of left branch hidden layer input indexes. + left_layers: String list of left branch layers. + left_activations: String list of left branch activations. + left_output_dims: String list of left branch output dimensions. + left_norms: String list of left branch norms. + right_inputs: Int list of right branch hidden layer input indexes. + right_layers: String list of right branch layers. + right_activations: String list of right branch activations. + right_output_dims: String list of right branch output dimensions. + right_norms: String list of right branch norms. + combiner_functions: String list of branch combining functions. + final_combiner_function: String. The final combiner function that combines + all the unused hidden layers in a cell. + num_cells: The number of cells. This is the number of times the given + layers will be repeated. + nonpadding: Tensor with 1s at all nonpadding time step positions and 0s + everywhere else. + layer_registry: The LayerRegistry that holds all valid layers. + mask_future: Whether or not to mask future sequence values. + hparams: Hyperparameters for the model. + var_scope: The variable scope name. + encoder_decoder_attention_bias: The attention bias for decoder attending to + `encoder_output`. + encoder_cell_outputs: List of tensors. The encoder cell outputs, listed in + order. + decoder_self_attention_bias: The self attention bias for decoders. This + needs to be set for decoders. + final_layer_norm: Whether or not to apply a final layer_norm to the output + of the model. + enforce_fixed_output_sizes: Whether or not to automatically resize output + dimensions to match the input dimension if `should_alter_output_dim()` + returns True. + + Raises: + ValueError: When branching inputs are not of the same length. + ValueError: If item in left_norms is not LAYER_NORM_KEY or NO_NORM_KEY. + ValueError: If item in right_norms is not LAYER_NORM_KEY or NO_NORM_KEY. + + Returns: + Output of applied layers and list of each cell's outputs in order. 
+ """ + + if not (len(left_inputs) == len(left_layers) == len(left_activations) == + len(left_output_dims) == len(left_norms) == len(right_inputs) == + len(right_layers) == len(right_activations) == len(right_output_dims) + == len(right_norms) == len(combiner_functions)): + raise ValueError("All branching inputs must be of the same length.") + + cell_output = None + modified_left_inputs = [ + left_inputs[i] + for i in range(len(left_inputs)) + if left_layers[i] != DEAD_BRANCH_KEY + ] + modified_right_inputs = [ + right_inputs[i] + for i in range(len(right_inputs)) + if right_layers[i] != DEAD_BRANCH_KEY + ] + unused_cell_hidden_states = [ + i for i in range(len(left_inputs) + 1) + if i not in modified_left_inputs and i not in modified_right_inputs + ] + assert unused_cell_hidden_states + + cell_outputs = [] + + with tf.variable_scope(var_scope): + dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "attention_dropout_broadcast_dims", ""))) + + for cell_num in range(num_cells): + # h_0 is the input tensor. + # Keep a dict for layer norm states. + if cell_output is not None: + cell_hidden_states = [cell_output] + else: + cell_hidden_states = [input_tensor] + layer_norm_dict = {} + + with tf.variable_scope("cell_%d" % cell_num): + + for i, (left_input, left_layer_name, left_activation_name, + left_output_dim, left_norm, right_input, right_layer_name, + right_activation_name, right_output_dim, right_norm, + combiner) in enumerate( + zip(left_inputs, left_layers, left_activations, + left_output_dims, left_norms, right_inputs, + right_layers, right_activations, right_output_dims, + right_norms, combiner_functions)): + left_input = int(left_input) + right_input = int(right_input) + + with tf.variable_scope("layer_%d" % i): + + assert not (left_layer_name == DEAD_BRANCH_KEY and + right_layer_name == DEAD_BRANCH_KEY) + + if left_layer_name != DEAD_BRANCH_KEY: + + left_raw_input_tensor = cell_hidden_states[left_input] + left_input_dim = left_raw_input_tensor.shape.as_list()[-1] + if should_alter_output_dim(left_layer_name, + enforce_fixed_output_sizes, + left_input_dim, left_output_dim): + left_output_dim = left_input_dim + + # First process the left branch. + left_tensor = _apply_nas_branch( + norm=left_norm, + layer_norm_dict=layer_norm_dict, + hidden_states=cell_hidden_states, + nonpadding=nonpadding, + hparams=hparams, + input_index=left_input, + layer_name=left_layer_name, + activation_name=left_activation_name, + layer_registry=layer_registry, + output_dim=left_output_dim, + branch_scope_name="left_%s" % str(i), + mask_future=mask_future, + dropout_broadcast_dims=dropout_broadcast_dims, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + encoder_cell_outputs=encoder_cell_outputs, + decoder_self_attention_bias=decoder_self_attention_bias, + cell_number=cell_num) + + if right_layer_name != DEAD_BRANCH_KEY: + right_raw_input_tensor = cell_hidden_states[right_input] + right_input_dim = right_raw_input_tensor.shape.as_list()[-1] + if should_alter_output_dim(right_layer_name, + enforce_fixed_output_sizes, + right_input_dim, right_output_dim): + right_output_dim = right_input_dim + # Next process the right branch. 
+ right_tensor = _apply_nas_branch( + norm=right_norm, + layer_norm_dict=layer_norm_dict, + hidden_states=cell_hidden_states, + nonpadding=nonpadding, + hparams=hparams, + input_index=right_input, + layer_name=right_layer_name, + activation_name=right_activation_name, + layer_registry=layer_registry, + output_dim=right_output_dim, + branch_scope_name="right_%s" % str(i), + mask_future=mask_future, + dropout_broadcast_dims=dropout_broadcast_dims, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + encoder_cell_outputs=encoder_cell_outputs, + decoder_self_attention_bias=decoder_self_attention_bias, + cell_number=cell_num) + + # Combine the branches. + if left_layer_name == DEAD_BRANCH_KEY: + hidden_tensor = right_tensor + elif right_layer_name == DEAD_BRANCH_KEY: + hidden_tensor = left_tensor + else: + hidden_tensor = COMBINER_FUNCTIONS[combiner]().combine_tensors( + [left_tensor, right_tensor]) + cell_hidden_states.append(hidden_tensor) + + states_to_combine = [ + cell_hidden_states[j] for j in unused_cell_hidden_states + ] + cell_output = COMBINER_FUNCTIONS[final_combiner_function]( + ).combine_tensors(states_to_combine) + cell_outputs.append(cell_output) + + if final_layer_norm: + final_output = common_layers.layer_preprocess(cell_output, hparams) + cell_outputs = [ + common_layers.layer_preprocess(cell_output, hparams) + for cell_output in cell_outputs + ] + return final_output, cell_outputs + else: + return cell_output, cell_outputs + + +def nas_encoder(encoder_input, + encoder_self_attention_bias, + hparams, + nonpadding=None, + final_layer_norm=True): + """Encoder for configurable NAS model. + + Args: + encoder_input: Input tensor. + encoder_self_attention_bias: Attention bias tensor with 0s for all valid + postions and large negative numbers for the padding positions. + hparams: transformer.Transformer hparams that must also contain: + + encoder__inputs: List of ints specifying the hidden layer + input indexes for the branches. + + encoder__layers: String list of layers. Each string must be + the name of a TranslationLayer registered in layers.py's ENCODER_LAYERS. + + encoder__activations: String list of activations. Each + string in this list must have a corresponding activation in + ACTIVATION_MAP. + + encoder__output_dims: Int list of output dimensions for + branch layers. + + encoder__norms: String list of norms to apply to the + layer branches. Each item must be either LAYER_NORM_KEY or + NO_NORM_KEY. + + encoder_num_cells: The number of cells in the encoder. This determines + how many times the given layers will be repeated. + + encoder_combiner_functions: String list of functions used to combine + left and right branches. Must be a COMBINER_FUNCTION key. + nonpadding: Tensor with 1s at all nonpadding positions and 0s everywhere + else. If None (default), then nonpadding will be determined from + encoder_self_attention_bias. + final_layer_norm: Whether or not to apply a final layer_norm to the output + of the encoder. + + Returns: + Encoder output and list of each encoder cell's output in order. 
+ """ + if nonpadding is None: + padding = common_attention.attention_bias_to_padding( + encoder_self_attention_bias) + nonpadding = 1.0 - padding + return apply_nas_layers( + input_tensor=encoder_input, + left_inputs=hparams.encoder_left_inputs, + left_layers=hparams.encoder_left_layers, + left_activations=hparams.encoder_left_activations, + left_output_dims=hparams.encoder_left_output_dims, + left_norms=hparams.encoder_left_norms, + right_inputs=hparams.encoder_right_inputs, + right_layers=hparams.encoder_right_layers, + right_activations=hparams.encoder_right_activations, + right_output_dims=hparams.encoder_right_output_dims, + right_norms=hparams.encoder_right_norms, + num_cells=hparams.encoder_num_cells, + combiner_functions=hparams.encoder_combiner_functions, + final_combiner_function=hparams.encoder_final_combiner_function, + nonpadding=nonpadding, + layer_registry=layers.ENCODER_LAYERS, + mask_future=False, + hparams=hparams, + var_scope="encoder", + final_layer_norm=final_layer_norm) + + +def nas_decoder(decoder_input, + encoder_cell_outputs, + decoder_self_attention_bias, + encoder_decoder_attention_bias, + hparams, + final_layer_norm=True): + """Decoder for configurable model. + + Args: + decoder_input: Input tensor. + encoder_cell_outputs: List of tensors. The encoder cell outputs, listed in + order. + decoder_self_attention_bias: Attention bias that the decoder uses when + attending to itself. This should have 0s for all valid positions and large + negative numbers for all hidden future positions. + encoder_decoder_attention_bias: Attention bias that the decoder uses when + attending to the encoder. This should be 0s at all valid positions and + large negative numbers for all padded positions. + hparams: transformer.Transformer hparams that must also contain: + + decoder__inputs: List of ints specifying the hidden layer + input indexes for the branches. + + decoder__layers: String list of layers. Each string must be + the name of a TranslationLayer registered in layers.py's DECODER_LAYERS. + + decoder__activations: String list of activations. Each + string in this list must have a corresponding activation in + ACTIVATION_MAP. + + decoder__output_dims: Int list of output dimensions for + branch layers. + + decoder__norms: String list of norms to apply to the + layer branches. Each item must be either LAYER_NORM_KEY or + NO_NORM_KEY. + + decoder_num_cells: The number of cells in the decoder. This determines + how many times the given layers will be repeated. + + decoder_combiner_functions: String list of functions used to combine + left and right branches. Must be a COMBINER_FUNCTION key. + hparams may also optionally contain: + + enforce_output_size: Boolean that determines whether or not the decoder + output must be resized to hparams.hidden_size. If True, the output will + be resized if it not equal to hparams.hidden_size. If False, the output + will not be resized. If this field is not set, behavior defaults to + True. + final_layer_norm: Whether or not to apply a final layer norm to the output + of the decoder. + + Returns: + Decoder output tensor. + """ + # Enforce that the output tensor depth is equal to the depth of the encoding. 
+ (_, output_depth, _, _) = calculate_branching_model_parameters( + encoding_depth=hparams.hidden_size, + left_inputs=hparams.decoder_left_inputs, + left_layers=hparams.decoder_left_layers, + left_output_dims=hparams.decoder_left_output_dims, + right_inputs=hparams.decoder_right_inputs, + right_layers=hparams.decoder_right_layers, + right_output_dims=hparams.decoder_right_output_dims, + combiner_functions=hparams.decoder_combiner_functions, + final_combiner_function=hparams.decoder_final_combiner_function, + layer_registry=layers.DECODER_LAYERS, + num_cells=hparams.decoder_num_cells, + encoder_depth=hparams.hidden_size) + improper_output_size = output_depth != hparams.hidden_size + + try: + enforce_output_size = hparams.enforce_output_size + except AttributeError: + enforce_output_size = True + resize_output = enforce_output_size and improper_output_size + + decoder_cells_output, _ = apply_nas_layers( + input_tensor=decoder_input, + left_inputs=hparams.decoder_left_inputs, + left_layers=hparams.decoder_left_layers, + left_activations=hparams.decoder_left_activations, + left_output_dims=hparams.decoder_left_output_dims, + left_norms=hparams.decoder_left_norms, + right_inputs=hparams.decoder_right_inputs, + right_layers=hparams.decoder_right_layers, + right_activations=hparams.decoder_right_activations, + right_output_dims=hparams.decoder_right_output_dims, + right_norms=hparams.decoder_right_norms, + num_cells=hparams.decoder_num_cells, + combiner_functions=hparams.decoder_combiner_functions, + final_combiner_function=hparams.decoder_final_combiner_function, + nonpadding=None, + layer_registry=layers.DECODER_LAYERS, + mask_future=True, + hparams=hparams, + var_scope="decoder", + decoder_self_attention_bias=decoder_self_attention_bias, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + encoder_cell_outputs=encoder_cell_outputs, + final_layer_norm=final_layer_norm) + + if not resize_output: + return decoder_cells_output + + # Resize output if necessary. + dense_layer = layers.DECODER_LAYERS.get(layers.STANDARD_CONV_1X1_REGISTRY_KEY) + output = dense_layer.apply_layer( + decoder_cells_output, + None, + hparams.hidden_size, + None, + hparams, + "decoder_resize_dense", + mask_future=True, + layer_preprocess_fn=None, + postprocess_dropout=True, + nonpadding=None, + attention_dropout_broadcast_dims=None, + encoder_decoder_attention_bias=None, + encoder_cell_outputs=None, + decoder_self_attention_bias=None, + ) + if final_layer_norm: + output = common_layers.layer_preprocess(output, hparams) + + return output + + +def calculate_branching_model_parameters(encoding_depth, + left_inputs, + left_layers, + left_output_dims, + right_inputs, + right_layers, + right_output_dims, + combiner_functions, + layer_registry, + num_cells, + final_combiner_function, + encoder_depth=None, + enforce_output_size=False, + enforce_fixed_output_sizes=True): + """Calculates the number of parameters in the given model portion. + + Args: + encoding_depth: Integer. The depth of the initial input tensor. + left_inputs: Integer list. The indexes of the hidden layer inputs for the + left branch. + left_layers: String list. The names of the left branch layers. + left_output_dims: Integer list. The output dimensions for each of the left + branch layers. + right_inputs: Integer list. The indexes of the hidden layer inputs for the + right branch. + right_layers: String list. The names of the right branch layers. + right_output_dims: Integer list. The output dimensions of each of the right + branch layers. 
+ combiner_functions: String list. The functions used to combine the left and + right branch tensors. + layer_registry: layers.LayerRegistry. The LayerRegistry that contains the + layers.TranslationLayers needed to construct the model. + num_cells: Integer. The number of times the given layers are repeated to + produce the model. + final_combiner_function: String. The COMBINER_FUNCTIONS key for the combiner + used to combine the unused hidden dimensions. + encoder_depth: Integer. The depth of the final encoder layer. + enforce_output_size: Boolean. If True, include parameters for the addition + of a dense layer that projects the final output to the appropriate + `encoding_depth` if it is not already that size. If False, do not add any + additional parameters. + enforce_fixed_output_sizes: Whether or not to automatically resize output + dimensions to match the input dimension if `should_alter_output_dim()` + returns True. + + Raises: + ValueError: When the layer config lists are not of equal length. + + Returns: + total_parameters: The total number of parameters in the model, accounting + for repeated cells. + output_depth: The depth of the cell output tensor. + hidden_depths: The depths of the hidden layers. + unused_outputs: List of integer indexes of the hidden layers that are not + used as input, and therefore are concatenated to produce the cell + output. + """ + if not (len(left_inputs) == len(left_layers) == len(left_output_dims) == + len(right_inputs) == len(right_layers) == len(right_output_dims) == + len(combiner_functions)): + raise ValueError("Layer configs must be of equal length.") + + total_parameters = 0 + output_depth = encoding_depth + for _ in range(num_cells): + hidden_depths = [output_depth] + unused_outputs = set(range(len(left_inputs) + 1)) + + for (left_input, left_layer, left_output_dim, right_input, + right_layer, right_output_dim, combiner_function) in zip( + left_inputs, left_layers, left_output_dims, right_inputs, + right_layers, right_output_dims, combiner_functions): + + assert not (left_layer == DEAD_BRANCH_KEY and + right_layer == DEAD_BRANCH_KEY) + + if left_layer == DEAD_BRANCH_KEY: + left_parameters = 0 + + else: + left_input_dim = hidden_depths[left_input] + if should_alter_output_dim(left_layer, enforce_fixed_output_sizes, + left_input_dim, left_output_dim): + left_output_dim = left_input_dim + + left_parameters = layer_registry.get(left_layer).num_params( + left_input_dim, left_output_dim, encoder_depth=encoder_depth) + + if right_layer == DEAD_BRANCH_KEY: + right_parameters = 0 + + else: + right_input_dim = hidden_depths[right_input] + if should_alter_output_dim(right_layer, enforce_fixed_output_sizes, + right_input_dim, right_output_dim): + right_output_dim = right_input_dim + + right_parameters = layer_registry.get(right_layer).num_params( + right_input_dim, right_output_dim, encoder_depth=encoder_depth) + + total_parameters += left_parameters + right_parameters + + if left_layer == DEAD_BRANCH_KEY: + hidden_dim = right_output_dim + elif right_layer == DEAD_BRANCH_KEY: + hidden_dim = left_output_dim + else: + hidden_dim = COMBINER_FUNCTIONS[combiner_function]( + ).combined_output_dim([left_output_dim, right_output_dim]) + hidden_depths.append(hidden_dim) + + try: + if left_layer != DEAD_BRANCH_KEY: + unused_outputs.remove(left_input) + except KeyError: + pass + try: + if right_layer != DEAD_BRANCH_KEY: + unused_outputs.remove(right_input) + except KeyError: + pass + + # All unused outputs combined_together. 
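+    # Rough example (hypothetical dims): if the unused hidden states have
+    # depths [512, 256], a concat-style final combiner predicts an
+    # output_depth of 768 (the sum), while an add-style combiner over equal
+    # depths keeps that single depth.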
+ unused_hidden_depths = [hidden_depths[index] for index in unused_outputs] + output_depth = COMBINER_FUNCTIONS[final_combiner_function]( + ).combined_output_dim(unused_hidden_depths) + + # Add the resizing layer if needed. + if output_depth != encoding_depth and enforce_output_size: + total_parameters += layer_registry.get( + layers.STANDARD_CONV_1X1_REGISTRY_KEY).num_params( + output_depth, encoding_depth, encoder_depth=encoder_depth) + + return (total_parameters, output_depth, hidden_depths, unused_outputs) + + +@registry.register_hparams +def nas_seq2seq_base(): + """Base parameters for Nas Seq2Seq model. + + The default parameters are set to create the Transformer. + + Returns: + Hyperparameters for Nas Seq2Seq model. + """ + hparams = transformer.transformer_base() + + hparams.add_hparam("encoder_num_cells", 6) + hparams.add_hparam("encoder_left_inputs", [0, 1, 2, 3]) + hparams.add_hparam("encoder_left_layers", [ + "standard_attention", "standard_conv_1x1", "standard_conv_1x1", "identity" + ]) + hparams.add_hparam("encoder_left_output_dims", [512, 2048, 512, 512]) + hparams.add_hparam("encoder_left_activations", + ["none", "relu", "none", "none"]) + hparams.add_hparam("encoder_left_norms", + ["layer_norm", "layer_norm", "none", "none"]) + hparams.add_hparam("encoder_right_inputs", [0, 1, 1, 1]) + hparams.add_hparam("encoder_right_layers", + ["identity", "dead_branch", "identity", "dead_branch"]) + hparams.add_hparam("encoder_right_activations", + ["none", "none", "none", "none"]) + hparams.add_hparam("encoder_right_output_dims", [512, 512, 512, 512]) + hparams.add_hparam("encoder_right_norms", ["none", "none", "none", "none"]) + hparams.add_hparam("encoder_combiner_functions", ["add", "add", "add", "add"]) + hparams.add_hparam("encoder_final_combiner_function", "add") + + hparams.add_hparam("decoder_num_cells", 6) + hparams.add_hparam("decoder_left_inputs", [0, 1, 2, 3, 4]) + hparams.add_hparam("decoder_left_layers", [ + "standard_attention", "attend_to_encoder", "standard_conv_1x1", + "standard_conv_1x1", "identity" + ]) + hparams.add_hparam("decoder_left_activations", + ["none", "none", "relu", "none", "none"]) + hparams.add_hparam("decoder_left_output_dims", [512, 512, 2048, 512, 512]) + hparams.add_hparam("decoder_left_norms", + ["layer_norm", "layer_norm", "layer_norm", "none", "none"]) + hparams.add_hparam("decoder_right_inputs", [0, 1, 2, 2, 4]) + hparams.add_hparam( + "decoder_right_layers", + ["identity", "identity", "dead_branch", "identity", "dead_branch"]) + hparams.add_hparam("decoder_right_activations", + ["none", "none", "none", "none", "none"]) + hparams.add_hparam("decoder_right_output_dims", [512, 512, 512, 512, 512]) + hparams.add_hparam("decoder_right_norms", + ["none", "none", "none", "none", "none"]) + hparams.add_hparam("decoder_combiner_functions", + ["add", "add", "add", "add", "add"]) + hparams.add_hparam("decoder_final_combiner_function", "add") + + return hparams diff --git a/tensor2tensor/models/neural_architecture_search/nas_model_test.py b/tensor2tensor/models/neural_architecture_search/nas_model_test.py new file mode 100644 index 000000000..8d2a9b446 --- /dev/null +++ b/tensor2tensor/models/neural_architecture_search/nas_model_test.py @@ -0,0 +1,469 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for NasSeq2Seq.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from absl.testing import parameterized +import numpy as np +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.layers import common_attention +from tensor2tensor.models import transformer +from tensor2tensor.models.neural_architecture_search import nas_layers as layers +from tensor2tensor.models.neural_architecture_search import nas_model as translation_nas_net +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +_BATCH_SIZE = 5 +_INPUT_LENGTH = 5 +_TARGET_LENGTH = 6 +_VOCAB_SIZE = 8 +_HIDDEN_SIZE = 512 +_EMBEDDING_DEPTH = _HIDDEN_SIZE + + +def _list_product(num_list): + """Computes product of all elements in a list.""" + product = 1 + for num in num_list: + product *= num + return product + + +def _get_transformer_branching_encoder_config(): + """Returns config for the Transformer encoder.""" + num_cells = 2 + left_inputs = [0, 1, 2, 3] + left_layers = [ + layers.STANDARD_ATTENTION_REGISTRY_KEY, + layers.STANDARD_CONV_1X1_REGISTRY_KEY, + layers.STANDARD_CONV_1X1_REGISTRY_KEY, layers.IDENTITY_REGISTRY_KEY + ] + left_output_dims = [512, 2048, 512, 512] + right_inputs = [0, 1, 1, 3] + right_layers = [ + layers.IDENTITY_REGISTRY_KEY, translation_nas_net.DEAD_BRANCH_KEY, + layers.IDENTITY_REGISTRY_KEY, translation_nas_net.DEAD_BRANCH_KEY + ] + right_output_dims = [512, 512, 512, 512] + combiner_functions = [ + translation_nas_net.ADD_COMBINER_FUNC_KEY, + translation_nas_net.ADD_COMBINER_FUNC_KEY, + translation_nas_net.ADD_COMBINER_FUNC_KEY, + translation_nas_net.ADD_COMBINER_FUNC_KEY + ] + dummy_activations = [translation_nas_net.NONE_ACTIVATION_KEY] * 4 + dummy_norms = [translation_nas_net.NO_NORM_KEY] * 4 + layer_registry = layers.ENCODER_LAYERS + is_decoder = False + final_combiner_function = translation_nas_net.CONCAT_COMBINER_FUNC_KEY + + return (num_cells, left_inputs, left_layers, left_output_dims, right_inputs, + right_layers, right_output_dims, combiner_functions, + final_combiner_function, dummy_activations, dummy_norms, + layer_registry, is_decoder) + + +def _get_transformer_branching_decoder_config(): + """Returns config for the Transformer decoder.""" + num_cells = 2 + left_inputs = [0, 1, 2, 3, 4] + left_layers = [ + layers.STANDARD_ATTENTION_REGISTRY_KEY, + layers.ATTEND_TO_ENCODER_REGISTRY_KEY, + layers.STANDARD_CONV_1X1_REGISTRY_KEY, + layers.STANDARD_CONV_1X1_REGISTRY_KEY, layers.IDENTITY_REGISTRY_KEY + ] + left_output_dims = [512, 512, 1024, 256, 512] + right_inputs = [0, 1, 2, 3, 2] + right_layers = [ + layers.IDENTITY_REGISTRY_KEY, layers.IDENTITY_REGISTRY_KEY, + layers.STANDARD_CONV_1X1_REGISTRY_KEY, + layers.STANDARD_CONV_1X1_REGISTRY_KEY, layers.IDENTITY_REGISTRY_KEY + ] + right_output_dims = [512, 512, 1024, 256, 512] + combiner_functions = [ + translation_nas_net.ADD_COMBINER_FUNC_KEY, + translation_nas_net.ADD_COMBINER_FUNC_KEY, + translation_nas_net.CONCAT_COMBINER_FUNC_KEY, + translation_nas_net.CONCAT_COMBINER_FUNC_KEY, + 
translation_nas_net.ADD_COMBINER_FUNC_KEY + ] + dummy_activations = [translation_nas_net.NONE_ACTIVATION_KEY] * 5 + dummy_norms = [translation_nas_net.NO_NORM_KEY] * 5 + layer_registry = layers.DECODER_LAYERS + is_decoder = True + final_combiner_function = translation_nas_net.CONCAT_COMBINER_FUNC_KEY + + return (num_cells, left_inputs, left_layers, left_output_dims, right_inputs, + right_layers, right_output_dims, combiner_functions, + final_combiner_function, dummy_activations, dummy_norms, + layer_registry, is_decoder) + + +def _add_transformer_branching_hparams(hparams): + (encoder_num_cells, encoder_left_inputs, encoder_left_layers, + encoder_left_output_dims, encoder_right_inputs, encoder_right_layers, + encoder_right_output_dims, encoder_combiner_functions, + encoder_final_combiner_function, encoder_dummy_activations, + encoder_dummy_norms, _, _) = _get_transformer_branching_encoder_config() + + # Transformer encoder. + hparams.add_hparam("encoder_left_inputs", encoder_left_inputs) + hparams.add_hparam("encoder_left_layers", encoder_left_layers) + hparams.add_hparam("encoder_left_activations", encoder_dummy_activations) + hparams.add_hparam("encoder_left_output_dims", encoder_left_output_dims) + hparams.add_hparam("encoder_left_norms", encoder_dummy_norms) + hparams.add_hparam("encoder_right_inputs", encoder_right_inputs) + hparams.add_hparam("encoder_right_layers", encoder_right_layers) + hparams.add_hparam("encoder_right_activations", encoder_dummy_activations) + hparams.add_hparam("encoder_right_output_dims", encoder_right_output_dims) + hparams.add_hparam("encoder_right_norms", encoder_dummy_norms) + hparams.add_hparam("encoder_combiner_functions", encoder_combiner_functions) + hparams.add_hparam("encoder_num_cells", encoder_num_cells) + hparams.add_hparam("encoder_final_combiner_function", + encoder_final_combiner_function) + + (decoder_num_cells, decoder_left_inputs, decoder_left_layers, + decoder_left_output_dims, decoder_right_inputs, decoder_right_layers, + decoder_right_output_dims, decoder_combiner_functions, + decoder_final_combiner_function, decoder_dummy_activations, + decoder_dummy_norms, _, _) = _get_transformer_branching_decoder_config() + + # Transformer decoder. 
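+  # Same wiring as the encoder hparams above, but using the decoder config
+  # returned by _get_transformer_branching_decoder_config().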
+ hparams.add_hparam("decoder_left_inputs", decoder_left_inputs) + hparams.add_hparam("decoder_left_layers", decoder_left_layers) + hparams.add_hparam("decoder_left_activations", decoder_dummy_activations) + hparams.add_hparam("decoder_left_output_dims", decoder_left_output_dims) + hparams.add_hparam("decoder_left_norms", decoder_dummy_norms) + hparams.add_hparam("decoder_right_inputs", decoder_right_inputs) + hparams.add_hparam("decoder_right_layers", decoder_right_layers) + hparams.add_hparam("decoder_right_activations", decoder_dummy_activations) + hparams.add_hparam("decoder_right_output_dims", decoder_right_output_dims) + hparams.add_hparam("decoder_right_norms", decoder_dummy_norms) + hparams.add_hparam("decoder_combiner_functions", decoder_combiner_functions) + hparams.add_hparam("decoder_num_cells", decoder_num_cells) + hparams.add_hparam("decoder_final_combiner_function", + decoder_final_combiner_function) + + +class NasSeq2SeqTest(parameterized.TestCase, tf.test.TestCase): + + def _test_model(self, model_cls, hparams): + """Test a Translation Nas Net model.""" + tf.reset_default_graph() + + hparams.filter_size = 32 + hparams.num_heads = 1 + hparams.layer_prepostprocess_dropout = 0.0 + hparams.hidden_size = _HIDDEN_SIZE + + p_hparams = problem_hparams.test_problem_hparams(_VOCAB_SIZE, _VOCAB_SIZE, + hparams) + hparams.problems = [p_hparams] + + inputs = -1 + np.random.random_integers( + _VOCAB_SIZE, size=(_BATCH_SIZE, _INPUT_LENGTH, 1, 1)) + targets = -1 + np.random.random_integers( + _VOCAB_SIZE, size=(_BATCH_SIZE, _TARGET_LENGTH, 1, 1)) + features = { + "inputs": tf.constant(inputs, dtype=tf.int32, name="inputs"), + "targets": tf.constant(targets, dtype=tf.int32, name="targets"), + "target_space_id": tf.constant(1, dtype=tf.int32) + } + + model = model_cls(hparams, tf_estimator.ModeKeys.TRAIN, p_hparams) + logits, _ = model(features) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + res = session.run(logits) + self.assertEqual(res.shape, + (_BATCH_SIZE, _TARGET_LENGTH, 1, 1, _VOCAB_SIZE)) + + def _get_encoder_hparams(self): + hparams = transformer.transformer_small() + hparams.add_hparam("encoder_layer_list", + layers.ENCODER_LAYERS.get_layer_names()) + hparams.add_hparam("encoder_output_dim_list", [32] + [64] * + (len(hparams.encoder_layer_list) - 2) + [32]) + hparams.add_hparam("encoder_activation_list", ["none"] + ["relu"] * + (len(hparams.encoder_layer_list) - 1)) + hparams.add_hparam("encoder_norm_list", ["none"] + ["layer_norm"] * + (len(hparams.encoder_layer_list) - 1)) + return hparams + + def test_nas_seq2seq(self): + hparams = self._get_encoder_hparams() + _add_transformer_branching_hparams(hparams) + self._test_model(translation_nas_net.NasSeq2Seq, hparams) + + def _get_wrong_output_dim_decoder_hparams(self): + tf.reset_default_graph() + + hparams = transformer.transformer_base() + _add_transformer_branching_hparams(hparams) + hparams.num_heads = 1 + # Purposely scale up the final embedding depth. 
+ wrong_output_size = _EMBEDDING_DEPTH + 1 + hparams.decoder_left_output_dims[ + -2] = hparams.decoder_left_output_dims[-2] + 1 + hparams.decoder_left_output_dims[-1] = wrong_output_size + + return hparams, wrong_output_size + + def test_nas_decoder_resizing_output(self): + hparams, wrong_size = self._get_wrong_output_dim_decoder_hparams() + hparams.enforce_output_size = False + input_tensor = tf.zeros([_BATCH_SIZE, _INPUT_LENGTH, _EMBEDDING_DEPTH]) + decoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle(_INPUT_LENGTH)) + with tf.variable_scope("wrong"): + wrong_size_decoder_output = translation_nas_net.nas_decoder( + decoder_input=input_tensor, + encoder_cell_outputs=[input_tensor] * hparams.encoder_num_cells, + decoder_self_attention_bias=decoder_self_attention_bias, + encoder_decoder_attention_bias=None, + hparams=hparams) + + # Now add the correction. + hparams.enforce_output_size = True + with tf.variable_scope("correct"): + correct_size_decoder_output = translation_nas_net.nas_decoder( + decoder_input=input_tensor, + encoder_cell_outputs=[input_tensor] * hparams.encoder_num_cells, + decoder_self_attention_bias=decoder_self_attention_bias, + encoder_decoder_attention_bias=None, + hparams=hparams) + + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + wrong_output, correct_output = session.run( + [wrong_size_decoder_output, correct_size_decoder_output]) + self.assertEqual(wrong_output.shape, + (_BATCH_SIZE, _INPUT_LENGTH, wrong_size)) + self.assertEqual(correct_output.shape, + (_BATCH_SIZE, _INPUT_LENGTH, _EMBEDDING_DEPTH)) + + @parameterized.parameters([(_get_transformer_branching_encoder_config, + [512, 512, 2048, 512, 512]), + (_get_transformer_branching_decoder_config, + [512, 512, 512, 2048, 512, 512])]) + def test_calculate_branching_model_parameters_transformer( + self, get_config, expected_hidden_depths): + tf.reset_default_graph() + + (num_cells, left_inputs, left_layers, left_output_dims, right_inputs, + right_layers, right_output_dims, combiner_functions, + final_combiner_function, dummy_activations, dummy_norms, layer_registry, + is_decoder) = get_config() + + # Get predicted number of parameters. + (predicted_num_params, output_size, hidden_depths, + _) = translation_nas_net.calculate_branching_model_parameters( + encoding_depth=_EMBEDDING_DEPTH, + left_inputs=left_inputs, + left_layers=left_layers, + left_output_dims=left_output_dims, + right_inputs=right_inputs, + right_layers=right_layers, + right_output_dims=right_output_dims, + combiner_functions=combiner_functions, + final_combiner_function=final_combiner_function, + layer_registry=layer_registry, + num_cells=num_cells, + encoder_depth=_EMBEDDING_DEPTH) + + # Create model graph. 
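+    # Build the same branching cells with apply_nas_layers so the variables
+    # actually created in the graph can be counted against the prediction.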
+ input_tensor = tf.zeros([32, _INPUT_LENGTH, _EMBEDDING_DEPTH]) + hparams = transformer.transformer_small() + + if is_decoder: + nonpadding = None + mask_future = True + decoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle(_INPUT_LENGTH)) + encoder_cell_outputs = [input_tensor] * 6 + else: + nonpadding = tf.ones([32, _INPUT_LENGTH]) + mask_future = False + decoder_self_attention_bias = None + encoder_cell_outputs = None + + translation_nas_net.apply_nas_layers( + input_tensor=input_tensor, + left_inputs=left_inputs, + left_layers=left_layers, + left_activations=dummy_activations, + left_output_dims=left_output_dims, + left_norms=dummy_norms, + right_inputs=right_inputs, + right_layers=right_layers, + right_activations=dummy_activations, + right_output_dims=right_output_dims, + right_norms=dummy_norms, + combiner_functions=combiner_functions, + final_combiner_function=final_combiner_function, + num_cells=num_cells, + nonpadding=nonpadding, + layer_registry=layer_registry, + mask_future=mask_future, + hparams=hparams, + var_scope="test", + encoder_decoder_attention_bias=None, + encoder_cell_outputs=encoder_cell_outputs, + decoder_self_attention_bias=decoder_self_attention_bias, + final_layer_norm=False) + + # Count graph variables. + trainable_variables_list = tf.trainable_variables() + empirical_num_params = 0 + for variable_tensor in trainable_variables_list: + empirical_num_params += _list_product(variable_tensor.shape.as_list()) + + # Compare. + self.assertEqual(empirical_num_params, predicted_num_params) + self.assertEqual(output_size, _EMBEDDING_DEPTH) + self.assertEqual(hidden_depths, expected_hidden_depths) + + @parameterized.parameters([True, False]) + def test_calculate_branching_model_parameters_decoder_resize( + self, enforce_output_size): + tf.reset_default_graph() + + hparams, _ = self._get_wrong_output_dim_decoder_hparams() + hparams.enforce_output_size = enforce_output_size + hparams.decoder_left_norms = [translation_nas_net.NO_NORM_KEY] * 5 + hparams.decoder_right_norms = [translation_nas_net.NO_NORM_KEY] * 5 + + # Get predicted number of parameters. + (predicted_num_params, _, _, + _) = translation_nas_net.calculate_branching_model_parameters( + encoding_depth=_EMBEDDING_DEPTH, + left_inputs=hparams.decoder_left_inputs, + left_layers=hparams.decoder_left_layers, + left_output_dims=hparams.decoder_left_output_dims, + right_inputs=hparams.decoder_right_inputs, + right_layers=hparams.decoder_right_layers, + right_output_dims=hparams.decoder_right_output_dims, + combiner_functions=hparams.decoder_combiner_functions, + final_combiner_function=hparams.decoder_final_combiner_function, + layer_registry=layers.DECODER_LAYERS, + num_cells=hparams.decoder_num_cells, + encoder_depth=_EMBEDDING_DEPTH, + enforce_output_size=enforce_output_size) + + # Count graph variables. 
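+    # Build the decoder graph (with final_layer_norm=False) and total up the
+    # trainable-variable sizes to compare with the predicted count.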
+ input_tensor = tf.zeros([_BATCH_SIZE, _INPUT_LENGTH, _EMBEDDING_DEPTH]) + decoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle(_INPUT_LENGTH)) + _ = translation_nas_net.nas_decoder( + decoder_input=input_tensor, + encoder_cell_outputs=[input_tensor] * hparams.encoder_num_cells, + decoder_self_attention_bias=decoder_self_attention_bias, + encoder_decoder_attention_bias=None, + hparams=hparams, + final_layer_norm=False) + trainable_variables_list = tf.trainable_variables() + empirical_num_params = 0 + for variable_tensor in trainable_variables_list: + empirical_num_params += _list_product(variable_tensor.shape.as_list()) + + self.assertEqual(empirical_num_params, predicted_num_params) + + def test_calculate_branching_model_parameters_output_size_only_final(self): + left_inputs = [0, 1, 2, 3] + right_inputs = [0, 1, 2, 3] + left_output_dims = [1, 10, 100, 1000] + right_output_dims = [10000, 100000, 1000000, 10000000] + right_layers = [ + layers.IDENTITY_REGISTRY_KEY, layers.STANDARD_CONV_1X1_REGISTRY_KEY, + layers.STANDARD_CONV_1X1_REGISTRY_KEY, layers.IDENTITY_REGISTRY_KEY + ] + combiner_functions = [ + translation_nas_net.ADD_COMBINER_FUNC_KEY, + translation_nas_net.ADD_COMBINER_FUNC_KEY, + translation_nas_net.MULTIPLY_COMBINER_FUNC_KEY, + translation_nas_net.CONCAT_COMBINER_FUNC_KEY + ] + + (num_cells, _, left_layers, _, _, _, _, _, final_combiner_function, + dummy_activations, dummy_norms, layer_registry, + _) = _get_transformer_branching_encoder_config() + + # Get predicted number of parameters. + (_, output_size, _, + _) = translation_nas_net.calculate_branching_model_parameters( + encoding_depth=_EMBEDDING_DEPTH, + left_inputs=left_inputs, + left_layers=left_layers, + left_output_dims=left_output_dims, + right_inputs=right_inputs, + right_layers=right_layers, + right_output_dims=right_output_dims, + combiner_functions=combiner_functions, + final_combiner_function=final_combiner_function, + layer_registry=layer_registry, + num_cells=num_cells, + encoder_depth=_EMBEDDING_DEPTH, + enforce_output_size=False, + enforce_fixed_output_sizes=False) + + self.assertEqual(output_size, 10001000) + + def test_calculate_branching_model_parameters_output_size_last_two(self): + left_inputs = [0, 1, 2, 2] + right_inputs = [0, 1, 2, 2] + left_output_dims = [1, 10, 100, 1000] + right_output_dims = [10000, 100000, 1000000, 10000000] + right_layers = [ + layers.IDENTITY_REGISTRY_KEY, layers.STANDARD_CONV_1X1_REGISTRY_KEY, + layers.STANDARD_CONV_1X1_REGISTRY_KEY, layers.IDENTITY_REGISTRY_KEY + ] + combiner_functions = [ + translation_nas_net.ADD_COMBINER_FUNC_KEY, + translation_nas_net.ADD_COMBINER_FUNC_KEY, + translation_nas_net.MULTIPLY_COMBINER_FUNC_KEY, + translation_nas_net.CONCAT_COMBINER_FUNC_KEY + ] + + (num_cells, _, left_layers, _, _, _, _, _, final_combiner_function, + dummy_activations, dummy_norms, layer_registry, + _) = _get_transformer_branching_encoder_config() + + # Get predicted number of parameters. 
+ (_, output_size, _, + _) = translation_nas_net.calculate_branching_model_parameters( + encoding_depth=_EMBEDDING_DEPTH, + left_inputs=left_inputs, + left_layers=left_layers, + left_output_dims=left_output_dims, + right_inputs=right_inputs, + right_layers=right_layers, + right_output_dims=right_output_dims, + combiner_functions=combiner_functions, + final_combiner_function=final_combiner_function, + layer_registry=layer_registry, + num_cells=num_cells, + encoder_depth=_EMBEDDING_DEPTH, + enforce_output_size=False, + enforce_fixed_output_sizes=False) + + self.assertEqual(output_size, 11001000) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/neural_assistant.py b/tensor2tensor/models/neural_assistant.py new file mode 100644 index 000000000..53f87eb1d --- /dev/null +++ b/tensor2tensor/models/neural_assistant.py @@ -0,0 +1,564 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Neural Assistant.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import six +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.models import transformer +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +@registry.register_model +class NeuralAssistant(transformer.Transformer): + """Attention net. See file docstring.""" + + def __init__(self, *args, **kwargs): + super(NeuralAssistant, self).__init__(*args, **kwargs) + self.attention_weights = dict() # For visualizing attention heads. + + # Loss scheduling. 
+ hparams = self._hparams + self.triple_num = hparams.train_triple_num + + def model_fn(self, features): + with tf.variable_scope(tf.get_variable_scope(), use_resource=True) as vs: + self._add_variable_scope("model_fn", vs) + transformed_features = self.bottom(features) + + if self.hparams.activation_dtype == "bfloat16": + for k, v in sorted(six.iteritems(transformed_features)): + if v.dtype == tf.float32: + transformed_features[k] = tf.cast(v, tf.bfloat16) + + with tf.variable_scope("body") as body_vs: + self._add_variable_scope("body", body_vs) + body_out = self.body(transformed_features) + output, losses = self._normalize_body_output(body_out) + + if "training" in losses: + tf.logging.info( + "Skipping T2TModel top and loss because training loss returned from body" + ) + logits = output + else: + tf.logging.warn("The loss will be computed in model_fn now.") + logits = self.top(output, features) + losses["training"] = 0.0 + cur_kb_loss = losses["kb_loss"] + cur_knowledge_training_loss = losses["transe_loss"] + cur_kb_loss_weight = self._hparams.kb_loss_weight + kb_train_weight = self._hparams.kb_train_weight + cur_lm_loss_weight = 1.0 - cur_kb_loss_weight + # Finalize loss + if (self._hparams.mode != tf_estimator.ModeKeys.PREDICT and + self._hparams.mode != "attack"): + lm_loss_num, lm_loss_denom = self.loss(logits, features) + total_loss = (kb_train_weight) * cur_knowledge_training_loss + ( + 1 - kb_train_weight) * ( + cur_kb_loss * cur_kb_loss_weight + + (lm_loss_num / lm_loss_denom) * cur_lm_loss_weight) + tf.summary.scalar("kb_loss", cur_kb_loss) + tf.summary.scalar("transe_loss", cur_knowledge_training_loss) + tf.summary.scalar("lm_loss", (lm_loss_num / lm_loss_denom)) + tf.summary.scalar("cur_kb_loss_weight", + tf.reshape(cur_kb_loss_weight, [])) + tf.logging.info("Loss computed " + str(total_loss)) + losses = {"training": total_loss} + + return logits, losses + + def encode_knowledge_bottom(self, features): + tf.logging.info("Encoding knowledge " + str(self.triple_num)) + # Make sure this is embeddings for triples + # [batch_size, triple_num*max_triple_length, 1, emb_dim] + fact_embedding = features["encoded_triples"] + # [batch_size, triple_num*max_triple_length, emb_dim] + fact_embedding = tf.squeeze(fact_embedding, 2) + + kb_shape = common_layers.shape_list(fact_embedding) + batch_size = kb_shape[0] + embed_dim = kb_shape[2] + # [batch_size*triple_num, max_triple_length, emb_dim] + re_fact_embedding = tf.reshape( + fact_embedding, [batch_size * self.triple_num, -1, embed_dim], + name="reshape_fact_embedding") + + # [batch_size, triple_num] + input_fact_lengths = features["triple_lens"] + # Stack the fact lengths. + # [batch_size*max_triple_num] + re_fact_lengths = tf.reshape( + input_fact_lengths, [batch_size * self.triple_num, 1], + name="reshape_fact_lengths") + + return re_fact_embedding, re_fact_lengths + + def compute_knowledge_selection_and_loss(self, features, encoder_output, + fact_embedding, fact_lengths, margin, + num_negative_samples): + """Compute knowledge selection and loss. + + Args: + features: features. 
+ encoder_output: [batch_size, input_length, hidden_dim] + fact_embedding: [batch_size*triple_num, max_triple_length, + emb_dim] + fact_lengths: # [batch_size*triple_num] + margin: integer value for max margin in TransE loss, + num_negative_samples: shuffle and sample multiple negative examples for + the TransE loss + + Returns: + knowledge_weights: + knowledge_loss: + """ + hparams = self._hparams + encoder_output_shape = common_layers.shape_list(encoder_output) + encoder_hidden_dim = encoder_output_shape[-1] + inputs = features["inputs"] + # [batch_size, input_length, emb_dim] + inputs = tf.squeeze(inputs, 2) + # [batch_size, input_length] + context_padding = common_attention.embedding_to_padding(inputs) + # [batch_size] + context_lens = tf.to_float( + common_attention.padding_to_length(context_padding)) + # [batch_size, 1] + context_lens = tf.expand_dims(context_lens, -1) + # Compute context vector summary. + # [batch_size, hidden_dim] + context_vector_summary = compute_summary_embedding(encoder_output, + context_lens, hparams) + knowledge_encoder_output = compute_average_embedding( + fact_embedding, fact_lengths) + # [batch_size, triple_num, emb_dim] + knowledge_encoder_output = tf.reshape( + knowledge_encoder_output, [-1, self.triple_num, encoder_hidden_dim]) + original_knowledge_encoder_output = knowledge_encoder_output + if hparams.similarity_fuction == "dot_product": + triple_logits = tf.squeeze( + tf.matmul(knowledge_encoder_output, + tf.expand_dims(context_vector_summary, 2)), -1) + elif hparams.similarity_fuction == "bilinear": + # Tile the context vector summary. + # [batch_size, triple_num*hidden_dim] + tiled_context_vector = tf.tile(context_vector_summary, + [1, self.triple_num]) + # [batch_size, triple_num, hidden_dim] + context_vector = tf.reshape(tiled_context_vector, + [-1, self.triple_num, encoder_hidden_dim]) + # compute outer product + context_vector = tf.expand_dims(context_vector, -1) + knowledge_encoder_output = tf.expand_dims(knowledge_encoder_output, 2) + # [batch_size, triple_num, hidden_dim, hidden_dim] + outer_product = tf.matmul(context_vector, knowledge_encoder_output) + outer_product = tf.reshape( + outer_product, + [-1, self.triple_num, encoder_hidden_dim * encoder_hidden_dim]) + triple_logits = tf.squeeze( + tf.layers.dense(outer_product, 1, name="knolwedge_final_mlp"), -1) + + avg_triple_loss = 0.0 + triple_labels = features["triple_labels"] + + subject_mask = tf.reshape(features["subject_mask"], + [-1, self.triple_num, hparams.max_triple_length]) + subject_mask = tf.reshape(subject_mask, [-1, hparams.max_triple_length]) + + predicate_mask = tf.reshape( + features["predicate_mask"], + [-1, self.triple_num, hparams.max_triple_length]) + predicate_mask = tf.reshape(predicate_mask, [-1, hparams.max_triple_length]) + + object_mask = tf.reshape(features["object_mask"], + [-1, self.triple_num, hparams.max_triple_length]) + object_mask = tf.reshape(object_mask, [-1, hparams.max_triple_length]) + + # mask : [bs, max_seq_len, triple_num] + # the below operation will result in [bs*triple_num,emb_dim] + subject_length = tf.cast( + tf.expand_dims(tf.reduce_sum(subject_mask, -1), 1), + tf.float32) # [bs*tn] + object_length = tf.cast( + tf.expand_dims(tf.reduce_sum(object_mask, -1), 1), tf.float32) + predicate_length = tf.cast( + tf.expand_dims(tf.reduce_sum(predicate_mask, -1), 1), tf.float32) + + # expand dimension 2 to be able to broadcast + subject_mask = tf.cast(tf.expand_dims(subject_mask, 2), tf.float32) + predicate_mask = tf.cast(tf.expand_dims(predicate_mask, 
2), tf.float32) + object_mask = tf.cast(tf.expand_dims(object_mask, 2), tf.float32) + + subject_vect = tf.reduce_sum(tf.multiply( + fact_embedding, subject_mask), 1) / ( + subject_length + + tf.broadcast_to(tf.constant([1e-5]), tf.shape(subject_length))) + object_vect = tf.reduce_sum(tf.multiply(fact_embedding, object_mask), 1) / ( + object_length + + tf.broadcast_to(tf.constant([1e-5]), tf.shape(object_length))) + predicate_vect = tf.reduce_sum( + tf.multiply(fact_embedding, predicate_mask), 1) / ( + predicate_length + + tf.broadcast_to(tf.constant([1e-5]), tf.shape(predicate_length))) + + # Shuffled rows to generate adversarial samples + shuffled_subject_vect = [] + shuffled_object_vect = [] + + for _ in range(num_negative_samples): + shuffled_subject_vect += [ + tf.gather(subject_vect, + tf.random.shuffle(tf.range(tf.shape(subject_vect)[0]))) + ] # [bs*tn,d] + shuffled_object_vect += [ + tf.gather(object_vect, + tf.random.shuffle(tf.range(tf.shape(object_vect)[0]))) + ] # [bs*tn,d] + + # KB pretraining loss + + positive_loss = tf.reduce_mean( + tf.squared_difference(subject_vect + predicate_vect, object_vect)) + negative_loss = 0 + for n_adv in range(num_negative_samples): + negative_loss += tf.reduce_mean( + tf.squared_difference(shuffled_subject_vect[n_adv] + predicate_vect, + object_vect)) + negative_loss += tf.reduce_mean( + tf.squared_difference(subject_vect + predicate_vect, + shuffled_object_vect[n_adv])) + + # TransE Loss + + negative_loss = negative_loss / (2 * num_negative_samples) + + transe_loss = tf.clip_by_value( + margin + positive_loss - negative_loss, + clip_value_min=0, + clip_value_max=100) + if hparams.mode != tf_estimator.ModeKeys.PREDICT: + triple_losses = tf.nn.weighted_cross_entropy_with_logits( + labels=triple_labels, + logits=triple_logits, + pos_weight=hparams.pos_weight) + avg_triple_loss = tf.reduce_mean(triple_losses) + tf.summary.scalar("triple_loss", avg_triple_loss) + + return triple_logits, avg_triple_loss, original_knowledge_encoder_output, transe_loss + + def body(self, features): + """Transformer main model_fn. + + Args: + features: Map of features to the model. Should contain the following: + "inputs": Transformer inputs [batch_size, input_length, hidden_dim] + "targets": Target decoder outputs. [batch_size, decoder_length, + hidden_dim] + "target_space_id": A scalar int from data_generators.problem.SpaceID. + + Returns: + Final decoder representation. [batch_size, decoder_length, hidden_dim] + """ + tf.logging.info("Using PgScratch BODY function.") + hparams = self._hparams + + losses = {} + inputs = features["inputs"] + target_space = features["target_space_id"] + # encoder_output: [batch_size, input_length, hidden_dim] + # encoder_decoder_attention_bias: [batch_size, input_length] + encoder_output, encoder_decoder_attention_bias = self.encode( + inputs, target_space, hparams, features=features, losses=losses) + + with tf.variable_scope("knowledge"): + with tf.name_scope("knowledge_encoding"): + # Encode knowledge. + # [batch_size, triple_num, emb_dim] + fact_embedding, fact_lengths = self.encode_knowledge_bottom(features) + tf.logging.info("Encoded knowledge") + + with tf.name_scope("knowledge_selection_and_loss"): + # Compute knowledge selection and loss. 
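+        # Returns per-triple relevance logits, the weighted cross-entropy
+        # selection loss, the pooled triple embeddings, and the TransE loss
+        # used for KB pre-training.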
+ triple_logits, avg_triple_selection_loss, knowledge_encoder_output, transe_loss = self.compute_knowledge_selection_and_loss( + features, encoder_output, fact_embedding, fact_lengths, + hparams.margin, hparams.num_negative_samples) + losses["kb_loss"] = avg_triple_selection_loss + losses["transe_loss"] = transe_loss + + if hparams.attend_kb: + tf.logging.info("ATTEND_KB is ACTIVE") + with tf.name_scope("knowledge_attention"): + + knowledge_padding = tf.zeros_like(triple_logits, dtype=tf.float32) + knowledge_attention_bias = common_attention.attention_bias_ignore_padding( + knowledge_padding) + encoder_output = tf.concat([knowledge_encoder_output, encoder_output], + 1) + encoder_decoder_attention_bias = tf.concat( + [knowledge_attention_bias, encoder_decoder_attention_bias], -1) + + else: + tf.logging.info("ATTEND_KB is INACTIVE") + + targets = features["targets"] + targets_shape = common_layers.shape_list(targets) + targets = common_layers.flatten4d3d(targets) + + (decoder_input, + decoder_self_attention_bias) = transformer.transformer_prepare_decoder( + targets, hparams, features=features) + + decode_kwargs = {} + decoder_output = self.decode( + decoder_input, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + nonpadding=transformer.features_to_nonpadding(features, "targets"), + losses=losses, + **decode_kwargs) + + expected_attentions = features.get("expected_attentions") + if expected_attentions is not None: + attention_loss = common_attention.encoder_decoder_attention_loss( + expected_attentions, self.attention_weights, + hparams.expected_attention_loss_type, + hparams.expected_attention_loss_multiplier) + return decoder_output, {"attention_loss": attention_loss} + + ret = tf.reshape(decoder_output, targets_shape) + if losses: + return ret, losses + else: + return ret + + def _normalize_body_output(self, body_out): + if len(body_out) == 2: + output, losses = body_out + if not isinstance(losses, dict): + losses = {"extra": tf.reduce_mean(losses)} + else: + output = body_out + losses = {"extra": 0.0} + + return output, losses + + def _beam_decode(self, + features, + decode_length, + beam_size, + top_beams, + alpha, + use_tpu=False): + """Beam search decoding. + + Args: + features: an map of string to `Tensor` + decode_length: an integer. How many additional timesteps to decode. + beam_size: number of beams. + top_beams: an integer. How many of the beams to return. + alpha: Float that controls the length penalty. larger the alpha, stronger + the preference for longer translations. + use_tpu: A bool, whether to do beam decode on TPU. + + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } + """ + return super(transformer.Transformer, + self)._beam_decode_slow(features, decode_length, beam_size, + top_beams, alpha, use_tpu) + + def _greedy_infer(self, features, decode_length, use_tpu=False): + """Fast version of greedy decoding. + + Args: + features: an map of string to `Tensor` + decode_length: an integer. How many additional timesteps to decode. + use_tpu: A bool. Whether to build the inference graph for TPU. 
+ + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } + + Raises: + NotImplementedError: If there are multiple data shards. + """ + return super(transformer.Transformer, + self)._greedy_infer(features, decode_length) + + +def compute_last_embedding(input_embeddings, input_lengths, hparams): + """Computes average of last K embedding. + + Args: + input_embeddings: [bs, max_seq_len, emb_dim] + input_lengths: [bs, 1] + hparams: model hparams + + Returns: + last_k_embedding: [bs, emb_dim] + """ + max_seq_len = tf.shape(input_embeddings)[1] + # [bs, 1, max_seq_len] + mask = tf.sequence_mask(input_lengths, max_seq_len, dtype=tf.float32) + del_mask = tf.sequence_mask( + input_lengths - hparams.last_k, max_seq_len, dtype=tf.float32) + final_mask = mask - del_mask + # [bs, 1, emb_dim] + sum_embedding = tf.matmul(final_mask, input_embeddings) + # [bs, 1, emb_dim] + last_k_embedding = sum_embedding / tf.to_float( + tf.expand_dims( + tf.ones([tf.shape(input_embeddings)[0], 1]) * hparams.last_k, 2)) + # [bs, dim] + return tf.squeeze(last_k_embedding, 1) + + +def compute_max_pool_embedding(input_embeddings, input_lengths): + """Computes max pool embedding. + + Args: + input_embeddings: [bs, max_seq_len, emb_dim] + input_lengths: [bs, 1] + + Returns: + max_pool_embedding: [bs, emb_dim] + """ + max_seq_len = tf.shape(input_embeddings)[1] + # [bs, max_seq_len] + mask = 1.0 - tf.sequence_mask(input_lengths, max_seq_len, dtype=tf.float32) + mask = tf.squeeze(mask * (-1e-6), 1) + mask = tf.expand_dims(mask, 2) + # [bs, emb_dim] + max_pool_embedding = tf.reduce_max(input_embeddings + mask, 1) + # [bs, dim] + return max_pool_embedding + + +def compute_average_embedding(input_embeddings, input_lengths): + """Computes bag-of-words embedding. + + Args: + input_embeddings: [bs, max_seq_len, emb_dim] + input_lengths: [bs, 1] + + Returns: + bow_embedding: [bs, emb_dim] + """ + max_seq_len = tf.shape(input_embeddings)[1] + # [bs, 1, max_seq_len] + mask = tf.sequence_mask(input_lengths, max_seq_len, dtype=tf.float32) + # [bs, 1, emb_dim] + sum_embedding = tf.matmul(mask, input_embeddings) + # [bs, 1, emb_dim] + avg_embedding = sum_embedding / tf.to_float(tf.expand_dims(input_lengths, 2)) + # [bs, dim] + return tf.squeeze(avg_embedding, 1) + + +def compute_summary_embedding(input_embeddings, input_lengths, hparams): + """Convert list of embedding to single embedding. 
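+
+  Dispatches to average, max-pool, or last-k pooling according to
+  hparams.pool_technique.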
+ + Args: + input_embeddings: [bs, max_seq_len, emb_dim] + input_lengths: [bs, 1] + hparams: model hparams + + Returns: + embedding: [bs, emb_dim] + """ + if hparams.pool_technique == "average": + return compute_average_embedding(input_embeddings, input_lengths) + elif hparams.pool_technique == "max_pool": + return compute_max_pool_embedding(input_embeddings, input_lengths) + elif hparams.pool_technique == "last": + return compute_last_embedding(input_embeddings, input_lengths, hparams) + + +@registry.register_hparams +def neural_assistant_base(): + """HParams for a base neural_assistant model.""" + hparams = transformer.transformer_tpu() + hparams.add_hparam("pos_weight", 1.0) # weight for positive triples + hparams.add_hparam("similarity_fuction", + "bilinear") # dot_product or bilinear + hparams.add_hparam("pool_technique", "average") # avg or max pool or last + hparams.add_hparam("last_k", 1) # number of last indices for averaging + hparams.add_hparam("max_triple_length", 30) # max length of every triple + hparams.add_hparam("train_triple_num", + 5000) # max number of triples during training + hparams.add_hparam("attend_kb", True) # if False, it's a transformer model + hparams.add_hparam("kb_loss_weight", 0.0) # weight for distant supervision + hparams.add_hparam("test_triple_num", + 28483) # max triples of KB + hparams.add_hparam("margin", 0.0) # KB training max-margin loss + hparams.add_hparam( + "num_negative_samples", + 1) # Sampling number of different adversarial training examples + hparams.add_hparam("kb_train_weight", 0.0) + # KB_training loss weight which combines Language model and KB selection loss + return hparams + + +@registry.register_hparams +def neural_assistant_tiny(): + """HParams for tiny neural_assistant model.""" + hparams = transformer.transformer_tiny_tpu() + hparams.add_hparam("pos_weight", 1.0) # weight for positive triples + hparams.add_hparam("similarity_fuction", + "bilinear") # dot_product or bilinear + hparams.add_hparam("pool_technique", "average") # avg or max pool or last + hparams.add_hparam("last_k", 1) # number of last indices for averaging + hparams.add_hparam("max_triple_length", 30) # max length of every triple + hparams.add_hparam("train_triple_num", + 5000) # max number of triples during training + hparams.add_hparam("attend_kb", True) # if False, it's a transformer model + hparams.add_hparam("kb_loss_weight", 0.0) # weight for distant supervision + hparams.add_hparam("test_triple_num", + 28483) # max triples of KB + hparams.add_hparam("margin", 1.0) # KB training max-margin loss + hparams.add_hparam( + "num_negative_samples", + 1) # Sampling number of different adversarial training examples + hparams.add_hparam("kb_train_weight", 0.0) + # KB_training loss weight which combines Language model and KB selection loss + return hparams + + +@registry.register_hparams +def neural_assistant_tiny_ds(): + """HParams for tiny neural_assistant model with distant supervision loss.""" + hparams = neural_assistant_tiny() + hparams.kb_loss_weight = 0.2 + return hparams diff --git a/tensor2tensor/models/neural_gpu.py b/tensor2tensor/models/neural_gpu.py index 39aa735e1..953855172 100644 --- a/tensor2tensor/models/neural_gpu.py +++ b/tensor2tensor/models/neural_gpu.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
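(Aside, not part of the patch: the KB pretraining objective added in neural_assistant.py above is a TransE-style margin loss, where a fact's subject embedding plus its predicate embedding should land close to its object embedding, and row-shuffled subjects/objects serve as negative samples. The sketch below, in plain NumPy with hypothetical names, only illustrates that loss; the model itself computes the same quantity on masked, length-normalized sums of fact token embeddings.)

```python
# Illustrative NumPy sketch of the TransE-style margin loss above.
# All names here are hypothetical; this is not part of the library.
import numpy as np


def transe_margin_loss(subject_vect, predicate_vect, object_vect,
                       margin=1.0, num_negative_samples=1, seed=0):
  """subject_vect, predicate_vect, object_vect: [num_triples, emb_dim]."""
  rng = np.random.default_rng(seed)
  # Positive triples: subject + predicate should be close to object.
  positive_loss = np.mean((subject_vect + predicate_vect - object_vect) ** 2)
  # Negative (corrupted) triples: shuffle subjects and objects across rows.
  negative_loss = 0.0
  for _ in range(num_negative_samples):
    shuffled_subject = subject_vect[rng.permutation(len(subject_vect))]
    shuffled_object = object_vect[rng.permutation(len(object_vect))]
    negative_loss += np.mean(
        (shuffled_subject + predicate_vect - object_vect) ** 2)
    negative_loss += np.mean(
        (subject_vect + predicate_vect - shuffled_object) ** 2)
  negative_loss /= 2 * num_negative_samples
  # Hinge at `margin`, clipped to [0, 100] like the tf.clip_by_value call.
  return float(np.clip(margin + positive_loss - negative_loss, 0.0, 100.0))


# Tiny usage example with random embeddings.
subject, predicate, obj = (np.random.randn(8, 16) for _ in range(3))
print(transe_margin_loss(subject, predicate, obj, margin=1.0))
```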
@@ -17,26 +18,23 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +from six.moves import range # pylint: disable=redefined-builtin -# Dependency imports - -from six.moves import xrange # pylint: disable=redefined-builtin - -from tensor2tensor.models import common_hparams -from tensor2tensor.models import common_layers +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers from tensor2tensor.utils import registry from tensor2tensor.utils import t2t_model -import tensorflow as tf +import tensorflow.compat.v1 as tf -def neural_gpu(inputs, hparams, train, name=None): +def neural_gpu_body(inputs, hparams, name=None): """The core Neural GPU.""" with tf.variable_scope(name, "neural_gpu"): def step(state, inp): # pylint: disable=missing-docstring - x = tf.nn.dropout(state, 1.0 - hparams.dropout * tf.to_float(train)) - for layer in xrange(hparams.num_hidden_layers): + x = tf.nn.dropout(state, 1.0 - hparams.dropout) + for layer in range(hparams.num_hidden_layers): x = common_layers.conv_gru( x, (hparams.kernel_height, hparams.kernel_width), hparams.hidden_size, @@ -57,11 +55,11 @@ def step(state, inp): # pylint: disable=missing-docstring @registry.register_model class NeuralGPU(t2t_model.T2TModel): - def model_fn_body(self, features, train): - return neural_gpu(features["inputs"], self._hparams, train) + def body(self, features): + return neural_gpu_body(features["inputs"], self._hparams) -def diagonal_neural_gpu(inputs, hparams, train, name=None): +def diagonal_neural_gpu(inputs, hparams, name=None): """Improved Neural GPU as in https://arxiv.org/abs/1702.08727.""" with tf.variable_scope(name, "diagonal_neural_gpu"): @@ -69,11 +67,10 @@ def step(state_tup, inp): """Single step of the improved Neural GPU.""" state, _ = state_tup x = state - for layer in xrange(hparams.num_hidden_layers): + for layer in range(hparams.num_hidden_layers): x, new_loss = common_layers.diagonal_conv_gru( x, (hparams.kernel_height, hparams.kernel_width), hparams.hidden_size, - train, dropout=hparams.dropout, name="dcgru_%d" % layer) # Padding input is zeroed-out in the modality, we check this by summing. @@ -93,14 +90,15 @@ def step(state_tup, inp): @registry.register_model class DiagonalNeuralGPU(t2t_model.T2TModel): - def model_fn_body(self, features, train): - return diagonal_neural_gpu(features["inputs"], self._hparams, train) + def body(self, features): + return diagonal_neural_gpu(features["inputs"], self._hparams) -@registry.register_hparams("neural_gpu1") -def neural_gpu_params1(): +@registry.register_hparams +def neural_gpu(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() + hparams.daisy_chain_variables = False hparams.batch_size = 1024 hparams.num_hidden_layers = 1 hparams.hidden_size = 256 @@ -110,7 +108,7 @@ def neural_gpu_params1(): hparams.num_hidden_layers = 1 hparams.kernel_height = 3 hparams.kernel_width = 1 - hparams.learning_rate_decay_scheme = "exp50k" + hparams.learning_rate_decay_scheme = "exp" hparams.learning_rate = 0.02 hparams.learning_rate_warmup_steps = 3000 hparams.initializer_gain = 1.0 diff --git a/tensor2tensor/models/neural_gpu_test.py b/tensor2tensor/models/neural_gpu_test.py index 0d4937a5d..57a4a1f36 100644 --- a/tensor2tensor/models/neural_gpu_test.py +++ b/tensor2tensor/models/neural_gpu_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,16 +18,14 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function - -# Dependency imports - import numpy as np from tensor2tensor.data_generators import problem_hparams -from tensor2tensor.models import common_hparams +from tensor2tensor.layers import common_hparams from tensor2tensor.models import neural_gpu -import tensorflow as tf +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator class NeuralGPUTest(tf.test.TestCase): @@ -38,20 +37,21 @@ def testNeuralGPU(self): target_length = input_length input_vocab_size = 9 target_vocab_size = 11 - p_hparams = problem_hparams.test_problem_hparams(hparams, input_vocab_size, - target_vocab_size) - inputs = -1 + np.random.random_integers( + p_hparams = problem_hparams.test_problem_hparams(input_vocab_size, + target_vocab_size, + hparams) + inputs = np.random.randint( input_vocab_size, size=(batch_size, input_length, 1, 1)) - targets = -1 + np.random.random_integers( + targets = np.random.randint( target_vocab_size, size=(batch_size, target_length, 1, 1)) with self.test_session() as session: features = { "inputs": tf.constant(inputs, dtype=tf.int32), "targets": tf.constant(targets, dtype=tf.int32) } - model = neural_gpu.NeuralGPU(hparams, p_hparams) - shadred_logits, _, _ = model.model_fn(features, True) - logits = tf.concat(shadred_logits, 0) + model = neural_gpu.NeuralGPU(hparams, tf_estimator.ModeKeys.TRAIN, + p_hparams) + logits, _ = model(features) session.run(tf.global_variables_initializer()) res = session.run(logits) self.assertEqual(res.shape, (batch_size, target_length, 1, 1, diff --git a/tensor2tensor/models/research/__init__.py b/tensor2tensor/models/research/__init__.py new file mode 100644 index 000000000..ff174dd63 --- /dev/null +++ b/tensor2tensor/models/research/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tensor2tensor/models/research/adafactor_experiments.py b/tensor2tensor/models/research/adafactor_experiments.py new file mode 100644 index 000000000..60daee121 --- /dev/null +++ b/tensor2tensor/models/research/adafactor_experiments.py @@ -0,0 +1,229 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Experiments with Adafactor. 
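+
+Hyperparameter sets comparing Adam against Adafactor variants: factored
+second moments, update clipping, alternative decay exponents, and learning
+rates scaled relative to parameter scale.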
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.models import transformer +from tensor2tensor.utils import registry + + +def mimic_adam_with_adafactor(hparams): + """Switch from Adam to Adafactor, approximating the behavior of Adam. + + Some minor things may be different, like epsilon and beta1 correction. + + Args: + hparams: model hyperparameters where "adam" in hparams.optimizer + """ + assert "adam" in hparams.optimizer + hparams.optimizer = "adafactor" + hparams.optimizer_adafactor_beta1 = hparams.optimizer_adam_beta1 + hparams.optimizer_adafactor_beta2 = hparams.optimizer_adam_beta2 + hparams.optimizer_adafactor_multiply_by_parameter_scale = False + hparams.optimizer_adafactor_factored = False + hparams.optimizer_adafactor_clipping_threshold = None + hparams.optimizer_adafactor_decay_type = "adam" + + +@registry.register_hparams +def afx_adam(): + """Old version - Adam.""" + hparams = transformer.transformer_base_v2() + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.999 + hparams.symbol_modality_num_shards = 1 + hparams.batch_size = 2048 + hparams.optimizer = "adam" + hparams.learning_rate_schedule = ( + "constant*rsqrt_decay*linear_warmup*rsqrt_hidden_size") + hparams.learning_rate_constant = 2.0 + return hparams + + +@registry.register_hparams +def afx_mimic_adam(): + """Emulating Adam - should be very similar to afx_adam.""" + hparams = afx_adam() + mimic_adam_with_adafactor(hparams) + return hparams + + +@registry.register_hparams +def afx_base(): + """Baseline - no momentum, beta=0.999.""" + hparams = afx_mimic_adam() + hparams.optimizer_adafactor_beta1 = 0.0 + return hparams + + +@registry.register_hparams +def afx_factored(): + hparams = afx_base() + hparams.optimizer_adafactor_factored = True + return hparams + + +@registry.register_hparams +def afx_fast(): + hparams = afx_base() + hparams.optimizer_adafactor_beta2 = 0.9 + return hparams + + +@registry.register_hparams +def afx_clip(): + hparams = afx_base() + hparams.optimizer_adafactor_clipping_threshold = 1.0 + return hparams + + +@registry.register_hparams +def afx_clip2(): + hparams = afx_base() + hparams.optimizer_adafactor_clipping_threshold = 2.0 + return hparams + + +@registry.register_hparams +def afx_clip_factored(): + hparams = afx_clip() + hparams.optimizer_adafactor_factored = True + return hparams + + +@registry.register_hparams +def afx_pow05(): + hparams = afx_base() + hparams.optimizer_adafactor_decay_type = "pow" + hparams.optimizer_adafactor_memory_exponent = 0.5 + return hparams + + +@registry.register_hparams +def afx_pow08(): + hparams = afx_pow05() + hparams.optimizer_adafactor_memory_exponent = 0.8 + return hparams + + +@registry.register_hparams +def afx_pow10(): + hparams = afx_pow05() + hparams.optimizer_adafactor_memory_exponent = 1.0 + return hparams + + +@registry.register_hparams +def afx_pow08_clip(): + hparams = afx_pow08() + hparams.optimizer_adafactor_clipping_threshold = 1.0 + return hparams + + +@registry.register_hparams +def afx_relative(): + hparams = afx_base() + hparams.optimizer_adafactor_multiply_by_parameter_scale = True + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.learning_rate_warmup_steps = 10000 + return hparams + + +@registry.register_hparams +def afx_unscale(): + hparams = afx_base() + hparams.shared_embedding_and_softmax_weights = False + hparams.multiply_embedding_mode = "none" + return hparams + + +@registry.register_hparams +def 
afx_unscale_relative(): + hparams = afx_unscale() + hparams.optimizer_adafactor_multiply_by_parameter_scale = True + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.learning_rate_warmup_steps = 10000 + return hparams + + +@registry.register_hparams +def afx_adafactor(): + """Adafactor with recommended learning rate schedule.""" + hparams = afx_adam() + hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.learning_rate_warmup_steps = 10000 + return hparams + + +@registry.register_hparams +def afx_small(): + """Small transformer model with small batch size for fast step times.""" + hparams = transformer.transformer_tpu() + hparams.filter_size = 1024 + hparams.num_heads = 4 + hparams.num_hidden_layers = 3 + hparams.batch_size = 512 + return hparams + + +@registry.register_hparams +def afx_small_p16(): + """Small transformer model with small batch size for fast step times.""" + hparams = afx_small() + hparams.add_hparam("simulated_quantize_bits", 16) + return hparams + + +@registry.register_hparams +def afx_small_p12(): + hparams = afx_small() + hparams.add_hparam("simulated_parameter_quantize_bits", 12) + return hparams + + +@registry.register_hparams +def afx_small_p11(): + hparams = afx_small() + hparams.add_hparam("simulated_parameter_quantize_bits", 11) + return hparams + + +@registry.register_hparams +def afx_small_p10(): + hparams = afx_small() + hparams.add_hparam("simulated_parameter_quantize_bits", 10) + return hparams + + +@registry.register_hparams +def afx_small_p8(): + hparams = afx_small() + hparams.add_hparam("simulated_parameter_quantize_bits", 8) + return hparams + + +@registry.register_hparams +def afx_small_bfloat16(): + """Small transformer model with small batch size for fast step times.""" + hparams = afx_small() + hparams.weight_dtype = "bfloat16" + hparams.activation_dtype = "bfloat16" + return hparams diff --git a/tensor2tensor/models/research/aligned.py b/tensor2tensor/models/research/aligned.py new file mode 100644 index 000000000..41dda38ac --- /dev/null +++ b/tensor2tensor/models/research/aligned.py @@ -0,0 +1,546 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Single stack of transformations with no masking. + +Produces output aligned with inputs. + +Configurable using hyperparameters to use some combination of convolutions, +attention, mixtures of experts, etc. + +A good problem for this model is languagemodel_wiki_scramble1k50 . 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import expert_utils +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +ModeKeys = tf_estimator.ModeKeys # pylint: disable=invalid-name + + +def _should_preprocess(layer_type): + return layer_type not in ["timing", "pos_emb", "att_memory_efficient"] + + +def _should_postprocess(layer_type): + return layer_type not in ["timing", "pos_emb"] + + +@registry.register_model +class Aligned(t2t_model.T2TModel): + """Attention net. See file docstring.""" + + @staticmethod + def use_body_sharded(): + return True + + def body_sharded(self, sharded_features): + # Remove dropout if not training + hparams = self._hparams + dp = self._data_parallelism + x = dp(tf.squeeze, sharded_features["inputs"], 2) + + def preprocess(x): + return dp(common_layers.layer_preprocess, x, hparams) + + def postprocess(x, y): + return dp(common_layers.layer_postprocess, x, y, hparams) + + x = dp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout) + extra_loss = 0.0 + ffn_hidden_sizes = [int(s) for s in hparams.ffn_hidden_sizes.split(",")] + if hparams.mask_right: + + def _bias(x): + return common_attention.attention_bias_lower_triangle( + common_layers.shape_list(x)[1]) + + bias = dp(_bias, x) + else: + bias = tf.zeros([1, 1, 1, 1]) + + batch_coordinate = dp(get_batch_coordinate, x) + + layers = hparams.layers.strip(",").split(",") + for layer_num, layer_type in enumerate(layers): + with tf.variable_scope("%s_%d" % (layer_type, layer_num)): + if _should_preprocess(layer_type): + x = preprocess(x) + if layer_type == "timing": + y = dp(common_attention.add_timing_signal_nd, x) + elif layer_type == "pos_emb": + y = dp( + common_attention.add_positional_embedding_nd, + x, + hparams.max_length, + name="pos_emb") + elif layer_type == "att": + y = dp( + common_attention.multihead_attention, + x, + None, + bias, # bias + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout) + elif layer_type == "att_grouped": + multiplicative_overhead = ( + hparams.multiplicative_overhead if hparams.mode == ModeKeys.TRAIN + else hparams.multiplicative_overhead_eval) + y, loss = dp( + common_attention.grouped_attention_multihead, + x, + x, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + num_groups=hparams.attention_num_groups, + memory_target_density=hparams.memory_target_density, + multiplicative_overhead=multiplicative_overhead, + make_image_summary=hparams.attention_image_summary, + mask_right=hparams.mask_right, + ) + extra_loss += tf.add_n(loss) / dp.n + elif layer_type == "att_memory_efficient": + assert hparams.layer_preprocess_sequence == "n" + y = dp(common_attention.multihead_self_attention_memory_efficient, x, + bias, hparams.num_heads) + elif layer_type == "att_local": + y = dp( + common_attention.multihead_attention, + x, + None, + None, # bias + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + 
hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=("local_mask_right" + if hparams.mask_right else "local_unmasked"), + block_length=hparams.local_attention_window, + block_width=hparams.local_attention_window) + elif layer_type == "att_pseudolocal": + # This is an inefficient implementation of local attention, for the + # purpose of testing model quality. + def _pseudolocal_bias(x): + return common_attention.attention_bias_local( + common_layers.shape_list(x)[1], hparams.local_attention_window, + 0 if hparams.mask_right else hparams.local_attention_window) + + pseudolocal_bias = dp(_pseudolocal_bias, x) + y = dp(common_attention.multihead_attention, x, None, + pseudolocal_bias, hparams.attention_key_channels or + hparams.hidden_size, hparams.attention_value_channels or + hparams.hidden_size, hparams.hidden_size, hparams.num_heads, + hparams.attention_dropout) + elif layer_type == "att_local_expert": + y, loss = dp( + common_attention.local_expert_attention, + x, + k=hparams.attention_moe_k, + loss_coef=hparams.attention_load_balance, + attention_num_experts=hparams.attention_num_experts, + train=hparams.mode == ModeKeys.TRAIN, + batch_coordinate=batch_coordinate, + mask_right=hparams.mask_right, + split_batch=bool(hparams.attention_split_batch), + attention_kq_size=hparams.attention_kq_size, + attention_v_size=hparams.attention_v_size) + # TODO(avaswani, epot, noam): Do we need to divide by num shards ? + extra_loss += tf.add_n(loss) / dp.n + elif layer_type == "att_lsh": + if hparams.lsh_truncated: + attention_fn = common_attention.multihead_attention_sparse_truncated + else: + attention_fn = common_attention.multihead_attention_sparse_dot_prod + y, loss = dp( + attention_fn, + x, + None, + None, # Bias is computed inside + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + + # Additional parameters + bi=[ + common_attention.BatchInfo( + coordinates=batch_coordinate[i], + order=None, # No future mask + ) for i in range(dp.n) + ], + use_map_fn=False, + experts_params=dict(nb_hyperplanes=4,)) + extra_loss += tf.add_n(loss) / dp.n + elif layer_type == "ffn": + y = dp( + expert_utils.ffn_expert_fn(hparams.hidden_size, ffn_hidden_sizes, + hparams.hidden_size), + dp(expert_utils.flatten_all_but_last, x)) + y = dp(common_layers.reshape_like, y, x) + elif layer_type == "conv": + y = dp( + common_layers.conv1d, + x, + hparams.hidden_size, + hparams.kernel_height, + activation=tf.nn.relu, + padding="SAME", + ) + else: + assert False, "unknown sublayer %s" % layer_type + if _should_postprocess(layer_type): + x = postprocess(x, y) + else: + x = y + x = preprocess(x) + + decoder_output = dp(tf.expand_dims, x, 2) + return decoder_output, extra_loss + + def infer(self, + features=None, + decode_length=1, + beam_size=1, + top_beams=1, + alpha=0.0, + use_tpu=False): + """Predict.""" + features["targets"] = tf.identity(features["inputs"]) + logits, _ = self(features) + log_probs = common_layers.log_prob_from_logits(logits) + predictions, scores = common_layers.argmax_with_score(log_probs) + return { + "outputs": predictions, + "scores": scores, + } + + +def get_batch_coordinate(x): + """Return a flat int32 tensor of shape [1, batch_size*length, 1].""" + # Compute the batch coordinate before flattening all batches + batch_coordinate = tf.expand_dims( + common_attention.coordinate_tensor( + common_layers.shape_list(x)[:-1], axis=0), + 
axis=-1) + return batch_coordinate + + +@registry.register_hparams +def aligned_base(): + """Set of hyperparameters. + + languagemodel_wiki_scramble1k50, 1gpu, 7k steps (10min): log(ppl)_eval = 2.60 + 12.0 steps/sec on P100 + 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00 + + Returns: + a hparams object + """ + hparams = common_hparams.basic_params1() + hparams.force_full_predict = True + hparams.hidden_size = 512 + hparams.batch_size = 5000 + hparams.max_length = 0 + hparams.min_length_bucket = 1024 + hparams.dropout = 0.0 + hparams.layer_prepostprocess_dropout = 0.0 + hparams.label_smoothing = 0.0 + hparams.clip_grad_norm = 0. # i.e. no gradient clipping + hparams.optimizer_adam_epsilon = 1e-9 + hparams.learning_rate_decay_scheme = "noam" + hparams.learning_rate = 0.1 + hparams.learning_rate_warmup_steps = 2000 + hparams.initializer_gain = 1.0 + hparams.initializer = "uniform_unit_scaling" + hparams.weight_decay = 0.0 + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.98 + hparams.shared_embedding_and_softmax_weights = True + hparams.add_hparam("ffn_hidden_sizes", "2048") # Add new ones like this. + hparams.moe_num_experts = 32 + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + hparams.add_hparam("layers", "timing," + "conv,att,ffn," * 2) + + # attention-related flags + hparams.add_hparam("num_heads", 8) + hparams.add_hparam("attention_key_channels", 0) + hparams.add_hparam("attention_value_channels", 0) + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. + hparams.add_hparam("attention_dropout", 0.0) + hparams.add_hparam("pos", "timing") # timing, none + # moe params. local attention moe. + hparams.add_hparam("attention_local", False) + hparams.add_hparam("attention_moe_k", 2) + hparams.add_hparam("attention_num_experts", 16) + hparams.add_hparam("attention_split_batch", False) + # Key, query and value dimensions for the attention + hparams.add_hparam("attention_kq_size", 128) + hparams.add_hparam("attention_v_size", 256) + # Loss coef for load balancing + hparams.add_hparam("attention_load_balance", 2e-2) + hparams.add_hparam("diet_experts", False) + hparams.add_hparam("memory_efficient_ffn", False) + hparams.add_hparam("local_attention_window", 128) + hparams.add_hparam("attention_num_groups", 8) + hparams.add_hparam("memory_target_density", 2.0) + hparams.add_hparam("multiplicative_overhead", 1.25) + hparams.add_hparam("multiplicative_overhead_eval", 2.0) + hparams.add_hparam("attention_image_summary", True) + # LSH params + hparams.add_hparam("lsh_truncated", True) + # For testing right-masking. + # This is not implemented in all layers. + hparams.add_hparam("mask_right", False) + return hparams + + +@registry.register_hparams +def aligned_memory_efficient(): + """Use multihead_self_attention_memory_efficient. + + languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.59 + 8.7 steps/sec on P100 + 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.02 + + Returns: + a hparams object + """ + hparams = aligned_base() + hparams.layers = "timing," + "conv,att_memory_efficient,ffn," * 2 + return hparams + + +@registry.register_hparams +def aligned_local_expert(): + """Use local_expert_attention. 
+ + languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.72 + 10.2 steps/sec on P100 + 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.27 + + Returns: + a hparams object + """ + hparams = aligned_base() + hparams.layers = "timing," + "conv,att_local_expert,ffn," * 2 + return hparams + + +@registry.register_hparams +def aligned_grouped(): + """Use local_expert_attention. + + languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.63 + 10.2 steps/sec on P100 + 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.04 + + Returns: + a hparams object + """ + hparams = aligned_base() + hparams.layers = "timing," + "conv,att_grouped,ffn," * 2 + return hparams + + +@registry.register_hparams +def aligned_local(): + """Use local attention code. + + languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57 + 12.8 steps/sec on P100 + 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.08 + + Returns: + a hparams object + """ + hparams = aligned_base() + hparams.layers = "timing," + "conv,att_local,ffn," * 2 + return hparams + + +@registry.register_hparams +def aligned_local_1k(): + """Use local attention code, attend to full sequence. + + languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57 + 7.5 steps/sec on P100 + 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00 + + Returns: + a hparams object + """ + hparams = aligned_local() + hparams.local_attention_window = 1024 + return hparams + + +@registry.register_hparams +def aligned_pseudolocal(): + """Use a bias to simulate local attention. attention radius 128. + + languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57 + 12.0 steps/sec on P100 + 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.06 + + Returns: + a hparams object + """ + hparams = aligned_base() + hparams.layers = "timing," + "conv,att_pseudolocal,ffn," * 2 + return hparams + + +@registry.register_hparams +def aligned_pseudolocal_256(): + """Use a bias to simulate local attention. attentio radius 256. + + languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.56 + 12.0 steps/sec on P100 + 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.05 + + Returns: + a hparams object + """ + hparams = aligned_pseudolocal() + hparams.local_attention_window = 256 + return hparams + + +@registry.register_hparams +def aligned_no_timing(): + """No timing signal. + + languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.75 + 12.3 steps/sec on P100 + 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.39 + + Returns: + a hparams object + """ + hparams = aligned_base() + hparams.layers = "conv,att,ffn," * 2 + return hparams + + +@registry.register_hparams +def aligned_no_att(): + """No attention at all. + + languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.89 + 20.8 steps/sec on P100 + 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.70 + + Returns: + a hparams object + """ + hparams = aligned_base() + hparams.layers = "conv,ffn," * 2 + return hparams + + +@registry.register_hparams +def aligned_pos_emb(): + """positional embedding insead of timing signal. + + languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.67 + 12.1 steps/sec on P100 + 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00 + + Returns: + a hparams object + """ + hparams = aligned_base() + hparams.layers = "pos_emb," + "conv,att,ffn," * 2 + return hparams + + +@registry.register_hparams +def aligned_moe(): + """mixture of experts instead of ffn. 
+ + languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.62 + 6.7 steps/sec on P100 + 8gpu (8x batch), 7k steps: log(ppl)_eval = 1.94 + + Returns: + a hparams object + """ + hparams = aligned_base() + hparams.layers = "timing," + "conv,att,moe," * 2 + return hparams + + +@registry.register_hparams +def aligned_lsh(): + """Use multihead_attention_sparse_dot_prod. + + Returns: + a hparams object + """ + hparams = aligned_base() + hparams.layers = "timing," + "conv,att_lsh,ffn," * 2 + return hparams + + +@registry.register_hparams +def aligned_8k(): + """version for languagemodel_wiki_scramble8k50. + + languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.93 + 1.5 steps/sec on P100 + + Returns: + a hparams object + """ + hparams = aligned_base() + hparams.batch_size = 8192 + return hparams + + +@registry.register_hparams +def aligned_8k_grouped(): + """version for languagemodel_wiki_scramble8k50. + + languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.92 + 3.3 steps/sec on P100 + 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.15 + + Returns: + a hparams object + """ + hparams = aligned_grouped() + hparams.batch_size = 8192 + # hparams.attention_image_summary = False + hparams.num_groups = 16 + hparams.multiplicative_overhead = 1.1 + return hparams diff --git a/tensor2tensor/models/research/attention_lm.py b/tensor2tensor/models/research/attention_lm.py new file mode 100644 index 000000000..e6a456ef6 --- /dev/null +++ b/tensor2tensor/models/research/attention_lm.py @@ -0,0 +1,215 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Self-attention based language model. + +DEPRECATED. Use Transformer which supports running the decoder only. + +Like transformer.py, but no encoder + +decoder: [Self-Attention, Feed-forward] x n + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf + +framework = contrib.framework(msg="warn") + + +@framework.deprecated( + "2018-09-15", "Use Transformer, which supports decoder-only mode when " + "Transformer.has_input=False.") +@registry.register_model +class AttentionLM(t2t_model.T2TModel): + """Attention net. 
See file docstring.""" + + def body(self, features): + # Remove dropout if not training + hparams = self._hparams + targets = features["targets"] + targets = tf.squeeze(targets, 2) + + (decoder_input, decoder_self_attention_bias) = attention_lm_prepare_decoder( + targets, hparams) + + decoder_input = tf.nn.dropout(decoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + decoder_output = attention_lm_decoder(decoder_input, + decoder_self_attention_bias, hparams) + decoder_output = tf.expand_dims(decoder_output, 2) + + return decoder_output + + +def attention_lm_prepare_decoder(targets, hparams): + """Prepare one shard of the model for the decoder. + + Args: + targets: a Tensor. + hparams: run hyperparameters + + Returns: + decoder_input: a Tensor, bottom of decoder stack + decoder_self_attention_bias: a Tensor, containing large negative values + to implement masked attention and possibly biases for diagonal alignments + """ + if hparams.prepend_mode == "prepend_inputs_full_attention": + decoder_self_attention_bias = ( + common_attention.attention_bias_prepend_inputs_full_attention( + common_attention.embedding_to_padding(targets))) + else: + decoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle( + common_layers.shape_list(targets)[1])) + decoder_input = common_layers.shift_right_3d(targets) + if hparams.pos == "timing": + decoder_input = common_attention.add_timing_signal_1d(decoder_input) + return (decoder_input, decoder_self_attention_bias) + + +def attention_lm_decoder(decoder_input, + decoder_self_attention_bias, + hparams, + name="decoder"): + """A stack of attention_lm layers. + + Args: + decoder_input: a Tensor + decoder_self_attention_bias: bias Tensor for self-attention + (see common_attention.attention_bias()) + hparams: hyperparameters for model + name: a string + + Returns: + y: a Tensors + """ + x = decoder_input + with tf.variable_scope(name): + for layer in range(hparams.num_hidden_layers): + with tf.variable_scope("layer_%d" % layer): + with tf.variable_scope("self_attention"): + y = common_attention.multihead_attention( + common_layers.layer_preprocess( + x, hparams), None, decoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) + x = common_layers.layer_postprocess(x, y, hparams) + with tf.variable_scope("ffn"): + y = common_layers.conv_hidden_relu( + common_layers.layer_preprocess(x, hparams), + hparams.filter_size, + hparams.hidden_size, + dropout=hparams.relu_dropout) + x = common_layers.layer_postprocess(x, y, hparams) + return common_layers.layer_preprocess(x, hparams) + + +@registry.register_hparams +def attention_lm_base(): + """Set of hyperparameters.""" + hparams = common_hparams.basic_params1() + hparams.hidden_size = 1024 + hparams.batch_size = 8192 + hparams.max_length = 256 + hparams.dropout = 0.0 + hparams.clip_grad_norm = 0. # i.e. 
no gradient clipping + hparams.optimizer_adam_epsilon = 1e-9 + hparams.learning_rate_decay_scheme = "noam" + hparams.learning_rate = 0.1 + hparams.learning_rate_warmup_steps = 2000 + hparams.initializer_gain = 1.0 + hparams.num_hidden_layers = 6 + hparams.initializer = "uniform_unit_scaling" + hparams.weight_decay = 0.0 + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.98 + hparams.label_smoothing = 0.0 + hparams.shared_embedding_and_softmax_weights = False + + hparams.add_hparam("filter_size", 4096) # Add new ones like this. + # attention-related flags + hparams.add_hparam("num_heads", 8) + hparams.add_hparam("attention_key_channels", 0) + hparams.add_hparam("attention_value_channels", 0) + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. + hparams.add_hparam("attention_dropout", 0.0) + hparams.add_hparam("relu_dropout", 0.0) + hparams.add_hparam("pos", "timing") # timing, none + hparams.add_hparam("encoder_full_attention", False) + return hparams + + +@registry.register_hparams +def attention_lm_small(): + """Cheap model. + + on lm1b_32k: + 45M params + 2 steps/sec on [GeForce GTX TITAN X] + + Returns: + an hparams object. + """ + hparams = attention_lm_base() + hparams.num_hidden_layers = 4 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.layer_prepostprocess_dropout = 0.5 + return hparams + + +@registry.register_hparams +def attention_lm_translation(): + """Version to use for seq2seq.""" + hparams = attention_lm_base() + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + hparams.learning_rate = 0.4 + hparams.prepend_mode = "prepend_inputs_masked_attention" + hparams.max_length = 512 + hparams.label_smoothing = 0.1 + hparams.shared_embedding_and_softmax_weights = True + return hparams + + +@registry.register_hparams +def attention_lm_translation_l12(): + """Version to use for seq2seq.""" + hparams = attention_lm_translation() + hparams.batch_size = 4096 + hparams.num_hidden_layers = 12 + return hparams + + +@registry.register_hparams +def attention_lm_translation_full_attention(): + """Version to use for seq2seq.""" + hparams = attention_lm_translation() + hparams.prepend_mode = "prepend_inputs_full_attention" + return hparams diff --git a/tensor2tensor/models/research/attention_lm_moe.py b/tensor2tensor/models/research/attention_lm_moe.py new file mode 100644 index 000000000..c385dacf4 --- /dev/null +++ b/tensor2tensor/models/research/attention_lm_moe.py @@ -0,0 +1,795 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Self-attention based language model. 
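+
+Adds mixture-of-experts feed-forward layers and several local/sparse
+attention variants on top of the decoder-only self-attention stack.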
+ +Like transformer.py, but no encoder + +decoder: [Self-Attention, Feed-forward] x n + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import expert_utils +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +ModeKeys = tf_estimator.ModeKeys # pylint: disable=invalid-name + + +class AttentionType(object): + """Enum of the attention layers types.""" + MULTIHEAD = "multihead" + LOCAL_EXPERTS = "local_experts" + GLOBAL_MOE = "global_experts" + MEMORY_EFFICIENT = "memory_efficient" + SPARSE_MULTIHEAD = "sparse_multihead" + SPARSE_MULTIHEAD_TRUNCATED = "sparse_multihead_truncated" + MULTIHEAD_REDUCED = "multihead_reduced" + MULTIHEAD_FULL = "multihead_full" + + @staticmethod + def get_choices(): + return [ + AttentionType.MULTIHEAD, + AttentionType.LOCAL_EXPERTS, + AttentionType.MEMORY_EFFICIENT, + AttentionType.SPARSE_MULTIHEAD, + AttentionType.SPARSE_MULTIHEAD_TRUNCATED, + AttentionType.MULTIHEAD_REDUCED, + AttentionType.MULTIHEAD_FULL, + ] + + +LAYER_SYMBOLS = { + "h": AttentionType.MULTIHEAD, # multi-Head + "e": AttentionType.LOCAL_EXPERTS, # Experts + "m": AttentionType.MEMORY_EFFICIENT, # Memory + "s": AttentionType.SPARSE_MULTIHEAD, # Sparse (Locality sensitive hashing) + "t": AttentionType.SPARSE_MULTIHEAD_TRUNCATED, # Using TruncatedDispatcher + "r": AttentionType.MULTIHEAD_REDUCED, # Reduced + "f": AttentionType.MULTIHEAD_FULL, # Force using full attention +} + + +@registry.register_model +class AttentionLmMoe(t2t_model.T2TModel): + """Attention net. 
See file docstring.""" + + @staticmethod + def use_body_sharded(): + return True + + def body_sharded(self, sharded_features): + # Remove dropout if not training + hparams = self._hparams + dp = self._data_parallelism + if hparams.use_inputs: + decoder_input = dp(tf.squeeze, sharded_features["inputs"], 2) + decoder_self_attention_bias = None + else: + targets = sharded_features["targets"] + targets = dp(tf.squeeze, targets, 2) + (decoder_input, decoder_self_attention_bias, pad_remover) = dp( + attention_lm_moe_prepare_decoder, targets, hparams) + + def preprocess(x): + return dp(common_layers.layer_preprocess, x, hparams) + + def postprocess(x, y): + return dp(common_layers.layer_postprocess, x, y, hparams) + + x = dp(tf.nn.dropout, decoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + extra_loss = 0.0 + + if not hparams.use_inputs: + # As preprocess and postprocess are called with batch of size one (all + # batches concatenated), we just make sure that batch_norm is not use ( + # should not either way) + assert hparams.norm_type != "batch" + + tf.logging.info("Applying Padding Remover for the attention experts") + + dp_remove_pad = functools.partial( + dp, remove_pad, pad_remover=pad_remover, mode=hparams.mode) + dp_restore_pad = functools.partial( + dp, restore_pad, ref_x=x, pad_remover=pad_remover, mode=hparams.mode) + else: + # Using identity function: No effect + dp_remove_pad = lambda x: x + dp_restore_pad = lambda x: x + + if hparams.attention_exp_factor != 0: + tf.logging.info("Expand/compress tokens before sending them to experts") + dp_expand_bc = lambda x: dp( # pylint: disable=g-long-lambda + expand_batch_coordinates, + x, + hparams.attention_exp_factor) + dp_expand_x = lambda x: dp( # pylint: disable=g-long-lambda + common_attention.deconv_elems_1d, + x, + hparams.attention_exp_factor, + hparams.attention_exp_inputdim) + dp_compress_x = lambda x, l: dp( # pylint: disable=g-long-lambda + common_attention.conv_elems_1d, + x, + hparams.attention_exp_factor, + l) + else: + dp_expand_bc = lambda x: x + dp_expand_x = lambda x: x + dp_compress_x = lambda x, l: x + + def print_shape(x, suffix, debug=False): + # To help debugging, print the input/output shapes at inference and eval + # Inference for long sequences can take a long time, so that's help to + # see the progression of the generation + if not debug and hparams.mode == ModeKeys.TRAIN: + return x + return tf.Print(x, [tf.shape(x)], "shape_x_{}".format(suffix)) + + with tf.name_scope("batch_coordinate_preprocess"): + batch_coordinate = dp(get_batch_coordinate, x) + batch_coordinate = dp_remove_pad(batch_coordinate) + batch_coordinate = dp_expand_bc(batch_coordinate) + batch_order = dp(get_batch_coordinate, x, axis=-1) + batch_order = dp_remove_pad(batch_order) + batch_order = dp_expand_bc(batch_order) + + x = dp(print_shape, x, "in") + + assert hparams.batch_size >= hparams.max_length + + num_hidden_layers = ( + len(hparams.attention_layers) or hparams.num_hidden_layers) + for layer in range(num_hidden_layers): + with tf.variable_scope("layer_%d" % layer): + + # Use the layer type defined in attention_layers + if hparams.attention_layers: + attention_type = LAYER_SYMBOLS[hparams.attention_layers[layer]] + else: + attention_type = hparams.attention_type + + with tf.variable_scope( + "attention_{}".format(attention_type)): + if attention_type in [ + AttentionType.MULTIHEAD, AttentionType.MULTIHEAD_FULL]: + attention_dot_type = ( + "local_mask_right" if hparams.attention_local else + "dot_product") + if attention_type == 
AttentionType.MULTIHEAD_FULL: + attention_dot_type = "dot_product" + y = dp( + common_attention.multihead_attention, + preprocess(x), + None, + decoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=attention_dot_type, + block_length=hparams.attention_block_length, + name="decoder_self_attention") + elif attention_type == AttentionType.SPARSE_MULTIHEAD: + x_in = preprocess(x) + x_in = dp_remove_pad(x_in) + y, loss_experts = dp( + common_attention.multihead_attention_sparse_dot_prod, + x_in, + None, + None, # Bias is computed inside + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + + # Additional parameters + bi=[common_attention.BatchInfo( + coordinates=batch_coordinate[i], + order=batch_order[i], # No future mask + ) for i in range(dp.n)], + use_map_fn=hparams.lsh_use_map_fn, + experts_params=dict( + nb_hyperplanes=hparams.lsh_num_hyperplanes, + ), + ) + y = dp_restore_pad(y) + + # TODO(avaswani, epot, noam): Do we need to divide by num shards ? + extra_loss += tf.add_n(loss_experts) / dp.n + elif attention_type == AttentionType.SPARSE_MULTIHEAD_TRUNCATED: + x_in = preprocess(x) + y, loss_experts = dp( + common_attention.multihead_attention_sparse_truncated, + x_in, + None, + None, # Bias is computed inside + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + + # Additional parameters + bi=[common_attention.BatchInfo( + coordinates=batch_coordinate[i], + order=batch_order[i], # No future mask + ) for i in range(dp.n)], + mask_right=True, + experts_params=dict( + nb_hyperplanes=hparams.lsh_num_hyperplanes, + ), + ) + + # TODO(avaswani, epot, noam): Do we need to divide by num shards ? 
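+            # loss_experts holds one loss tensor per data-parallelism shard;
+            # summing with tf.add_n and dividing by dp.n keeps the penalty on
+            # the same scale as a single-shard run.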
+ extra_loss += tf.add_n(loss_experts) / dp.n + elif attention_type == AttentionType.MEMORY_EFFICIENT: + assert hparams.layer_preprocess_sequence == "n" + y = dp( + common_attention.multihead_self_attention_memory_efficient, + x, + decoder_self_attention_bias, + hparams.num_heads, + name="decoder_self_attention") + elif attention_type == AttentionType.MULTIHEAD_REDUCED: + y = dp( + common_attention.multihead_self_attention_reduced, + preprocess(x), + factor=hparams.attention_red_factor, + reduction_type=hparams.attention_reduction_type, + nonlinearity=hparams.attention_nonlinearity, + multihead_params=dict( + total_key_depth= + hparams.attention_key_channels or hparams.hidden_size, + total_value_depth= + hparams.attention_value_channels or hparams.hidden_size, + num_heads=hparams.num_heads, + dropout_rate=hparams.attention_dropout, + )) + elif attention_type == AttentionType.LOCAL_EXPERTS: + x_in = preprocess(x) + x_in = dp_remove_pad(x_in) + x_in = dp_expand_x(x_in) + y, loss = dp( + common_attention.local_expert_attention, + x_in, + k=hparams.attention_moe_k, + loss_coef=hparams.attention_load_balance, + attention_num_experts=hparams.attention_num_experts, + train=hparams.mode == ModeKeys.TRAIN, + batch_coordinate=batch_coordinate, + mask_right=not hparams.use_inputs, + split_batch=bool(hparams.attention_split_batch), + attention_num_head=hparams.attention_num_head, + attention_kq_size=hparams.attention_kq_size, + attention_v_size=hparams.attention_v_size) + y = dp_compress_x(y, x[0].get_shape().as_list()[-1]) + y = dp_restore_pad(y) + # TODO(avaswani, epot, noam): Do we need to divide by num shards ? + extra_loss += tf.add_n(loss) / dp.n + else: + raise ValueError("Only {} supported for now.".format( + AttentionType.get_choices())) + x = postprocess(x, y) + with tf.variable_scope("ffn"): + if hparams.memory_efficient_ffn: + assert hparams.layer_preprocess_sequence == "n" + y = dp( + common_layers.conv_hidden_relu_memory_efficient, + x, + hparams.filter_size) + else: + additional_conv_params = {} + if hparams.use_sepconv: + additional_conv_params = dict( + padding="LEFT", + # Parameters copied from the transformer model + kernel_size=(3, 1), + second_kernel_size=(31, 1), + ) + y = dp( + common_layers.conv_hidden_relu, + preprocess(x), + hparams.filter_size, + hparams.hidden_size, + dropout=hparams.relu_dropout, + **additional_conv_params + ) + x = postprocess(x, y) + x = preprocess(x) + + decoder_output = dp(tf.expand_dims, x, 2) + return decoder_output, extra_loss + + +def attention_lm_moe_prepare_decoder(targets, hparams): + """Prepare one shard of the model for the decoder. + + Args: + targets: a Tensor. + hparams: run hyperparameters + + Returns: + decoder_input: a Tensor, bottom of decoder stack + decoder_self_attention_bias: a Tensor, containing large negative values + to implement masked attention and possibly biases for diagonal alignments + pad_remover (expert_utils.PadRemover): an util object to remove padding + """ + targets_pad_mask = common_attention.embedding_to_padding(targets) + with tf.name_scope("pad_remover"): + # Because of the shift_right, the token will be considered as + # padding. In practice, it doesn't really matter, due to the triangular + # mask, this token should never be attended. 
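+    # PadRemover records which positions are padding so remove_pad/restore_pad
+    # below can drop them before the attention experts and put them back
+    # afterwards.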
+ pad_remover = expert_utils.PadRemover(targets_pad_mask) + + if hparams.prepend_mode == "prepend_inputs_full_attention": + decoder_self_attention_bias = ( + common_attention.attention_bias_prepend_inputs_full_attention( + targets_pad_mask)) + else: + decoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle(tf.shape(targets)[1])) + decoder_input = common_layers.shift_right_3d(targets) + if hparams.pos == "timing": + decoder_input = common_attention.add_timing_signal_1d(decoder_input) + return (decoder_input, decoder_self_attention_bias, pad_remover) + + +@expert_utils.add_name_scope() +def get_batch_coordinate(x, axis=0): + """Return a flat int32 tensor of shape [1, batch_size*length, 1].""" + # Compute the batch coordinate before flattening all batches + batch_coordinate = tf.expand_dims( + common_attention.coordinate_tensor(tf.shape(x)[:-1], axis=axis), axis=-1) + return batch_coordinate + + +@expert_utils.add_name_scope() +def expand_batch_coordinates(bc, length_factor): + """Duplicate elements of bc by length_factor. + + Args: + bc (tf.Tensor): int32 tensor of shape [1, length, 1] + length_factor (int): + + Returns: + tf.Tensor: of shape [1, length*length_factor, 1] where every elements has + been duplicated length_factor times. + """ + assert bc.get_shape().as_list() == [1, None, 1] + # bc has shape [1, length, 1] + bc *= tf.constant([[1] * length_factor]) + # bc has shape [1, length, length_factor] + bc = tf.reshape(bc, [1, -1, 1]) + # bc has shape [1, length*length_factor] + return bc + + +@expert_utils.add_name_scope() +def remove_pad(x, pad_remover, mode): + """Remove padding by concatenating all dimension into one. + + Args: + x (tf.Tensor): input of shape [batch_size, length, depth] + pad_remover (obj): a PadRemover object + mode (ModeKeys): infer, train or eval. If inference, the padding remover is + not applied + + Returns: + tf.Tensor of shape [1,length_nonpad,depth] where + length_nonpad <= batch_size*length + """ + # Concatenate all tokens (without padding) + x = expert_utils.flatten_all_but_last(x) + + # Remove padding for training and eval + if mode != ModeKeys.PREDICT: + # This is a hack to allows inference when the token + # is detected as padding and removed. This works for now because there is + # no padding at inference. + x = pad_remover.remove(x) + + x = tf.expand_dims(x, axis=0) # Now batch_size=1 + return x + + +@expert_utils.add_name_scope() +def restore_pad(x, ref_x, pad_remover, mode): + x = tf.squeeze(x, axis=0) + if mode != ModeKeys.PREDICT: + x = pad_remover.restore(x) + x = common_layers.reshape_like(x, ref_x) + return x + + +@registry.register_hparams +def attention_lm_moe_base(): + """Set of hyperparameters. + + suitable for 1 gpu. + on lm1b_32k: + ~229M params + 0.9 steps/sec on [GeForce GTX TITAN X] + + Returns: + a hparams object + """ + hparams = common_hparams.basic_params1() + hparams.hidden_size = 1024 + hparams.batch_size = 8192 + hparams.max_length = 256 + hparams.dropout = 0.0 + hparams.clip_grad_norm = 0. # i.e. 
no gradient clipping + hparams.optimizer_adam_epsilon = 1e-9 + hparams.learning_rate_decay_scheme = "noam" + hparams.learning_rate = 0.1 + hparams.learning_rate_warmup_steps = 2000 + hparams.initializer_gain = 1.0 + hparams.num_hidden_layers = 4 + hparams.initializer = "uniform_unit_scaling" + hparams.weight_decay = 0.0 + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.98 + hparams.num_sampled_classes = 0 + hparams.label_smoothing = 0.0 + hparams.shared_embedding_and_softmax_weights = False + hparams.add_hparam("filter_size", 2048) # Add new ones like this. + hparams.moe_num_experts = 32 + # attention-related flags + hparams.add_hparam("num_heads", 8) + hparams.add_hparam("attention_key_channels", 0) + hparams.add_hparam("attention_value_channels", 0) + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. + hparams.add_hparam("attention_dropout", 0.0) + hparams.add_hparam("relu_dropout", 0.0) + hparams.add_hparam("pos", "timing") # timing, none + hparams.add_hparam("moe_layers", "2") # comma separated list of layer numbers + # moe params. local attention moe. + # If attention_layers is set, the num_hidden_layers parameter will be ignored + # and each caracter of the string will correspond to one attention + # layer type + hparams.add_hparam("attention_layers", "") + hparams.add_hparam("attention_type", AttentionType.MULTIHEAD) + hparams.add_hparam("attention_local", False) + hparams.add_hparam("attention_moe_k", 2) + hparams.add_hparam("attention_num_head", 1) + hparams.add_hparam("attention_num_experts", 16) + hparams.add_hparam("attention_split_batch", False) + hparams.add_hparam("attention_red_factor", 3) + hparams.add_hparam("attention_block_length", 128) + hparams.add_hparam("attention_reduction_type", "conv") + # Non linearity for the attention reduction. Either "none", or "silu" ( + # Sigmoid Linear-Unit described in https://arxiv.org/abs/1710.05941) + hparams.add_hparam("attention_nonlinearity", "none") + # If attention_exp_factor is set, each input to local_expert_attention (of + # dimensionality hidden size) is projected into attention_exp_factor smaller + # inputs, each of dimensionality attention_exp_inputdim. (otherwise + # attention_exp_inputdim is ignored) + hparams.add_hparam("attention_exp_factor", 0) + hparams.add_hparam("attention_exp_inputdim", 128) + # Key, query and value dimensions for the attention + hparams.add_hparam("attention_kq_size", 128) + hparams.add_hparam("attention_v_size", 256) + # Loss coef for load balancing + hparams.add_hparam("attention_load_balance", 2e-2) + # Locality-sensitive hashing params + hparams.add_hparam("lsh_num_hyperplanes", 4) + hparams.add_hparam("lsh_use_map_fn", False) + + hparams.add_hparam("use_sepconv", False) + hparams.add_hparam("diet_experts", False) + hparams.add_hparam("memory_efficient_ffn", False) + # if True, we learn a non-autoregressive model from "inputs" to "targets". 
+ # if False, we learn an autoregressive model to generate "targets" + hparams.add_hparam("use_inputs", False) + return hparams + + +@registry.register_hparams +def attention_lm_moe_base_long_seq(): + """Hyper parameters specifics for long sequence generation.""" + hparams = attention_lm_moe_base() + + hparams.max_length = 0 # max_length == batch_size + hparams.eval_drop_long_sequences = True + hparams.min_length_bucket = 256 # Avoid cyclic problems for big batches + hparams.use_sepconv = True + + return hparams + + +@registry.register_hparams +def attention_lm_moe_base_ae(): + """Base model with attention expert.""" + hparams = attention_lm_moe_base_long_seq() + hparams.attention_type = AttentionType.LOCAL_EXPERTS + + hparams.learning_rate = 0.05 + hparams.learning_rate_warmup_steps = 10000 + # According to noam, ("n", "da") seems better for harder-to-learn models + # hparams.layer_preprocess_sequence = "n" + # hparams.layer_postprocess_sequence = "da" + return hparams + + +@registry.register_hparams +def attention_lm_moe_base_local(): + """Base model with attention expert.""" + hparams = attention_lm_moe_base_long_seq() + hparams.attention_local = True + return hparams + + +@registry.register_hparams +def attention_lm_moe_base_hybrid(): + """Base model with attention expert.""" + hparams = attention_lm_moe_base_long_seq() + hparams.attention_layers = "hehe" # Alternate local/expert + hparams.attention_local = True + + # hparams.layer_preprocess_sequence = "n" + # hparams.layer_postprocess_sequence = "da" + return hparams + + +@registry.register_hparams +def attention_lm_hybrid_v2(): + hparams = attention_lm_moe_base_long_seq() + hparams.attention_layers = "hheh" # Alternate local/expert + hparams.attention_local = True + hparams.attention_moe_k = 6 + + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + return hparams + + +@registry.register_hparams +def attention_lm_16k(): + hparams = attention_lm_hybrid_v2() + hparams.batch_size = 16384 + return hparams + + +@registry.register_hparams +def attention_lm_12k(): + hparams = attention_lm_hybrid_v2() + hparams.batch_size = 12000 + return hparams + + +@registry.register_hparams +def attention_lm_11k(): + hparams = attention_lm_hybrid_v2() + hparams.batch_size = 11500 + return hparams + + +@registry.register_hparams +def attention_lm_ae_extended(): + """Experiment with the exp_factor params.""" + hparams = attention_lm_moe_base_long_seq() + hparams.attention_layers = "eeee" + hparams.attention_local = True + # hparams.factored_logits=1 # Necessary when the number of expert grow bigger + hparams.attention_moe_k = 2 + hparams.attention_exp_factor = 4 + # hparams.attention_exp_inputdim = 128 + + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + return hparams + + +@registry.register_hparams +def attention_lm_moe_base_memeff(): + """Base model with attention expert.""" + hparams = attention_lm_moe_base_long_seq() + hparams.use_sepconv = False + + hparams.diet_experts = True + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + hparams.layer_prepostprocess_dropout = 0.0 + hparams.memory_efficient_ffn = True + hparams.attention_type = AttentionType.MEMORY_EFFICIENT + hparams.num_heads = 8 + hparams.factored_logits = True + return hparams + + +@registry.register_hparams +def attention_lm_moe_small(): + """Cheap model for single-gpu training. 
+ + on lm1b_32k: + ~312M params + 1.6 steps/sec on [GeForce GTX TITAN X] + After 50K steps on 8 GPUs (synchronous): + eval_log_ppl_per_token = 3.31 + + Returns: + an hparams object. + """ + hparams = attention_lm_moe_base() + hparams.num_hidden_layers = 4 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.moe_num_experts = 128 + hparams.moe_layers = "2" + return hparams + + +@registry.register_hparams +def attention_lm_moe_tiny(): + """Cheap model for debugging. + + Returns: + an hparams object. + """ + hparams = attention_lm_moe_small() + hparams.moe_num_experts = 32 + return hparams + + +@registry.register_hparams +def attention_lm_attention_moe_tiny(): + """Cheap model for debugging. + + Returns: + an hparams object. + """ + hparams = attention_lm_moe_small() + hparams.moe_layers = "" + hparams.attention_num_experts = 128 + hparams.filter_size = 8192 + hparams.attention_type = AttentionType.LOCAL_EXPERTS + return hparams + + +@registry.register_hparams +def attention_lm_no_moe_small(): + """Without the mixture of experts (for comparison). + + on lm1b_32k: + ~45M params + 2 steps/sec on [GeForce GTX TITAN X] + After 50K steps on 8 GPUs (synchronous): + eval_log_ppl_per_token = 3.51 + + Returns: + an hparams object. + """ + hparams = attention_lm_moe_small() + hparams.moe_layers = "" + return hparams + + +@registry.register_hparams +def attention_lm_moe_large(): + """Large model for distributed training. + + Over 1B parameters, so requires multi-gpu training due to memory + requirements. + + on lm1b_32k: + After 45K steps on 8 GPUs (synchronous): + eval_log_ppl_per_token = 3.18 + eval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9 + + Returns: + an hparams object. + """ + hparams = attention_lm_moe_base() + hparams.num_hidden_layers = 5 + hparams.moe_layers = "3" + hparams.hidden_size = 1024 + hparams.num_heads = 16 + hparams.filter_size = 4096 + hparams.moe_hidden_sizes = "4096" + hparams.moe_num_experts = 128 + hparams.layer_prepostprocess_dropout = 0.2 + return hparams + + +@registry.register_hparams +def attention_lm_moe_large_diet(): + hparams = attention_lm_moe_large() + hparams.diet_experts = True + return hparams + + +@registry.register_hparams +def attention_lm_moe_memory_efficient(): + """Memory-efficient version.""" + hparams = attention_lm_moe_large() + hparams.diet_experts = True + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + hparams.layer_prepostprocess_dropout = 0.0 + hparams.memory_efficient_ffn = True + hparams.attention_type = AttentionType.MEMORY_EFFICIENT + hparams.num_heads = 8 + hparams.factored_logits = True + return hparams + + +@registry.register_hparams +def attention_lm_moe_32b_diet(): + """Unnecessarily large model with 32B params - because we can.""" + hparams = attention_lm_moe_large_diet() + hparams.moe_hidden_sizes = "16384" + hparams.moe_num_experts = 1024 + return hparams + + +@registry.register_hparams +def attention_lm_moe_24b_diet(): + """Unnecessarily large model with 24B params - because we can.""" + hparams = attention_lm_moe_large_diet() + hparams.moe_hidden_sizes = "12288" + hparams.moe_num_experts = 1024 + hparams.batch_size = 4096 + return hparams + + +@registry.register_hparams +def attention_lm_moe_translation(): + """Version to use for seq2seq.""" + hparams = attention_lm_moe_base() + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + hparams.learning_rate = 0.4 + hparams.prepend_mode = "prepend_inputs_masked_attention" + 
hparams.max_length = 512 + hparams.label_smoothing = 0.1 + hparams.layer_prepostprocess_dropout = 0.2 + hparams.num_hidden_layers = 6 + hparams.moe_layers = "0,1,2,3,4,5" + hparams.shared_embedding_and_softmax_weights = True + return hparams + + +@registry.register_hparams +def attention_lm_moe_unscramble_base(): + """Version to use with languagemodel_wiki_scramble1k50.""" + hparams = attention_lm_no_moe_small() + hparams.use_inputs = True + hparams.min_length_bucket = 1024 + hparams.max_length = 1024 + hparams.batch_size = 5000 + hparams.layer_prepostprocess_dropout = 0.0 + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + return hparams diff --git a/tensor2tensor/models/research/autoencoders.py b/tensor2tensor/models/research/autoencoders.py new file mode 100644 index 000000000..8a3da53dd --- /dev/null +++ b/tensor2tensor/models/research/autoencoders.py @@ -0,0 +1,1327 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Autoencoders.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import discretization +from tensor2tensor.layers import latent_layers +from tensor2tensor.layers import modalities +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +def reverse_gradient(x, lr=1.0): + return -lr * x + tf.stop_gradient((1.0 + lr) * x) + + +def time_to_channels(embedded_video): + """Put time dimension on channels in an embedded video.""" + video_shape = common_layers.shape_list(embedded_video) + if len(video_shape) != 5: + raise ValueError("Assuming videos given as tensors in the format " + "[batch, time, height, width, channels] but got one " + "of shape: %s" % str(video_shape)) + transposed = tf.transpose(embedded_video, [0, 2, 3, 1, 4]) + return tf.reshape(transposed, [ + video_shape[0], video_shape[2], video_shape[3], + video_shape[1] * video_shape[4] + ]) + + +@registry.register_model +class AutoencoderBasic(t2t_model.T2TModel): + """A basic autoencoder, try with image_mnist_rev or image_cifar10_rev.""" + + def __init__(self, *args, **kwargs): + super(AutoencoderBasic, self).__init__(*args, **kwargs) + self._cur_bottleneck_tensor = None + self.is1d = None + self._encode_on_predict = False + + @property + def num_channels(self): + # TODO(lukaszkaiser): is this a universal enough way to get channels? 
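+    # If the problem spec does not define num_channels (e.g. for 1-D or text
+    # problems), fall back to a single channel below.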
+ try: + num_channels = self.hparams.problem.num_channels + except AttributeError: + num_channels = 1 + return num_channels + + def image_summary(self, name, image_logits, max_outputs=1): + """Helper for image summaries that are safe on TPU.""" + if len(image_logits.get_shape()) != 5: + tf.logging.info("Not generating image summary, maybe not an image.") + return + return tf.summary.image( + name, + common_layers.tpu_safe_image_summary(tf.argmax(image_logits, -1)), + max_outputs=max_outputs) + + def embed(self, x, name="embedding"): + """Input embedding with a non-zero bias for uniform inputs.""" + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + x_shape = common_layers.shape_list(x) + # Merge channels and depth before embedding. + x = tf.reshape(x, x_shape[:-2] + [x_shape[-2] * x_shape[-1]]) + x = tf.layers.dense( + x, + self.hparams.hidden_size, + name="embed", + activation=common_layers.belu, + bias_initializer=tf.random_normal_initializer(stddev=0.01)) + x = common_layers.layer_norm(x, name="ln_embed") + return common_attention.add_timing_signal_nd(x) + + def bottleneck(self, x): + with tf.variable_scope("bottleneck"): + hparams = self.hparams + x = tf.layers.dense(x, hparams.bottleneck_bits, name="bottleneck") + if hparams.mode == tf_estimator.ModeKeys.TRAIN: + noise = 2.0 * tf.random_uniform(common_layers.shape_list(x)) - 1.0 + return tf.tanh(x) + noise * hparams.bottleneck_noise, 0.0 + return tf.tanh(x), 0.0 + + def unbottleneck(self, x, res_size, reuse=None): + with tf.variable_scope("unbottleneck", reuse=reuse): + x = tf.layers.dense(x, res_size, name="dense") + return x + + def make_even_size(self, x): + if not self.is1d: + return common_layers.make_even_size(x) + shape1 = x.get_shape().as_list()[1] + if shape1 is not None and shape1 % 2 == 0: + return x + x, _ = common_layers.pad_to_same_length( + x, x, final_length_divisible_by=2, axis=1) + return x + + def encoder(self, x): + with tf.variable_scope("encoder"): + hparams = self.hparams + layers = [] + kernel, strides = self._get_kernel_and_strides() + # Down-convolutions. + for i in range(hparams.num_hidden_layers): + x = self.make_even_size(x) + layers.append(x) + x = tf.layers.conv2d( + x, + hparams.hidden_size * 2**(i + 1), + kernel, + strides=strides, + padding="SAME", + activation=common_layers.belu, + name="conv_%d" % i) + x = common_layers.layer_norm(x, name="ln_%d" % i) + return x, layers + + def decoder(self, x, encoder_layers): + del encoder_layers + with tf.variable_scope("decoder"): + hparams = self.hparams + kernel, strides = self._get_kernel_and_strides() + # Up-convolutions. 
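+      # Mirror of the encoder: layer j uses hidden_size * 2**j filters, so the
+      # channel count shrinks while each stride-2 transposed convolution
+      # doubles the spatial size (e.g. with hidden_size=64 and 3 layers:
+      # 256 -> 128 -> 64 filters on the way back up).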
+ for i in range(hparams.num_hidden_layers): + j = hparams.num_hidden_layers - i - 1 + x = tf.layers.conv2d_transpose( + x, + hparams.hidden_size * 2**j, + kernel, + strides=strides, + padding="SAME", + activation=common_layers.belu, + name="deconv_%d" % j) + x = common_layers.layer_norm(x, name="ln_%d" % i) + return x + + def gumbel_sample(self, reconstr_gan): + hparams = self.hparams + is_training = hparams.mode == tf_estimator.ModeKeys.TRAIN + vocab_size = self._problem_hparams.vocab_size["targets"] + if hasattr(self._hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % self._hparams.vocab_divisor + reconstr_gan = tf.nn.log_softmax(reconstr_gan) + if is_training and hparams.gumbel_temperature > 0.0: + gumbel_samples = discretization.gumbel_sample( + common_layers.shape_list(reconstr_gan)) + gumbel_samples *= hparams.gumbel_noise_factor + reconstr_gan += gumbel_samples + reconstr_sample = latent_layers.multinomial_sample( + reconstr_gan, temperature=hparams.gumbel_temperature) + reconstr_gan = tf.nn.softmax(reconstr_gan / hparams.gumbel_temperature) + else: + reconstr_sample = tf.argmax(reconstr_gan, axis=-1) + reconstr_gan = tf.nn.softmax(reconstr_gan / 0.1) # Sharpen a bit. + # Use 1-hot forward, softmax backward. + reconstr_hot = tf.one_hot(reconstr_sample, vocab_size) + reconstr_gan += reconstr_hot - tf.stop_gradient(reconstr_gan) + return reconstr_gan + + def body(self, features): + hparams = self.hparams + is_training = hparams.mode == tf_estimator.ModeKeys.TRAIN + vocab_size = self._problem_hparams.vocab_size["targets"] + if hasattr(self._hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % self._hparams.vocab_divisor + encoder_layers = None + self.is1d = hparams.sample_width == 1 + if (hparams.mode != tf_estimator.ModeKeys.PREDICT + or self._encode_on_predict): + labels = features["targets_raw"] + labels_shape = common_layers.shape_list(labels) + # handle videos + if len(labels.shape) == 5: + labels = time_to_channels(labels) + shape = common_layers.shape_list(labels) + x = tf.one_hot(labels, vocab_size) + x = self.embed(x) + target_codes = x + if shape[2] == 1: + self.is1d = True + # Run encoder. + x, encoder_layers = self.encoder(x) + # Bottleneck. + b, b_loss = self.bottleneck(x) + xb_loss = 0.0 + b_shape = common_layers.shape_list(b) + self._cur_bottleneck_tensor = b + res_size = common_layers.shape_list(x)[-1] + b = self.unbottleneck(b, res_size) + if not is_training: + x = b + else: + l = 2**hparams.num_hidden_layers + warm_step = int(hparams.bottleneck_warmup_steps * 0.25 * l) + nomix_p = common_layers.inverse_lin_decay(warm_step) + 0.01 + if common_layers.should_generate_summaries(): + tf.summary.scalar("nomix_p_bottleneck", nomix_p) + rand = tf.random_uniform(common_layers.shape_list(x)) + # This is the distance between b and x. Having this as loss helps learn + # the bottleneck function, but if we back-propagated to x it would be + # minimized by just setting x=0 and b=0 -- so we don't want too much + # of the influence of this, and we stop-gradient to not zero-out x. + x_stop = tf.stop_gradient(x) + xb_loss = tf.reduce_mean(tf.reduce_sum( + tf.squared_difference(x_stop, b), axis=-1)) + # To prevent this loss from exploding we clip at 1, but anneal clipping. 
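+        # inverse_exp_decay ramps from min_value up to 1.0 over warm_step
+        # steps, so clip_max anneals from 1 / 0.001 = 1000 down to 1.0: the
+        # l2 term is effectively unclipped early in training and capped at
+        # 1.0 once warmed up.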
+ clip_max = 1.0 / common_layers.inverse_exp_decay( + warm_step, min_value=0.001) + xb_clip = tf.maximum(tf.stop_gradient(xb_loss), clip_max) + xb_loss *= clip_max / xb_clip + x = tf.where(tf.less(rand, nomix_p), b, x) + if hparams.gan_loss_factor != 0.0: + # Add a purely sampled batch on which we'll compute the GAN loss. + g = self.unbottleneck( + self.sample(shape=b_shape), + common_layers.shape_list(x)[-1], + reuse=True) + x = tf.concat([x, g], axis=0) + else: + if self._cur_bottleneck_tensor is None: + b = self.sample() + else: + b = self._cur_bottleneck_tensor + self._cur_bottleneck_tensor = b + res_size = self.hparams.hidden_size * 2**self.hparams.num_hidden_layers + res_size = min(res_size, hparams.max_hidden_size) + x = self.unbottleneck(b, res_size) + # Run decoder. + x = self.decoder(x, encoder_layers) + + # Cut to the right size and mix before returning. + res = x + if hparams.mode != tf_estimator.ModeKeys.PREDICT: + res = x[:, :shape[1], :shape[2], :] + + # Final dense layer. + res = tf.layers.dense( + res, self.num_channels * hparams.hidden_size, name="res_dense") + + output_shape = common_layers.shape_list(res)[:-1] + [ + self.num_channels, self.hparams.hidden_size + ] + res = tf.reshape(res, output_shape) + + if hparams.mode == tf_estimator.ModeKeys.PREDICT: + if hparams.use_vq_loss: + (reconstr, _, _, _, _) = discretization.vq_loss(res, labels, vocab_size) + else: + reconstr = tf.layers.dense(res, vocab_size, name="autoencoder_final") + return reconstr, {"bottleneck_loss": 0.0} + + if hparams.gan_loss_factor != 0.0: + res, res_gan = tf.split(res, 2, axis=0) + + # Losses. + losses = { + "bottleneck_extra": b_loss, + "bottleneck_l2": hparams.bottleneck_l2_factor * xb_loss + } + + if hparams.use_vq_loss: + vq_temperature = hparams.vq_temperature / common_layers.inverse_exp_decay( + hparams.gan_codes_warmup_steps * 1.2, + min_value=hparams.vq_temperature * 2) + if hparams.mode != tf_estimator.ModeKeys.TRAIN: + vq_temperature = None + with tf.variable_scope("vq_loss"): + (reconstr, _, target_codes, code_loss, + targets_loss) = discretization.vq_loss( + res, labels, vocab_size, temperature=vq_temperature) + losses["code_loss"] = code_loss * hparams.code_loss_factor + losses["training"] = targets_loss + else: + reconstr = tf.layers.dense(res, vocab_size, name="autoencoder_final") + targets_loss = tf.losses.sparse_softmax_cross_entropy( + logits=tf.reshape(reconstr, labels_shape + [vocab_size]), + labels=tf.reshape(labels, labels_shape)) + losses["training"] = targets_loss + + # GAN losses. + if hparams.gan_loss_factor != 0.0: + update_means_factor = common_layers.inverse_exp_decay( + hparams.gan_codes_warmup_steps, min_value=0.0001) + if hparams.use_vq_loss: + with tf.variable_scope("vq_loss", reuse=True): + update_means = tf.less(tf.random_uniform([]), update_means_factor) + reconstr_gan, gan_codes, _, code_loss_gan, _ = discretization.vq_loss( + res_gan, + labels, + vocab_size, + do_update=update_means, + temperature=vq_temperature) + reconstr_gan_nonoise = reconstr_gan + code_loss_gan *= hparams.code_loss_factor * update_means_factor + losses["code_loss_gan"] = code_loss_gan + else: + reconstr_gan = tf.layers.dense( + res_gan, vocab_size, name="autoencoder_final", reuse=True) + reconstr_gan_nonoise = reconstr_gan + reconstr_gan = self.gumbel_sample(reconstr_gan) + # Embed to codes. + gan_codes = self.embed(reconstr_gan) + + # Add GAN loss if requested. 
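+    # The purely sampled batch is scored against the target codes with a
+    # sliced GAN loss; reverse_gradient (defined above) flips the generator's
+    # gradients, so this single objective trains generator and discriminator
+    # adversarially.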
+ gan_loss = 0.0 + if hparams.gan_loss_factor != 0.0: + self.image_summary("gan", reconstr_gan_nonoise) + + def discriminate(x): + """Run a dioscriminator depending on the hparams.""" + if hparams.discriminator == "default": + return common_layers.deep_discriminator( + x, hparams.discriminator_batchnorm, is_training) + elif hparams.discriminator == "patched": + return common_layers.patch_discriminator(x) + elif hparams.discriminator == "single": + return common_layers.single_discriminator( + x, + hparams.discriminator_size, + hparams.discriminator_kernel_size, + hparams.discriminator_strides, + pure_mean=hparams.discriminator_pure_mean) + elif hparams.discriminator == "double": + return common_layers.double_discriminator( + x, + hparams.discriminator_size, + hparams.discriminator_kernel_size, + hparams.discriminator_strides, + pure_mean=hparams.discriminator_pure_mean) + else: + raise Exception("Unknown discriminator %s" % hparams.discriminator) + + tc_shape = common_layers.shape_list(target_codes) + if len(tc_shape) > 4: + target_codes = tf.reshape(target_codes, + tc_shape[:-2] + [tc_shape[-1] * tc_shape[-2]]) + gan_codes = tf.reshape(gan_codes, + tc_shape[:-2] + [tc_shape[-1] * tc_shape[-2]]) + gan_lr = common_layers.inverse_exp_decay( + hparams.gan_codes_warmup_steps * 1.5) + rev_grad_gan_codes = reverse_gradient(gan_codes, lr=gan_lr) + gan_loss = common_layers.sliced_gan_loss( + target_codes, + rev_grad_gan_codes, + discriminate, + self.hparams.num_sliced_vecs, + do_tanh=hparams.sliced_do_tanh) + gan_loss *= hparams.gan_loss_factor * update_means_factor + losses["gan_loss"] = -gan_loss + + self.image_summary("ae", reconstr) + + logits = tf.reshape(reconstr, labels_shape + [vocab_size]) + return logits, losses + + def sample(self, features=None, shape=None): + del features + hp = self.hparams + div_x = 2**hp.num_hidden_layers + div_y = 1 if self.is1d else 2**hp.num_hidden_layers + size = [ + hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y, + hp.bottleneck_bits + ] + size = size if shape is None else shape + # Sample in [-1, 1] as the bottleneck is under tanh. + return 2.0 * tf.random_uniform(size) - 1.0 + + def encode(self, x): + """Auto-encode x and return the bottleneck.""" + features = {"targets": x} + self(features) # pylint: disable=not-callable + res = tf.maximum(0.0, self._cur_bottleneck_tensor) # Be 0/1 and not -1/1. + self._cur_bottleneck_tensor = None + return res + + def infer(self, features, *args, **kwargs): # pylint: disable=arguments-differ + """Produce predictions from the model by sampling.""" + del args, kwargs + # Inputs and features preparation needed to handle edge cases. + if not features: + features = {} + inputs_old = None + if "inputs" in features and len(features["inputs"].shape) < 4: + inputs_old = features["inputs"] + features["inputs"] = tf.expand_dims(features["inputs"], 2) + + # Sample and decode. + num_channels = self.num_channels + if "targets" not in features: + features["targets"] = tf.zeros( + [self.hparams.batch_size, 1, 1, num_channels], dtype=tf.int32) + logits, _ = self(features) # pylint: disable=not-callable + samples = tf.argmax(logits, axis=-1) + + # Restore inputs to not confuse Estimator in edge cases. + if inputs_old is not None: + features["inputs"] = inputs_old + + # Return samples. + return samples + + def decode(self, bottleneck): + """Auto-decode from the bottleneck and return the result.""" + # Get the shape from bottleneck and num channels. 
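+    # encode() returns bottlenecks in {0, 1}; map them back to the {-1, 1}
+    # range of the tanh bottleneck before running inference below.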
+ shape = common_layers.shape_list(bottleneck) + try: + num_channels = self.hparams.problem.num_channels + except AttributeError: + num_channels = 1 + dummy_targets = tf.zeros(shape[:-1] + [num_channels]) + # Set the bottleneck to decode. + if len(shape) > 4: + bottleneck = tf.squeeze(bottleneck, axis=[1]) + bottleneck = 2 * bottleneck - 1 # Be -1/1 instead of 0/1. + self._cur_bottleneck_tensor = bottleneck + # Run decoding. + res = self.infer({"targets": dummy_targets}) + self._cur_bottleneck_tensor = None + return res + + def _get_kernel_and_strides(self): + hparams = self.hparams + kernel = (hparams.kernel_height, hparams.kernel_width) + kernel = (hparams.kernel_height, 1) if self.is1d else kernel + strides = (2, 1) if self.is1d else (2, 2) + return (kernel, strides) + + +@registry.register_model +class AutoencoderAutoregressive(AutoencoderBasic): + """Autoencoder with an autoregressive part.""" + + def body(self, features): + hparams = self.hparams + # Run the basic autoencoder part first. + basic_result, losses = super(AutoencoderAutoregressive, self).body(features) + if hparams.autoregressive_mode == "none": + assert not hparams.autoregressive_forget_base + return basic_result, losses + if "training" in losses: + plain_training_loss = losses.pop("training") + losses["plain"] = plain_training_loss + res_shape = common_layers.shape_list(basic_result) + vocab_size = self._problem_hparams.vocab_size["targets"] + if hasattr(self._hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % self._hparams.vocab_divisor + targets = tf.one_hot(features["targets_raw"], vocab_size) + # Prepare inputs for autoregressive modes. + if common_layers.shape_list(features["targets"])[1] == 1: + # This happens on the first step of predicitions. + assert hparams.mode == tf_estimator.ModeKeys.PREDICT + targets = tf.zeros_like(basic_result) + targets = self.embed(targets) + if hparams.autoregressive_gumbel_sample: + basic_hot = self.gumbel_sample(basic_result) + else: + basic_hot = basic_result + basic_result = self.embed(basic_hot) + shape = common_layers.shape_list(basic_result) + basic1d = tf.reshape(basic_result, [shape[0], -1, shape[-1]]) + targets = tf.reshape(targets, common_layers.shape_list(basic_result)) + # During autoregressive inference, don't resample. + if hparams.mode == tf_estimator.ModeKeys.PREDICT: + if hasattr(hparams, "sampled_basic1d_tensor"): + basic1d = hparams.sampled_basic1d_tensor + else: + hparams.sampled_basic1d_tensor = basic1d + # Sometimes it's useful to look at non-autoregressive evals. + targets_dropout = targets + if (hparams.mode == tf_estimator.ModeKeys.EVAL and + hparams.autoregressive_eval_pure_autoencoder): + targets_dropout = tf.zeros_like(basic_result) + # Now combine the basic reconstruction with shifted targets. + targets1d = tf.reshape(targets_dropout, [shape[0], -1, shape[-1]]) + targets_shifted = common_layers.shift_right_3d(targets1d) + concat1d = tf.concat([basic1d, targets_shifted], axis=-1) + # The forget_base hparam sets purely-autoregressive mode, no autoencoder. + if hparams.autoregressive_forget_base: + concat1d = tf.reshape(targets, [shape[0], -1, shape[-1]]) + concat1d = common_layers.shift_right_3d(concat1d) + # The autoregressive part depends on the mode. 
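+    # Every mode operates on concat1d = [basic reconstruction; shifted
+    # targets]: conv3/conv5 apply causal (LEFT-padded) convolutions of width
+    # 3/5, and sru runs a simple recurrent unit on top of the conv3 features.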
+ if hparams.autoregressive_mode == "conv3": + res = common_layers.conv1d( + concat1d, + hparams.hidden_size, + 3, + padding="LEFT", + activation=common_layers.belu, + name="autoregressive_conv3") + res = tf.layers.dense(res, vocab_size, name="autoregressive_final") + return tf.reshape(res, res_shape), losses + if hparams.autoregressive_mode == "conv5": + res = common_layers.conv1d( + concat1d, + hparams.hidden_size, + 5, + padding="LEFT", + activation=common_layers.belu, + name="autoregressive_conv5") + res = tf.layers.dense(res, vocab_size, name="autoregressive_final") + return tf.reshape(res, res_shape), losses + if hparams.autoregressive_mode == "sru": + res = common_layers.conv1d( + concat1d, + hparams.hidden_size, + 3, + padding="LEFT", + activation=common_layers.belu, + name="autoregressive_sru_conv3") + res = common_layers.sru(res) + res = tf.layers.dense(res, vocab_size, name="autoregressive_final") + return tf.reshape(res, res_shape), losses + + raise ValueError( + "Unsupported autoregressive mode: %s" % hparams.autoregressive_mode) + + def infer(self, features, *args, **kwargs): + """Produce predictions from the model by sampling.""" + # Inputs and features preparation needed to handle edge cases. + if not features: + features = {} + inputs_old = None + if "inputs" in features and len(features["inputs"].shape) < 4: + inputs_old = features["inputs"] + features["inputs"] = tf.expand_dims(features["inputs"], 2) + + # Sample first. + try: + num_channels = self.hparams.problem.num_channels + except AttributeError: + num_channels = 1 + if "targets" not in features: + features["targets"] = tf.zeros( + [self.hparams.batch_size, 1, 1, num_channels], dtype=tf.int32) + logits, _ = self(features) # pylint: disable=not-callable + samples = common_layers.sample_with_temperature(logits, 0.0) + shape = common_layers.shape_list(samples) + + # Sample again if requested for the autoregressive part. + extra_samples = self.hparams.autoregressive_decode_steps + for i in range(extra_samples): + if i == extra_samples - 2: + self.hparams.sampling_temp /= 2 + if i == extra_samples - 1: + self.hparams.sampling_temp = 0.0 + features["targets"] = samples + old_samples1d = tf.reshape(samples, [shape[0], -1, shape[3]]) + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + logits, _ = self(features) # pylint: disable=not-callable + samples = common_layers.sample_with_temperature( + logits, self.hparams.sampling_temp) + samples1d = tf.reshape(samples, [shape[0], -1, shape[3]]) + samples1d = tf.concat([old_samples1d[:, :i, :], samples1d[:, i:, :]], + axis=1) + samples = tf.reshape(samples1d, shape) + + # Restore inputs to not confuse Estimator in edge cases. + if inputs_old is not None: + features["inputs"] = inputs_old + + # Return samples. 
+ return samples + + +@registry.register_model +class AutoencoderResidual(AutoencoderAutoregressive): + """Residual autoencoder.""" + + def dropout(self, x): + is_training = self.hparams.mode == tf_estimator.ModeKeys.TRAIN + hparams = self.hparams + if hparams.dropout <= 0.0 or not is_training: + return x + warm_step = hparams.bottleneck_warmup_steps * 2**hparams.num_hidden_layers + dropout = common_layers.inverse_lin_decay(warm_step // 2) * hparams.dropout + return common_layers.dropout_with_broadcast_dims( + x, 1.0 - dropout, broadcast_dims=[-1]) + + def encoder(self, x): + with tf.variable_scope("encoder"): + hparams = self.hparams + layers = [] + kernel, strides = self._get_kernel_and_strides() + residual_kernel = (hparams.residual_kernel_height, + hparams.residual_kernel_width) + residual_kernel1d = (hparams.residual_kernel_height, 1) + residual_kernel = residual_kernel1d if self.is1d else residual_kernel + residual_conv = tf.layers.conv2d + if hparams.residual_use_separable_conv: + residual_conv = tf.layers.separable_conv2d + # Down-convolutions. + for i in range(hparams.num_hidden_layers): + with tf.variable_scope("layer_%d" % i): + x = self.make_even_size(x) + layers.append(x) + x = self.dropout(x) + filters = hparams.hidden_size * 2**(i + 1) + filters = min(filters, hparams.max_hidden_size) + x = common_attention.add_timing_signal_nd(x) + x = tf.layers.conv2d( + x, + filters, + kernel, + strides=strides, + padding="SAME", + activation=common_layers.belu, + name="strided") + y = x + y = tf.nn.dropout(y, 1.0 - hparams.residual_dropout) + for r in range(hparams.num_residual_layers): + residual_filters = filters + if r < hparams.num_residual_layers - 1: + residual_filters = int( + filters * hparams.residual_filter_multiplier) + y = residual_conv( + y, + residual_filters, + residual_kernel, + padding="SAME", + activation=common_layers.belu, + name="residual_%d" % r) + x += y + x = common_layers.layer_norm(x, name="ln") + return x, layers + + def decoder(self, x, encoder_layers=None): + with tf.variable_scope("decoder"): + hparams = self.hparams + is_training = self.hparams.mode == tf_estimator.ModeKeys.TRAIN + kernel, strides = self._get_kernel_and_strides() + residual_kernel = (hparams.residual_kernel_height, + hparams.residual_kernel_width) + residual_kernel1d = (hparams.residual_kernel_height, 1) + residual_kernel = residual_kernel1d if self.is1d else residual_kernel + residual_conv = tf.layers.conv2d + if hparams.residual_use_separable_conv: + residual_conv = tf.layers.separable_conv2d + # Up-convolutions. 
+ for i in range(hparams.num_hidden_layers): + j = hparams.num_hidden_layers - i - 1 + if is_training: + nomix_p = common_layers.inverse_lin_decay( + int(hparams.bottleneck_warmup_steps * 0.25 * 2**j)) + 0.01 + if common_layers.should_generate_summaries(): + tf.summary.scalar("nomix_p_%d" % j, nomix_p) + filters = hparams.hidden_size * 2**j + filters = min(filters, hparams.max_hidden_size) + with tf.variable_scope("layer_%d" % i): + j = hparams.num_hidden_layers - i - 1 + x = tf.layers.conv2d_transpose( + x, + filters, + kernel, + strides=strides, + padding="SAME", + activation=common_layers.belu, + name="strided") + y = x + for r in range(hparams.num_residual_layers): + residual_filters = filters + if r < hparams.num_residual_layers - 1: + residual_filters = int( + filters * hparams.residual_filter_multiplier) + y = residual_conv( + y, + residual_filters, + residual_kernel, + padding="SAME", + activation=common_layers.belu, + name="residual_%d" % r) + x += tf.nn.dropout(y, 1.0 - hparams.residual_dropout) + x = common_layers.layer_norm(x, name="ln") + x = common_attention.add_timing_signal_nd(x) + if encoder_layers is not None: + enc_x = encoder_layers[j] + enc_shape = common_layers.shape_list(enc_x) + x_mix = x[:enc_shape[0], :enc_shape[1], :enc_shape[2], :] + if is_training: # Mix at the beginning of training. + rand = tf.random_uniform(common_layers.shape_list(x_mix)) + x_mix = tf.where(tf.less(rand, nomix_p), x_mix, enc_x) + if hparams.gan_loss_factor != 0: + x_gan = x[enc_shape[0]:, :enc_shape[1], :enc_shape[2], :] + x = tf.concat([x_mix, x_gan], axis=0) + else: + x = x_mix + return x + + +@registry.register_model +class AutoencoderResidualVAE(AutoencoderResidual): + """Residual VAE autoencoder.""" + + def bottleneck(self, x): + hparams = self.hparams + z_size = hparams.bottleneck_bits + x_shape = common_layers.shape_list(x) + with tf.variable_scope("vae"): + mu = tf.layers.dense(x, z_size, name="mu") + if hparams.mode != tf_estimator.ModeKeys.TRAIN: + return mu, 0.0 # No sampling or kl loss on eval. 
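+      # Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, 1) and
+      # sigma**2 = exp(log_sigma).  The KL term below is the standard
+      # 0.5 * (sigma**2 + mu**2 - 1 - log_sigma) against a unit Gaussian,
+      # averaged over dimensions; the "free bits" threshold keeps it at zero
+      # until that average exceeds z_size // 4.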
+ log_sigma = tf.layers.dense(x, z_size, name="log_sigma") + epsilon = tf.random_normal(x_shape[:-1] + [z_size]) + z = mu + tf.exp(log_sigma / 2) * epsilon + kl = 0.5 * tf.reduce_mean( + tf.expm1(log_sigma) + tf.square(mu) - log_sigma, axis=-1) + free_bits = z_size // 4 + kl_loss = tf.reduce_mean(tf.maximum(kl - free_bits, 0.0)) + return z, kl_loss * hparams.kl_beta + + def sample(self, features=None, shape=None): + del features + hparams = self.hparams + div_x = 2**hparams.num_hidden_layers + div_y = 1 if self.is1d else 2**hparams.num_hidden_layers + size = [ + hparams.batch_size, hparams.sample_height // div_x, + hparams.sample_width // div_y, hparams.bottleneck_bits + ] + size = size if shape is None else shape + return tf.random_normal(size) + + +@registry.register_model +class AutoencoderBasicDiscrete(AutoencoderAutoregressive): + """Discrete autoencoder.""" + + def bottleneck(self, x): + hparams = self.hparams + x = tf.tanh(tf.layers.dense(x, hparams.bottleneck_bits, name="bottleneck")) + d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x) + if hparams.mode == tf_estimator.ModeKeys.TRAIN: + noise = tf.random_uniform(common_layers.shape_list(x)) + noise = 2.0 * tf.to_float(tf.less(hparams.bottleneck_noise, noise)) - 1.0 + d *= noise + x = common_layers.mix(d, x, hparams.discretize_warmup_steps, + hparams.mode == tf_estimator.ModeKeys.TRAIN) + return x, 0.0 + + def sample(self, features=None, shape=None): + del features + hp = self.hparams + div_x = 2**hp.num_hidden_layers + div_y = 1 if self.is1d else 2**hp.num_hidden_layers + size = [ + hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y, + hp.bottleneck_bits + ] + size = size if shape is None else shape + rand = tf.random_uniform(size) + return 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0 + + +@registry.register_model +class AutoencoderResidualDiscrete(AutoencoderResidual): + """Discrete residual autoencoder.""" + + def variance_loss(self, b): + part = tf.random_uniform(common_layers.shape_list(b)) + selection = tf.to_float(tf.less(part, tf.random_uniform([]))) + selection_size = tf.reduce_sum(selection) + part_avg = tf.abs(tf.reduce_sum(b * selection)) / (selection_size + 1) + return part_avg + + def bottleneck(self, x, bottleneck_bits=None): # pylint: disable=arguments-differ + if bottleneck_bits is not None: + old_bottleneck_bits = self.hparams.bottleneck_bits + self.hparams.bottleneck_bits = bottleneck_bits + res, loss = discretization.parametrized_bottleneck(x, self.hparams) + if bottleneck_bits is not None: + self.hparams.bottleneck_bits = old_bottleneck_bits + return res, loss + + def unbottleneck(self, x, res_size, reuse=None): + with tf.variable_scope("unbottleneck", reuse=reuse): + return discretization.parametrized_unbottleneck(x, res_size, self.hparams) + + def sample(self, features=None, shape=None): + del features + hp = self.hparams + div_x = 2**hp.num_hidden_layers + div_y = 1 if self.is1d else 2**hp.num_hidden_layers + size = [ + hp.batch_size, hp.sample_height // div_x, hp.sample_width // div_y, + hp.bottleneck_bits + ] + size = size if shape is None else shape + rand = tf.random_uniform(size) + res = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0 + # If you want to set some first bits to a fixed value, do this: + # fixed = tf.zeros_like(rand) - 1.0 + # nbits = 3 + # res = tf.concat([fixed[:, :, :, :nbits], res[:, :, :, nbits:]], axis=-1) + return res + + +@registry.register_model +class AutoencoderOrderedDiscrete(AutoencoderResidualDiscrete): + """Ordered discrete autoencoder.""" + + 
def bottleneck(self, x): # pylint: disable=arguments-differ + hparams = self.hparams + if hparams.unordered: + return super(AutoencoderOrderedDiscrete, self).bottleneck(x) + noise = hparams.bottleneck_noise + hparams.bottleneck_noise = 0.0 # We'll add noise below. + x, loss = discretization.parametrized_bottleneck(x, hparams) + hparams.bottleneck_noise = noise + if hparams.mode == tf_estimator.ModeKeys.TRAIN: + # We want a number p such that p^bottleneck_bits = 1 - noise. + # So log(p) * bottleneck_bits = log(noise) + log_p = tf.log1p(-float(noise) / 2) / float(hparams.bottleneck_bits) + # Probabilities of flipping are p, p^2, p^3, ..., p^bottleneck_bits. + noise_mask = 1.0 - tf.exp(tf.cumsum(tf.zeros_like(x) + log_p, axis=-1)) + # Having the no-noise mask, we can make noise just uniformly at random. + ordered_noise = tf.random_uniform(tf.shape(x)) + # We want our noise to be 1s at the start and random {-1, 1} bits later. + ordered_noise = tf.to_float(tf.less(noise_mask, ordered_noise)) + # Now we flip the bits of x on the noisy positions (ordered and normal). + x *= 2.0 * ordered_noise - 1 + return x, loss + + +@registry.register_model +class AutoencoderDualDiscrete(AutoencoderResidualDiscrete): + """Dual discrete autoencoder.""" + + def body(self, features): + if self.hparams.mode != tf_estimator.ModeKeys.EVAL: + t, i = features["targets_raw"], features["inputs_raw"] + t, i = common_layers.pad_to_same_length(t, i) + features["targets_raw"] = tf.concat([t, i], axis=0) + return super(AutoencoderDualDiscrete, self).body(features) + + def embed(self, x, name="embedding"): + if self.hparams.mode == tf_estimator.ModeKeys.EVAL: + return super(AutoencoderDualDiscrete, self).embed(x, name=name + "_t") + xt, xi = tf.split(x, 2, axis=0) + xte = super(AutoencoderDualDiscrete, self).embed(xt, name=name + "_t") + xie = super(AutoencoderDualDiscrete, self).embed(xi, name=name + "_i") + return tf.concat([xte, xie], axis=0) + + def bottleneck(self, x): + hparams = self.hparams + b, _ = super(AutoencoderDualDiscrete, self).bottleneck(x) + if hparams.mode == tf_estimator.ModeKeys.EVAL: + return b, 0.0 + bt, bi = tf.split(b, 2, axis=0) + if self.hparams.mode != tf_estimator.ModeKeys.TRAIN: + return tf.concat([bi, bi], axis=0), 0.0 + # Share the first hparams.bottleneck_shared_bits. + shared = (bt + bi) / 2 # -1 if both -1, 1 if both were 1, 0 if disagree. + rand = tf.random_uniform(common_layers.shape_list(bt)) + br = tf.where(rand < 0.5, bt, bi) # Break ties at random. 
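+    # Where the two halves disagree (shared == 0) the tie-broken bits br are
+    # used; how many leading bits are actually shared is annealed in below by
+    # the bottleneck_shared_bits_*_warmup schedule.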
+    bs = tf.where(shared == 0, br, shared)
+    bs = tf.concat([bs, bs], axis=0)
+    n = hparams.bottleneck_shared_bits
+    step = tf.train.get_global_step()
+    zero = tf.constant(0, dtype=tf.int64)
+    if step is None:
+      step = zero
+    step = tf.maximum(zero, step - hparams.bottleneck_shared_bits_start_warmup)
+    f = common_layers.inverse_lin_decay(
+        hparams.bottleneck_shared_bits_stop_warmup, min_value=0.1, step=step)
+    n = tf.where(step > 1, n * f, n)
+    n = tf.cast(n, tf.int64)
+    b_shape = common_layers.shape_list(b)
+    b = tf.concat([bs[..., :n], b[..., n:]], axis=-1)
+    b = tf.reshape(b, b_shape)
+    return b, 0.0
+
+  def unbottleneck(self, b, res_size, reuse=None):
+    x = super(AutoencoderDualDiscrete, self).unbottleneck(
+        b, res_size, reuse=reuse)
+    if self.hparams.mode == tf_estimator.ModeKeys.EVAL:
+      return tf.layers.dense(x, res_size, name="dual_unbottleneck_t")
+    xt, xi = tf.split(x, 2, axis=0)
+    xt = tf.layers.dense(xt, res_size, name="dual_unbottleneck_t")
+    xi = tf.layers.dense(xi, res_size, name="dual_unbottleneck_i")
+    return tf.concat([xt, xi], axis=0)
+
+  def infer(self, features, *args, **kwargs):  # pylint: disable=arguments-differ
+    """Produce predictions from the model."""
+    del args, kwargs
+    # Inputs and features preparation needed to handle edge cases.
+    if not features:
+      features = {}
+    inputs_old = None
+    if "inputs" in features and len(features["inputs"].shape) < 4:
+      inputs_old = features["inputs"]
+      features["inputs"] = tf.expand_dims(features["inputs"], 2)
+
+    # Set targets to the input size first.
+    features["targets"] = tf.zeros_like(features["inputs"])
+    self._encode_on_predict = True
+    logits, _ = self(features)  # pylint: disable=not-callable
+    if self.hparams.gan_loss_factor != 0:
+      logits, _ = tf.split(logits, 2, axis=0)  # Remove GAN.
+    logits, _ = tf.split(logits, 2, axis=0)  # Targets and inputs from encoding.
+    # Uncomment the line below to get reconstructed inputs instead of targets.
+    # (and comment out the line above at the same time).
+    # _, logits = tf.split(logits, 2, axis=0)
+    samples = tf.argmax(logits, axis=-1)
+
+    # Restore inputs to not confuse Estimator in edge cases.
+    if inputs_old is not None:
+      features["inputs"] = inputs_old
+
+    # Return samples.
+ return samples + + +@registry.register_model +class AutoencoderStacked(AutoencoderResidualDiscrete): + """A stacked autoencoder.""" + + def stack(self, b, size, bottleneck_bits, name): + with tf.variable_scope(name + "_stack"): + unb = self.unbottleneck(b, size) + enc = self.encoder(unb) + b, _ = self.bottleneck(enc, bottleneck_bits=bottleneck_bits) + return b + + def unstack(self, b, size, bottleneck_bits, name): + with tf.variable_scope(name + "_unstack"): + unb = self.unbottleneck(b, size) + dec = self.decoder(unb) + pred = tf.layers.dense(dec, bottleneck_bits, name="pred") + pred_shape = common_layers.shape_list(pred) + pred1 = tf.reshape(pred, pred_shape[:-1] + [-1, 2]) + x, y = tf.split(pred1, 2, axis=-1) + x = tf.squeeze(x, axis=[-1]) + y = tf.squeeze(y, axis=[-1]) + gt = 2.0 * tf.to_float(tf.less(x, y)) - 1.0 + gtc = tf.tanh(y - x) + gt += gtc - tf.stop_gradient(gtc) + return gt, pred1 + + def stack_loss(self, b, b_pred, name): + with tf.variable_scope(name): + labels_discrete = tf.to_int32((b + 1.0) * 0.5) + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=labels_discrete, logits=b_pred) + return tf.reduce_mean(loss) + + def full_stack(self, b, x_size, bottleneck_bits, losses, is_training, i): + stack1_b = self.stack(b, x_size, bottleneck_bits, "step%d" % i) + if i > 1: + stack1_b = self.full_stack(stack1_b, 2 * x_size, 2 * bottleneck_bits, + losses, is_training, i - 1) + b1, b_pred = self.unstack(stack1_b, x_size, bottleneck_bits, "step%d" % i) + losses["stack%d_loss" % i] = self.stack_loss(b, b_pred, "step%d" % i) + b_shape = common_layers.shape_list(b) + if is_training: + condition = tf.less(tf.random_uniform([]), 0.5) + condition = tf.reshape(condition, [1] * len(b.shape)) + condition = tf.tile(condition, b.shape) + b1 = tf.where(condition, b, b1) + return tf.reshape(b1, b_shape) + + def body(self, features): + hparams = self.hparams + num_stacks = hparams.num_hidden_layers + hparams.num_hidden_layers = 1 + is_training = hparams.mode == tf_estimator.ModeKeys.TRAIN + if hparams.mode != tf_estimator.ModeKeys.PREDICT: + x = features["targets"] + shape = common_layers.shape_list(x) + is1d = shape[2] == 1 + self.is1d = is1d + x, _ = common_layers.pad_to_same_length( + x, x, final_length_divisible_by=2**num_stacks, axis=1) + if not is1d: + x, _ = common_layers.pad_to_same_length( + x, x, final_length_divisible_by=2**num_stacks, axis=2) + # Run encoder. + x = self.encoder(x) + x_size = common_layers.shape_list(x)[-1] + # Bottleneck (mix during early training, not too important but stable). + b, b_loss = self.bottleneck(x) + losses = {"bottleneck0_loss": b_loss} + b = self.full_stack(b, 2 * x_size, 2 * hparams.bottleneck_bits, losses, + is_training, num_stacks - 1) + b = self.unbottleneck(b, x_size) + b = common_layers.mix(b, x, hparams.bottleneck_warmup_steps, is_training) + x = b + else: + b = self.sample() + res_size = self.hparams.hidden_size * 2**self.hparams.num_hidden_layers + res_size = min(res_size, hparams.max_hidden_size) + x = self.unbottleneck(b, res_size) + # Run decoder. + x = self.decoder(x) + if hparams.mode == tf_estimator.ModeKeys.PREDICT: + return x + # Cut to the right size and mix before returning. 
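+    # Mixing the reconstruction with the ground-truth targets for the first
+    # bottleneck_warmup_steps // 2 steps keeps early training stable while the
+    # stacked bottlenecks are still warming up.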
+ res = x[:, :shape[1], :shape[2], :] + res = common_layers.mix(res, features["targets"], + hparams.bottleneck_warmup_steps // 2, is_training) + hparams.num_hidden_layers = num_stacks + return res, losses + + +@registry.register_hparams +def autoencoder_basic(): + """Basic autoencoder model.""" + hparams = common_hparams.basic_params1() + hparams.optimizer = "adam" + hparams.learning_rate_constant = 0.0002 + hparams.learning_rate_warmup_steps = 500 + hparams.learning_rate_schedule = "constant * linear_warmup" + hparams.label_smoothing = 0.0 + hparams.batch_size = 128 + hparams.hidden_size = 64 + hparams.num_hidden_layers = 5 + hparams.initializer = "uniform_unit_scaling" + hparams.initializer_gain = 1.0 + hparams.weight_decay = 0.0 + hparams.kernel_height = 4 + hparams.kernel_width = 4 + hparams.dropout = 0.05 + hparams.add_hparam("max_hidden_size", 1024) + hparams.add_hparam("bottleneck_bits", 128) + hparams.add_hparam("bottleneck_shared_bits", 0) + hparams.add_hparam("bottleneck_shared_bits_start_warmup", 0) + hparams.add_hparam("bottleneck_shared_bits_stop_warmup", 0) + hparams.add_hparam("bottleneck_noise", 0.1) + hparams.add_hparam("bottleneck_warmup_steps", 2000) + hparams.add_hparam("sample_height", 32) + hparams.add_hparam("sample_width", 32) + hparams.add_hparam("discriminator_batchnorm", True) + hparams.add_hparam("num_sliced_vecs", 20000) + hparams.add_hparam("sliced_do_tanh", int(True)) + hparams.add_hparam("discriminator_size", 256) + hparams.add_hparam("discriminator_kernel_size", 6) + hparams.add_hparam("discriminator_strides", 4) + hparams.add_hparam("discriminator_pure_mean", int(False)) + hparams.add_hparam("code_loss_factor", 1.0) + hparams.add_hparam("gan_codes_warmup_steps", 16000) + hparams.add_hparam("gan_loss_factor", 0.0) + hparams.add_hparam("bottleneck_l2_factor", 0.05) + hparams.add_hparam("gumbel_temperature", 0.5) + hparams.add_hparam("gumbel_noise_factor", 0.5) + hparams.add_hparam("vq_temperature", 0.001) + hparams.add_hparam("use_vq_loss", int(False)) + hparams.add_hparam("discriminator", "double") + return hparams + + +@registry.register_hparams +def autoencoder_autoregressive(): + """Autoregressive autoencoder model.""" + hparams = autoencoder_basic() + hparams.add_hparam("autoregressive_forget_base", False) + hparams.add_hparam("autoregressive_mode", "none") + hparams.add_hparam("autoregressive_decode_steps", 0) + hparams.add_hparam("autoregressive_eval_pure_autoencoder", False) + hparams.add_hparam("autoregressive_gumbel_sample", False) + return hparams + + +@registry.register_hparams +def autoencoder_residual(): + """Residual autoencoder model.""" + hparams = autoencoder_autoregressive() + hparams.optimizer = "Adafactor" + hparams.clip_grad_norm = 1.0 + hparams.learning_rate_constant = 0.5 + hparams.learning_rate_warmup_steps = 500 + hparams.learning_rate_schedule = "constant * linear_warmup * rsqrt_decay" + hparams.num_hidden_layers = 5 + hparams.hidden_size = 64 + hparams.max_hidden_size = 1024 + hparams.add_hparam("num_residual_layers", 2) + hparams.add_hparam("residual_kernel_height", 3) + hparams.add_hparam("residual_kernel_width", 3) + hparams.add_hparam("residual_filter_multiplier", 2.0) + hparams.add_hparam("residual_dropout", 0.2) + hparams.add_hparam("residual_use_separable_conv", int(True)) + hparams.add_hparam("kl_beta", 1.0) + return hparams + + +@registry.register_hparams +def autoencoder_residual_text(): + """Residual autoencoder model for text.""" + hparams = autoencoder_residual() + hparams.bottleneck_bits = 32 + hparams.batch_size = 
1024 + hparams.hidden_size = 64 + hparams.max_hidden_size = 512 + hparams.bottleneck_noise = 0.0 + hparams.bottom = { + "inputs": modalities.identity_bottom, + "targets": modalities.identity_bottom, + } + hparams.top = { + "targets": modalities.identity_top, + } + hparams.autoregressive_mode = "none" + hparams.sample_width = 1 + return hparams + + +@registry.register_hparams +def autoencoder_basic_discrete(): + """Basic autoencoder model.""" + hparams = autoencoder_autoregressive() + hparams.num_hidden_layers = 5 + hparams.hidden_size = 64 + hparams.bottleneck_bits = 1024 + hparams.bottleneck_noise = 0.1 + hparams.add_hparam("discretize_warmup_steps", 16000) + return hparams + + +@registry.register_hparams +def autoencoder_residual_discrete(): + """Residual discrete autoencoder model.""" + hparams = autoencoder_residual() + hparams.bottleneck_bits = 1024 + hparams.bottleneck_noise = 0.05 + hparams.add_hparam("discretize_warmup_steps", 16000) + hparams.add_hparam("bottleneck_kind", "tanh_discrete") + hparams.add_hparam("isemhash_noise_dev", 0.5) + hparams.add_hparam("isemhash_mix_prob", 0.5) + hparams.add_hparam("isemhash_filter_size_multiplier", 2.0) + hparams.add_hparam("vq_beta", 0.25) + hparams.add_hparam("vq_decay", 0.999) + hparams.add_hparam("vq_epsilon", 1e-5) + return hparams + + +@registry.register_hparams +def autoencoder_residual_discrete_big(): + """Residual discrete autoencoder model, big version.""" + hparams = autoencoder_residual_discrete() + hparams.hidden_size = 128 + hparams.max_hidden_size = 4096 + hparams.bottleneck_noise = 0.1 + hparams.residual_dropout = 0.4 + return hparams + + +@registry.register_hparams +def autoencoder_ordered_discrete(): + """Ordered discrete autoencoder model.""" + hparams = autoencoder_residual_discrete() + hparams.bottleneck_noise = 0.05 # Use 0.8 for ordered. 
+ hparams.gan_loss_factor = 0.05 + hparams.add_hparam("unordered", True) + return hparams + + +@registry.register_hparams +def autoencoder_ordered_discrete_image64(): + """Ordered discrete autoencoder model.""" + hparams = autoencoder_ordered_discrete() + hparams.batch_size = 32 + hparams.num_hidden_layers = 6 + hparams.bottleneck_warmup_steps *= 2 + hparams.gan_codes_warmup_steps *= 2 + + return hparams + + +@registry.register_hparams +def autoencoder_ordered_discrete_patched(): + """Ordered discrete autoencoder model.""" + hparams = autoencoder_ordered_discrete() + hparams.discriminator = "patched" + return hparams + + +@registry.register_hparams +def autoencoder_ordered_discrete_single(): + """Ordered discrete autoencoder model.""" + hparams = autoencoder_ordered_discrete() + hparams.discriminator = "single" + return hparams + + +@registry.register_hparams +def autoencoder_ordered_discrete_hs256(): + """Ordered discrete autoencoder model.""" + hparams = autoencoder_ordered_discrete() + hparams.hidden_size = 256 + return hparams + + +@registry.register_hparams +def autoencoder_ordered_text(): + """Ordered discrete autoencoder model for text.""" + hparams = autoencoder_ordered_discrete() + hparams.bottleneck_bits = 1024 + hparams.bottleneck_shared_bits = 1024-64 + hparams.bottleneck_shared_bits_start_warmup = 75000 + hparams.bottleneck_shared_bits_stop_warmup = 275000 + hparams.num_hidden_layers = 7 + hparams.batch_size = 1024 + hparams.autoregressive_mode = "conv5" + hparams.max_hidden_size = 1024 + hparams.bottom = { + "inputs": modalities.identity_bottom, + "targets": modalities.identity_bottom, + } + hparams.top = { + "targets": modalities.identity_top, + } + hparams.sample_height = 128 + hparams.sample_width = 1 + return hparams + + +@registry.register_hparams +def autoencoder_ordered_text_small(): + """Ordered discrete autoencoder model for text, small version.""" + hparams = autoencoder_ordered_text() + hparams.bottleneck_bits = 32 + hparams.num_hidden_layers = 3 + hparams.hidden_size = 64 + hparams.max_hidden_size = 512 + hparams.bottleneck_noise = 0.0 + hparams.autoregressive_mode = "conv5" + hparams.sample_height = 4 + return hparams + + +@registry.register_hparams +def autoencoder_ordered_discrete_vq(): + """Ordered discrete autoencoder model with VQ bottleneck.""" + hparams = autoencoder_ordered_discrete() + hparams.bottleneck_kind = "vq" + hparams.bottleneck_bits = 16 + return hparams + + +@registry.register_hparams +def autoencoder_discrete_pong(): + """Discrete autoencoder model for compressing pong frames.""" + hparams = autoencoder_ordered_discrete() + hparams.num_hidden_layers = 3 + hparams.bottleneck_bits = 24 + hparams.batch_size = 2 + hparams.gan_loss_factor = 0.01 + hparams.bottleneck_l2_factor = 0.001 + hparams.add_hparam("video_modality_loss_cutoff", 0.02) + return hparams + + +@registry.register_hparams +def autoencoder_discrete_tiny(): + """Discrete autoencoder model for compressing pong frames for testing.""" + hparams = autoencoder_ordered_discrete() + hparams.num_hidden_layers = 2 + hparams.bottleneck_bits = 24 + hparams.batch_size = 2 + hparams.gan_loss_factor = 0. 
+ hparams.bottleneck_l2_factor = 0.001 + hparams.add_hparam("video_modality_loss_cutoff", 0.02) + hparams.num_residual_layers = 1 + hparams.hidden_size = 32 + hparams.max_hidden_size = 64 + return hparams + + +@registry.register_hparams +def autoencoder_discrete_cifar(): + """Discrete autoencoder model for compressing cifar.""" + hparams = autoencoder_ordered_discrete() + hparams.bottleneck_noise = 0.0 + hparams.bottleneck_bits = 90 + hparams.num_hidden_layers = 2 + hparams.hidden_size = 256 + hparams.num_residual_layers = 4 + hparams.batch_size = 32 + hparams.learning_rate_constant = 1.0 + return hparams + + +@registry.register_ranged_hparams +def autoencoder_range(rhp): + """Tuning grid of the main autoencoder params.""" + rhp.set_float("dropout", 0.01, 0.3) + rhp.set_float("gan_loss_factor", 0.01, 0.1) + rhp.set_float("bottleneck_l2_factor", 0.001, 0.1, scale=rhp.LOG_SCALE) + rhp.set_discrete("bottleneck_warmup_steps", [200, 2000]) + rhp.set_float("gumbel_temperature", 0, 1) + rhp.set_float("gumbel_noise_factor", 0, 0.5) + + +@registry.register_ranged_hparams +def autoencoder_discrete_pong_range(rhp): + """Narrow tuning grid.""" + rhp.set_float("dropout", 0.0, 0.2) + rhp.set_discrete("max_hidden_size", [1024, 2048]) + + +@registry.register_hparams +def autoencoder_stacked(): + """Stacked autoencoder model.""" + hparams = autoencoder_residual_discrete() + hparams.bottleneck_bits = 128 + return hparams diff --git a/tensor2tensor/models/research/autoencoders_test.py b/tensor2tensor/models/research/autoencoders_test.py new file mode 100644 index 000000000..f2c1afbdf --- /dev/null +++ b/tensor2tensor/models/research/autoencoders_test.py @@ -0,0 +1,93 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Autoencoders tests.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np + +from tensor2tensor.data_generators import mnist # pylint: disable=unused-import +from tensor2tensor.models.research import autoencoders # pylint: disable=unused-import +from tensor2tensor.utils import registry +from tensor2tensor.utils import trainer_lib + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class AutoencoderTest(tf.test.TestCase): + + def get_mnist_random_output(self, model_name, hparams_set=None, + mode=tf_estimator.ModeKeys.TRAIN): + hparams_set = hparams_set or model_name + x = np.random.randint(256, size=(1, 28, 28, 1)) + y = np.random.randint(10, size=(1, 1)) + features = { + "targets": tf.constant(x, dtype=tf.int32), + "inputs": tf.constant(y, dtype=tf.int32), + } + hparams = trainer_lib.create_hparams( + hparams_set, problem_name="image_mnist_rev", data_dir=".") + model = registry.model(model_name)(hparams, mode) + tf.train.create_global_step() + logits, _ = model(features) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + res = session.run(logits) + return res + + @property + def mnist_output_shape(self): + return (1, 28, 28, 1, 256) + + def testAutoencoderBasic(self): + res = self.get_mnist_random_output("autoencoder_basic") + self.assertEqual(res.shape, self.mnist_output_shape) + + def testAutoencoderAutoregressive(self): + res = self.get_mnist_random_output("autoencoder_autoregressive") + self.assertEqual(res.shape, self.mnist_output_shape) + + def testAutoencoderResidual(self): + res = self.get_mnist_random_output("autoencoder_residual") + self.assertEqual(res.shape, self.mnist_output_shape) + + def testAutoencoderBasicDiscrete(self): + res = self.get_mnist_random_output("autoencoder_basic_discrete") + self.assertEqual(res.shape, self.mnist_output_shape) + + def testAutoencoderResidualDiscrete(self): + res = self.get_mnist_random_output("autoencoder_residual_discrete") + self.assertEqual(res.shape, self.mnist_output_shape) + + def testAutoencoderOrderedDiscrete(self): + res = self.get_mnist_random_output("autoencoder_ordered_discrete") + self.assertEqual(res.shape, self.mnist_output_shape) + + def testAutoencoderOrderedDiscreteVQ(self): + res = self.get_mnist_random_output( + "autoencoder_ordered_discrete", "autoencoder_ordered_discrete_vq") + self.assertEqual(res.shape, self.mnist_output_shape) + + # TODO(lukaszkaiser): Re-enable test by conserving lost shape information + # in autoencoder_stacked. + # def testAutoencoderStacked(self): + # res = self.get_mnist_random_output("autoencoder_stacked") + # self.assertEqual(res.shape, self.mnist_output_shape) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/research/cycle_gan.py b/tensor2tensor/models/research/cycle_gan.py new file mode 100644 index 000000000..7146a4ee8 --- /dev/null +++ b/tensor2tensor/models/research/cycle_gan.py @@ -0,0 +1,144 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Cycle GAN.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import modalities +from tensor2tensor.models.research import transformer_vae +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf + + +def discriminator(x, compress, hparams, name, reuse=None): + with tf.variable_scope(name, reuse=reuse): + x = tf.stop_gradient(2 * x) - x # Reverse gradient. + if compress: + x = transformer_vae.compress(x, None, False, hparams, "compress") + else: + x = transformer_vae.residual_conv(x, 1, 3, hparams, "compress_rc") + y = tf.reduce_mean(x, axis=1) + return tf.tanh(tf.layers.dense(y, 1, name="reduce")) + + +def generator(x, hparams, name, reuse=False): + with tf.variable_scope(name, reuse=reuse): + return transformer_vae.residual_conv(x, 1, 3, hparams, "generator") + + +def lossfn(real_input, fake_input, compress, hparams, lsgan, name): + """Loss function.""" + eps = 1e-12 + with tf.variable_scope(name): + d1 = discriminator(real_input, compress, hparams, "discriminator") + d2 = discriminator(fake_input, compress, hparams, "discriminator", + reuse=True) + if lsgan: + dloss = tf.reduce_mean( + tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2)) + gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9)) + loss = (dloss + gloss)/2 + else: # cross_entropy + dloss = -tf.reduce_mean( + tf.log(d1 + eps)) - tf.reduce_mean(tf.log1p(eps - d2)) + gloss = -tf.reduce_mean(tf.log(d2 + eps)) + loss = (dloss + gloss)/2 + return loss + + +def split_on_batch(x): + batch_size = tf.shape(x)[0] + i = batch_size // 2 + return x[:i, :, :, :], x[i:2*i, :, :, :] + + +def cycle_gan_internal(inputs, targets, _, hparams): + """Cycle GAN, main step used for training.""" + with tf.variable_scope("cycle_gan"): + # Embed inputs and targets. 
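+    # (The two embedding calls below share a single "embed" table via
+    # reuse=True, and split_on_batch pairs the first half of the input batch
+    # with the second half of the target batch, so the two domains are
+    # effectively unpaired.)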
+ inputs_orig, targets_orig = tf.to_int32(inputs), tf.to_int32(targets) + inputs = common_layers.embedding( + inputs_orig, hparams.vocab_size, hparams.hidden_size, "embed") + targets = common_layers.embedding( + targets_orig, hparams.vocab_size, hparams.hidden_size, + "embed", reuse=True) + + x, _ = split_on_batch(inputs) + _, y = split_on_batch(targets) + + # Y --> X + y_fake = generator(y, hparams, "Fy", reuse=False) + y_to_x_loss = lossfn(y, y_fake, True, hparams, True, "YtoX") + + # X --> Y + x_fake = generator(x, hparams, "Gx", reuse=False) + x_to_y_loss = lossfn(y, x_fake, True, hparams, True, "XtoY") + + # Cycle-Consistency + y_fake_ = generator(y_fake, hparams, "Gx", reuse=True) + x_fake_ = generator(x_fake, hparams, "Fy", reuse=True) + x_to_x_loss = hparams.cycle_loss_multiplier1 * tf.reduce_mean( + tf.abs(x_fake_ - x)) + y_to_y_loss = hparams.cycle_loss_multiplier2 * tf.reduce_mean( + tf.abs(y_fake_ - y)) + cycloss = x_to_x_loss + y_to_y_loss + + sample_generated = generator(inputs, hparams, "Gx", reuse=True) + sample_generated = tf.layers.dense( + sample_generated, hparams.vocab_size, name="softmax", reuse=None) + sample_generated = tf.stop_gradient( + tf.expand_dims(sample_generated, axis=2)) + + losses = {"cycloss": cycloss, + "y_to_x_loss": y_to_x_loss, + "x_to_y_loss": x_to_y_loss} + + return sample_generated, losses + + +@registry.register_model +class CycleGAN(t2t_model.T2TModel): + + def body(self, features): + return cycle_gan_internal( + features["inputs"], features["targets"], features["target_space_id"], + self._hparams) + + +@registry.register_hparams +def cycle_gan_small(): + """Set of hyperparameters.""" + hparams = transformer_vae.transformer_ae_small() + hparams.batch_size = 2048 + hparams.bottom = { + "inputs": modalities.identity_bottom, + "targets": modalities.identity_bottom, + } + hparams.top = { + "targets": modalities.identity_top, + } + hparams.weight_decay = 3.0 + hparams.learning_rate = 0.05 + hparams.kl_warmup_steps = 5000 + hparams.learning_rate_warmup_steps = 3000 + hparams.add_hparam("vocab_size", 66) # Vocabulary size, need to set here. + hparams.add_hparam("cycle_loss_multiplier1", 10.0) + hparams.add_hparam("cycle_loss_multiplier2", 10.0) + return hparams diff --git a/tensor2tensor/models/research/gene_expression.py b/tensor2tensor/models/research/gene_expression.py new file mode 100644 index 000000000..999f26edc --- /dev/null +++ b/tensor2tensor/models/research/gene_expression.py @@ -0,0 +1,150 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
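A minimal numeric sketch (illustrative only, with made-up scalar discriminator scores) of the LSGAN branch of lossfn() in cycle_gan.py above:

import numpy as np

d_real = np.array([0.8, 0.95])  # discriminator outputs on real inputs
d_fake = np.array([0.1, 0.3])   # discriminator outputs on generated inputs
dloss = np.mean((d_real - 0.9) ** 2) + np.mean(d_fake ** 2)
gloss = np.mean((d_fake - 0.9) ** 2)
loss = (dloss + gloss) / 2      # the value lossfn() returns in the lsgan case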
+ +"""Models for gene expression from DNA.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf + + +@registry.register_model +class GeneExpressionConv(t2t_model.T2TModel): + """Gene expression conv net. + + Based on "Basenji" model from + http://www.biorxiv.org/content/early/2017/07/10/161851 + + Uses layer_norm instead of batch_norm. + + Model expects that if targets are of length m, inputs are of length 32*m. The + original data expected that inputs would be of length 128*m, but the data has + been preprocessed to chunk every 4 bases into 1 ID (see + data_generators/gene_expression.py). + + The magnitude of the length reduction is controlled by the pooling sizes + (hparams.pooling_windows) at each conv layer (hparams.num_conv_layers). + """ + + def body(self, features): + inputs = features["inputs"] + inputs.get_shape().assert_has_rank(4) + + hp = self._hparams + + out = inputs + out = common_layers.flatten4d3d(out) + + # Conv layers + assert hp.num_conv_layers == len(hp.pooling_windows) + for i in range(hp.num_conv_layers): + out = conv_layer( + out, + hp.hidden_size, + hp.kernel_width, + hp.stride, + hp.pooling_windows[i], + hp.dropout, + dilation_rate=1, + name="conv_%d" % (i + 1)) + + # Dense dilated conv layers + for i in range(hp.num_dconv_layers): + dilation_rate = 2**(i + 1) + dconv_out = conv_layer( + out, + hp.hidden_size, + hp.kernel_width, + stride=1, + pooling_window=0, + dropout_rate=hp.dropout, + dilation_rate=dilation_rate, + name="dconv_%d" % (i + 1)) + out = tf.concat([out, dconv_out], axis=2) + + # Fully connected layer + out = fc_layer(out, hp.hidden_size, hp.dropout, name="fc") + + out.get_shape().assert_has_rank(3) + out = tf.expand_dims(out, 2) + return out + + +def conv_layer(x, + hidden_size, + kernel_size, + stride, + pooling_window, + dropout_rate, + dilation_rate, + name="conv"): + """Single conv layer with relu, optional pooling, and dropout.""" + with tf.variable_scope(name): + out = x + out = common_layers.conv1d_block( + out, + hidden_size, [(dilation_rate, kernel_size)], + strides=stride, + first_relu=False, + padding="same") + out = tf.nn.relu(out) + if pooling_window: + out = tf.layers.max_pooling1d( + out, pooling_window, pooling_window, padding="same") + out = tf.layers.dropout(out, dropout_rate) + return out + + +def fc_layer(x, num_out, dropout_rate, name="fc"): + with tf.variable_scope(name): + out = x + out = tf.layers.dense(out, num_out) + out = contrib.layers().layer_norm(out) + out = tf.nn.relu(out) + out = tf.layers.dropout(out, dropout_rate) + return out + + +@registry.register_hparams +def gene_expression_conv_base(): + """Hparams for GeneExpressionConv model.""" + hparams = common_hparams.basic_params1() + + batch_size = 10 + output_length = 2048 + inputs_per_output = 128 + chunk_size = 4 + input_length = output_length * inputs_per_output // chunk_size + hparams.batch_size = input_length * batch_size + + hparams.dropout = 0.1 + hparams.add_hparam("num_conv_layers", 4) + hparams.add_hparam("num_dconv_layers", 7) + # The product of these pooling windows should match + # input_length/target_length. 
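+  # Here 2 * 2 * 2 * 4 = 32 = inputs_per_output // chunk_size (128 // 4).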
+ hparams.add_hparam("pooling_windows", [2, 2, 2, 4]) + + hparams.hidden_size = 256 + hparams.kernel_width = 20 + hparams.add_hparam("stride", 1) + return hparams diff --git a/tensor2tensor/models/research/gene_expression_test.py b/tensor2tensor/models/research/gene_expression_test.py new file mode 100644 index 000000000..a20b82e26 --- /dev/null +++ b/tensor2tensor/models/research/gene_expression_test.py @@ -0,0 +1,76 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for Gene Expression models.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np + +from tensor2tensor.data_generators import gene_expression as gene_data +from tensor2tensor.layers import modalities # pylint: disable=unused-import +from tensor2tensor.models.research import gene_expression + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +def gene_expression_conv_test(): + hparams = gene_expression.gene_expression_conv_base() + hparams.hidden_size = 8 + hparams.num_dconv_layers = 2 + return hparams + + +class GeneExpressionModelsTest(tf.test.TestCase): + + def _test_model(self, hparams, model_cls): + batch_size = 3 + target_length = 6 + target_out = 10 # GeneExpressionProblem.num_output_predictions + input_length = target_length * 128 // 4 # chunk_size=4 + input_vocab_size = 5 + + inputs = np.random.randint( + 1, input_vocab_size + 1, size=(batch_size, input_length, 1, 1)) + targets = np.random.random_sample((batch_size, target_length, 1, + target_out)) + + features = { + "inputs": tf.constant(inputs, dtype=tf.int32), + "targets": tf.constant(targets, dtype=tf.float32), + } + p_hparams = hparams.problem_hparams + logits, _ = model_cls( + hparams, tf_estimator.ModeKeys.TRAIN, p_hparams)(features) + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + res = sess.run(logits) + + self.assertEqual(res.shape, (batch_size, target_length, 1, target_out)) + + def testGeneExpressionModels(self): + models_hparams = [(gene_expression.GeneExpressionConv, + gene_expression_conv_test())] + for model_cls, hparams in models_hparams: + hparams.add_hparam("data_dir", None) + p_hparams = gene_data.GenomicsExpressionCage10().get_hparams(hparams) + hparams.problem_hparams = p_hparams + self._test_model(hparams, model_cls) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/research/glow.py b/tensor2tensor/models/research/glow.py new file mode 100644 index 000000000..8ebb189d8 --- /dev/null +++ b/tensor2tensor/models/research/glow.py @@ -0,0 +1,206 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Glow generative model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.models.research import glow_init_hook +from tensor2tensor.models.research import glow_ops +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +arg_scope = contrib.framework().arg_scope +add_arg_scope = contrib.framework().add_arg_scope + +GLOW_DECODE_HPARAMS = ("identity_output=True,log_results=False," + "decode_in_memory=True,display_decoded_images=True") + + +@registry.register_hparams +def glow_hparams(): + """Glow Hparams.""" + hparams = common_hparams.basic_params1() + hparams.clip_grad_norm = None + hparams.weight_decay = 0.0 + hparams.learning_rate_constant = 3e-4 + hparams.batch_size = 32 + # can be prev_level, prev_step or normal. + # see: glow_ops.merge_level_and_latent_dist + hparams.add_hparam("level_scale", "prev_level") + hparams.add_hparam("n_levels", 3) + hparams.add_hparam("n_bits_x", 8) + hparams.add_hparam("depth", 32) + # Activation - Relu or Gatu + hparams.add_hparam("activation", "relu") + # Coupling layer, additive or affine. + hparams.add_hparam("coupling", "affine") + hparams.add_hparam("coupling_width", 512) + hparams.add_hparam("coupling_dropout", 0.0) + hparams.add_hparam("top_prior", "single_conv") + # init_batch_size denotes the number of examples used for data-dependent + # initialization. A higher init_batch_size is required for training + # stability especially when hparams.batch_size is low. + hparams.add_hparam("init_batch_size", 256) + hparams.add_hparam("temperature", 1.0) + + return hparams + + +@registry.register_model +class Glow(t2t_model.T2TModel): + """Glow generative model. + + Reference: https://arxiv.org/abs/1807.03039""" + + def init_preprocess(self, features): + """Preprocessing as per the input modality.""" + return features + + def preprocess(self, x): + """Normalize x. + + Args: + x: 4-D Tensor. 
+
+    Returns:
+      x: Scaled such that x lies between -0.5 and 0.5
+    """
+    n_bits_x = self.hparams.n_bits_x
+    n_bins = 2**n_bits_x
+    x = tf.cast(x, dtype=tf.float32)
+    if n_bits_x < 8:
+      x = tf.floor(x / 2 ** (8 - n_bits_x))
+    x = x / n_bins - 0.5
+    return x
+
+  @property
+  def temperature(self):
+    if self.is_predicting:
+      return self.hparams.temperature
+    return 1.0
+
+  @property
+  def is_training(self):
+    return self.hparams.mode == tf_estimator.ModeKeys.TRAIN
+
+  def infer(self, features, *args, **kwargs):  # pylint: disable=arguments-differ
+    del args, kwargs
+    x = features["inputs"]
+    batch_size = common_layers.shape_list(x)[0]
+    features["targets"] = tf.zeros(shape=(batch_size, 1, 1, 1))
+    _, _ = self(features)  # pylint: disable=not-callable
+
+    ops = [glow_ops.get_variable_ddi, glow_ops.actnorm, glow_ops.get_dropout]
+    var_scope = tf.variable_scope("glow/body", reuse=True)
+    # If eps=None, images are sampled from the prior.
+    with arg_scope(ops, init=False), var_scope:
+      predictions, _, _, _ = glow_ops.encoder_decoder(
+          "codec", self.z_sample, self.hparams, eps=None, reverse=True,
+          temperature=self.temperature)
+
+    return glow_ops.postprocess(predictions, self.hparams.n_bits_x)
+
+  def create_init_batch(self, features):
+    """Returns a batch of size "hparams.init_batch_size" for initialization.
+
+    Args:
+      features: input features.
+    Returns:
+      init_features: initialization features.
+    """
+    train_dataset = self.hparams.problem.dataset(
+        tf_estimator.ModeKeys.TRAIN, hparams=self.hparams)
+    train_dataset = train_dataset.batch(self.hparams.init_batch_size)
+    train_dataset = self.init_preprocess(train_dataset)
+    return train_dataset.make_one_shot_iterator().get_next()
+
+  @staticmethod
+  def train_hooks(hook_context):
+    del hook_context
+    return [glow_init_hook.GlowInitHook()]
+
+  def top_prior(self):
+    """Objective based on the prior over latent z.
+
+    Returns:
+      dist: instance of tfp.distributions.Normal, prior distribution.
+    """
+    return glow_ops.top_prior(
+        "top_prior", self.z_top_shape, learn_prior=self.hparams.top_prior,
+        temperature=self.temperature)
+
+  def body(self, features):
+    exp_coupling = ["affine", "additive"]
+    if self.hparams.coupling not in exp_coupling:
+      raise ValueError("Expected hparams.coupling to be in %s, got %s" %
+                       (exp_coupling, self.hparams.coupling))
+    if self.is_training:
+      init_features = self.create_init_batch(features)
+      init_op = self.objective_tower(init_features, init=True)
+      init_op = tf.Print(
+          init_op, [init_op], message="Triggering data-dependent init.",
+          first_n=20)
+      tf.add_to_collection("glow_init_op", init_op)
+    train_op = self.objective_tower(features, init=False)
+    return tf.zeros_like(features["targets"]), {"training": train_op}
+
+  def objective_tower(self, features, init=True):
+    """Objective in terms of bits-per-pixel.
+
+    Args:
+      features: dict of tensors with "inputs" and "targets" keys.
+      init: Whether or not to run data-dependent init.
+    Returns:
+      objective: float, bits-per-pixel.
+    """
+    x = features["inputs"]
+
+    # Scale x such that the pixels lie between -0.5 and 0.5.
+    x = self.preprocess(x)
+    x, objective = glow_ops.uniform_binning_correction(x)
+
+    # The arg_scope call ensures that the actnorm parameters are set such that
+    # the per-channel output activations have zero mean and unit variance
+    # ONLY during the first step. After that the parameters are learned
+    # through optimization.
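+    # (With init=True, glow_ops.get_variable_ddi assigns the data-dependent
+    # initial value computed from this batch; with init=False it simply reads
+    # the stored variable.)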
+ ops = [glow_ops.get_variable_ddi, glow_ops.actnorm, glow_ops.get_dropout] + with arg_scope(ops, init=init): + encoder = glow_ops.encoder_decoder + + + self.z, encoder_objective, self.eps, _, _ = encoder( + "codec", x, self.hparams, eps=None, reverse=False) + objective += encoder_objective + + self.z_top_shape = common_layers.shape_list(self.z) + prior_dist = self.top_prior() + prior_objective = tf.reduce_sum( + prior_dist.log_prob(self.z), axis=[1, 2, 3]) + self.z_sample = prior_dist.sample() + objective += prior_objective + + # bits per pixel + _, h, w, c = common_layers.shape_list(x) + objective = -objective / (np.log(2) * h * w * c) + return objective diff --git a/tensor2tensor/models/research/glow_init_hook.py b/tensor2tensor/models/research/glow_init_hook.py new file mode 100644 index 000000000..34dd4fe5b --- /dev/null +++ b/tensor2tensor/models/research/glow_init_hook.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Hook to run glow initialization on a larger batch.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + + +class GlowInitHook(tf.train.SessionRunHook): + """ + Hook that runs data-dependent initialization once before the first step. + + The init op is stored in the tf collection glow_init_op. Look at the + "body" in glow.py for more details. + """ + + def after_create_session(self, session, coord): + del coord + global_step = session.run(tf.train.get_global_step()) + if global_step == 0: + ddi = tf.get_collection("glow_init_op") + # In-case of a multi-GPU system, this just runs the first op in the + # collection. + if ddi: + session.run(ddi[0]) diff --git a/tensor2tensor/models/research/glow_ops.py b/tensor2tensor/models/research/glow_ops.py new file mode 100644 index 000000000..c5ee371cb --- /dev/null +++ b/tensor2tensor/models/research/glow_ops.py @@ -0,0 +1,1389 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
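A small sketch (illustrative only, with an assumed n_bits_x of 5 and a made-up pixel value) of the quantization performed by Glow.preprocess in glow.py above; postprocess() below maps such values back to uint8 pixels, up to the quantization:

import numpy as np

x = 200.0                       # raw pixel value in [0, 255]
x = np.floor(x / 2 ** (8 - 5))  # quantize to 2**5 = 32 bins -> 25.0
x = x / 2 ** 5 - 0.5            # rescale to [-0.5, 0.5) -> 0.28125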
+ +"""Various reversible ops for the glow generative model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import numpy as np +import scipy +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.utils import contrib +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +import tensorflow_probability as tfp + +arg_scope = contrib.framework().arg_scope +add_arg_scope = contrib.framework().add_arg_scope + + +def linear_interpolate(tensor1, tensor2, coeffs): + """Linearly interpolate between two tensors at coeff. + + Args: + tensor1: 4-D Tensor, shape=(NHWC) + tensor2: 4-D Tensor, shape=(NHWC) + coeffs: list of floats. + Returns: + interp_latents: 5-D Tensor, with interp_latents[i] representing + interpolations at coeffs[i]. + shape=(len(coeffs), NHWC) + """ + interp_tensors = [] + for coeff in coeffs: + interp_tensor = tensor1 + coeff * (tensor2 - tensor1) + interp_tensors.append(interp_tensor) + return tf.concat(interp_tensors, axis=0) + + +def linear_interpolate_rank(tensor1, tensor2, coeffs, rank=1): + """Linearly interpolate channel at "rank" between two tensors. + + The channels are ranked according to their L2 norm between tensor1[channel] + and tensor2[channel]. + + Args: + tensor1: 4-D Tensor, NHWC + tensor2: 4-D Tensor, NHWC + coeffs: list of floats. + rank: integer. + Returns: + interp_latents: list of interpolated 4-D Tensors, shape=(NHWC) + """ + # sum across space, max across channels. + _, _, _, num_channels = common_layers.shape_list(tensor1) + diff_sq_sum = tf.reduce_sum((tensor1 - tensor2)**2, axis=(0, 1, 2)) + _, feature_ranks = tf.math.top_k(diff_sq_sum, k=rank) + feature_rank = feature_ranks[-1] + channel_inds = tf.range(num_channels, dtype=tf.int32) + channel_mask = tf.equal(channel_inds, feature_rank) + ones_t = tf.ones(num_channels, dtype=tf.float32) + zeros_t = tf.zeros(num_channels, dtype=tf.float32) + + interp_tensors = [] + for coeff in coeffs: + curr_coeff = tf.where(channel_mask, coeff * ones_t, zeros_t) + interp_tensor = tensor1 + curr_coeff * (tensor2 - tensor1) + interp_tensors.append(interp_tensor) + return tf.concat(interp_tensors, axis=0) + + +def postprocess(x, n_bits_x=8): + """Converts x from [-0.5, 0.5], to [0, 255]. + + Args: + x: 3-D or 4-D Tensor normalized between [-0.5, 0.5] + n_bits_x: Number of bits representing each pixel of the output. + Defaults to 8, to default to 256 possible values. + Returns: + x: 3-D or 4-D Tensor representing images or videos. 
+ """ + x = tf.where(tf.is_finite(x), x, tf.ones_like(x)) + x = tf.clip_by_value(x, -0.5, 0.5) + x += 0.5 + x = x * 2**n_bits_x + return tf.cast(tf.clip_by_value(x, 0, 255), dtype=tf.uint8) + + +class TemperedNormal(tfp.distributions.Normal): + """Normal distribution with temperature T.""" + + def __init__(self, loc, scale, temperature=1.0): + self.temperature = temperature + new_scale = scale * self.temperature + tfp.distributions.Normal.__init__(self, loc=loc, scale=new_scale) + + def sample(self, sample_shape=(), seed=None, name="sample"): + if self.temperature == 0.0: + if not sample_shape: + return self.loc + loc = tf.expand_dims(self.loc, axis=0) + return tf.tile(loc, (sample_shape[0], 1, 1)) + return super(TemperedNormal, self).sample( + sample_shape=sample_shape, seed=seed, name=name) + + +def default_initializer(std=0.05): + return tf.random_normal_initializer(0., std) + + +def get_eps(dist, x): + """Z = (X - mu) / sigma.""" + return (x - dist.loc) / dist.scale + + +def set_eps(dist, eps): + """Z = eps * sigma + mu.""" + return eps * dist.scale + dist.loc + + +@add_arg_scope +def assign(w, initial_value): + w = w.assign(initial_value) + with tf.control_dependencies([w]): + return w + + +def get_cond_latents_at_level(cond_latents, level, hparams): + """Returns a single or list of conditional latents at level 'level'.""" + if cond_latents: + if hparams.latent_dist_encoder in ["conv_net", "conv3d_net"]: + return [cond_latent[level] for cond_latent in cond_latents] + elif hparams.latent_dist_encoder in ["pointwise", "conv_lstm"]: + return cond_latents[level] + + +def check_cond_latents(cond_latents, hparams): + """Shape checking for cond_latents.""" + if cond_latents is None: + return + if not isinstance(cond_latents[0], list): + cond_latents = [cond_latents] + exp_num_latents = hparams.num_cond_latents + if hparams.latent_dist_encoder == "conv_net": + exp_num_latents += int(hparams.cond_first_frame) + if len(cond_latents) != exp_num_latents: + raise ValueError("Expected number of cond_latents: %d, got %d" % + (exp_num_latents, len(cond_latents))) + for cond_latent in cond_latents: + if len(cond_latent) != hparams.n_levels - 1: + raise ValueError("Expected level_latents to be %d, got %d" % + (hparams.n_levels - 1, len(cond_latent))) + + +@add_arg_scope +def get_variable_ddi(name, shape, initial_value, dtype=tf.float32, init=False, + trainable=True): + """Wrapper for data-dependent initialization.""" + # If init is a tf bool: w is assigned dynamically at runtime. + # If init is a python bool: then w is determined during graph construction. + w = tf.get_variable(name, shape, dtype, None, trainable=trainable) + if isinstance(init, bool): + if init: + return assign(w, initial_value) + return w + else: + return tf.cond(init, lambda: assign(w, initial_value), lambda: w) + + +@add_arg_scope +def get_dropout(x, rate=0.0, init=True): + """Dropout x with dropout_rate = rate. + + Apply zero dropout during init or prediction time. + + Args: + x: 4-D Tensor, shape=(NHWC). + rate: Dropout rate. + init: Initialization. + Returns: + x: activations after dropout. + """ + if init or rate == 0: + return x + return tf.layers.dropout(x, rate=rate, training=True) + + +@add_arg_scope +def actnorm_3d(name, x, logscale_factor=3.): + """Applies actnorm to each time-step independently. + + There are a total of 2*n_channels*n_steps parameters learnt. + + Args: + name: variable scope. + x: 5-D Tensor, (NTHWC) + logscale_factor: Increases the learning rate of the scale by + logscale_factor. 
+ Returns: + x: 5-D Tensor, (NTHWC) with the per-timestep, per-channel normalization. + """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + x = tf.unstack(x, axis=1) + x_normed = [] + for ind, x_step in enumerate(x): + x_step, _ = actnorm("actnorm_%d" % ind, x_step, + logscale_factor=logscale_factor) + x_normed.append(x_step) + return tf.stack(x_normed, axis=1), None + + +@add_arg_scope +def actnorm(name, x, logscale_factor=3., reverse=False, init=False, + trainable=True): + """x_{ij} = s x x_{ij} + b. Per-channel scaling and bias. + + If init is set to True, the scaling and bias are initialized such + that the mean and variance of the output activations of the first minibatch + are zero and one respectively. + + Args: + name: variable scope. + x: input + logscale_factor: Used in actnorm_scale. Optimizes f(ls*s') instead of f(s) + where s' = s / ls. Helps in faster convergence. + reverse: forward or reverse operation. + init: Whether or not to do data-dependent initialization. + trainable: + + Returns: + x: output after adding bias and scaling. + objective: log(sum(s)) + """ + var_arg_scope = arg_scope([get_variable_ddi], trainable=trainable) + var_scope = tf.variable_scope(name, reuse=tf.AUTO_REUSE) + + with var_scope, var_arg_scope: + if not reverse: + x = actnorm_center(name + "_center", x, reverse, init=init) + x, objective = actnorm_scale( + name + "_scale", x, logscale_factor=logscale_factor, + reverse=reverse, init=init) + else: + x, objective = actnorm_scale( + name + "_scale", x, logscale_factor=logscale_factor, + reverse=reverse, init=init) + x = actnorm_center(name + "_center", x, reverse, init=init) + return x, objective + + +@add_arg_scope +def actnorm_center(name, x, reverse=False, init=False): + """Add a bias to x. + + Initialize such that the output of the first minibatch is zero centered + per channel. + + Args: + name: scope + x: 2-D or 4-D Tensor. + reverse: Forward or backward operation. + init: data-dependent initialization. + + Returns: + x_center: (x + b), if reverse is True and (x - b) otherwise. + """ + shape = common_layers.shape_list(x) + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + assert len(shape) == 2 or len(shape) == 4 + if len(shape) == 2: + x_mean = tf.reduce_mean(x, [0], keepdims=True) + b = get_variable_ddi("b", (1, shape[1]), initial_value=-x_mean, + init=init) + elif len(shape) == 4: + x_mean = tf.reduce_mean(x, [0, 1, 2], keepdims=True) + b = get_variable_ddi( + "b", (1, 1, 1, shape[3]), initial_value=-x_mean, init=init) + + if not reverse: + x += b + else: + x -= b + return x + + +@add_arg_scope +def actnorm_scale(name, x, logscale_factor=3., reverse=False, init=False): + """Per-channel scaling of x.""" + x_shape = common_layers.shape_list(x) + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + + # Variance initialization logic. + assert len(x_shape) == 2 or len(x_shape) == 4 + if len(x_shape) == 2: + x_var = tf.reduce_mean(x**2, [0], keepdims=True) + logdet_factor = 1 + var_shape = (1, x_shape[1]) + elif len(x_shape) == 4: + x_var = tf.reduce_mean(x**2, [0, 1, 2], keepdims=True) + logdet_factor = x_shape[1]*x_shape[2] + var_shape = (1, 1, 1, x_shape[3]) + + init_value = tf.log(1.0 / (tf.sqrt(x_var) + 1e-6)) / logscale_factor + logs = get_variable_ddi("logs", var_shape, initial_value=init_value, + init=init) + logs = logs * logscale_factor + + # Function and reverse function. 
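+    # (Every one of the h*w spatial positions is scaled by exp(logs), which is
+    # why the log-determinant below is multiplied by logdet_factor, i.e. h*w
+    # for 4-D inputs.)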
+ if not reverse: + x = x * tf.exp(logs) + else: + x = x * tf.exp(-logs) + + # Objective calculation, h * w * sum(log|s|) + dlogdet = tf.reduce_sum(logs) * logdet_factor + if reverse: + dlogdet *= -1 + return x, dlogdet + + +@add_arg_scope +def invertible_1x1_conv(name, x, reverse=False): + """1X1 convolution on x. + + The 1X1 convolution is parametrized as P*L*(U + sign(s)*exp(log(s))) where + 1. P is a permutation matrix. + 2. L is a lower triangular matrix with diagonal entries unity. + 3. U is a upper triangular matrix where the diagonal entries zero. + 4. s is a vector. + + sign(s) and P are fixed and the remaining are optimized. P, L, U and s are + initialized by the PLU decomposition of a random rotation matrix. + + Args: + name: scope + x: Input Tensor. + reverse: whether the pass is from z -> x or x -> z. + + Returns: + x_conv: x after a 1X1 convolution is applied on x. + objective: sum(log(s)) + """ + _, height, width, channels = common_layers.shape_list(x) + w_shape = [channels, channels] + + # Random rotation-matrix Q + random_matrix = np.random.rand(channels, channels) + np_w = scipy.linalg.qr(random_matrix)[0].astype("float32") + + # Initialize P,L,U and s from the LU decomposition of a random rotation matrix + np_p, np_l, np_u = scipy.linalg.lu(np_w) + np_s = np.diag(np_u) + np_sign_s = np.sign(np_s) + np_log_s = np.log(np.abs(np_s)) + np_u = np.triu(np_u, k=1) + + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + p = tf.get_variable("P", initializer=np_p, trainable=False) + l = tf.get_variable("L", initializer=np_l) + sign_s = tf.get_variable( + "sign_S", initializer=np_sign_s, trainable=False) + log_s = tf.get_variable("log_S", initializer=np_log_s) + u = tf.get_variable("U", initializer=np_u) + + # W = P * L * (U + sign_s * exp(log_s)) + l_mask = np.tril(np.ones([channels, channels], dtype=np.float32), -1) + l = l * l_mask + tf.eye(channels, channels) + u = u * np.transpose(l_mask) + tf.diag(sign_s * tf.exp(log_s)) + w = tf.matmul(p, tf.matmul(l, u)) + + # If height or width cannot be statically determined then they end up as + # tf.int32 tensors, which cannot be directly multiplied with a floating + # point tensor without a cast. + objective = tf.reduce_sum(log_s) * tf.cast(height * width, log_s.dtype) + if not reverse: + w = tf.reshape(w, [1, 1] + w_shape) + x = tf.nn.conv2d(x, w, [1, 1, 1, 1], "SAME", data_format="NHWC") + else: + w_inv = tf.reshape(tf.linalg.inv(w), [1, 1]+w_shape) + x = tf.nn.conv2d( + x, w_inv, [1, 1, 1, 1], "SAME", data_format="NHWC") + objective *= -1 + return x, objective + + +def add_edge_bias(x, filter_size): + """Pad x and concatenates an edge bias across the depth of x. + + The edge bias can be thought of as a binary feature which is unity when + the filter is being convolved over an edge and zero otherwise. + + Args: + x: Input tensor, shape (NHWC) + filter_size: filter_size to determine padding. + Returns: + x_pad: Input tensor, shape (NHW(c+1)) + """ + x_shape = common_layers.shape_list(x) + if filter_size[0] == 1 and filter_size[1] == 1: + return x + a = (filter_size[0] - 1) // 2 # vertical padding size + b = (filter_size[1] - 1) // 2 # horizontal padding size + padding = [[0, 0], [a, a], [b, b], [0, 0]] + x_bias = tf.zeros(x_shape[:-1] + [1]) + + x = tf.pad(x, padding) + x_pad = tf.pad(x_bias, padding, constant_values=1) + return tf.concat([x, x_pad], axis=3) + + +def time_pad(x, filter_size, dilations): + """Pad left across time and pad valid across the spatial components. 
+ + Also concats a binary feature that indicates if a feature is padded or not. + + Args: + x: 5-D Tensor, (NTHWC) + filter_size: list of ints + dilations: list of ints, dilations - 1 specifies the number of holes + between two filter elements. + Returns: + x_pad: 5-D Tensor. + """ + x_shape = common_layers.shape_list(x) + if filter_size == [1, 1, 1]: + return x + _, h, w = filter_size + eff_h = h + (h - 1)*(dilations[2] - 1) + eff_w = w + (w - 1)*(dilations[3] - 1) + a = (eff_h - 1) // 2 # vertical padding size + b = (eff_w - 1) // 2 # horizontal padding size + c = filter_size[0] - 1 + + # pad across edges. + padding = [[0, 0], [c, 0], [a, a], [b, b], [0, 0]] + + # concat a binary feature across channels to indicate a padding. + # 1 indicates that the feature is a padding. + x_bias = tf.zeros(x_shape[:-1] + [1]) + x_bias = tf.pad(x_bias, padding, constant_values=1) + x_pad = tf.pad(x, padding) + x_pad = tf.concat((x_bias, x_pad), axis=-1) + return x_pad + + +@add_arg_scope +def conv(name, x, output_channels, filter_size=None, stride=None, + logscale_factor=3.0, apply_actnorm=True, conv_init="default", + dilations=None): + """Convolutional layer with edge bias padding and optional actnorm. + + If x is 5-dimensional, actnorm is applied independently across every + time-step. + + Args: + name: variable scope. + x: 4-D Tensor or 5-D Tensor of shape NHWC or NTHWC + output_channels: Number of output channels. + filter_size: list of ints, if None [3, 3] and [2, 3, 3] are defaults for + 4-D and 5-D input tensors respectively. + stride: list of ints, default stride: 1 + logscale_factor: see actnorm for parameter meaning. + apply_actnorm: if apply_actnorm the activations of the first minibatch + have zero mean and unit variance. Else, there is no scaling + applied. + conv_init: default or zeros. default is a normal distribution with 0.05 std. + dilations: List of integers, apply dilations. + Returns: + x: actnorm(conv2d(x)) + Raises: + ValueError: if init is set to "zeros" and apply_actnorm is set to True. 
+ """ + if conv_init == "zeros" and apply_actnorm: + raise ValueError("apply_actnorm is unstable when init is set to zeros.") + + x_shape = common_layers.shape_list(x) + is_2d = len(x_shape) == 4 + num_steps = x_shape[1] + + # set filter_size, stride and in_channels + if is_2d: + if filter_size is None: + filter_size = [3, 3] + if stride is None: + stride = [1, 1] + if dilations is None: + dilations = [1, 1, 1, 1] + actnorm_func = actnorm + x = add_edge_bias(x, filter_size=filter_size) + conv_filter = tf.nn.conv2d + else: + if filter_size is None: + if num_steps == 1: + filter_size = [1, 3, 3] + else: + filter_size = [2, 3, 3] + if stride is None: + stride = [1, 1, 1] + if dilations is None: + dilations = [1, 1, 1, 1, 1] + actnorm_func = actnorm_3d + x = time_pad(x, filter_size=filter_size, dilations=dilations) + conv_filter = tf.nn.conv3d + + in_channels = common_layers.shape_list(x)[-1] + filter_shape = filter_size + [in_channels, output_channels] + stride_shape = [1] + stride + [1] + + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + + if conv_init == "default": + initializer = default_initializer() + elif conv_init == "zeros": + initializer = tf.zeros_initializer() + + w = tf.get_variable("W", filter_shape, tf.float32, initializer=initializer) + x = conv_filter(x, w, stride_shape, padding="VALID", dilations=dilations) + if apply_actnorm: + x, _ = actnorm_func("actnorm", x, logscale_factor=logscale_factor) + else: + x += tf.get_variable("b", [1, 1, 1, output_channels], + initializer=tf.zeros_initializer()) + logs = tf.get_variable("logs", [1, output_channels], + initializer=tf.zeros_initializer()) + x *= tf.exp(logs * logscale_factor) + return x + + +@add_arg_scope +def conv_block(name, x, mid_channels, dilations=None, activation="relu", + dropout=0.0): + """2 layer conv block used in the affine coupling layer. + + Args: + name: variable scope. + x: 4-D or 5-D Tensor. + mid_channels: Output channels of the second layer. + dilations: Optional, list of integers. + activation: relu or gatu. + If relu, the second layer is relu(W*x) + If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x) + dropout: Dropout probability. + Returns: + x: 4-D Tensor: Output activations. + """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + + x_shape = common_layers.shape_list(x) + is_2d = len(x_shape) == 4 + num_steps = x_shape[1] + if is_2d: + first_filter = [3, 3] + second_filter = [1, 1] + else: + # special case when number of steps equal 1 to avoid + # padding. + if num_steps == 1: + first_filter = [1, 3, 3] + else: + first_filter = [2, 3, 3] + second_filter = [1, 1, 1] + + # Edge Padding + conv2d + actnorm + relu: + # [output: 512 channels] + x = conv("1_1", x, output_channels=mid_channels, filter_size=first_filter, + dilations=dilations) + x = tf.nn.relu(x) + x = get_dropout(x, rate=dropout) + + # Padding + conv2d + actnorm + activation. 
+ # [input, output: 512 channels] + if activation == "relu": + x = conv("1_2", x, output_channels=mid_channels, + filter_size=second_filter, dilations=dilations) + x = tf.nn.relu(x) + elif activation == "gatu": + # x = tanh(w1*x) * sigm(w2*x) + x_tanh = conv("1_tanh", x, output_channels=mid_channels, + filter_size=second_filter, dilations=dilations) + x_sigm = conv("1_sigm", x, output_channels=mid_channels, + filter_size=second_filter, dilations=dilations) + x = tf.nn.tanh(x_tanh) * tf.nn.sigmoid(x_sigm) + + x = get_dropout(x, rate=dropout) + return x + + +def dilated_conv_stack(name, x, mid_channels, output_channels, + dilation_rates, activation="relu", + dropout=0.0): + """Dilated convolutional stack. + + Features at different rates are computed independently using a 3 layer + convolutional stack and added. + + Args: + name: variable scope. + x: 5-D Tensor. + mid_channels: Number of output channels of the first layer in the conv + stack. + output_channels: Number of output channels of the last layer. + dilation_rates: A list of dilation rates. + activation: Can be either "relu" or "gatu" + dropout: dropout. + Returns: + output: 5-D Tensor. + """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + output = 0.0 + for dil_ind, dil_rate in enumerate(dilation_rates): + # TODO(mechcoder) try (concat across channels + 1x1) modulo memory issues. + curr_out = conv_stack("dil_%d" % dil_ind, x, mid_channels=mid_channels, + output_channels=output_channels, dilations=dil_rate, + activation=activation, dropout=dropout) + output += curr_out + return output + + +@add_arg_scope +def conv_stack(name, x, mid_channels, output_channels, dilations=None, + activation="relu", dropout=0.0): + """3-layer convolutional stack. + + Args: + name: variable scope. + x: 5-D Tensor. + mid_channels: Number of output channels of the first layer. + output_channels: Number of output channels. + dilations: Dilations to apply in the first 3x3 layer and the last 3x3 layer. + By default, apply no dilations. + activation: relu or gatu. + If relu, the second layer is relu(W*x) + If gatu, the second layer is tanh(W1*x) * sigmoid(W2*x) + dropout: float, 0.0 + Returns: + output: output of 3 layer conv network. + """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + + x = conv_block("conv_block", x, mid_channels=mid_channels, + dilations=dilations, activation=activation, + dropout=dropout) + + # Final layer. + x = conv("zeros", x, apply_actnorm=False, conv_init="zeros", + output_channels=output_channels, dilations=dilations) + return x + + +@add_arg_scope +def additive_coupling(name, x, mid_channels=512, reverse=False, + activation="relu", dropout=0.0): + """Reversible additive coupling layer. + + Args: + name: variable scope. + x: 4-D Tensor, shape=(NHWC). + mid_channels: number of channels in the coupling layer. + reverse: Forward or reverse operation. + activation: "relu" or "gatu" + dropout: default, 0.0 + Returns: + output: 4-D Tensor, shape=(NHWC) + objective: 0.0 + """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + output_channels = common_layers.shape_list(x)[-1] // 2 + x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1) + + z1 = x1 + shift = conv_stack("nn", x1, mid_channels, output_channels=output_channels, + activation=activation, dropout=dropout) + + if not reverse: + z2 = x2 + shift + else: + z2 = x2 - shift + return tf.concat([z1, z2], axis=3), 0.0 + + +@add_arg_scope +def affine_coupling(name, x, mid_channels=512, activation="relu", + reverse=False, dropout=0.0): + """Reversible affine coupling layer. 
+ + Args: + name: variable scope. + x: 4-D Tensor. + mid_channels: number of channels in the coupling layer. + activation: Can be either "relu" or "gatu". + reverse: Forward or reverse operation. + dropout: default, 0.0 + Returns: + output: x shifted and scaled by an affine transformation. + objective: log-determinant of the jacobian + """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + x_shape = common_layers.shape_list(x) + x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1) + + # scale, shift = NN(x1) + # If reverse: + # z2 = scale * (x2 + shift) + # Else: + # z2 = (x2 / scale) - shift + z1 = x1 + log_scale_and_shift = conv_stack( + "nn", x1, mid_channels, x_shape[-1], activation=activation, + dropout=dropout) + shift = log_scale_and_shift[:, :, :, 0::2] + scale = tf.nn.sigmoid(log_scale_and_shift[:, :, :, 1::2] + 2.0) + if not reverse: + z2 = (x2 + shift) * scale + else: + z2 = x2 / scale - shift + + objective = tf.reduce_sum(tf.log(scale), axis=[1, 2, 3]) + if reverse: + objective *= -1 + return tf.concat([z1, z2], axis=3), objective + + +@add_arg_scope +def squeeze(name, x, factor=2, reverse=True): + """Block-wise spatial squeezing of x to increase the number of channels. + + Args: + name: Used for variable scoping. + x: 4-D Tensor of shape (batch_size X H X W X C) + factor: Factor by which the spatial dimensions should be squeezed. + reverse: Squueze or unsqueeze operation. + + Returns: + x: 4-D Tensor of shape (batch_size X (H//factor) X (W//factor) X + (cXfactor^2). If reverse is True, then it is factor = (1 / factor) + """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + shape = common_layers.shape_list(x) + if factor == 1: + return x + height = int(shape[1]) + width = int(shape[2]) + n_channels = int(shape[3]) + + if not reverse: + assert height % factor == 0 and width % factor == 0 + x = tf.reshape(x, [-1, height//factor, factor, + width//factor, factor, n_channels]) + x = tf.transpose(x, [0, 1, 3, 5, 2, 4]) + x = tf.reshape(x, [-1, height//factor, width // + factor, n_channels*factor*factor]) + else: + x = tf.reshape( + x, (-1, height, width, int(n_channels/factor**2), factor, factor)) + x = tf.transpose(x, [0, 1, 4, 2, 5, 3]) + x = tf.reshape(x, (-1, int(height*factor), + int(width*factor), int(n_channels/factor**2))) + return x + + +def get_dilation_rates(hparams, width): + """Get a list of valid dilation rates. + + Args: + hparams: HParams. + width: spatial dimension. Ensures that the effective filter size is + not larger than the spatial dimension. + Returns: + allowed_dilations: A list of dilation rates. + """ + # dil_rate=1 means no dilation. + allowed_dilations = [[1]*5] + apply_dilations = hparams.get("latent_apply_dilations", False) + dilation_rates = hparams.get("latent_dilation_rates", [1, 3]) + if apply_dilations: + for rate in dilation_rates: + # k + (k - 1) * rate but k is harcoded to be 3 everywhere. + filter_size = 3 + 2 * rate + if filter_size <= width: + curr_dilation = [1, 1, rate+1, rate+1, 1] + allowed_dilations.append(curr_dilation) + return allowed_dilations + + +@add_arg_scope +def temporal_latent_to_dist(name, x, hparams, output_channels=None): + """Network that maps a time-indexed list of 3-D latents to a gaussian. + + Args: + name: variable scope. + x: List of 4-D Tensors indexed by time, (NHWC) + hparams: tf.contrib.training.Hparams. + output_channels: int, Number of channels of the output gaussian mean. 
+ Returns: + dist: tfp.distributions.Normal + """ + _, _, width, _, res_channels = common_layers.shape_list(x) + if output_channels is None: + output_channels = res_channels + dilation_rates = get_dilation_rates(hparams, width) + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + h = x + for i in range(hparams.latent_encoder_depth): + if hparams.latent_apply_dilations: + h2 = dilated_conv_stack("dil_latent_3d_res_%d" % i, h, + mid_channels=hparams.latent_encoder_width, + output_channels=res_channels, + dilation_rates=dilation_rates, + activation=hparams.latent_activation, + dropout=hparams.latent_dropout) + else: + h2 = conv_stack("latent_3d_res_%d" % i, h, + mid_channels=hparams.latent_encoder_width, + output_channels=res_channels, + activation=hparams.latent_activation, + dropout=hparams.latent_dropout) + h += h2 + + # take last activation that should capture all context since padding is + # on left. + h = h[:, -1, :, :, :] + h = conv("res_final", h, apply_actnorm=False, conv_init="zeros", + output_channels=2*output_channels, filter_size=[1, 1]) + mean, log_scale = h[:, :, :, 0::2], h[:, :, :, 1::2] + return tfp.distributions.Normal(mean, tf.exp(log_scale)) + + +@add_arg_scope +def single_conv_dist(name, x, output_channels=None): + """A 3x3 convolution mapping x to a standard normal distribution at init. + + Args: + name: variable scope. + x: 4-D Tensor. + output_channels: number of channels of the mean and std. + """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + x_shape = common_layers.shape_list(x) + if output_channels is None: + output_channels = x_shape[-1] + mean_log_scale = conv("conv2d", x, output_channels=2*output_channels, + conv_init="zeros", apply_actnorm=False) + mean = mean_log_scale[:, :, :, 0::2] + log_scale = mean_log_scale[:, :, :, 1::2] + return tf.distributions.Normal(mean, tf.exp(log_scale)) + + +@add_arg_scope +def latent_to_dist(name, x, hparams, output_channels=None): + """Map latent to the mean and log-scale of a Gaussian. + + Args: + name: variable scope. + x: 4-D Tensor of shape (NHWC) + hparams: HParams. + latent_architecture - can be "single_conv", "glow_nn" or "glow_resnet", + default = single_conv + latent_encoder_depth - int, depth of architecture, valid if + latent_architecture is "glow_nn" or "glow_resnet". + latent_pre_output_channels - 512, valid only when latent_architecture + is "glow_nn". + latent_encoder_width - 512, maximum width of the network + output_channels: int, number of output channels of the mean (and std). + if not provided, set it to be the output channels of x. 
+ Returns: + dist: instance of tfp.distributions.Normal + Raises: + ValueError: If architecture not in ["single_conv", "glow_nn"] + """ + architecture = hparams.get("latent_architecture", "single_conv") + depth = hparams.get("latent_encoder_depth", 1) + pre_output_channels = hparams.get("latent_pre_output_channels", 512) + width = hparams.get("latent_encoder_width", 512) + + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + x_shape = common_layers.shape_list(x) + if output_channels is None: + output_channels = x_shape[-1] + if architecture == "single_conv": + return single_conv_dist("single_conv", x, output_channels) + if architecture == "glow_nn": + mean_log_scale = x + for layer in range(1, depth + 1): + mid_channels = pre_output_channels // 2**(depth - layer) + mean_log_scale = conv_block("glow_nn_%d" % layer, mean_log_scale, + mid_channels=mid_channels) + mean_log_scale = conv("glow_nn_zeros", mean_log_scale, + filter_size=[3, 3], stride=[1, 1], + output_channels=2*output_channels, + apply_actnorm=False, conv_init="zeros") + elif architecture == "glow_resnet": + h = x + for layer in range(depth): + h3 = conv_stack("latent_resnet_%d" % layer, h, + mid_channels=width, output_channels=x_shape[-1], + dropout=hparams.coupling_dropout) + h += h3 + mean_log_scale = conv("glow_res_final", h, conv_init="zeros", + output_channels=2*output_channels, + apply_actnorm=False) + else: + raise ValueError("expected architecture to be single_conv or glow_nn " + "got %s" % architecture) + + mean = mean_log_scale[:, :, :, 0::2] + log_scale = mean_log_scale[:, :, :, 1::2] + return tfp.distributions.Normal(mean, tf.exp(log_scale)) + + +@add_arg_scope +def noise_op(latents, hparams): + """Adds isotropic gaussian-noise to each latent. + + Args: + latents: 4-D or 5-D tensor, shape=(NTHWC) or (NHWC). + hparams: HParams. + Returns: + latents: latents with isotropic gaussian noise appended. + """ + if hparams.latent_noise == 0 or hparams.mode != tf_estimator.ModeKeys.TRAIN: + return latents + latent_shape = common_layers.shape_list(latents) + return latents + tf.random_normal(latent_shape, stddev=hparams.latent_noise) + + +@add_arg_scope +def merge_level_and_latent_dist(level_dist, latent_dist, + merge_std="prev_level"): + """Merge level_dist and latent_dist. + + new_dist ~ N(level_dist.mean + latent_dis.mean, std) where std is determined + according to merge_std. + + Args: + level_dist: instance of tfp.distributions.Normal + latent_dist: instance of tfp.distributions.Normal + merge_std: can be "prev_level", "prev_step" or "normal". + Returns: + merged_dist: instance of tfp.distributions.Normal + """ + level_mean, level_std = level_dist.loc, level_dist.scale + latent_mean, latent_std = latent_dist.loc, latent_dist.scale + new_mean = level_mean + latent_mean + if merge_std == "normal": + z_shape = common_layers.shape_list(latent_mean) + log_scale = tf.get_variable( + "merge_std", shape=z_shape, dtype=tf.float32, + initializer=tf.zeros_initializer(), trainable=False) + scale = tf.exp(log_scale * 3.0) + elif merge_std == "prev_level": + scale = level_std + elif merge_std == "prev_step": + scale = latent_std + return tfp.distributions.Normal(loc=new_mean, scale=scale) + + +@add_arg_scope +def level_cond_prior(prior_dist, z, latent, hparams, state): + """Returns a conditional prior for each level. + + Args: + prior_dist: Distribution conditioned on the previous levels. + z: Tensor, output of the previous levels. + latent: Tensor or a list of tensors to condition the latent_distribution. 
+ hparams: next_frame_glow hparams. + state: Current LSTM state. Used only if hparams.latent_dist_encoder is + a lstm. + Raises: + ValueError: If hparams.latent_dist_encoder is "pointwise" and if the shape + of latent is different from z. + """ + latent_dist_encoder = hparams.get("latent_dist_encoder", None) + latent_skip = hparams.get("latent_skip", False) + if latent_dist_encoder == "pointwise": + last_latent = latent + merge_std = hparams.level_scale + latent_shape = common_layers.shape_list(latent) + z_shape = common_layers.shape_list(z) + if latent_shape != z_shape: + raise ValueError("Expected latent_shape to be %s, got %s" % + (latent_shape, z_shape)) + latent_dist = scale_gaussian_prior( + "latent_prior", latent, logscale_factor=3.0) + cond_dist = merge_level_and_latent_dist(prior_dist, latent_dist, + merge_std=merge_std) + + elif latent_dist_encoder == "conv_net": + output_channels = common_layers.shape_list(z)[-1] + last_latent = latent[-1] + latent_stack = tf.concat([prior_dist.loc] + latent, axis=-1) + latent_stack = noise_op(latent_stack, hparams) + cond_dist = latent_to_dist( + "latent_stack", latent_stack, hparams=hparams, + output_channels=output_channels) + + elif latent_dist_encoder == "conv3d_net": + last_latent = latent[-1] + output_channels = common_layers.shape_list(last_latent)[-1] + num_steps = len(latent) + + # Stack across time. + cond_latents = tf.stack(latent, axis=1) + + # Concat latents from previous levels across channels. + prev_latents = tf.tile(tf.expand_dims(prior_dist.loc, axis=1), + [1, num_steps, 1, 1, 1]) + cond_latents = tf.concat((cond_latents, prev_latents), axis=-1) + cond_latents = noise_op(cond_latents, hparams) + cond_dist = temporal_latent_to_dist( + "latent_stack", cond_latents, hparams, output_channels=output_channels) + + elif latent_dist_encoder == "conv_lstm": + last_latent = latent + output_channels = common_layers.shape_list(z)[-1] + latent_stack = tf.concat((prior_dist.loc, latent), axis=-1) + latent_stack = noise_op(latent_stack, hparams) + _, state = common_video.conv_lstm_2d( + latent_stack, state, hparams.latent_encoder_width, kernel_size=3, + name="conv_lstm") + + cond_dist = single_conv_dist( + "state_to_dist", state.h, output_channels=output_channels) + if latent_skip: + new_mean = cond_dist.loc + last_latent + cond_dist = tfp.distributions.Normal(new_mean, cond_dist.scale) + return cond_dist.loc, cond_dist.scale, state + + +@add_arg_scope +def compute_prior(name, z, latent, hparams, condition=False, state=None, + temperature=1.0): + """Distribution on z_t conditioned on z_{t-1} and latent. + + Args: + name: variable scope. + z: 4-D Tensor. + latent: optional, + if hparams.latent_dist_encoder == "pointwise", this is a list + of 4-D Tensors of length hparams.num_cond_latents. + else, this is just a 4-D Tensor + The first-three dimensions of the latent should be the same as z. + hparams: next_frame_glow_hparams. + condition: Whether or not to condition the distribution on latent. + state: tf.nn.rnn_cell.LSTMStateTuple. + the current state of a LSTM used to model the distribution. Used + only if hparams.latent_dist_encoder = "conv_lstm". + temperature: float, temperature with which to sample from the Gaussian. + Returns: + prior_dist: instance of tfp.distributions.Normal + state: Returns updated state. + Raises: + ValueError: If hparams.latent_dist_encoder is "pointwise" and if the shape + of latent is different from z. 
+ """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + if isinstance(condition, bool): + condition = tf.constant(condition, dtype=tf.bool) + prior_dist = single_conv_dist("level_prior", z) + prior_mean, prior_scale = prior_dist.loc, prior_dist.scale + + if latent is None: + mean, scale = prior_mean, prior_scale + else: + cond_mean, cond_scale, state = level_cond_prior( + prior_dist, z, latent, hparams, state) + mean, scale = tf.cond( + condition, lambda: (cond_mean, cond_scale), + lambda: (prior_mean, prior_scale)) + dist = TemperedNormal(mean, scale, temperature) + return dist, state + + +@add_arg_scope +def split(name, x, reverse=False, eps=None, eps_std=None, cond_latents=None, + hparams=None, state=None, condition=False, temperature=1.0): + """Splits / concatenates x into x1 and x2 across number of channels. + + For the forward pass, x2 is assumed be gaussian, + i.e P(x2 | x1) ~ N(mu, sigma) where mu and sigma are the outputs of + a network conditioned on x1 and optionally on cond_latents. + For the reverse pass, x2 is determined from mu(x1) and sigma(x1). + This is deterministic/stochastic depending on whether eps is provided. + + Args: + name: variable scope. + x: 4-D Tensor, shape (NHWC). + reverse: Forward or reverse pass. + eps: If eps is provided, x2 is set to be mu(x1) + eps * sigma(x1). + eps_std: Sample x2 with the provided eps_std. + cond_latents: optionally condition x2 on cond_latents. + hparams: next_frame_glow hparams. + state: tf.nn.rnn_cell.LSTMStateTuple.. Current state of the LSTM over z_2. + Used only when hparams.latent_dist_encoder == "conv_lstm" + condition: bool, Whether or not to condition the distribution on + cond_latents. + temperature: Temperature with which to sample from the gaussian. + + Returns: + If reverse: + x: 4-D Tensor, concats input and x2 across channels. + x2: 4-D Tensor, a sample from N(mu(x1), sigma(x1)) + Else: + x1: 4-D Tensor, Output of the split operation. + logpb: log-probability of x2 belonging to mu(x1), sigma(x1) + eps: 4-D Tensor, (x2 - mu(x1)) / sigma(x1) + x2: 4-D Tensor, Latent representation at the current level. + state: Current LSTM state. + 4-D Tensor, only if hparams.latent_dist_encoder is set to conv_lstm. + Raises: + ValueError: If latent is provided and shape is not equal to NHW(C/2) + where (NHWC) is the size of x. + """ + # TODO(mechcoder) Change the return type to be a dict. + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + if not reverse: + x1, x2 = tf.split(x, num_or_size_splits=2, axis=-1) + + # objective: P(x2|x1) ~N(x2 ; NN(x1)) + prior_dist, state = compute_prior( + "prior_on_z2", x1, cond_latents, hparams, condition, state=state) + logpb = tf.reduce_sum(prior_dist.log_prob(x2), axis=[1, 2, 3]) + eps = get_eps(prior_dist, x2) + return x1, logpb, eps, x2, state + else: + prior_dist, state = compute_prior( + "prior_on_z2", x, cond_latents, hparams, condition, state=state, + temperature=temperature) + if eps is not None: + x2 = set_eps(prior_dist, eps) + elif eps_std is not None: + x2 = eps_std * tf.random_normal(common_layers.shape_list(x)) + else: + x2 = prior_dist.sample() + return tf.concat([x, x2], 3), x2, state + + +@add_arg_scope +def revnet_step(name, x, hparams, reverse=True): + """One step of glow generative flow. + + Actnorm + invertible 1X1 conv + affine_coupling. + + Args: + name: used for variable scope. + x: input + hparams: coupling_width is the only hparam that is being used in + this function. + reverse: forward or reverse pass. + Returns: + z: Output of one step of reversible flow. 
+ """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + if hparams.coupling == "additive": + coupling_layer = functools.partial( + additive_coupling, name="additive", reverse=reverse, + mid_channels=hparams.coupling_width, + activation=hparams.activation, dropout=hparams.coupling_dropout) + else: + coupling_layer = functools.partial( + affine_coupling, name="affine", reverse=reverse, + mid_channels=hparams.coupling_width, + activation=hparams.activation, dropout=hparams.coupling_dropout) + ops = [ + functools.partial(actnorm, name="actnorm", reverse=reverse), + functools.partial(invertible_1x1_conv, name="invertible", + reverse=reverse), coupling_layer] + + if reverse: + ops = ops[::-1] + + objective = 0.0 + for op in ops: + x, curr_obj = op(x=x) + objective += curr_obj + return x, objective + + +def revnet(name, x, hparams, reverse=True): + """'hparams.depth' steps of generative flow. + + Args: + name: variable scope for the revnet block. + x: 4-D Tensor, shape=(NHWC). + hparams: HParams. + reverse: bool, forward or backward pass. + Returns: + x: 4-D Tensor, shape=(NHWC). + objective: float. + """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + steps = np.arange(hparams.depth) + if reverse: + steps = steps[::-1] + + objective = 0.0 + for step in steps: + x, curr_obj = revnet_step( + "revnet_step_%d" % step, x, hparams, reverse=reverse) + objective += curr_obj + return x, objective + + +@add_arg_scope +def scale_gaussian_prior(name, z, logscale_factor=3.0, trainable=True): + """Returns N(s^i * z^i, std^i) where s^i and std^i are pre-component. + + s^i is a learnable parameter with identity initialization. + std^i is optionally learnable with identity initialization. + + Args: + name: variable scope. + z: input_tensor + logscale_factor: equivalent to scaling up the learning_rate by a factor + of logscale_factor. + trainable: Whether or not std^i is learnt. + """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + z_shape = common_layers.shape_list(z) + latent_multiplier = tf.get_variable( + "latent_multiplier", shape=z_shape, dtype=tf.float32, + initializer=tf.ones_initializer()) + log_scale = tf.get_variable( + "log_scale_latent", shape=z_shape, dtype=tf.float32, + initializer=tf.zeros_initializer(), trainable=trainable) + log_scale = log_scale * logscale_factor + return tfp.distributions.Normal( + loc=latent_multiplier * z, scale=tf.exp(log_scale)) + + +@add_arg_scope +def top_prior(name, z_shape, learn_prior="normal", temperature=1.0): + """Unconditional prior distribution. + + Args: + name: variable scope + z_shape: Shape of the mean / scale of the prior distribution. + learn_prior: Possible options are "normal" and "single_conv". + If set to "single_conv", the gaussian is parametrized by a + single convolutional layer whose input are an array of zeros + and initialized such that the mean and std are zero and one. + If set to "normal", the prior is just a Gaussian with zero + mean and unit variance. + temperature: Temperature with which to sample from the Gaussian. + Returns: + objective: 1-D Tensor shape=(batch_size,) summed across spatial components. 
+  Raises:
+    ValueError: If learn_prior not in "normal" or "single_conv"
+  """
+  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
+    h = tf.zeros(z_shape, dtype=tf.float32)
+    if learn_prior == "normal":
+      prior_dist = tfp.distributions.Normal(h, tf.exp(h))
+    elif learn_prior == "single_conv":
+      prior_dist = single_conv_dist("top_learn_prior", h)
+    else:
+      raise ValueError("Expected learn_prior to be normal or single_conv "
+                       "got %s" % learn_prior)
+    return TemperedNormal(prior_dist.loc, prior_dist.scale, temperature)
+
+
+def uniform_binning_correction(x, n_bits=8):
+  """Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).
+
+  Args:
+    x: 4-D Tensor of shape (NHWC)
+    n_bits: optional.
+  Returns:
+    x: x ~ U(x, x + 1.0 / 256)
+    objective: Equivalent to -q(x)*log(q(x)).
+  """
+  n_bins = 2**n_bits
+  batch_size, height, width, n_channels = common_layers.shape_list(x)
+  hwc = float(height * width * n_channels)
+
+  x = x + tf.random_uniform(
+      shape=(batch_size, height, width, n_channels),
+      minval=0.0, maxval=1.0/n_bins)
+  objective = -np.log(n_bins) * hwc * tf.ones(batch_size)
+  return x, objective
+
+
+@add_arg_scope
+def encoder_decoder(name, x, hparams, eps=None, reverse=False,
+                    cond_latents=None, condition=False, states=None,
+                    temperature=1.0):
+  """Glow encoder-decoder. n_levels of (Squeeze + Flow + Split) operations.
+
+  Args:
+    name: variable scope.
+    x: 4-D Tensor, shape=(NHWC).
+    hparams: HParams.
+    eps: Stores (glow(x) - mu) / sigma during the forward pass.
+      Used only to test if the network is reversible.
+    reverse: Forward or reverse pass.
+    cond_latents: list of lists of tensors.
+      outer length equals hparams.num_cond_latents
+      inner length equals hparams.num_levels - 1.
+    condition: If set to True, condition the encoder/decoder on cond_latents.
+    states: LSTM states, used only if hparams.latent_dist_encoder is set
+      to "conv_lstm".
+    temperature: Temperature set during sampling.
+  Returns:
+    x: If reverse, decoded image, else the encoded glow latent representation.
+    objective: log-likelihood.
+    eps: list of tensors, shape=(num_levels-1).
+      Stores (glow(x) - mu_level(x)) / sigma_level(x) for each level.
+    all_latents: list of tensors, shape=(num_levels-1).
+      Latent representations for each level.
+    new_states: list of tensors, shape=(num_levels-1).
+      Useful only if hparams.latent_dist_encoder="conv_lstm"; returns
+      the current state of each level.
+  """
+  # TODO(mechcoder) Change return_type to a dict to be backward compatible.
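+  # Illustrative shape walk-through (an example, assuming the factor-2
+  # squeeze and channel-halving split used below). For a (1, 64, 64, 4)
+  # input with n_levels=3:
+  #   level 0: squeeze -> (1, 32, 32, 16), split -> x=(1, 32, 32, 8), z_0=(1, 32, 32, 8)
+  #   level 1: squeeze -> (1, 16, 16, 32), split -> x=(1, 16, 16, 16), z_1=(1, 16, 16, 16)
+  #   level 2: squeeze -> (1, 8, 8, 64); no split at the last level.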
+ with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + + if states and len(states) != hparams.n_levels - 1: + raise ValueError("Expected length of states to be %d, got %d" % + (hparams.n_levels - 1, len(states))) + if states is None: + states = [None] * (hparams.n_levels - 1) + if eps and len(eps) != hparams.n_levels - 1: + raise ValueError("Expected length of eps to be %d, got %d" % + (hparams.n_levels - 1, len(eps))) + if eps is None: + eps = [None] * (hparams.n_levels - 1) + check_cond_latents(cond_latents, hparams) + + objective = 0.0 + all_eps = [] + all_latents = [] + new_states = [] + + if not reverse: + # Squeeze + Flow + Split + for level in range(hparams.n_levels): + x = squeeze("squeeze_%d" % level, x, factor=2, reverse=False) + + x, obj = revnet("revnet_%d" % level, x, hparams, reverse=False) + objective += obj + + if level < hparams.n_levels - 1: + curr_cond_latents = get_cond_latents_at_level( + cond_latents, level, hparams) + x, obj, eps, z, state = split("split_%d" % level, x, reverse=False, + cond_latents=curr_cond_latents, + condition=condition, + hparams=hparams, state=states[level]) + objective += obj + all_eps.append(eps) + all_latents.append(z) + new_states.append(state) + + return x, objective, all_eps, all_latents, new_states + + else: + for level in reversed(range(hparams.n_levels)): + if level < hparams.n_levels - 1: + + curr_cond_latents = get_cond_latents_at_level( + cond_latents, level, hparams) + + x, latent, state = split("split_%d" % level, x, eps=eps[level], + reverse=True, cond_latents=curr_cond_latents, + condition=condition, hparams=hparams, + state=states[level], + temperature=temperature) + new_states.append(state) + all_latents.append(latent) + + x, obj = revnet( + "revnet_%d" % level, x, hparams=hparams, reverse=True) + objective += obj + x = squeeze("squeeze_%d" % level, x, reverse=True) + return x, objective, all_latents[::-1], new_states[::-1] diff --git a/tensor2tensor/models/research/glow_ops_test.py b/tensor2tensor/models/research/glow_ops_test.py new file mode 100644 index 000000000..baab0fd1a --- /dev/null +++ b/tensor2tensor/models/research/glow_ops_test.py @@ -0,0 +1,511 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for tensor2tensor.models.research.glow_ops.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tempfile +from absl.testing import parameterized +import numpy as np +from six.moves import range +from six.moves import zip +from tensor2tensor.models.research import glow +from tensor2tensor.models.research import glow_ops +from tensor2tensor.utils import contrib +from tensor2tensor.utils import hparam +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +arg_scope = contrib.framework().arg_scope +add_arg_scope = contrib.framework().add_arg_scope + + +class GlowOpsTest(parameterized.TestCase, tf.test.TestCase): + + def get_glow_hparams(self): + hparams = glow.glow_hparams() + hparams.add_hparam("mode", tf_estimator.ModeKeys.TRAIN) + hparams.add_hparam("num_cond_latents", 1) + hparams.add_hparam("latent_architecture", "glow_resnet") + # Use latent skip connections + hparams.add_hparam("model_input", False) + hparams.add_hparam("latent_apply_dilations", False) + hparams.add_hparam("latent_skip", True) + hparams.add_hparam("latent_encoder_depth", 2) + hparams.add_hparam("latent_encoder_width", 256) + hparams.add_hparam("latent_pre_output_channels", 256) + hparams.add_hparam("latent_dist_encoder", "conv_net") + hparams.add_hparam("latent_time_filter_size", 3) + hparams.add_hparam("latent_activation", "relu") + hparams.add_hparam("latent_dropout", 0.0) + hparams.add_hparam("latent_noise", 0.0) + return hparams + + def test_get_variable_ddi(self): + with tf.Graph().as_default(): + x_t = tf.random_normal((5, 5)) + ddi = glow_ops.get_variable_ddi( + "x", (5, 5), initial_value=x_t, init=True) + with tf.Session() as session: + diff = ddi - x_t + self.assertTrue(np.allclose(session.run(diff), 0.0)) + + def test_actnorm(self): + """Test that actnorm provides activations with zero channel-mean.""" + with tf.Graph().as_default(): + x_t = tf.random_normal((16, 32, 32, 3), mean=50.0, stddev=2.0) + x_act = glow_ops.actnorm("actnorm", x_t, init=True) + with tf.Session() as session: + x_act_np, _ = session.run(x_act) + channel_mean = np.mean(x_act_np, axis=(0, 1, 2)) + channel_var = np.var(x_act_np, axis=(0, 1, 2)) + self.assertTrue(np.allclose(channel_mean, 0.0, atol=1e-3)) + self.assertTrue(np.allclose(channel_var, 1.0, atol=1e-3)) + + @parameterized.named_parameters( + ("inv_1x1", glow_ops.invertible_1x1_conv, "inv_1x1"), + ("affine", glow_ops.affine_coupling, "affine_coupling"), + ("additive", glow_ops.additive_coupling, "additive_coupling"), + ("actnorm", glow_ops.actnorm, "actnorm"), + ("affine_drop", glow_ops.affine_coupling, "affine_dropout", 0.5), + ("additive_drop", glow_ops.additive_coupling, "additive_dropout", 0.5)) + def test_invertibility(self, op, name, dropout=0.0): + with tf.Graph().as_default(): + tf.set_random_seed(42) + x = tf.random_uniform(shape=(16, 32, 32, 4)) + + if op in [glow_ops.affine_coupling, glow_ops.additive_coupling]: + with arg_scope([glow_ops.get_dropout], init=False): + x_inv, _ = op(name, x, reverse=False, dropout=dropout) + x_inv_inv, _ = op(name, x_inv, reverse=True, dropout=dropout) + else: + x_inv, _ = op(name, x, reverse=False) + x_inv_inv, _ = op(name, x_inv, reverse=True) + with tf.Session() as session: + session.run(tf.global_variables_initializer()) + diff = session.run(x - x_inv_inv) + self.assertTrue(np.allclose(diff, 0.0, atol=1e-5)) + + def test_add_edge_bias(self): + with tf.Graph().as_default(): + x = 
tf.random_uniform(shape=(16, 32, 32, 3)) + x_pad = glow_ops.add_edge_bias(x, [3, 3]) + with tf.Session() as session: + x_pad_np = session.run(x_pad) + + # Test expected output shape. + self.assertEqual(x_pad_np.shape, (16, 34, 34, 4)) + + def test_conv2d(self): + with tf.Graph().as_default(): + x = 10.0 * tf.random_uniform(shape=(16, 5, 5, 32)) + + with arg_scope([glow_ops.actnorm], init=True): + actnorm_conv2d = glow_ops.conv( + "actnorm_conv2d", x, output_channels=64, apply_actnorm=True) + actnorm_zeros2d = glow_ops.conv( + "actnorm_zeros2d", x, output_channels=64, apply_actnorm=False) + + with tf.Session() as session: + session.run(tf.global_variables_initializer()) + + # test if apply_actnorm is set to True, the first minibatch has + # zero mean and unit variance. + actnorm_np, zeros_np = session.run([actnorm_conv2d, actnorm_zeros2d]) + self.assertEqual(actnorm_np.shape, (16, 5, 5, 64)) + mean = np.mean(actnorm_np, axis=(0, 1, 2)) + var = np.var(actnorm_np, axis=(0, 1, 2)) + self.assertTrue(np.allclose(mean, 0.0, atol=1e-5)) + self.assertTrue(np.allclose(var, 1.0, atol=1e-5)) + + # test shape in case apply_actnorm is set to False, + self.assertEqual(zeros_np.shape, (16, 5, 5, 64)) + + @parameterized.named_parameters( + ("relu_act", "relu"), ("gatu_act", "gatu")) + def test_conv_stack(self, activation="relu"): + """Test output shape.""" + with tf.Graph().as_default(): + x = 10.0 * tf.random_uniform(shape=(16, 5, 5, 32)) + nn = glow_ops.conv_stack("nn", x, mid_channels=512, output_channels=64, + activation=activation) + + with tf.Session() as session: + session.run(tf.global_variables_initializer()) + nn_np = session.run(nn) + self.assertEqual(nn_np.shape, (16, 5, 5, 64)) + + # Initialized with zeros. + self.assertTrue(np.allclose(nn_np, 0.0)) + + def check_latent_to_dist(self, architecture): + with tf.Graph().as_default(): + x = tf.random_uniform(shape=(16, 5, 5, 32)) + hparams = hparam.HParams(architecture=architecture) + x_prior = glow_ops.latent_to_dist("split_prior", x, hparams=hparams, + output_channels=64) + mean_t, scale_t = x_prior.loc, x_prior.scale + with tf.Session() as session: + session.run(tf.global_variables_initializer()) + mean, scale = session.run([mean_t, scale_t]) + self.assertEqual(mean.shape, (16, 5, 5, 64)) + self.assertEqual(scale.shape, (16, 5, 5, 64)) + self.assertTrue(np.allclose(mean, 0.0)) + self.assertTrue(np.allclose(scale, 1.0)) + + def test_latent_to_dist(self): + for architecture in ["single_conv", "glow_nn", "glow_resnet"]: + self.check_latent_to_dist(architecture) + + def test_split(self): + with tf.Graph().as_default(): + x = tf.random_uniform(shape=(16, 5, 5, 32)) + x_inv, _, eps, z, _ = glow_ops.split("split", x) + x_inv_inv, _, _ = glow_ops.split("split", x_inv, reverse=True, eps=eps) + with tf.Session() as session: + session.run(tf.global_variables_initializer()) + x_inv_np, diff, z_np = session.run([x_inv, x - x_inv_inv, z]) + self.assertEqual(z_np.shape, (16, 5, 5, 16)) + self.assertEqual(x_inv_np.shape, (16, 5, 5, 16)) + self.assertTrue(np.allclose(diff, 0.0, atol=1e-5)) + + @parameterized.named_parameters( + ("aff_revnet", glow_ops.revnet, "aff_rev", "affine"), + ("add_revnet", glow_ops.revnet, "add_rev", "additive"), + ("aff_rev_step", glow_ops.revnet_step, "aff_rev_step", "affine"), + ("add_rev_step", glow_ops.revnet_step, "add_rev_step", "additive"),) + def test_revnet_reversibility(self, op, name, coupling): + with tf.Graph().as_default(): + hparams = glow.glow_hparams() + hparams.depth = 2 + hparams.coupling = coupling + x = 
tf.random_uniform(shape=(16, 32, 32, 4), seed=0) + x_inv, _ = op(name, x, hparams, reverse=False) + x_inv_inv, _ = op(name, x_inv, hparams, reverse=True) + with tf.Session() as session: + session.run(tf.global_variables_initializer()) + diff = session.run(x - x_inv_inv) + self.assertTrue(np.allclose(diff, 0.0, atol=1e-2)) + + def test_encoder_decoder(self): + with tf.Graph().as_default(): + hparams = glow.glow_hparams() + hparams.n_levels = 3 + hparams.depth = 6 + rng = np.random.RandomState(0) + x_np = rng.rand(1, 64, 64, 4) + x_t = tf.convert_to_tensor(x_np, dtype=tf.float32) + init_ops = [glow_ops.get_variable_ddi, glow_ops.actnorm] + with arg_scope(init_ops, init=True): + x_inv, _, eps, z_levels, _ = glow_ops.encoder_decoder( + "encoder_decoder", x_t, hparams, reverse=False) + x_inv_inv, _, z_inv_levels, _ = glow_ops.encoder_decoder( + "encoder_decoder", x_inv, hparams, eps=eps, reverse=True) + + with tf.Session() as session: + session.run(tf.global_variables_initializer()) + x_inv_np = session.run(x_inv) + z_levels_np, z_inv_levels_np, x_inv_inv_np = session.run( + [z_levels, z_inv_levels, x_inv_inv]) + diff = x_inv_inv_np - x_np + self.assertLen(z_levels_np, 2) + self.assertLen(z_inv_levels_np, 2) + # (h_i, w_i, c_i) = (h_{i-1}/f, w_{i-1}/f, c_{i-1}*(2f)/2) where (f=2) + self.assertEqual(z_levels_np[0].shape, (1, 32, 32, 8)) + self.assertEqual(z_levels_np[1].shape, (1, 16, 16, 16)) + self.assertEqual(z_inv_levels_np[0].shape, (1, 32, 32, 8)) + self.assertEqual(z_inv_levels_np[1].shape, (1, 16, 16, 16)) + self.assertTrue(x_inv_np.shape, (1, 8, 8, 64)) + self.assertTrue(np.allclose(diff, 0.0, atol=1e-2)) + + def test_encoder_decoder_practical_usage(self): + """Tests the following sequence of operations. + + 1. Define forward network with arg_scope(init=True). + 2. Run one-forward pass to do data-dependent initialization and save. + 3. Define forward and reverse network with arg_scope(init=False) + 4. 
Check that reverse(forward(x)) == x + """ + hparams = glow.glow_hparams() + hparams.n_levels = 2 + hparams.depth = 12 + + with tf.Graph().as_default(): + rng = np.random.RandomState(0) + x_rand = np.asarray(rng.rand(1, 4, 4, 4), dtype=np.float32) + x_t = tf.convert_to_tensor(x_rand) + + ops = [glow_ops.get_variable_ddi, glow_ops.actnorm] + with arg_scope(ops, init=True): + x_inv, _, _, _, _ = glow_ops.encoder_decoder( + "revnet", x_t, hparams, reverse=False) + curr_dir = tempfile.mkdtemp() + model_path = os.path.join(curr_dir, "model") + + with tf.Session() as session: + saver = tf.train.Saver() + session.run(tf.global_variables_initializer()) + session.run(x_inv) + saver.save(session, model_path) + + with tf.Graph().as_default(): + rng = np.random.RandomState(0) + x_rand = np.asarray(rng.rand(1, 4, 4, 4), dtype=np.float32) + x_t = tf.convert_to_tensor(x_rand) + ops = [glow_ops.get_variable_ddi, glow_ops.actnorm] + with arg_scope(ops, init=False): + x_inv2, _, all_eps, _, _ = glow_ops.encoder_decoder( + "revnet", x_t, hparams, reverse=False) + x_inv_inv_, _, _, _ = glow_ops.encoder_decoder( + "revnet", x_inv2, hparams, eps=all_eps, reverse=True) + + with tf.Session() as session: + saver = tf.train.Saver() + saver.restore(session, model_path) + x_inv_inv_np = session.run(x_inv_inv_) + diff = np.abs(x_inv_inv_np - x_rand) + self.assertTrue(np.allclose(diff, 0.0, atol=1e-3)) + + def test_scale_gaussian_prior(self): + with tf.Graph().as_default(): + rng = np.random.RandomState(0) + img_shape = (16, 2, 2, 2) + x_rand = np.asarray(rng.randint(0, 10, img_shape), dtype=np.float32) + z_rand = np.asarray(rng.randint(0, 10, img_shape), dtype=np.float32) + x_t = tf.convert_to_tensor(x_rand) + z_t = tf.convert_to_tensor(z_rand) + dist = glow_ops.scale_gaussian_prior( + "scale_gaussian_prior", z_t, x_t, trainable=True) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + mean, scale = sess.run([dist.loc, dist.scale]) + self.assertTrue(np.allclose(mean, z_rand)) + self.assertTrue(np.allclose(scale, 1.0)) + + def check_split_latent_conditioning(self, merge_std): + with tf.Graph().as_default(): + rng = np.random.RandomState(0) + x_rand = rng.randn(12, 32, 32, 32).astype(np.float32) + latent_rand = rng.randn(12, 32, 32, 16).astype(np.float32) + x_t = tf.convert_to_tensor(x_rand) + latent_t = tf.convert_to_tensor(latent_rand) + hparams = glow.glow_hparams() + hparams.level_scale = merge_std + hparams.add_hparam("latent_dist_encoder", "pointwise") + + # Test initalization. + # x2 ~ N(scale * latent, 1.0) where initial scale is 1.0 + exp_x2 = x_rand[:, :, :, 16:] + exp_eps = x_rand[:, :, :, 16:] - latent_rand + x_inv, _, eps, x2_t, _ = glow_ops.split( + merge_std, x_t, cond_latents=latent_t, hparams=hparams, + condition=True) + # Test reversibility. 
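+      # Feeding the forward-pass eps back into the reverse pass makes it
+      # deterministic: x2 is reconstructed as mu(x1) + eps * sigma(x1), so
+      # the round trip should recover the original input exactly.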
+ x_inv_inv, _, _ = glow_ops.split( + merge_std, x_inv, cond_latents=latent_t, eps=eps, reverse=True, + hparams=hparams, condition=True) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + actual_eps, actual_x2, diff_np = sess.run([eps, x2_t, x_inv_inv - x_t]) + self.assertTrue(np.allclose(diff_np, 0.0, atol=1e-5)) + self.assertTrue(np.allclose(actual_eps, exp_eps)) + self.assertTrue(np.allclose(exp_x2, actual_x2)) + + def test_split_latent_conditioning(self): + for merge_std in ["normal", "prev_level", "prev_step"]: + self.check_split_latent_conditioning(merge_std) + + @parameterized.named_parameters( + ("lstm_skip", "conv_lstm", True), + ("lstm_no_skip", "conv_lstm", False), + ("conv_net_skip", "conv_net", True), + ("conv_net_no_skip", "conv_net", False), + ("conv3d_skip", "conv3d_net", False), + ("conv3d_no_skip", "conv3d_net", True), + ("conv3d_skip_drop", "conv3d_net", False, 0.1), + ("conv3d_no_skip_drop", "conv3d_net", True, 0.1), + ("conv3d_no_skip_drop_noise", "conv3d_net", True, 0.1, 0.1),) + def test_latent_dist_encoder(self, encoder="conv_lstm", skip=True, + dropout=0.0, noise=0.1): + with tf.Graph().as_default(): + rng = np.random.RandomState(0) + # Initialize x, latent, state. + x_rand = rng.randn(12, 32, 32, 16).astype(np.float32) + latent_rand = rng.randn(12, 32, 32, 16).astype(np.float32) + state_rand = rng.randn(12, 32, 32, 256).astype(np.float32) + x_t = tf.convert_to_tensor(x_rand) + latent_t = tf.convert_to_tensor(latent_rand) + state_t = tf.convert_to_tensor(state_rand) + if encoder in ["conv_net", "conv3d_net"]: + latent_t = [latent_t, latent_t] + init_state = tf.nn.rnn_cell.LSTMStateTuple(state_t, state_t) + hparams = self.get_glow_hparams() + hparams.latent_dist_encoder = encoder + hparams.latent_skip = skip + hparams.latent_encoder_width = 256 + hparams.latent_dropout = dropout + hparams.latent_noise = noise + + with arg_scope([glow_ops.get_dropout], init=False): + prior_dist, new_state = glow_ops.compute_prior( + "prior", x_t, latent=latent_t, hparams=hparams, state=init_state, + condition=True) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + # Test initialization: + # Scale is 1.0 + # If skip is set to True, then mean equals the input latent. + # If skip, is set to False, then the mean is zero. + ops = [prior_dist.loc, prior_dist.scale] + mean, scale = sess.run(ops) + + if skip: + self.assertTrue(np.allclose(latent_rand - mean, 0.0)) + else: + self.assertTrue(np.allclose(mean, 0.0)) + self.assertTrue(np.allclose(scale, 1.0)) + + # State update. + if encoder == "conv_lstm": + state_diff = sess.run(new_state.h - init_state.h) + self.assertFalse(np.allclose(state_diff, 0.0)) + + def test_conv3d(self): + with tf.Graph().as_default(): + x = 10.0 * tf.random_uniform(shape=(16, 4, 5, 5, 32)) + + with arg_scope([glow_ops.actnorm], init=True): + conv3d = glow_ops.conv( + "conv3d", x, output_channels=64, apply_actnorm=True) + conv3d_zeros = glow_ops.conv( + "conv3d_zeros", x, output_channels=64, apply_actnorm=False, + conv_init="zeros") + + with tf.Session() as session: + session.run(tf.global_variables_initializer()) + + # test if apply_actnorm is set to True, the first minibatch has + # zero mean and unit variance. 
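+        # The check below is done per time-step: for each of the 4 steps the
+        # channel-wise mean/variance over (batch, height, width) should be
+        # ~0 and ~1 after the data-dependent init.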
+ conv3d_np, conv3d_zeros_np = session.run([conv3d, conv3d_zeros]) + self.assertEqual(conv3d_np.shape, (16, 4, 5, 5, 64)) + for i in range(4): + curr_step = conv3d_np[:, i, :, :, :] + mean = np.mean(curr_step, axis=(0, 1, 2)) + var = np.var(curr_step, axis=(0, 1, 2)) + self.assertTrue(np.allclose(mean, 0.0, atol=1e-5)) + self.assertTrue(np.allclose(var, 1.0, atol=1e-5)) + + # test shape in case apply_actnorm is set to False, + self.assertTrue(np.allclose(conv3d_zeros_np, 0.0)) + + def test_actnorm_3d(self): + with tf.Graph().as_default(): + x_t = tf.random_normal((16, 5, 32, 32, 3), mean=50.0, stddev=2.0) + ops = [glow_ops.actnorm, glow_ops.get_variable_ddi] + with arg_scope(ops, init=True): + x_act, _ = glow_ops.actnorm_3d("actnorm", x_t) + with tf.Session() as session: + x_act_np = session.run(x_act) + # Mean and standard deviation per time-step equals zero and one. + for time_step in range(5): + x_act_curr = x_act_np[:, time_step, :, :, :] + channel_mean = np.mean(x_act_curr, axis=(0, 1, 2)) + channel_var = np.var(x_act_curr, axis=(0, 1, 2)) + self.assertTrue(np.allclose(channel_mean, 0.0, atol=1e-3)) + self.assertTrue(np.allclose(channel_var, 1.0, atol=1e-3)) + + @parameterized.named_parameters( + ("dil_relu", True, "relu"), ("no_dil_relu", False, "relu"), + ("dil_gatu", True, "gatu"), ("no_dil_gatu", False, "gatu"), + ("dil_relu_drop", True, "relu", 0.1), + ("dil_gatu_drop", True, "gatu", 0.1), + ("dil_gatu_drop_noise", True, "gatu", 0.1, 0.1), + ("gatu_drop_single_step", False, "gatu", 0.1, 0.1, 1), + ("dil_gatu_drop_single_step", True, "gatu", 0.1, 0.1, 1),) + def test_temporal_latent_to_dist(self, apply_dilation, activation, + dropout=0.0, noise=0.1, num_steps=5): + with tf.Graph().as_default(): + hparams = self.get_glow_hparams() + hparams.latent_apply_dilations = apply_dilation + hparams.latent_activation = activation + hparams.latent_dropout = dropout + hparams.latent_noise = noise + latent_shape = (16, num_steps, 32, 32, 48) + latents = tf.random_normal(latent_shape) + dist = glow_ops.temporal_latent_to_dist( + "tensor_to_dist", latents, hparams) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + # dilated conv_3d is not available on CPU. + is_gpu = tf.test.is_gpu_available() + if not apply_dilation or is_gpu: + mean, scale = dist.loc, dist.scale + mean_np, scale_np = sess.run([mean, scale]) + self.assertTrue(np.allclose(mean_np, 0.0)) + self.assertTrue(np.allclose(scale_np, 1.0)) + + @parameterized.named_parameters( + ("temp_1.0", 1.0), ("temp_0.9", 0.9), ("temp_0.7", 0.7), + ("temp_0.3", 0.3), ("temp_0.1", 0.1), ("temp_0.0", 0.0)) + def test_temperature_normal(self, temperature): + with tf.Graph().as_default(): + rng = np.random.RandomState(0) + # in numpy, so that multiple calls don't trigger different random numbers. + loc_t = tf.convert_to_tensor(rng.randn(5, 5)) + scale_t = tf.convert_to_tensor(rng.rand(5, 5)) + tempered_normal = glow_ops.TemperedNormal( + loc=loc_t, scale=scale_t, temperature=temperature) + # smoke test for a single sample. 
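+      # TemperedNormal scales the Gaussian's standard deviation by
+      # `temperature`, so the empirical std of the samples drawn below should
+      # match scale * temperature (asserted at the end of the test).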
+ smoke_sample = tempered_normal.sample() + samples = tempered_normal.sample((10000,), seed=0) + + with tf.Session() as sess: + ops = [samples, loc_t, scale_t, smoke_sample] + samples_np, loc_exp, scale_exp, _ = sess.run(ops) + scale_exp *= temperature + loc_act = np.mean(samples_np, axis=0) + scale_act = np.std(samples_np, axis=0) + self.assertTrue(np.allclose(loc_exp, loc_act, atol=1e-2)) + self.assertTrue(np.allclose(scale_exp, scale_act, atol=1e-2)) + + def linear_interpolate_rank(self): + with tf.Graph().as_default(): + # Since rank is 1, the first channel should remain 1.0. + # and the second channel should be interpolated between 1.0 and 6.0 + z1 = np.ones(shape=(4, 4, 2)) + z2 = np.copy(z1) + z2[:, :, 0] += 0.01 + z2[:, :, 1] += 5.0 + coeffs = np.linspace(0.0, 1.0, 11) + z1 = np.expand_dims(z1, axis=0) + z2 = np.expand_dims(z2, axis=0) + tensor1 = tf.convert_to_tensor(z1, dtype=tf.float32) + tensor2 = tf.convert_to_tensor(z2, dtype=tf.float32) + lin_interp_max = glow_ops.linear_interpolate_rank( + tensor1, tensor2, coeffs) + with tf.Session() as sess: + lin_interp_np_max = sess.run(lin_interp_max) + for lin_interp_np, coeff in zip(lin_interp_np_max, coeffs): + exp_val = 1.0 + coeff * (6.0 - 1.0) + self.assertTrue(np.allclose(lin_interp_np[:, :, 0], 1.0)) + self.assertTrue(np.allclose(lin_interp_np[:, :, 1], exp_val)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/research/glow_test.py b/tensor2tensor/models/research/glow_test.py new file mode 100644 index 000000000..ef14224b5 --- /dev/null +++ b/tensor2tensor/models/research/glow_test.py @@ -0,0 +1,129 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for tensor2tensor.models.research.glow_model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tempfile +import numpy as np +from six.moves import range +from tensor2tensor import problems +from tensor2tensor.data_generators import cifar # pylint: disable=unused-import +from tensor2tensor.models.research import glow +from tensor2tensor.utils import registry # pylint: disable=unused-import +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +MODES = tf_estimator.ModeKeys + + +class GlowModelTest(tf.test.TestCase): + + def batch(self, one_shot_iterator, batch_size=16): + x_batch, y_batch = [], [] + for _ in range(batch_size): + curr = one_shot_iterator.get_next() + x_batch.append(curr['inputs']) + y_batch.append(curr['targets']) + return tf.stack(x_batch), tf.stack(y_batch) + + def test_glow(self): + with tf.Graph().as_default(): + hparams = glow.glow_hparams() + hparams.depth = 15 + hparams.n_levels = 2 + hparams.init_batch_size = 256 + hparams.batch_size = 1 + hparams.data_dir = '' + cifar_problem = problems.problem('image_cifar10_plain_random_shift') + hparams.problem = cifar_problem + model = glow.Glow(hparams, tf_estimator.ModeKeys.TRAIN) + train_dataset = cifar_problem.dataset(MODES.TRAIN) + one_shot = train_dataset.make_one_shot_iterator() + x_batch, y_batch = self.batch(one_shot) + features = {'inputs': x_batch, 'targets': y_batch} + _, obj_dict = model.body(features) + objective = obj_dict['training'] + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + + # Run initialization. + init_op = tf.get_collection('glow_init_op') + sess.run(init_op) + + # Run forward pass. + obj_np = sess.run(objective) + mean_obj = np.mean(obj_np) + + # Check that one forward-propagation does not NaN, i.e + # initialization etc works as expected. 
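+        # (The (0, 10) window is only a loose sanity bound on the
+        # per-example objective of an untrained model; the exact scale
+        # depends on how glow.Glow normalizes its training objective.)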
+ self.assertTrue(mean_obj > 0 and mean_obj < 10.0) + + def test_glow_inference(self): + hparams = glow.glow_hparams() + hparams.depth = 15 + hparams.n_levels = 2 + hparams.data_dir = '' + curr_dir = tempfile.mkdtemp() + + # Training pipeline + with tf.Graph().as_default(): + cifar_problem = problems.problem('image_cifar10_plain_random_shift') + hparams.problem = cifar_problem + model = glow.Glow(hparams, tf_estimator.ModeKeys.TRAIN) + train_dataset = cifar_problem.dataset(MODES.TRAIN) + one_shot = train_dataset.make_one_shot_iterator() + x_batch, y_batch = self.batch(one_shot) + features = {'inputs': x_batch, 'targets': y_batch} + model_path = os.path.join(curr_dir, 'model') + model(features) + + with tf.Session() as session: + saver = tf.train.Saver() + session.run(tf.global_variables_initializer()) + + init_op = tf.get_collection('glow_init_op') + session.run(init_op) + z = session.run([model.z]) + mean_z = np.mean(z) + is_undefined = np.isnan(mean_z) or np.isinf(mean_z) + self.assertTrue(not is_undefined) + saver.save(session, model_path) + + # Inference pipeline + with tf.Graph().as_default(): + cifar_problem = problems.problem('image_cifar10_plain_random_shift') + hparams.problem = cifar_problem + model = glow.Glow(hparams, tf_estimator.ModeKeys.PREDICT) + test_dataset = cifar_problem.dataset(MODES.EVAL) + one_shot = test_dataset.make_one_shot_iterator() + x_batch, y_batch = self.batch(one_shot) + features = {'inputs': x_batch, 'targets': y_batch} + model_path = os.path.join(curr_dir, 'model') + + predictions = model.infer(features) + with tf.Session() as session: + saver = tf.train.Saver() + saver.restore(session, model_path) + predictions_np = session.run(predictions) + self.assertTrue(np.all(predictions_np <= 255)) + self.assertTrue(np.all(predictions_np >= 0)) + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/models/research/lm_experiments.py b/tensor2tensor/models/research/lm_experiments.py new file mode 100644 index 000000000..91c074ac4 --- /dev/null +++ b/tensor2tensor/models/research/lm_experiments.py @@ -0,0 +1,159 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Experiments with Language Models. + +Train languagemodel_lm1b32k_packed and measure log-ppl/token (dev). +These numbers need to be multiplied by 1.107893 to get log-ppl/word + for comparison with published results. + +Basic training regimen is 300k steps * 8 cores * batch_size=4096 + = about 10 epochs + +Make sure to eval on CPU or GPU using a large number of steps (1000), since the +TPU eval code doesn't know how to stop at the end of the dev data. Also need +to set activation_type=float32 for eval, since there is currently a conflict +between daisy_chain_getter and activation_type=bfloat16. 
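+
+As a worked example of that conversion, using the lmx_base numbers below:
+log-ppl/token = 3.40 gives log-ppl/word = 3.40 * 1.107893 ~ 3.77, so
+PPL/word = exp(3.77) ~ 43, consistent with the reported 43.2.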
+ +RESULTS: + lmx_base: log-ppl/tok=3.40 PPL/word=43.2 (10 hours*8 cores) + lmx_h1k_f4k: + lmx_h2k_f8k: +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.models import transformer +from tensor2tensor.utils import registry + + +@registry.register_hparams +def lmx_base(): + """Transformer on languagemodel_lm1b32k_packed. 50M Params.""" + hparams = transformer.transformer_tpu() + # sharing is counterproductive when underparameterized + hparams.shared_embedding_and_softmax_weights = False + # we judge by log-ppl, so label smoothing hurts. + hparams.label_smoothing = 0.0 + # This makes the batch size on GPU the same as on TPU for a packed problem + # with sequence length 256. + # TODO(noam): fix the mess that is the data reading pipeline. + hparams.max_length = 256 + # larger batch since we only have a decoder + hparams.batch_size = 4096 + # save some memory so we can have a larger model + hparams.activation_dtype = "bfloat16" + return hparams + + +@registry.register_hparams +def lmx_h1k_f4k(): + """Transformer on languagemodel_lm1b32k_packed. 140M Params.""" + hparams = lmx_base() + hparams.hidden_size = 1024 + hparams.filter_size = 4096 + return hparams + + +@registry.register_hparams +def lmx_h2k_f8k(): + """HParams for training languagemodel_lm1b32k_packed. 430M Params.""" + hparams = lmx_base() + hparams.hidden_size = 2048 + hparams.filter_size = 8192 + return hparams + + +@registry.register_hparams +def lmx_h3k_f12k(): + """HParams for training languagemodel_lm1b32k_packed. 880M Params.""" + hparams = lmx_base() + hparams.hidden_size = 3072 + hparams.filter_size = 12288 + hparams.batch_size = 2048 + hparams.weight_dtype = "bfloat16" + return hparams + + +@registry.register_hparams +def lmx_h4k_f16k(): + """HParams for training languagemodel_lm1b32k_packed. 1470M Params.""" + hparams = lmx_base() + hparams.hidden_size = 4096 + hparams.filter_size = 16384 + hparams.batch_size = 1024 + hparams.weight_dtype = "bfloat16" + return hparams + + +@registry.register_hparams +def lmx_relative(): + """Language model using relative attention.""" + hparams = lmx_base() + hparams.self_attention_type = "dot_product_relative_v2" + hparams.activation_dtype = "float32" + hparams.weight_dtype = "float32" + return hparams + + +@registry.register_hparams +def lmx_relative_nopos(): + """Language model using relative attention and no positional encoding.""" + hparams = lmx_relative() + hparams.pos = "none" + return hparams + + +@registry.register_hparams +def lmx_moe(): + """Transformer with mixture of experts. 140M Params.""" + hparams = lmx_base() + hparams.ffn_layer = "local_moe_tpu" + return hparams + + +@registry.register_hparams +def lmx_moe_h1k_f4k_x32(): + """Transformer with mixture of experts. 890M Params.""" + hparams = lmx_h1k_f4k() + hparams.ffn_layer = "local_moe_tpu" + hparams.moe_num_experts = 32 + hparams.weight_dtype = "bfloat16" + hparams.batch_size = 8192 + return hparams + + +@registry.register_hparams +def lmx_moe_h1k_f8k_x16(): + """Transformer with mixture of experts. 890M Params.""" + hparams = lmx_h1k_f4k() + hparams.filter_size = 8192 + hparams.ffn_layer = "local_moe_tpu" + hparams.moe_num_experts = 16 + hparams.weight_dtype = "bfloat16" + hparams.batch_size = 8192 + return hparams + + +@registry.register_hparams +def lmx_h1k_f64k(): + """HParams for training languagemodel_lm1b32k_packed. 
880M Params.""" + hparams = lmx_base() + hparams.hidden_size = 1024 + hparams.filter_size = 65536 + hparams.batch_size = 2048 + return hparams diff --git a/tensor2tensor/models/research/moe.py b/tensor2tensor/models/research/moe.py new file mode 100644 index 000000000..89e26a174 --- /dev/null +++ b/tensor2tensor/models/research/moe.py @@ -0,0 +1,679 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Mixture-of-experts code. + +Interfaces and algorithms are under development and subject to rapid change +without notice. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import mesh_tensorflow as mtf +import tensorflow.compat.v1 as tf + + +def transformer_moe_layer_v1(inputs, output_dim, hparams, train, + master_dtype=tf.bfloat16, + slice_dtype=tf.float32): + """Local mixture of experts that works well on TPU. + + Adapted from the paper https://arxiv.org/abs/1701.06538 + + Note: until the algorithm and inferface solidify, we pass in a hyperparameters + dictionary in order not to complicate the interface in mtf_transformer.py . + Once this code moves out of "research", we should pass the hyperparameters + separately. + + Hyperparameters used: + hparams.moe_num_experts: number of experts + hparams.moe_hidden_size: size of hidden layer in each expert + hparams.moe_group_size: size of each "group" for gating purposes + hparams.moe_capacity_factor_train: a float + hparams.moe_capacity_factor_eval: a float + hparams.moe_gating: a string + + all hyperparmeters used by _top_2_gating() + + The number of parameters in the gating network is: + (input_dim.size * hparams.num_experts) + + + The number of parameters in the experts themselves is: + (hparams.num_experts + * (input_dim.size + output_dim.size) + * hparams.moe_hidden_size) + + The input is n-dimensional: [, input_dim], consisting + of the representations of all positions in a batch of sequences. + + Each position of each sequence is sent to 0-2 experts. The expert + choices and the combination weights are determined by a learned gating + function. + + This function returns a small auxiliary loss that should be added to the + training loss of the model. This loss helps to balance expert usage. + Without the loss, it is very likely that a few experts will be trained and + the rest will starve. + + Several hacks are necessary to get around current TPU limitations: + + - To ensure static shapes, we enforce (by truncation/padding) + that each sequence send the same number of elements to each expert. + + It would make more sense to enforce this equality over the entire batch, + but due to our hacked-up gather-by-matmul implementation, we need to divide + the batch into "groups". For each group, the same number of elements + are sent to each expert. + + TODO(noam): Factor this code better. We want to be able to substitute + different code for the experts themselves. 
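+
+  As a rough, illustrative size estimate (the model dimension of 1024 is an
+  assumption for this example, not a value defined in this file): with the
+  defaults from set_default_moe_hparams() -- moe_num_experts=16,
+  moe_hidden_size=4096 -- and input_dim = output_dim = 1024, the gating
+  network has 1024 * 16 = 16,384 parameters and the experts have
+  16 * (1024 + 1024) * 4096 ~ 134M parameters.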
+ + Args: + inputs: a mtf.Tensor with shape [, length_dim, input_dim] + output_dim: a mtf.Dimension (for Transformer, this is input_dim) + hparams: model hyperparameters + train: a boolean + master_dtype: a tf.dtype + slice_dtype: a tf.dtype + + Returns: + outputs: a Tensor with shape [, length_dim, output_dim] + loss: a mtf scalar + + Raises: + ValueError: on unrecognized hparams.moe_gating + """ + orig_inputs = inputs + input_dim = inputs.shape.dims[-1] + hidden_dim = mtf.Dimension("expert_hidden", hparams.moe_hidden_size) + experts_dim = mtf.Dimension("experts", hparams.moe_num_experts) + group_size_dim = mtf.Dimension("group", hparams.moe_group_size) + batch_dim = mtf.Dimension( + orig_inputs.shape[0].name, + orig_inputs.shape.size // (group_size_dim.size * input_dim.size)) + inputs = mtf.reshape(inputs, [batch_dim, group_size_dim, input_dim]) + + # Each sequence sends expert_capacity positions to each expert. + capacity_factor = ( + hparams.moe_capacity_factor_train if train else + hparams.moe_capacity_factor_eval) + expert_capacity = min( + group_size_dim.size, + int((group_size_dim.size * capacity_factor) / experts_dim.size)) + expert_capacity_dim = mtf.Dimension("expert_capacity", expert_capacity) + + experts_dim_unsplit = mtf.Dimension("expert_unsplit", experts_dim.size) + batch_dim_unsplit = mtf.Dimension("batch_unsplit", batch_dim.size) + + if hparams.moe_gating == "top_2": + dispatch_tensor, combine_tensor, loss = _top_2_gating( + inputs=inputs, + outer_expert_dims=None, + experts_dim=experts_dim_unsplit, + expert_capacity_dim=expert_capacity_dim, + hparams=hparams, + train=train) + else: + raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating) + + # put num_experts dimension first to make split easier in alltoall + expert_inputs = mtf.einsum([inputs, dispatch_tensor], mtf.Shape( + [experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim])) + + expert_inputs = mtf.reshape(expert_inputs, mtf.Shape( + [experts_dim, batch_dim_unsplit, expert_capacity_dim, input_dim])) + + # Now feed the expert inputs through the experts. + h = mtf.layers.dense( + expert_inputs, hidden_dim, expert_dims=[experts_dim], + activation=mtf.relu, use_bias=False, master_dtype=master_dtype, + slice_dtype=slice_dtype, name="x0") + expert_output = mtf.layers.dense( + h, output_dim, expert_dims=[experts_dim], use_bias=False, + master_dtype=master_dtype, slice_dtype=slice_dtype, name="x1") + + expert_output = mtf.reshape(expert_output, mtf.Shape( + [experts_dim_unsplit, batch_dim, expert_capacity_dim, input_dim])) + + output = mtf.einsum([expert_output, combine_tensor], mtf.Shape( + [batch_dim, group_size_dim, output_dim])) + + output = mtf.reshape(output, orig_inputs.shape.dims[:-1] + [output_dim]) + + return output, loss * hparams.moe_loss_coef + + +def transformer_moe_layer_v2(inputs, output_dim, hparams, train, + master_dtype=tf.bfloat16, slice_dtype=tf.float32): + """2-level mixture of experts. + + Adapted from the paper https://arxiv.org/abs/1701.06538 + + Note: until the algorithm and inferface solidify, we pass in a hyperparameters + dictionary in order not to complicate the interface in mtf_transformer.py . + Once this code moves out of "research", we should pass the hyperparameters + separately. 
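+
+  In this two-level (hierarchical) variant, the first gating step routes each
+  position to one of hparams.moe_num_experts[0] expert groups and the second
+  gating step routes it to one of hparams.moe_num_experts[1] experts within
+  that group; the dimensions cheat sheet below spells out the shapes involved.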
+
+  Hyperparameters used:
+    hparams.moe_num_experts: number of experts
+    hparams.moe_hidden_size: size of hidden layer in each expert
+    hparams.moe_group_size: size of each "group" for gating purposes
+    hparams.moe_capacity_factor_train: a float
+    hparams.moe_capacity_factor_eval: a float
+    hparams.moe_capacity_factor_second_level: a float
+    hparams.moe_gating: a string
+    + all hyperparameters used by _top_2_gating()
+
+  One set of params is used for the experts in the first level and a
+  different set of params per expert in the second level.
+  The number of parameters in the gating networks is:
+    (input_dim.size * hparams.num_experts[0]) +
+    (input_dim.size * hparams.num_experts[0] * hparams.num_experts[1])
+
+  The number of parameters in the experts themselves is:
+    (hparams.num_experts
+     * (input_dim.size + output_dim.size)
+     * hparams.moe_hidden_size)
+
+  The input is n-dimensional: [<batch_and_length_dims>, input_dim], consisting
+  of the representations of all positions in a batch of sequences.
+
+  Each position of each sequence is sent to 0-3 experts. The expert
+  choices and the combination weights are determined by a learned gating
+  function.
+
+  This function returns a small auxiliary loss that should be added to the
+  training loss of the model. This loss helps to balance expert usage.
+  Without the loss, it is very likely that a few experts will be trained and
+  the rest will starve.
+
+  Several hacks are necessary to get around current TPU limitations:
+
+  - To ensure static shapes, we enforce (by truncation/padding)
+    that each sequence send the same number of elements to each expert.
+
+    It would make more sense to enforce this equality over the entire batch,
+    but due to our hacked-up gather-by-matmul implementation, we need to divide
+    the batch into "groups". For each group, the same number of elements
+    are sent to each expert.
+
+  TODO(noam): Factor this code better. We want to be able to substitute
+  different code for the experts themselves.
+
+  Dimensions cheat sheet:
+    a, b: batch size
+    l: original sequence length
+    m: input depth
+    n: output depth
+    g, h: number of groups
+    s, t: group size
+    x, y: number of experts
+    c, d: expert capacity
+
+    input: [a0, b1, l, m]
+    input: [a0, g1, s, m]
+    dispatch_tensor_x: [a0, g1, s, x, c]
+    expert_input: [a0, g1, x, c, m]
+    alltoall: [a0, g, x1, c, m]
+    alltoall: [a0, g, x1, c, m]
+    transpose: [x1, a0, g, c, m]
+    reshape: [x1, h0, s, m]
+    assignment2: [x1, h0, t, y, d]
+    expert_input2: [x1, h0, y, d, m]
+    alltoall: [x1, h, y0, d, m]
+    ...
+ reverse of that + + gating params 0: [m, x] + gating params 1: [x1, m, y] + + expert params: + [x1, y0, m, hidden] + [x1, y0, hidden, n] + + Args: + inputs: a mtf.Tensor with shape [a, b, l, m] + output_dim: a mtf.Dimension (for Transformer, this is input_dim) + hparams: model hyperparameters + train: a boolean + master_dtype: a tf.dtype + slice_dtype: a tf.dtype + + Returns: + outputs: a Tensor with shape [a, b, l, n] + loss: a mtf scalar + + Raises: + ValueError: on unrecognized hparams.moe_gating + """ + insert_outer_batch_dim = (len(inputs.shape.dims) == 3) + if insert_outer_batch_dim: + inputs = mtf.reshape( + inputs, [mtf.Dimension("outer_batch", 1)] + inputs.shape.dims) + + assert len(hparams.moe_num_experts) == 2 + a0, b1, l, m = inputs.shape.dims + hidden_dim = mtf.Dimension("expert_hidden", hparams.moe_hidden_size) + x1 = mtf.Dimension("expert_x", hparams.moe_num_experts[0]) + y0 = mtf.Dimension("expert_y", hparams.moe_num_experts[1]) + x = mtf.Dimension("expert_x_unsplit", hparams.moe_num_experts[0]) + y = mtf.Dimension("expert_y_unsplit", hparams.moe_num_experts[1]) + n = output_dim + + # We "cheat" here and look at the mesh shape and layout. This is to ensure + # that the number of groups (g.size) is a multiple of the mesh dimension + # over which those groups are split. + num_groups, group_size = _split_into_groups( + b1.size * l.size, hparams.moe_group_size, + mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, b1)) + g1 = mtf.Dimension(b1.name, num_groups) + g = mtf.Dimension(b1.name + "_unsplit", g1.size) + s = mtf.Dimension("group_size_x", group_size) + + # Each sequence sends (at most?) expert_capacity positions to each expert. + # Static expert_capacity dimension is needed for expert batch sizes + capacity_factor = ( + hparams.moe_capacity_factor_train if train else + hparams.moe_capacity_factor_eval) + expert_capacity = min(s.size, int((s.size * capacity_factor) / x.size)) + expert_capacity = max(expert_capacity, 4) + c = mtf.Dimension("expert_capacity_x", expert_capacity) + + # We "cheat" here and look at the mesh shape and layout. This is to ensure + # that the number of groups (h.size) is a multiple of the mesh dimension + # over which those groups are split. + num_groups, group_size = _split_into_groups( + a0.size * g.size * c.size, + hparams.moe_group_size, + mtf.tensor_dim_to_mesh_dim_size(hparams.layout, hparams.mesh_shape, a0)) + t = mtf.Dimension("group_size_y", group_size) + h0 = mtf.Dimension(a0.name, num_groups) + h = mtf.Dimension(a0.name + "_unsplit", h0.size) + + expert_capacity = min( + t.size, + int((t.size * hparams.moe_capacity_factor_second_level) / y.size)) + expert_capacity = max(expert_capacity, 4) + d = mtf.Dimension("expert_capacity_y", expert_capacity) + + # First level of expert routing + # Reshape the inner batch size to a multiple of group_dim g1 and + # group_size_dim s. + inputs = mtf.reshape(inputs, [a0, g1, s, m]) + + # Get the assignments for the first level. + # dispatch_tensor_x has shape [a0, g1, s, x, c] + if hparams.moe_gating == "top_2": + dispatch_tensor_x, combine_tensor_x, loss_outer = _top_2_gating( + inputs=inputs, + outer_expert_dims=None, + experts_dim=x, + expert_capacity_dim=c, + hparams=hparams, + train=train) + else: + raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating) + + # Now create expert_inputs based on the assignments. 
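+  # (dispatch_tensor_y is mostly zeros, so the einsum below acts as a batched
+  # gather that places each position into its assigned expert's capacity
+  # slot.)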
+ # put num_experts dimension first to make split easier in alltoall + expert_inputs_x = mtf.einsum([inputs, dispatch_tensor_x], [x, a0, g1, c, m]) + + # we construct an "importance" Tensor for the inputs to the second-level + # gating. The importance of an input is 1.0 if it represents the + # first-choice expert-group and 0.5 if it represents the second-choice expert + # group. This is used by the second-level gating. + importance = mtf.reduce_sum(combine_tensor_x, output_shape=[x, a0, g1, c]) + importance = 0.5 * ( + mtf.to_float(mtf.greater(importance, 0.5)) + + mtf.to_float(mtf.greater(importance, 0.0))) + + # First level, all to all. Here we change the split dimension from g1 to x1. + expert_inputs_x = mtf.reshape(expert_inputs_x, mtf.Shape( + [x1, a0, g, c, m])) + importance = mtf.reshape(importance, [x1, a0, g, c]) + + # Second level of expert routing + # Reshape the expert_inputs outer batch dim to be a multiple of group_dim h0 + # and group_size_dim t. + inputs_y = mtf.reshape(expert_inputs_x, [x1, h0, t, m]) + importance = mtf.reshape(importance, [x1, h0, t]) + + # Get the assignments for the second level. + # dispatch_tensor_y has shape [x1, h0, t, y, d] + if hparams.moe_gating == "top_2": + dispatch_tensor_y, combine_tensor_y, loss_inner = _top_2_gating( + inputs=inputs_y, + outer_expert_dims=[x1], + experts_dim=y, + expert_capacity_dim=d, + hparams=hparams, + train=train, + importance=importance) + else: + raise ValueError("unknown hparams.moe_gating=%s" % hparams.moe_gating) + + # Now create expert_inputs based on the assignments. + # put num_experts dimension first to make split easier in alltoall + expert_inputs_y = mtf.einsum([inputs_y, dispatch_tensor_y], [y, x1, h0, d, m]) + + # Second level, all to all. Here we change the split dimension from h0 to y0. + expert_inputs_y = mtf.reshape(expert_inputs_y, mtf.Shape( + [y0, x1, h, d, m])) + + hidden_output = mtf.layers.dense( + expert_inputs_y, hidden_dim, expert_dims=[y0, x1], + activation=mtf.relu, use_bias=False, master_dtype=master_dtype, + slice_dtype=slice_dtype, name="expert0") + expert_output = mtf.layers.dense( + hidden_output, output_dim, expert_dims=[y0, x1], + use_bias=False, master_dtype=master_dtype, slice_dtype=slice_dtype, + name="expert1") + + # NOW COMBINE EXPERT OUTPUTS (reversing everything we have done) + # expert_output has shape [y0, x1, h, d, n] + + # alltoall + expert_output = mtf.reshape(expert_output, mtf.Shape( + [y, x1, h0, d, n])) + + # combine results from inner level + output_y = mtf.einsum([expert_output, combine_tensor_y], [x1, h0, t, n]) + + # Reshape the combined tensor from inner level to now contain outer_batch_dim + # a0 and group_dim g + output = mtf.reshape(output_y, [x1, a0, g, c, n]) + + # alltoall from expert_dim x to group_dim g1 + expert_output_x = mtf.reshape(output, mtf.Shape([x, a0, g1, c, n])) + + # combine results from outer level + output_x = mtf.einsum([expert_output_x, combine_tensor_x], [a0, g1, s, n]) + + # Reshape the combined tensor to now contain inner_batch_dim + # b1 and the original sequence length + output = mtf.reshape(output_x, [a0, b1, l, n]) + if insert_outer_batch_dim: + output = mtf.reshape(output, [b1, l, n]) + return output, (loss_outer + loss_inner) * hparams.moe_loss_coef + + +def _top_2_gating( + inputs, outer_expert_dims, experts_dim, expert_capacity_dim, + hparams, train, importance=None): + """Compute gating for mixture-of-experts in TensorFlow. 
+ + Note: until the algorithm and inferface solidify, we pass in a hyperparameters + dictionary in order not to complicate the interface in mtf_transformer.py . + Once this code moves out of "research", we should pass the hyperparameters + separately. + + Hyperparameters used: + hparams.moe_use_second_place_loss: a boolean + hparams.moe_second_policy_train: a string + hparams.moe_second_policy_eval: a string + hparams.moe_second_threshold: a float + + The returned forward assignment is a tensor used to map (via einsum) from the + inputs to the expert_inputs. Likewise, the returned combine_tensor is + used to map (via einsum) from the expert outputs to the outputs. Both the + forward and backward assignments are mostly zeros. The shapes of the tensors + are as follows. + + inputs: [, group_size_dim, input_dim] + importance: [, group_size_dim] + dispatch_tensor: + [, group_size_dim, experts_dim, expert_capacity_dim] + expert_inputs: + [, experts_dim, expert_capacity_dim, input_dim] + + expert_outputs: [, experts_dim, expert_capacity_dim, output_dim] + combine_tensor: + [, group_size_dim, experts_dim, expert_capacity_dim] + outputs: [, group_size_dim, output_dim] + + "importance" is an optional tensor with one floating-point value for each + input vector. If the importance of an input is 1.0, then we send it to + up to 2 experts. If 0.0 < importance < 1.0, then we send it to at most + one expert. If importance == 0.0, then we send it to no experts. + + We use "importance" at the second-level gating function of a hierarchical + mixture of experts. Inputs to the first-choice expert-group get importance + 1.0. Inputs to the second-choice expert group get importance 0.5. + Inputs that represent padding get importance 0.0. + + Args: + inputs: a mtf.Tensor with shape [, group_size_dim, input_dim] + outer_expert_dims: an optional list of dimensions. This is for the case + where we are at an inner level of a hierarchical MoE. + experts_dim: a Dimension (the number of experts) + expert_capacity_dim: a Dimension (number of examples per group per expert) + hparams: model hyperparameters. + train: a boolean + importance: an optional tensor with shape [, group_size_dim] + + Returns: + dispatch_tensor: a Tensor with shape + [, group_size_dim, experts_dim, expert_capacity_dim] + combine_tensor: a Tensor with shape + [, group_size_dim, experts_dim, expert_capacity_dim] + loss: a mtf scalar + + Raises: + ValueError: on illegal hyperparameters + """ + group_size_dim, unused_input_dim = inputs.shape.dims[-2:] + + raw_gates = mtf.softmax(mtf.layers.dense( + inputs, experts_dim, use_bias=False, + expert_dims=outer_expert_dims), experts_dim) + + # The internals of this function run in float32. + # bfloat16 seems to reduce quality. + raw_gates = mtf.to_float(raw_gates) + + expert_capacity_f = float(expert_capacity_dim.size) + + # FIND TOP 2 EXPERTS PER POSITON + # Find the top expert for each position. 
shape=[batch, group] + index_1, gate_1 = mtf.top_1(raw_gates, experts_dim) + # [batch, group, experts] + mask_1 = mtf.one_hot(index_1, experts_dim, dtype=raw_gates.dtype) + density_1_proxy = raw_gates + if importance is not None: + mask_1 *= mtf.to_float(mtf.equal(importance, 1.0)) + gate_1 *= mtf.to_float(mtf.equal(importance, 1.0)) + density_1_proxy *= mtf.to_float(mtf.equal(importance, 1.0)) + gates_without_top_1 = raw_gates * (1.0 - mask_1) + # [batch, group] + index_2, gate_2 = mtf.top_1(gates_without_top_1, experts_dim) + # [batch, group, experts] + mask_2 = mtf.one_hot(index_2, experts_dim, dtype=raw_gates.dtype) + if importance is not None: + mask_2 *= mtf.to_float(mtf.greater(importance, 0.0)) + + denom = gate_1 + gate_2 + 1e-9 + gate_1 /= denom + gate_2 /= denom + + # BALANCING LOSSES + # shape = [batch, experts] + # We want to equalize the fraction of the batch assigned to each expert + density_1 = mtf.reduce_mean(mask_1, reduced_dim=group_size_dim) + # Something continuous that is correlated with what we want to equalize. + density_1_proxy = mtf.reduce_mean(density_1_proxy, reduced_dim=group_size_dim) + density_1 = mtf.Print( + density_1, [mtf.reduce_mean(density_1, output_shape=[experts_dim])], + "density_1", summarize=1000) + loss = (mtf.reduce_mean(density_1_proxy * density_1) + * float(experts_dim.size * experts_dim.size)) + + if hparams.moe_use_second_place_loss: + # Also add a loss to encourage all experts to be used equally also as the + # second-place expert. Experimentally, this seems to be a wash. + # We want to equalize the fraction of the batch assigned to each expert: + density_2 = mtf.reduce_mean(mask_2, reduced_dim=group_size_dim) + # As a proxy for density_2, we renormalize the raw gates after the top one + # has been removed. + normalized = gates_without_top_1 / ( + mtf.reduce_sum(gates_without_top_1, reduced_dim=experts_dim) + 1e-9) + density_2_proxy = mtf.reduce_mean(normalized, reduced_dim=group_size_dim) + loss_2 = (mtf.reduce_mean(density_2_proxy * density_2) + * float(experts_dim.size * experts_dim.size)) + loss += loss_2 * 0.5 + + # Depending on the policy in the hparams, we may drop out some of the + # second-place experts. + policy = ( + hparams.moe_second_policy_train if train else + hparams.moe_second_policy_eval) + threshold = ( + hparams.moe_second_threshold_train if train else + hparams.moe_second_threshold_eval) + if policy == "all": + # Use second-place experts for all examples. + pass + elif policy == "none": + # Never use second-place experts for all examples. + mask_2 = mtf.zeros_like(mask_2) + elif policy == "threshold": + # Use second-place experts if gate_2 > threshold. + mask_2 *= mtf.to_float(mtf.greater(gate_2, threshold)) + elif policy == "random": + # Use second-place experts with probablity min(1.0, gate_2 / threshold). + mask_2 *= mtf.to_float( + mtf.less(mtf.random_uniform(gate_2.mesh, gate_2.shape), + gate_2 / max(threshold, 1e-9))) + else: + raise ValueError("Unknown policy %s" % policy) + mask_2 = mtf.Print( + mask_2, [mtf.reduce_mean(mask_2, output_shape=[experts_dim])], + "density_2", summarize=1000) + + # COMPUTE ASSIGNMENT TO EXPERTS + # [batch, group, experts] + # This is the position within the expert's mini-batch for this sequence + position_in_expert_1 = mtf.cumsum( + mask_1, group_size_dim, exclusive=True) * mask_1 + # Remove the elements that don't fit. 
[batch, group, experts] + mask_1 *= mtf.to_float(mtf.less(position_in_expert_1, expert_capacity_f)) + # [batch, experts] + # How many examples in this sequence go to this expert + mask_1_count = mtf.reduce_sum(mask_1, reduced_dim=group_size_dim) + # [batch, group] - mostly ones, but zeros where something didn't fit + mask_1_flat = mtf.reduce_sum(mask_1, reduced_dim=experts_dim) + # [batch, group] + position_in_expert_1 = mtf.reduce_sum( + position_in_expert_1, reduced_dim=experts_dim) + # Weight assigned to first expert. [batch, group] + gate_1 *= mask_1_flat + + # [batch, group, experts] + position_in_expert_2 = ( + mtf.cumsum(mask_2, group_size_dim, exclusive=True) + mask_1_count) + position_in_expert_2 *= mask_2 + mask_2 *= mtf.to_float(mtf.less(position_in_expert_2, expert_capacity_f)) + # mask_2_count = mtf.reduce_sum(mask_2, reduced_dim=experts_dim) + mask_2_flat = mtf.reduce_sum(mask_2, reduced_dim=experts_dim) + gate_2 *= mask_2_flat + position_in_expert_2 = mtf.reduce_sum( + position_in_expert_2, reduced_dim=experts_dim) + + # [batch, group, experts, expert_capacity] + combine_tensor = ( + gate_1 * mask_1_flat + * mtf.one_hot(index_1, experts_dim) + * mtf.one_hot(mtf.to_int32(position_in_expert_1), expert_capacity_dim) + + gate_2 * mask_2_flat + * mtf.one_hot(index_2, experts_dim) + * mtf.one_hot(mtf.to_int32(position_in_expert_2), expert_capacity_dim)) + + combine_tensor = mtf.cast(combine_tensor, inputs.dtype) + loss = mtf.cast(loss, inputs.dtype) + + dispatch_tensor = mtf.cast( + mtf.cast(combine_tensor, tf.bool), combine_tensor.dtype) + + return dispatch_tensor, combine_tensor, loss + + +def set_default_moe_hparams(hparams): + """Add necessary hyperparameters for mixture-of-experts.""" + hparams.moe_num_experts = 16 + hparams.moe_loss_coef = 1e-2 + hparams.add_hparam("moe_gating", "top_2") + # Experts have fixed capacity per batch. We need some extra capacity + # in case gating is not perfectly balanced. + # moe_capacity_factor_* should be set to a value >=1. + hparams.add_hparam("moe_capacity_factor_train", 1.25) + hparams.add_hparam("moe_capacity_factor_eval", 2.0) + hparams.add_hparam("moe_capacity_factor_second_level", 1.0) + # Each expert has a hidden layer with this size. + hparams.add_hparam("moe_hidden_size", 4096) + # For gating, divide inputs into groups of this size before gating. + # Each group sends the same number of inputs to each expert. + # Ideally, the group size would be the whole batch, but this is expensive + # due to our use of matrix multiplication for reordering. + hparams.add_hparam("moe_group_size", 1024) + # For top_2 gating, whether to impose an additional loss in order to make + # the experts equally used as the second-place expert. + hparams.add_hparam("moe_use_second_place_loss", 0) + # In top_2 gating, policy for whether to use a second-place expert. + # Legal values are: + # "all": always + # "none": never + # "threshold": if gate value > the given threshold + # "random": if gate value > threshold*random_uniform(0,1) + hparams.add_hparam("moe_second_policy_train", "random") + hparams.add_hparam("moe_second_policy_eval", "random") + hparams.add_hparam("moe_second_threshold_train", 0.2) + hparams.add_hparam("moe_second_threshold_eval", 0.2) + + +def _split_into_groups(n, max_group_size, mesh_dim_size): + """Helper function for figuring out how to split a dimensino into groups. 
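The capacity bookkeeping above (an exclusive cumulative sum numbers each expert's arrivals, and anything numbered at or beyond the capacity is dropped) can be sketched in NumPy as follows. The mask values are made up for illustration.

```python
import numpy as np

expert_capacity = 2
# mask_1 for one group of 5 positions and 3 experts (1.0 = routed there).
mask_1 = np.array([[1., 0., 0.],
                   [1., 0., 0.],
                   [1., 0., 0.],   # third arrival at expert 0 -> will overflow
                   [0., 1., 0.],
                   [0., 0., 1.]])

# Exclusive cumsum along the group axis: how many earlier positions already
# went to the same expert.
position_in_expert = (np.cumsum(mask_1, axis=0) - mask_1) * mask_1

# Drop whatever does not fit into the expert's fixed-size slice.
mask_1 = mask_1 * (position_in_expert < expert_capacity)
print(mask_1[:3, 0])  # [1. 1. 0.] -- the third arrival at expert 0 is dropped
```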
+ + We have a dimension with size n and we want to split it into + two dimensions: n = num_groups * group_size + + group_size should be the largest possible value meeting the constraints: + group_size <= max_group_size + (num_groups = n/group_size) is a multiple of mesh_dim_size + + Args: + n: an integer + max_group_size: an integer + mesh_dim_size: an integer + + Returns: + num_groups: an integer + group_size: an integer + + Raises: + ValueError: if n is not a multiple of mesh_dim_size + """ + if n % mesh_dim_size != 0: + raise ValueError( + "n=%d is not a multiple of mesh_dim_size=%d" % (n, mesh_dim_size)) + num_groups = max(1, n // max_group_size) + while (num_groups % mesh_dim_size != 0 or n % num_groups != 0): + num_groups += 1 + group_size = n // num_groups + tf.logging.info( + "_split_into_groups(n=%d, max_group_size=%d, mesh_dim_size=%d)" + " = (num_groups=%d group_size=%d)" % + (n, max_group_size, mesh_dim_size, num_groups, group_size)) + return num_groups, group_size diff --git a/tensor2tensor/models/research/moe_experiments.py b/tensor2tensor/models/research/moe_experiments.py new file mode 100644 index 000000000..73f3f4f50 --- /dev/null +++ b/tensor2tensor/models/research/moe_experiments.py @@ -0,0 +1,554 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Languaeg modeling experiments in mtf.""" + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.models import mtf_transformer +from tensor2tensor.models import mtf_transformer2 +from tensor2tensor.models.research import moe +from tensor2tensor.utils import registry + + +@registry.register_hparams +def xmoe_tr_dense_2k(): + """Series of architectural experiments on Translation. + + # run on 8-core setup + + 119M params, einsum=0.95e13 + + Returns: + a hparams + """ + hparams = mtf_transformer2.mtf_bitransformer_base() + hparams.encoder_layers = ["self_att", "drd"] * 4 + hparams.decoder_layers = ["self_att", "enc_att", "drd"] * 4 + hparams.batch_size = 64 + hparams.shared_embedding_and_softmax_weights = True + hparams.mesh_shape = "batch:8" + return hparams + + +@registry.register_hparams +def xmoe_tr_dense_32k(): + """Bigger d_ff. + + 623M params, einsum=3.42e13 + + Returns: + a hparams + """ + hparams = xmoe_tr_dense_2k() + hparams.d_ff = 32768 + return hparams + + +@registry.register_hparams +def xmoe_tr_1d(): + """Mixture of experts (16 experts). + + + 623M Params, einsum=1.09e13 + + Returns: + a hparams + """ + hparams = xmoe_tr_dense_2k() + hparams.encoder_layers = ["self_att", "moe_1d"] * 4 + hparams.decoder_layers = ["self_att", "enc_att", "moe_1d"] * 4 + hparams.layout = "batch:batch;experts:batch" + hparams.moe_hidden_size = 2048 + hparams.moe_num_experts = 16 + return hparams + + +@registry.register_hparams +def xmoe_tr_2d(): + """Mixture of experts (16 experts). 
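For concreteness, the grouping rule above can be restated as plain Python (minus the tf.logging call) together with a worked example; the numbers are arbitrary.

```python
def split_into_groups(n, max_group_size, mesh_dim_size):
  """Same algorithm as _split_into_groups above, restated for illustration."""
  if n % mesh_dim_size != 0:
    raise ValueError("n=%d is not a multiple of mesh_dim_size=%d"
                     % (n, mesh_dim_size))
  num_groups = max(1, n // max_group_size)
  # Grow num_groups until it divides n and is a multiple of the mesh dimension.
  while num_groups % mesh_dim_size != 0 or n % num_groups != 0:
    num_groups += 1
  return num_groups, n // num_groups

# 1024 positions, groups of at most 100, mesh dimension of size 8: the first
# candidate (10 groups) is rejected until 16 groups of 64 satisfy both
# constraints.
print(split_into_groups(1024, 100, 8))  # (16, 64)
```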
+ + 623M Params, einsum=1.09e13 + + Returns: + a hparams + """ + hparams = xmoe_tr_dense_2k() + hparams.mesh_shape = "b0:2;b1:4" + hparams.outer_batch_size = 4 + hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0" + hparams.encoder_layers = ["self_att", "moe_2d"] * 4 + hparams.decoder_layers = ["self_att", "enc_att", "moe_2d"] * 4 + hparams.moe_hidden_size = 2048 + hparams.moe_experts_x = 4 + hparams.moe_experts_y = 4 + return hparams + + +@registry.register_hparams +def xmoe_dense_4k(): + """Series of architectural experiments on cheap language models. + + For all of these architectures, we run on languagemodel_lm1b8k_packed + for 32000 steps. + + All log-perplexities are per-token - multiply by 1.298 for per-word + + Results: + model params(M) einsum alltoall mxu-util log-ppl + xmoe_dense_4k 30 3.0e12 0 45% 3.31 + xmoe_dense_8k 46 4.7e12 0 49% 3.24 + xmoe_dense_64k 282 2.8e13 0 3.06 + xmoe_top_2 282 4.0e12 3.4e8 36% 3.07 + xmoe_top_2_c15 282 4.5e12 4.0e8 38% 3.07 + xmoe_2d 282 5.3e12 7.6e8 34% 3.06 + + Trained at 4x the batch size: + xmoe_2d_88 1090 2.1e13 3.0e9 24% 3.07 + + Note: configurations and code are likely to change without notice. + + Returns: + a hparams + """ + hparams = mtf_transformer.mtf_transformer_base_lm() + hparams.attention_dropout = 0.0 + hparams.relu_dropout = 0.0 + hparams.layer_prepostprocess_dropout = 0.0 + + # The following hparams are constant across all these experiments. + hparams.batch_size = 128 + hparams.d_model = 512 + hparams.d_kv = 128 + hparams.num_heads = 4 + hparams.decoder_layers = ["att", "drd"] * 4 + hparams.shared_embedding_and_softmax_weights = False + hparams.learning_rate_schedule = "rsqrt_decay" + + # We will vary the following parameters related to the ffn/moe layers. + hparams.d_ff = 4096 + hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model" + hparams.mesh_shape = "batch:8" + return hparams + + +@registry.register_hparams +def xmoe_dense_8k(): + hparams = xmoe_dense_4k() + hparams.d_ff = 8192 + return hparams + + +@registry.register_hparams +def xmoe_dense_64k(): + """Very wide layer- run on 4x4.""" + hparams = xmoe_dense_4k() + hparams.d_ff = 65536 + hparams.mesh_shape = "model:4,batch:8" + return hparams + + +@registry.register_hparams +def xmoe_top_2(): + """Mixture of experts (16 experts).""" + hparams = xmoe_dense_4k() + moe.set_default_moe_hparams(hparams) + hparams.mesh_shape = "all:8" + hparams.layout = "batch:all;experts:all" + return hparams + + +@registry.register_hparams +def xmoe_top_2_c15(): + """Mixture of experts.""" + hparams = xmoe_top_2() + hparams.moe_capacity_factor_train = 1.5 + return hparams + + +@registry.register_hparams +def xmoe_2d(): + """Two-dimensional hierarchical mixture of 16 experts.""" + hparams = xmoe_top_2() + hparams.decoder_layers = ["att", "hmoe"] * 4 + hparams.mesh_shape = "b0:2;b1:4" + hparams.outer_batch_size = 4 + hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0" + hparams.moe_num_experts = [4, 4] + return hparams + + +@registry.register_hparams +def xmoe_2d_debug(): + """For debugging. + + Running this model on TPU without the hack of casting to bfloat16 for + alltoall results in nan on the first step. 
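These registered configurations build on one another; for example, xmoe_dense_8k only overrides d_ff. Below is a minimal sketch of inspecting one set directly, assuming tensor2tensor and mesh-tensorflow are installed and that the registry decorator leaves the functions callable (its usual behavior); in a real run they are normally selected through the trainer's `--hparams_set` flag instead.

```python
from tensor2tensor.models.research import moe_experiments

hparams = moe_experiments.xmoe_dense_8k()
print(hparams.d_ff)        # 8192, overriding the 4096 set in xmoe_dense_4k
print(hparams.mesh_shape)  # "batch:8", inherited unchanged from xmoe_dense_4k
```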
+ TODO(noam): debug + + Returns: + a hparams + """ + hparams = xmoe_2d() + hparams.decoder_layers = ["hmoe"] * 1 + hparams.activation_dtype = "float32" + return hparams + + +@registry.register_hparams +def xmoe_2d_c15(): + """Mixture of experts.""" + hparams = xmoe_2d() + hparams.moe_capacity_factor_train = 1.5 + return hparams + + +@registry.register_hparams +def xmoe_2d_x64(): + """Two-dimensional hierarchical mixture of 64 experts.""" + hparams = xmoe_2d() + # hparams.mesh_shape = "b0:4;b1:8" + hparams.outer_batch_size = 4 + hparams.moe_num_experts = [8, 8] + return hparams + + +@registry.register_hparams +def xmoe2_dense(sz): + """Series of architectural experiments on language modeling. + + Larger models than the ones above. + + All models are trained on sequences of 1024 tokens. + + We assume infinite training data, so no dropout necessary. + We process 2^36 tokens in training = 524288 steps at batch size 128 + + TODO(noam): find a large enough dataset for these experiments. + + You can use languagemodel_wiki_noref_v32k_l1k, but this is too small, + (1 epoch = ~46000 steps) so training will cover about 11 epochs. + + Note: configurations and code are likely to change without notice. + + Run on TPU 4x4 for 524288 steps unless otherwise indicated. + + Args: + sz: an integer + + Returns: + a hparams + """ + hparams = mtf_transformer.mtf_transformer_paper_lm(sz) + hparams.attention_dropout = 0.0 + hparams.relu_dropout = 0.0 + hparams.layer_prepostprocess_dropout = 0.0 + hparams.max_length = 1024 + hparams.batch_size = 128 + hparams.learning_rate_schedule = "rsqrt_decay*linear_decay" + hparams.learning_rate_decay_steps = 65536 + hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model" + hparams.mesh_shape = "batch:32" + return hparams + + +@registry.register_hparams +def xmoe2_dense_0(): + return xmoe2_dense(0) + + +@registry.register_hparams +def xmoe2_dense_1(): + return xmoe2_dense(1) + + +@registry.register_hparams +def xmoe2_dense_2(): + return xmoe2_dense(2) + + +@registry.register_hparams +def xmoe2_dense_3(): + return xmoe2_dense(3) + + +@registry.register_hparams +def xmoe2_v1(): + """Model incorporating mixture-of-experts and local-attention. + + ~6B parameters + + 32 experts in 3 hierarchichal moe layers. 
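The mesh_shape strings used throughout these experiments (e.g. "batch:32" for a one-dimensional mesh, "b0:2;b1:4" for the two-dimensional meshes of the xmoe_2d variants) name the mesh dimensions and their sizes; the layout string then maps tensor dimensions onto those mesh dimensions. The snippet below only parses the string to show what it denotes; the real parsing lives inside mesh-tensorflow, not in this sketch.

```python
mesh_shape = "b0:2;b1:4"
dims = {name: int(size)
        for name, size in (part.split(":") for part in mesh_shape.split(";"))}
print(dims)            # {'b0': 2, 'b1': 4}

num_cores = 1
for size in dims.values():
  num_cores *= size
print(num_cores)       # 8 cores in a 2x4 mesh

# With moe_num_experts = [4, 4] this gives a 4x4 grid of 16 experts, the
# expert_x dimension split over one mesh axis and expert_y over the other,
# as specified by the layout string.
```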
+ + Returns: + a hparams + """ + hparams = xmoe2_dense(0) + moe.set_default_moe_hparams(hparams) + hparams.decoder_layers = ( + ["local_att", "local_att", "drd", + "att", "drd", "local_att", "local_att", "hmoe"] * 4)[:-1] + hparams.d_ff = 2048 + hparams.d_kv = 128 + hparams.moe_hidden_size = 32768 + hparams.mesh_shape = "b0:4;b1:8" + hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0" + hparams.outer_batch_size = 4 + hparams.moe_num_experts = [8, 4] + hparams.num_heads = 4 + return hparams + + +@registry.register_hparams +def xmoe2_v1_x128(): + """128 experts, ~25B params - Train for 131072 steps on 8x8.""" + hparams = xmoe2_v1() + hparams.moe_num_experts = [16, 8] + hparams.outer_batch_size = 8 + hparams.mesh_shape = "b0:8;b1:16" + hparams.batch_size = 512 + hparams.learning_rate_decay_steps = 16384 + return hparams + + +@registry.register_hparams +def xmoe2_tiny(): + """Test on local cpu.""" + hparams = xmoe2_v1() + hparams.decoder_layers = [ + "local_att", "att", "compressed_att", "drd", "hmoe"] + hparams.d_model = 128 + hparams.moe_hidden_size = 512 + hparams.outer_batch_size = 0 + hparams.batch_size = 2 + hparams.mesh_shape = "" + hparams.activation_dtype = "float32" + return hparams + + +@registry.register_hparams +def xmoe2_v1_l4k(): + """With sequence length 4096.""" + hparams = xmoe2_v1() + hparams.batch_size = 32 + hparams.max_length = 4096 + hparams.split_to_length = 4096 + hparams.reshape_logits_hack = True + return hparams + + +@registry.register_hparams +def xmoe2_v1_l4k_local_only(): + """With sequence length 4096.""" + hparams = xmoe2_v1_l4k() + hparams.decoder_layers = [ + "local_att" if l == "att" else l for l in hparams.decoder_layers] + return hparams + + +@registry.register_hparams +def xmoe2_v1_l4k_global_only(): + """With sequence length 4096.""" + hparams = xmoe2_v1_l4k() + hparams.decoder_layers = [ + "att" if l == "local_att" else l for l in hparams.decoder_layers] + return hparams + + +@registry.register_hparams +def xmoe2_v1_l4k_compressed_c4(): + """With compressed attention.""" + hparams = xmoe2_v1_l4k() + hparams.decoder_layers = [ + "compressed_att" if l == "att" else l for l in hparams.decoder_layers] + hparams.compression_factor = 4 + return hparams + + +@registry.register_hparams +def xmoe2_v1_l4k_compressed_c8(): + """With compressed attention.""" + hparams = xmoe2_v1_l4k_compressed_c4() + hparams.compression_factor = 8 + return hparams + + +@registry.register_hparams +def wiki_2x2_base(): + """Set of architectural experiments - language model on wikipedia on a 2x2. + + 1 epoch = ~180k steps at batch size 32 - we may never finish an epoch! + + Returns: + a hparams + """ + hparams = mtf_transformer.mtf_transformer_base_lm() + hparams.shared_embedding_and_softmax_weights = False + # no dropout - dataset is big enough to avoid overfitting. + hparams.attention_dropout = 0.0 + hparams.relu_dropout = 0.0 + hparams.layer_prepostprocess_dropout = 0.0 + hparams.max_length = 1024 + # 4 sequences per core + hparams.batch_size = 32 + # We don't use linear decay in these experiments, since we don't want + # a sharp jump in quality at the end of the training schedule. + # You can insert this once you find the right architecture. 
+ hparams.learning_rate_schedule = "rsqrt_decay" + hparams.mesh_shape = "all:8" + hparams.layout = "batch:all;experts:all" + + # parameters for mixture-of-experts + moe.set_default_moe_hparams(hparams) + hparams.moe_num_experts = 16 + hparams.moe_hidden_size = 8192 + + hparams.decoder_layers = ["att", "drd"] * 6 + hparams.d_model = 1024 + hparams.d_ff = 2048 + hparams.d_kv = 128 + hparams.num_heads = 4 + + return hparams + + +@registry.register_hparams +def wiki_2x2_v1(): + hparams = wiki_2x2_base() + hparams.decoder_layers = ( + ["local_att", "local_att", "drd", + "att", "drd", "local_att", "local_att", "moe"] * 4)[:-1] + return hparams + + +@registry.register_hparams +def wiki_2x2_local(): + hparams = wiki_2x2_base() + hparams.decoder_layers = ["local_att", "drd"] * 6 + return hparams + + +@registry.register_hparams +def denoise_m15(): + """Denoising experiment.""" + hparams = xmoe2_dense_0() + hparams.decoder_type = "denoising" + hparams.noising_spec_train = {"type": "mask", "prob": 0.15} + return hparams + + +@registry.register_hparams +def denoise_m30(): + """More masking during training.""" + hparams = xmoe2_dense_0() + hparams.decoder_type = "denoising" + hparams.noising_spec_train = {"type": "mask", "prob": 0.3} + return hparams + + +@registry.register_hparams +def denoise_dense_2_m30(): + """More masking during training.""" + hparams = xmoe2_dense_2() + hparams.decoder_type = "denoising" + hparams.noising_spec_train = {"type": "mask", "prob": 0.3} + return hparams + + +@registry.register_hparams +def denoise_z15(): + """Replace tokens instead of masking.""" + hparams = xmoe2_dense_0() + hparams.decoder_type = "denoising" + hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15} + hparams.noising_use_eval_during_train = 0.25 + return hparams + + +@registry.register_hparams +def denoise_t15(): + """Noise up with dropout and a little transformer.""" + hparams = xmoe2_dense_0() + hparams.decoder_type = "denoising" + hparams.noising_spec_train = { + "type": "transformer", + "overrides": { + "noising_spec_train": {"type": "mask", "prob": 0.15}, + "noising_use_eval_during_train": 0.0, + "decoder_layers": ["att", "drd"] * 4, + "num_heads": 4, + "d_model": 512, + "d_ff": 2048, + } + } + return hparams + + +@registry.register_hparams +def denoise_v1_m15(): + """Denoising experiment.""" + hparams = xmoe2_v1() + # no local attention + # TODO(noam): non-masked version of local-attention + hparams.decoder_layers = [ + "att" if l == "local_att" else l for l in hparams.decoder_layers] + hparams.decoder_type = "denoising" + hparams.noising_spec_train = {"type": "mask", "prob": 0.15} + return hparams + + +@registry.register_hparams +def denoise_v1_m30(): + """More masking during training.""" + hparams = denoise_v1_m15() + hparams.noising_spec_train = {"type": "mask", "prob": 0.3} + return hparams + + +@registry.register_hparams +def denoise_v1_m50(): + """More masking during training.""" + hparams = denoise_v1_m15() + hparams.noising_spec_train = {"type": "mask", "prob": 0.5} + return hparams + + +@registry.register_hparams +def denoise_v1_z15(): + """Replace tokens instead of masking.""" + hparams = denoise_v1_m15() + hparams.noising_spec_train = {"type": "random_zipfian", "prob": 0.15} + return hparams + + +@registry.register_hparams +def denoise_v1_t15(): + """Noise up with dropout and a little transformer.""" + hparams = denoise_v1_m15() + hparams.noising_spec_train = { + "type": "transformer", + "overrides": { + "noising_spec_train": {"type": "mask", "prob": 0.15}, + 
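As a rough illustration of what a `{"type": "mask", "prob": 0.15}` noising spec amounts to: each input token is independently replaced with probability 0.15 and the model is trained to reconstruct the original sequence. This is not the T2T noising code, and the sentinel id below is an assumption made only for the example.

```python
import numpy as np

rng = np.random.RandomState(0)
mask_id = 0  # assumed sentinel id, purely for illustration
tokens = np.array([17, 42, 9, 8, 23, 51, 3, 19])

keep = rng.uniform(size=tokens.shape) >= 0.15
noised = np.where(keep, tokens, mask_id)
print(noised)  # most tokens kept, roughly 15% replaced by the mask id
```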
"noising_use_eval_during_train": 0.0, + "decoder_layers": ["att", "drd"] * 4, + "num_heads": 4, + "d_model": 512, + "d_ff": 2048, + } + } + return hparams diff --git a/tensor2tensor/models/research/multiquery_paper.py b/tensor2tensor/models/research/multiquery_paper.py new file mode 100644 index 000000000..9157a5177 --- /dev/null +++ b/tensor2tensor/models/research/multiquery_paper.py @@ -0,0 +1,218 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Experiments for Multiquery-Attention Paper. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.models import mtf_transformer2 +from tensor2tensor.utils import registry + + +@registry.register_hparams +def mqp_ende_base(): + # params=211M + hparams = mtf_transformer2.mtr_tr_dense_0() + hparams.learning_rate_decay_steps = 20000 + hparams.shared_embedding_and_softmax_weights = True + hparams.layer_prepostprocess_dropout = 0.2 + return hparams + + +@registry.register_hparams +def mqp_ende_local(): + hparams = mqp_ende_base() + hparams.decoder_local_attention_radius = 32 + return hparams + + +@registry.register_hparams +def mqp_ende_mq8(): + # params=178M + hparams = mqp_ende_base() + hparams.decoder_num_heads = 8 + hparams.decoder_num_memory_heads = 1 + hparams.encoder_num_heads = 8 + hparams.encoder_num_memory_heads = 1 + return hparams + + +@registry.register_hparams +def mqp_ende_mq8_ff5440(): + # params=211M + hparams = mqp_ende_mq8() + hparams.d_ff = 5440 + return hparams + + +@registry.register_hparams +def mqp_ende_mq8_ff5440_local(): + hparams = mqp_ende_mq8_ff5440() + hparams.decoder_local_attention_radius = 32 + return hparams + + +@registry.register_hparams +def mqp_ende_h4_kv256(): + hparams = mqp_ende_base() + hparams.decoder_num_heads = 4 + hparams.encoder_num_heads = 4 + hparams.d_kv = 256 + return hparams + + +@registry.register_hparams +def mqp_ende_h2_kv512(): + hparams = mqp_ende_base() + hparams.decoder_num_heads = 2 + hparams.encoder_num_heads = 2 + hparams.d_kv = 512 + return hparams + + +@registry.register_hparams +def mqp_ende_h1_kv1024(): + hparams = mqp_ende_base() + hparams.decoder_num_heads = 1 + hparams.encoder_num_heads = 1 + hparams.d_kv = 1024 + return hparams + + +@registry.register_hparams +def mqp_ende_h4_ff5632(): + hparams = mqp_ende_base() + hparams.decoder_num_heads = 4 + hparams.encoder_num_heads = 4 + hparams.d_ff = 5632 + return hparams + + +@registry.register_hparams +def mqp_ende_h2_ff6400(): + hparams = mqp_ende_base() + hparams.decoder_num_heads = 2 + hparams.encoder_num_heads = 2 + hparams.d_ff = 6400 + return hparams + + +@registry.register_hparams +def mqp_ende_h1_ff6784(): + hparams = mqp_ende_base() + hparams.decoder_num_heads = 1 + hparams.encoder_num_heads = 1 + hparams.d_ff = 6784 + return hparams + + +@registry.register_hparams +def mqp_ende_h2_kv64_ff6784(): + hparams = mqp_ende_base() + hparams.decoder_num_heads = 2 + 
hparams.encoder_num_heads = 2 + hparams.d_kv = 64 + hparams.d_ff = 6784 + return hparams + + +@registry.register_hparams +def mqp_ende_h4_kv32_ff6784(): + hparams = mqp_ende_base() + hparams.decoder_num_heads = 4 + hparams.encoder_num_heads = 4 + hparams.d_kv = 32 + hparams.d_ff = 6784 + return hparams + + +@registry.register_hparams +def mqp_ende_h8_kv16_ff6784(): + hparams = mqp_ende_base() + hparams.decoder_num_heads = 8 + hparams.encoder_num_heads = 8 + hparams.d_kv = 16 + return hparams + + +@registry.register_hparams +def mqp_lm1b_base(): + """Series of architectures for language modeling.""" + hparams = mtf_transformer2.mtf_unitransformer_base() + hparams.d_model = 1024 + hparams.max_length = 256 + hparams.batch_size = 256 + # Parameters for my_layer_stack() + hparams.num_hidden_layers = 6 + hparams.d_ff = 8192 + hparams.d_kv = 128 + hparams.num_heads = 8 + hparams.learning_rate_decay_steps = 13600 + hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model" + hparams.mesh_shape = "batch:32" + return hparams + + +@registry.register_hparams +def mqp_lm1b_mq8(): + hparams = mqp_lm1b_base() + hparams.num_heads = 8 + hparams.num_memory_heads = 1 + return hparams + + +@registry.register_hparams +def mqp_lm1b_mq8_ff9088(): + hparams = mqp_lm1b_mq8() + hparams.d_ff = 9088 + return hparams + + +@registry.register_hparams +def mqp_lm1b_h1_ff9984(): + hparams = mqp_lm1b_base() + hparams.num_heads = 1 + hparams.d_ff = 9984 + return hparams + + +@registry.register_hparams +def mqp_lm1b_h2_kv64_ff9984(): + hparams = mqp_lm1b_base() + hparams.num_heads = 2 + hparams.d_kv = 64 + hparams.d_ff = 9984 + return hparams + + +@registry.register_hparams +def mqp_lm1b_h4_kv32_ff9984(): + hparams = mqp_lm1b_base() + hparams.num_heads = 4 + hparams.d_kv = 32 + hparams.d_ff = 9984 + return hparams + + +@registry.register_hparams +def mqp_lm1b_h8_kv16_ff9984(): + hparams = mqp_lm1b_base() + hparams.num_heads = 8 + hparams.d_kv = 16 + hparams.d_ff = 9984 + return hparams diff --git a/tensor2tensor/models/research/neural_stack.py b/tensor2tensor/models/research/neural_stack.py new file mode 100644 index 000000000..0c5464bd9 --- /dev/null +++ b/tensor2tensor/models/research/neural_stack.py @@ -0,0 +1,661 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Stacks and Queues implemented as encoder-decoder models. 
+ +Based off of the following research: + +Learning to Transduce with Unbounded Memory +Edward Grefenstette, Karl Moritz Hermann, Mustafa Suleyman, Phil Blunsom +https://arxiv.org/abs/1506.02516, 2015 + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf + +# This is the interface between the RNN controller and the neural stack. +NeuralStackControllerInterface = collections.namedtuple( + "NeuralStackControllerInterface", + "push_strengths, pop_strengths, write_values, outputs, state") + +# This is recurrent state of the neural stack RNN cell. +NeuralStackState = collections.namedtuple( + "NeuralStackState", + "controller_state, read_values, memory_values, read_strengths, " + + "write_strengths") + + +class NeuralStackCell(tf.nn.rnn_cell.RNNCell): + """An RNN cell base class that can implement a stack or queue. + """ + + def __init__(self, num_units, memory_size, embedding_size, + num_read_heads=1, num_write_heads=1, reuse=None): + """Create a new NeuralStackCell. + + Args: + num_units: The number of hidden units in the RNN cell. + memory_size: The maximum memory size allocated for the stack. + embedding_size: The embedding width of the individual stack values. + num_read_heads: This should always be 1 for a regular stack. + num_write_heads: This should always be 1 for a regular stack. + reuse: Whether to reuse the weights. + """ + super(NeuralStackCell, self).__init__(dtype=tf.float32, _reuse=reuse) + self._num_units = num_units + self._embedding_size = embedding_size + self._memory_size = memory_size + self._num_read_heads = num_read_heads + self._num_write_heads = num_write_heads + + @property + def state_size(self): + """The NeuralStackCell maintains a tuple of state values. + + Returns: + (controller_state.shape, + read_values.shape, + memory_values.shape, + read_strengths.shape, + write_strengths.shape) + """ + return (tf.TensorShape([self._num_units]), + tf.TensorShape([self._num_read_heads, self._embedding_size]), + tf.TensorShape([self._memory_size, self._embedding_size]), + tf.TensorShape([1, self._memory_size, 1]), + tf.TensorShape([self._num_write_heads, self._memory_size, 1])) + + @property + def output_size(self): + return tf.TensorShape([1, self._embedding_size]) + + def initialize_write_strengths(self, batch_size): + """Initialize write strengths to write to the first memory address. + + This is exposed as its own function so that it can be overridden to provide + alternate write adressing schemes. + + Args: + batch_size: The size of the current batch. + + Returns: + A tf.float32 tensor of shape [num_write_heads, memory_size, 1] where the + first element in the second dimension is set to 1.0. + """ + return tf.expand_dims( + tf.one_hot([[0] * self._num_write_heads] * batch_size, + depth=self._memory_size, dtype=tf.float32), axis=3) + + def zero_state(self, batch_size, dtype): + """Initialize the tuple of state values to zeros except write strengths. + + Args: + batch_size: The size of the current batch. + dtype: The default datatype to initialize to. + + Returns: + A new NeuralStackState tuple. 
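A toy NumPy illustration (not the cell itself) of what initialize_write_strengths produces: one write head per example, pointing at memory address 0.

```python
import numpy as np

batch_size, num_write_heads, memory_size = 2, 1, 4
head_positions = np.zeros((batch_size, num_write_heads), dtype=int)  # address 0
one_hot = np.eye(memory_size)[head_positions]        # [batch, write_heads, memory]
write_strengths = one_hot[..., np.newaxis]           # [batch, write_heads, memory, 1]
print(write_strengths.shape)                          # (2, 1, 4, 1)
print(write_strengths[0, 0, :, 0])                    # [1. 0. 0. 0.]
```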
+ """ + parent_state = NeuralStackState(*super(NeuralStackCell, self).zero_state( + batch_size, dtype)) + return NeuralStackState( + controller_state=parent_state.controller_state, + read_values=parent_state.read_values, + memory_values=parent_state.memory_values, + read_strengths=parent_state.read_strengths, + write_strengths=self.initialize_write_strengths(batch_size)) + + def get_read_mask(self, read_head_index): + """Creates a mask which allows us to attenuate subsequent read strengths. + + This is exposed as its own function so that it can be overridden to provide + alternate read adressing schemes. + + Args: + read_head_index: Identifies which read head we're getting the mask for. + + Returns: + A tf.float32 tensor of shape [1, 1, memory_size, memory_size] + """ + if read_head_index == 0: + return tf.expand_dims( + common_layers.mask_pos_lt(self._memory_size, self._memory_size), + axis=0) + else: + raise ValueError("Read head index must be 0 for stack.") + + def get_write_head_offset(self, write_head_index): + """Lookup the offset to shift the write head at each step. + + By default, we move each write head forward by 1. + + This is exposed as its own function so that it can be overridden to provide + alternate write adressing schemes. + + Args: + write_head_index: Identifies which write head we're getting the index for. + + Returns: + An integer offset to move the write head at each step. + """ + if write_head_index == 0: + return 1 + else: + raise ValueError("Write head index must be 0 for stack.") + + def add_scalar_projection(self, name, size): + """A helper function for mapping scalar controller outputs. + + Args: + name: A prefix for the variable names. + size: The desired number of scalar outputs. + + Returns: + A tuple of (weights, bias) where weights has shape [num_units, size] and + bias has shape [size]. + """ + weights = self.add_variable( + name + "_projection_weights", + shape=[self._num_units, size], + dtype=self.dtype) + bias = self.add_variable( + name + "_projection_bias", + shape=[size], + initializer=tf.zeros_initializer(dtype=self.dtype)) + return weights, bias + + def add_vector_projection(self, name, size): + """A helper function for mapping embedding controller outputs. + + Args: + name: A prefix for the variable names. + size: The desired number of embedding outputs. + + Returns: + A tuple of (weights, bias) where weights has shape + [num_units, size * embedding_size] and bias has shape + [size * embedding_size]. + """ + weights = self.add_variable( + name + "_projection_weights", + shape=[self._num_units, size * self._embedding_size], + dtype=self.dtype) + bias = self.add_variable( + name + "_projection_bias", + shape=[size * self._embedding_size], + initializer=tf.zeros_initializer(dtype=self.dtype)) + return weights, bias + + def build_controller(self): + """Create the RNN and output projections for controlling the stack. 
+ """ + with tf.name_scope("controller"): + self.rnn = contrib.rnn().BasicRNNCell(self._num_units) + self._input_proj = self.add_variable( + "input_projection_weights", + shape=[self._embedding_size * (self._num_read_heads + 1), + self._num_units], + dtype=self.dtype) + self._input_bias = self.add_variable( + "input_projection_bias", + shape=[self._num_units], + initializer=tf.zeros_initializer(dtype=self.dtype)) + self._push_proj, self._push_bias = self.add_scalar_projection( + "push", self._num_write_heads) + self._pop_proj, self._pop_bias = self.add_scalar_projection( + "pop", self._num_write_heads) + self._value_proj, self._value_bias = self.add_vector_projection( + "value", self._num_write_heads) + self._output_proj, self._output_bias = self.add_vector_projection( + "output", 1) + + def build(self, _): + """Build the controller. + """ + self.build_controller() + self.built = True + + def get_controller_shape(self, batch_size): + """Define the output shapes of the neural stack controller. + + Making this a separate functions so that it can be used in unit tests. + + Args: + batch_size: The size of the current batch of data. + + Returns: + A tuple of shapes for each output returned from the controller. + """ + return ( + # push_strengths, + [batch_size, self._num_write_heads, 1, 1], + # pop_strengths + [batch_size, self._num_write_heads, 1, 1], + # write_values + [batch_size, self._num_write_heads, self._embedding_size], + # outputs + [batch_size, 1, self._embedding_size], + # state + [batch_size, self._num_units]) + + def call_controller(self, input_value, read_values, prev_state, batch_size): + """Make a call to the neural stack controller. + + See Section 3.1 of Grefenstette et al., 2015. + + Args: + input_value: The input to the neural stack cell should be a tf.float32 + tensor with shape [batch_size, 1, embedding_size] + read_values: The values of the read heads at the previous timestep. + prev_state: The hidden state from the previous time step. + batch_size: The size of the current batch of input values. + + Returns: + A tuple of outputs and the new NeuralStackControllerInterface. + """ + with tf.name_scope("controller"): + # Concatenate the current input value with the read values from the + # previous timestep before feeding them into the controller. + controller_inputs = tf.concat([ + contrib.layers().flatten(input_value), + contrib.layers().flatten(read_values), + ], + axis=1) + + rnn_input = tf.tanh(tf.nn.bias_add(tf.matmul( + controller_inputs, self._input_proj), self._input_bias)) + + (rnn_output, state) = self.rnn(rnn_input, prev_state) + + push_strengths = tf.sigmoid(tf.nn.bias_add(tf.matmul( + rnn_output, self._push_proj), self._push_bias)) + + pop_strengths = tf.sigmoid(tf.nn.bias_add(tf.matmul( + rnn_output, self._pop_proj), self._pop_bias)) + + write_values = tf.tanh(tf.nn.bias_add(tf.matmul( + rnn_output, self._value_proj), self._value_bias)) + + outputs = tf.tanh(tf.nn.bias_add(tf.matmul( + rnn_output, self._output_proj), self._output_bias)) + + # Reshape all the outputs according to the shapes specified by + # get_controller_shape() + projected_outputs = [push_strengths, + pop_strengths, + write_values, + outputs, + state] + next_state = [ + tf.reshape(output, shape=output_shape) for output, output_shape + in zip(projected_outputs, self.get_controller_shape(batch_size))] + return NeuralStackControllerInterface(*next_state) + + def call(self, inputs, prev_state): + """Evaluates one timestep of the current neural stack cell. 
+ + See section 3.4 of Grefenstette et al., 2015. + + Args: + inputs: The inputs to the neural stack cell should be a tf.float32 tensor + with shape [batch_size, embedding_size] + prev_state: The NeuralStackState from the previous timestep. + + Returns: + A tuple of the output of the stack as well as the new NeuralStackState. + """ + batch_size = tf.shape(inputs)[0] + + # Call the controller and get controller interface values. + with tf.control_dependencies([prev_state.read_strengths]): + controller_output = self.call_controller( + inputs, prev_state.read_values, prev_state.controller_state, + batch_size) + + # Always write input values to memory regardless of push strength. + # See Equation-1 in Grefenstette et al., 2015. + new_memory_values = prev_state.memory_values + tf.reduce_sum( + tf.expand_dims(controller_output.write_values, axis=2) * + prev_state.write_strengths, + axis=1) + + # Attenuate the read strengths of existing memory values depending on the + # current pop strength. + # See Equation-2 in Grefenstette et al., 2015. + new_read_strengths = prev_state.read_strengths + for h in range(self._num_read_heads - 1, -1, -1): + new_read_strengths = tf.nn.relu(new_read_strengths - tf.nn.relu( + tf.slice(controller_output.pop_strengths, + [0, h, 0, 0], + [-1, 1, -1, -1]) - + tf.expand_dims( + tf.reduce_sum(new_read_strengths * self.get_read_mask(h), axis=2), + axis=3))) + + # Combine all write heads and their associated push values into a single set + # of read weights. + new_read_strengths += tf.reduce_sum( + controller_output.push_strengths * prev_state.write_strengths, + axis=1, keep_dims=True) + + # Calculate the "top" value of the stack by looking at read strengths. + # See Equation-3 in Grefenstette et al., 2015. + new_read_values = tf.reduce_sum( + tf.minimum( + new_read_strengths, + tf.nn.relu(1 - tf.expand_dims( + tf.reduce_sum( + new_read_strengths * tf.concat([ + self.get_read_mask(h) + for h in range(self._num_read_heads) + ], axis=1), + axis=2), + axis=3)) + ) * tf.expand_dims(new_memory_values, axis=1), + axis=2) + + # Temporarily split write strengths apart so they can be shifted in + # different directions. + write_strengths_by_head = tf.split(prev_state.write_strengths, + self._num_write_heads, + axis=1) + # Shift the write strengths for each write head in the direction indicated + # by get_write_head_offset(). + new_write_strengths = tf.concat([ + tf.roll(write_strength, shift=self.get_write_head_offset(h), axis=2) + for h, write_strength in enumerate(write_strengths_by_head) + ], axis=1) + + return (controller_output.outputs, NeuralStackState( + controller_state=controller_output.state, + read_values=new_read_values, + memory_values=new_memory_values, + read_strengths=new_read_strengths, + write_strengths=new_write_strengths)) + + +class NeuralQueueCell(NeuralStackCell): + """An subclass of the NeuralStackCell which reads from the opposite direction. + + See section 3.2 of Grefenstette et al., 2015. + """ + + def get_read_mask(self, read_head_index): + """Uses mask_pos_lt() instead of mask_pos_gt() to reverse read values. + + Args: + read_head_index: Identifies which read head we're getting the mask for. + + Returns: + A tf.float32 tensor of shape [1, 1, memory_size, memory_size]. 
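The batched, mask-based update in call() above implements the stack recurrences of Grefenstette et al. (2015). Below is a minimal single-example NumPy sketch of those recurrences, using growing Python lists instead of a fixed-size memory and toy values throughout; it is for intuition only.

```python
import numpy as np

def relu(x):
  return np.maximum(x, 0.0)

def stack_step(values, strengths, v_t, push, pop):
  """One timestep: pop with strength `pop`, then push v_t with strength `push`."""
  # Attenuate existing read strengths, consuming strength from the top down.
  new_strengths = []
  for i, s_i in enumerate(strengths):
    above = sum(strengths[i + 1:])
    new_strengths.append(relu(s_i - relu(pop - above)))
  # Write the new value with the push strength.
  values = values + [v_t]
  strengths = new_strengths + [push]
  # Read the (soft) top of the stack.
  read = np.zeros_like(v_t)
  for i, (v_i, s_i) in enumerate(zip(values, strengths)):
    above = sum(strengths[i + 1:])
    read = read + min(s_i, relu(1.0 - above)) * v_i
  return values, strengths, read

values, strengths = [], []
a, b = np.array([1.0, 0.0]), np.array([0.0, 1.0])
values, strengths, read = stack_step(values, strengths, a, push=1.0, pop=0.0)
values, strengths, read = stack_step(values, strengths, b, push=1.0, pop=0.0)
values, strengths, read = stack_step(values, strengths, b * 0, push=0.0, pop=1.0)
print(read)  # ~[1. 0.]: after push(a), push(b), pop(), the top is a again
```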
+ """ + if read_head_index == 0: + return tf.expand_dims( + common_layers.mask_pos_gt(self._memory_size, self._memory_size), + axis=0) + else: + raise ValueError("Read head index must be 0 for queue.") + + +class NeuralDequeCell(NeuralStackCell): + """An subclass of the NeuralStackCell which reads/writes in both directions. + + See section 3.3 of Grefenstette et al., 2015. + """ + + def __init__(self, num_units, memory_size, embedding_size, reuse=None): + # Override constructor to set 2 read/write heads. + super(NeuralDequeCell, self).__init__(num_units, + memory_size, + embedding_size, + num_read_heads=2, + num_write_heads=2, + reuse=reuse) + + def get_read_mask(self, read_head_index): + if read_head_index == 0: + # Use the same read mask as the queue for the bottom of the deque. + return tf.expand_dims( + common_layers.mask_pos_gt(self._memory_size, self._memory_size), + axis=0) + elif read_head_index == 1: + # Use the same read mask as the stack for the top of the deque. + return tf.expand_dims( + common_layers.mask_pos_lt(self._memory_size, self._memory_size), + axis=0) + else: + raise ValueError("Read head index must be either 0 or 1 for deque.") + + def get_write_head_offset(self, write_head_index): + if write_head_index == 0: + # Move the bottom write position back at each timestep. + return -1 + elif write_head_index == 1: + # Move the top write position forward at each timestep. + return 1 + else: + raise ValueError("Write head index must be 0 or 1 for deque.") + + def initialize_write_strengths(self, batch_size): + """Initialize write strengths which write in both directions. + + Unlike in Grefenstette et al., It's writing out from the center of the + memory so that it doesn't need to shift the entire memory forward at each + step. + + Args: + batch_size: The size of the current batch. + + Returns: + A tf.float32 tensor of shape [num_write_heads, memory_size, 1]. + """ + memory_center = self._memory_size // 2 + return tf.expand_dims( + tf.concat([ + # The write strength for the deque bottom. + # Should be shifted back at each timestep. + tf.one_hot([[memory_center - 1]] * batch_size, + depth=self._memory_size, dtype=tf.float32), + # The write strength for the deque top. + # Should be shifted forward at each timestep. + tf.one_hot([[memory_center]] * batch_size, + depth=self._memory_size, dtype=tf.float32) + ], axis=1), axis=3) + + +@registry.register_model +class NeuralStackModel(t2t_model.T2TModel): + """An encoder-decoder T2TModel that uses NeuralStackCells. + """ + + def cell(self, hidden_size): + """Build an RNN cell. + + This is exposed as its own function so that it can be overridden to provide + different types of RNN cells. + + Args: + hidden_size: The hidden size of the cell. + + Returns: + A new RNNCell with the given hidden size. + """ + return NeuralStackCell(hidden_size, + self._hparams.memory_size, + self._hparams.embedding_size) + + def _rnn(self, inputs, name, initial_state=None, sequence_length=None): + """A helper method to build tf.nn.dynamic_rnn. + + Args: + inputs: The inputs to the RNN. A tensor of shape + [batch_size, max_seq_length, embedding_size] + name: A namespace for the RNN. + initial_state: An optional initial state for the RNN. + sequence_length: An optional sequence length for the RNN. + + Returns: + A tf.nn.dynamic_rnn operator. 
+ """ + layers = [self.cell(layer_size) + for layer_size in self._hparams.controller_layer_sizes] + with tf.variable_scope(name): + return tf.nn.dynamic_rnn( + contrib.rnn().MultiRNNCell(layers), + inputs, + initial_state=initial_state, + sequence_length=sequence_length, + dtype=tf.float32, + time_major=False) + + def body(self, features): + """Build the main body of the model. + + Args: + features: A dict of "inputs" and "targets" which have already been passed + through an embedding layer. Inputs should have shape + [batch_size, max_seq_length, 1, embedding_size]. Targets should have + shape [batch_size, max_seq_length, 1, 1] + + Returns: + The logits which get passed to the top of the model for inference. + A tensor of shape [batch_size, seq_length, 1, embedding_size] + """ + inputs = features.get("inputs") + targets = features["targets"] + + if inputs is not None: + inputs = common_layers.flatten4d3d(inputs) + _, final_encoder_state = self._rnn(tf.reverse(inputs, axis=[1]), + "encoder") + else: + final_encoder_state = None + + shifted_targets = common_layers.shift_right(targets) + decoder_outputs, _ = self._rnn( + common_layers.flatten4d3d(shifted_targets), + "decoder", + initial_state=final_encoder_state) + return decoder_outputs + + +@registry.register_model +class NeuralQueueModel(NeuralStackModel): + """Subcalss of NeuralStackModel which implements a queue. + """ + + def cell(self, hidden_size): + """Build a NeuralQueueCell instead of a NeuralStackCell. + + Args: + hidden_size: The hidden size of the cell. + + Returns: + A new NeuralQueueCell with the given hidden size. + """ + return NeuralQueueCell(hidden_size, + self._hparams.memory_size, + self._hparams.embedding_size) + + +@registry.register_model +class NeuralDequeModel(NeuralStackModel): + """Subclass of NeuralStackModel which implements a double-ended queue. + """ + + def cell(self, hidden_size): + """Build a NeuralDequeCell instead of a NeuralStackCell. + + Args: + hidden_size: The hidden size of the cell. + + Returns: + A new NeuralDequeCell with the given hidden size. 
+ """ + return NeuralDequeCell(hidden_size, + self._hparams.memory_size, + self._hparams.embedding_size) + + +@registry.register_hparams +def lstm_transduction(): + """HParams for LSTM base on transduction tasks.""" + hparams = common_hparams.basic_params1() + hparams.daisy_chain_variables = False + hparams.batch_size = 10 + hparams.clip_grad_norm = 1.0 + hparams.hidden_size = 128 + hparams.num_hidden_layers = 4 + hparams.initializer = "uniform_unit_scaling" + hparams.initializer_gain = 1.0 + hparams.optimizer = "RMSProp" + hparams.learning_rate = 0.01 + hparams.weight_decay = 0.0 + + hparams.add_hparam("memory_size", 128) + hparams.add_hparam("embedding_size", 32) + return hparams + + +@registry.register_hparams +def neural_stack(): + """HParams for neural stacks and queues.""" + hparams = common_hparams.basic_params1() + hparams.daisy_chain_variables = False + hparams.batch_size = 10 + hparams.clip_grad_norm = 1.0 + hparams.initializer = "uniform_unit_scaling" + hparams.initializer_gain = 1.0 + hparams.optimizer = "RMSProp" + hparams.learning_rate = 0.0001 + hparams.weight_decay = 0.0 + + hparams.add_hparam("controller_layer_sizes", [256, 512]) + hparams.add_hparam("memory_size", 128) + hparams.add_hparam("embedding_size", 64) + hparams.hidden_size = hparams.embedding_size + return hparams + + +@registry.register_hparams +def neural_deque(): + """HParams for neural deques.""" + hparams = common_hparams.basic_params1() + hparams.daisy_chain_variables = False + hparams.batch_size = 10 + hparams.clip_grad_norm = 1.0 + hparams.initializer = "uniform_unit_scaling" + hparams.initializer_gain = 1.0 + hparams.optimizer = "RMSProp" + hparams.learning_rate = 0.0001 + hparams.weight_decay = 0.0 + + hparams.add_hparam("controller_layer_sizes", [256, 512]) + hparams.add_hparam("memory_size", 256) + hparams.add_hparam("embedding_size", 64) + hparams.hidden_size = hparams.embedding_size + return hparams diff --git a/tensor2tensor/models/research/neural_stack_test.py b/tensor2tensor/models/research/neural_stack_test.py new file mode 100644 index 000000000..83f45ab68 --- /dev/null +++ b/tensor2tensor/models/research/neural_stack_test.py @@ -0,0 +1,433 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests NeuralStackCell, NeuralQueueCell and NeuralStackModel.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import mock +import numpy as np + +from tensor2tensor.layers import modalities +from tensor2tensor.models.research import neural_stack +from tensor2tensor.utils import contrib + +import tensorflow.compat.v1 as tf + + +def build_fake_controller(cell): + """Create a scalar variable to track the timestep. + + Args: + cell: The NeuralStackCell to add the variable to. 
+ """ + cell.current_step = cell.add_variable( + "current_step", [], + initializer=tf.constant_initializer(-1), + dtype=tf.int32, + trainable=False) + + +def call_fake_controller(push_values, pop_values, write_values, output_values): + """Mock a RNN controller from a set of expected outputs. + + Args: + push_values: Expected controller push values. + pop_values: Expected controller pop values. + write_values: Expected controller write values. + output_values: Expected controller output values. + + Returns: + A callable which behaves like the call method of an NeuralStackCell. + """ + def call(cell, inputs, prev_read_values, controller_state, batch_size): + del inputs + del prev_read_values + del batch_size + next_step = tf.constant(0) + if hasattr(cell, "current_step"): + next_step = tf.assign_add(cell.current_step, tf.constant(1)) + return neural_stack.NeuralStackControllerInterface( + push_strengths=tf.slice(tf.constant(push_values), + [next_step, 0, 0, 0], + [1, -1, -1, -1]), + pop_strengths=tf.slice(tf.constant(pop_values), + [next_step, 0, 0, 0], + [1, -1, -1, -1]), + write_values=tf.slice(tf.constant(write_values), + [next_step, 0, 0], + [1, -1, -1]), + outputs=tf.slice(tf.constant(output_values), + [next_step, 0, 0], + [1, -1, -1]), + state=controller_state + ) + return call + + +def assert_controller_shapes(test, controller_outputs, controller_shapes): + for name, output, shape in zip(controller_outputs._fields, controller_outputs, + controller_shapes): + test.assertEqual(shape, output.shape, "%s shapes don't match" % name) + + +def assert_cell_shapes(test, output_state, zero_state): + for name, output, zero in zip(output_state._fields, output_state, + zero_state): + test.assertEqual(zero.shape, output.shape, "%s shapes don't match" % name) + + +class NeuralStackCellTest(tf.test.TestCase): + + def test_cell_shapes(self): + """Check that all the NeuralStackCell tensor shapes are correct. + """ + batch_size = 5 + embedding_size = 3 + memory_size = 6 + num_units = 8 + + stack = neural_stack.NeuralStackCell(num_units, memory_size, embedding_size) + stack.build(None) + + self.assertEqual([1, 1, memory_size, memory_size], + stack.get_read_mask(0).shape) + + stack_input = tf.zeros([batch_size, 1, embedding_size], dtype=tf.float32) + zero_state = stack.zero_state(batch_size, tf.float32) + (outputs, (stack_next_state)) = stack.call(stack_input, zero_state) + + # Make sure that stack output shapes match stack input shapes + self.assertEqual(outputs.shape, stack_input.shape) + + assert_cell_shapes(self, stack_next_state, zero_state) + + @mock.patch.object(neural_stack.NeuralStackCell, "build_controller", + build_fake_controller) + @mock.patch.object(neural_stack.NeuralStackCell, "call_controller", + call_fake_controller( + push_values=[[[[1.0]]], [[[1.0]]], [[[0.0]]]], + pop_values=[[[[0.0]]], [[[0.0]]], [[[1.0]]]], + write_values=[[[1.0, 0.0, 0.0]], + [[0.0, 1.0, 0.0]], + [[0.0, 0.0, 1.0]]], + output_values=[[[0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0]]])) + def test_push_pop(self): + """Test pushing a popping from a NeuralStackCell. 
+ + The sequence of operations is: + push([1.0, 0.0, 0.0]) + push([0.0, 1.0, 0.0]) + pop() + """ + input_values = np.array([[[[1.0, 0.0, 0.0]], + [[0.0, 1.0, 0.0]], + [[0.0, 0.0, 1.0]]]]) + + expected_values = np.array([[[1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + [0.0, 0.0, 1.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0]]]) + expected_read_strengths = np.array([ + [[[1.0], [0.0], [0.0], [0.0], [0.0], [0.0]]]]) + expected_write_strengths = np.array([ + [[[0.0], [0.0], [0.], [1.0], [0.0], [0.0]]]]) + expected_top = np.array([[[1.0, 0.0, 0.0]]]) + + batch_size = 1 + embedding_size = 3 + memory_size = 6 + num_units = 8 + + stack = neural_stack.NeuralStackCell(num_units, memory_size, embedding_size) + stack_input = tf.constant(input_values, dtype=tf.float32) + + stack_zero_state = tf.zeros([batch_size, num_units]) + controller_outputs = stack.call_controller(None, None, stack_zero_state, + batch_size) + assert_controller_shapes(self, controller_outputs, + stack.get_controller_shape(batch_size)) + + (outputs, state) = tf.nn.dynamic_rnn(cell=stack, + inputs=stack_input, + time_major=False, + dtype=tf.float32) + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + _, state_vals = sess.run([outputs, state]) + (_, stack_top, values, read_strengths, write_strengths) = state_vals + + self.assertAllClose(expected_values, values) + self.assertAllClose(expected_write_strengths, write_strengths) + self.assertAllClose(expected_read_strengths, read_strengths) + self.assertAllClose(expected_top, stack_top) + + +class NeuralQueueCellTest(tf.test.TestCase): + + @mock.patch.object(neural_stack.NeuralQueueCell, "build_controller", + build_fake_controller) + @mock.patch.object(neural_stack.NeuralQueueCell, "call_controller", + call_fake_controller( + push_values=[[[[1.0]]], [[[1.0]]], [[[0.0]]]], + pop_values=[[[[0.0]]], [[[0.0]]], [[[1.0]]]], + write_values=[[[1.0, 0.0, 0.0]], + [[0.0, 1.0, 0.0]], + [[0.0, 0.0, 1.0]]], + output_values=[[[0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0]]])) + def test_enqueue_dequeue(self): + """Test enqueueing a dequeueing from a NeuralQueueCell. 
+ + The sequence of operations is: + enqueue([1.0, 0.0, 0.0]) + enqueue([0.0, 1.0, 0.0]) + dequeue() + """ + input_values = np.array([[[[1.0, 0.0, 0.0]], + [[0.0, 1.0, 0.0]], + [[0.0, 0.0, 1.0]]]]) + expected_values = np.array([[[1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + [0.0, 0.0, 1.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0], + [0.0, 0.0, 0.0]]]) + expected_read_strengths = np.array([ + [[[0.0], [1.0], [0.0], [0.0], [0.0], [0.0]]]]) + expected_write_strengths = np.array([ + [[[0.0], [0.0], [0.0], [1.0], [0.0], [0.0]]]]) + expected_front = np.array([[[0.0, 1.0, 0.0]]]) + + batch_size = 1 + num_units = 8 + embedding_size = 3 + memory_size = 6 + + queue = neural_stack.NeuralQueueCell(num_units, memory_size, embedding_size) + rnn_input = tf.constant(input_values, dtype=tf.float32) + + queue_zero_state = tf.zeros([batch_size, num_units]) + controller_outputs = queue.call_controller(None, None, queue_zero_state, + batch_size) + assert_controller_shapes(self, controller_outputs, + queue.get_controller_shape(batch_size)) + + (outputs, state) = tf.nn.dynamic_rnn(cell=queue, + inputs=rnn_input, + time_major=False, + dtype=tf.float32) + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + _, state_vals = sess.run([outputs, state]) + (_, queue_front, values, read_strengths, write_strengths) = state_vals + + self.assertAllClose(expected_values, values) + self.assertAllClose(expected_write_strengths, write_strengths) + self.assertAllClose(expected_read_strengths, read_strengths) + self.assertAllClose(expected_front, queue_front) + + +class NeuralDequeCellTest(tf.test.TestCase): + + def test_cell_shapes(self): + """Check that all the NeuralStackCell tensor shapes are correct. + """ + batch_size = 5 + embedding_size = 4 + memory_size = 12 + num_units = 8 + + deque = neural_stack.NeuralDequeCell(num_units, memory_size, embedding_size) + deque.build(None) + + self.assertEqual([1, 1, memory_size, memory_size], + deque.get_read_mask(0).shape) + self.assertEqual([1, 1, memory_size, memory_size], + deque.get_read_mask(1).shape) + + deque_input = tf.zeros([batch_size, 1, embedding_size], dtype=tf.float32) + zero_state = deque.zero_state(batch_size, tf.float32) + (outputs, (deque_next_state)) = deque.call(deque_input, zero_state) + + # Make sure that deque output shapes match deque input shapes + self.assertEqual(outputs.shape, deque_input.shape) + + assert_cell_shapes(self, deque_next_state, zero_state) + + @mock.patch.object(neural_stack.NeuralDequeCell, "build_controller", + build_fake_controller) + @mock.patch.object(neural_stack.NeuralDequeCell, "call_controller", + call_fake_controller( + push_values=[[[[1.0]], [[0.0]]], + [[[1.0]], [[0.0]]], + [[[1.0]], [[0.0]]], + [[[0.0]], [[1.0]]], + [[[0.0]], [[0.0]]], + [[[0.0]], [[0.0]]]], + pop_values=[[[[0.0]], [[0.0]]], + [[[0.0]], [[0.0]]], + [[[0.0]], [[0.0]]], + [[[0.0]], [[0.0]]], + [[[0.0]], [[1.0]]], + [[[0.0]], [[1.0]]]], + write_values=[[[1.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]], + [[0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 1.0, 0.0], + [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0]], + [[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]]], + output_values=[[[0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0]]])) + def test_enqueue_dequeue(self): + """Test enqueueing a dequeueing from a NeuralDequeCell. 
+ + The sequence of operations is: + enqueue_bottom([1.0, 0.0, 0.0, 0.0]) + enqueue_bottom([0.0, 1.0, 0.0, 0.0]) + enqueue_bottom([0.0, 0.0, 1.0, 0.0]) + enqueue_top([0.0, 0.0, 0.0, 1.0]) + dequeue_top() + dequeue_top() + """ + input_values = np.array([[[[1.0, 0.0, 0.0, 0.0]], + [[0.0, 1.0, 0.0, 0.0]], + [[0.0, 0.0, 1.0, 0.0]], + [[0.0, 0.0, 0.0, 1.0]], + [[0.0, 0.0, 0.0, 0.0]], + [[0.0, 0.0, 0.0, 0.0]]]]) + + expected_values = np.array([[[0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [1.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0]]]) + + expected_read_strengths = np.array([[[[0.0], [0.0], [0.0], [1.0], [1.0], + [0.0], [0.0], [0.0], [0.0], [0.0], + [0.0], [0.0]]]]) + + expected_write_strengths = np.array([[[[0.0], [0.0], [0.0], [0.0], [0.0], + [0.0], [0.0], [0.0], [0.0], [0.0], + [0.0], [1.0]], + [[1.0], [0.0], [0.0], [0.0], [0.0], + [0.0], [0.0], [0.0], [0.0], [0.0], + [0.0], [0.0]]]]) + + expected_read_values = np.array([[[0.0, 0.0, 1.0, 0.0], + [0.0, 1.0, 0.0, 0.0]]]) + + batch_size = input_values.shape[0] + memory_size = input_values.shape[1] * 2 + embedding_size = input_values.shape[3] + num_units = 8 + + deque = neural_stack.NeuralDequeCell(num_units, memory_size, embedding_size) + rnn_input = tf.constant(input_values, dtype=tf.float32) + + deque_zero_state = tf.zeros([batch_size, num_units]) + controller_outputs = deque.call_controller(None, None, + deque_zero_state, + batch_size) + assert_controller_shapes(self, controller_outputs, + deque.get_controller_shape(batch_size)) + + (outputs, state) = tf.nn.dynamic_rnn(cell=deque, + inputs=rnn_input, + time_major=False, + dtype=tf.float32) + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + _, state_vals = sess.run([outputs, state]) + (_, read_values, + memory_values, + read_strengths, + write_strengths) = state_vals + + print(read_values) + self.assertAllClose(expected_values, memory_values) + self.assertAllClose(expected_write_strengths, write_strengths) + self.assertAllClose(expected_read_strengths, read_strengths) + self.assertAllClose(expected_read_values, read_values) + + +class NeuralStackModelTest(tf.test.TestCase): + + def test_model_shapes(self): + """Test a few of the important output shapes for NeuralStackModel. 
+ """ + batch_size = 100 + seq_length = 80 + embedding_size = 64 + vocab_size = 128 + + hparams = neural_stack.neural_stack() + problem_hparams = contrib.training().HParams() + + problem_hparams.add_hparam("modality", { + "inputs": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.SYMBOL, + }) + problem_hparams.add_hparam("vocab_size", { + "inputs": vocab_size, + "targets": vocab_size, + }) + model = neural_stack.NeuralStackModel(hparams, + problem_hparams=problem_hparams) + + features = { + "inputs": tf.ones([batch_size, seq_length, 1, 1], + dtype=tf.int32), + "targets": tf.ones([batch_size, seq_length, 1, 1], dtype=tf.int32) + } + + transformed_features = model.bottom(features) + + self.assertEqual([batch_size, seq_length, 1, embedding_size], + transformed_features["inputs"].shape) + + logits = model.body(transformed_features) + + self.assertEqual([batch_size, seq_length, 1, embedding_size], logits.shape) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/research/residual_shuffle_exchange.py b/tensor2tensor/models/research/residual_shuffle_exchange.py new file mode 100644 index 000000000..7c22c2880 --- /dev/null +++ b/tensor2tensor/models/research/residual_shuffle_exchange.py @@ -0,0 +1,291 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Residual Shuffle-Exchange Network. + +Implementation of +"Residual Shuffle-Exchange Networks for Fast Processing of Long Sequences" +paper by A.Draguns, E.Ozolins, A.Sostaks, M.Apinis, K.Freivalds. + +Paper: https://arxiv.org/abs/2004.04662 +Original code: https://github.com/LUMII-Syslab/RSE +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from tensor2tensor.layers.common_layers import gelu +from tensor2tensor.models.research.shuffle_network import reverse_shuffle_layer +from tensor2tensor.models.research.shuffle_network import shuffle_layer +from tensor2tensor.models.research.shuffle_network import ShuffleNetwork +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class LayerNormalization(tf.keras.layers.Layer): + """Layer Normalization (LayerNorm) without output bias and gain.""" + + def __init__(self, axis=1, epsilon=1e-10, **kwargs): + """Initialize Layer Normalization layer. + + Args: + axis: Tuple or number of axis for calculating mean and variance + epsilon: Small epsilon to avoid division by zero + **kwargs: keyword args passed to super. + """ + self.axis = axis + self.epsilon = epsilon + self.bias = None + super(LayerNormalization, self).__init__(**kwargs) + + def build(self, input_shape): + """Initialize bias weights for layer normalization. 
+ + Args: + input_shape: shape of input tensor + """ + num_units = input_shape.as_list()[-1] + self.bias = self.add_weight( + "bias", [1, 1, num_units], initializer=tf.zeros_initializer) + super(LayerNormalization, self).build(input_shape) + + def call(self, inputs, **kwargs): + """Apply Layer Normalization without output bias and gain. + + Args: + inputs: tensor to be normalized. Axis should be smaller than input tensor + dimensions. + **kwargs: more arguments (unused) + + Returns: + tensor output. + """ + inputs -= tf.reduce_mean(inputs, axis=self.axis, keepdims=True) + inputs += self.bias + variance = tf.reduce_mean(tf.square(inputs), self.axis, keepdims=True) + return inputs * tf.math.rsqrt(variance + self.epsilon) + + +def inv_sigmoid(y): + """Inverse sigmoid function. + + Args: + y: float in range 0 to 1 + + Returns: + the inverse sigmoid. + """ + return np.log(y / (1 - y)) + + +class RSU(tf.keras.layers.Layer): + """Residual Switch Unit of Residual Shuffle-Exchange network.""" + + def __init__(self, prefix, dropout, mode, **kwargs): + """Initialize Switch Layer. + + Args: + prefix: Name prefix for switch layer + dropout: Dropout rate + mode: Training mode + **kwargs: more arguments (unused) + """ + super().__init__(**kwargs) + self.prefix = prefix + self.dropout = dropout + self.mode = mode + self.first_linear = None + self.second_linear = None + self.layer_norm = None + self.residual_scale = None + + residual_weight = 0.9 + self.candidate_weight = np.sqrt(1 - residual_weight**2) * 0.25 + self.init_value = inv_sigmoid(residual_weight) + + def build(self, input_shape): + """Initialize layer weights and sublayers. + + Args: + input_shape: shape of inputs + """ + in_units = input_shape[-1] + middle_units = in_units * 4 + out_units = in_units * 2 + init = tf.variance_scaling_initializer( + scale=1.0, mode="fan_avg", distribution="uniform") + + self.first_linear = tf.keras.layers.Dense( + middle_units, + use_bias=False, + kernel_initializer=init, + name=self.prefix + "/cand1") + + self.second_linear = tf.keras.layers.Dense( + out_units, kernel_initializer=init, name=self.prefix + "/cand2") + self.layer_norm = LayerNormalization() + + init = tf.constant_initializer(self.init_value) + self.residual_scale = self.add_weight( + self.prefix + "/residual", [out_units], initializer=init) + super(RSU, self).build(input_shape) + + def call(self, inputs, **kwargs): + """Apply Residual Switch Layer to inputs. + + Args: + inputs: Input tensor. + **kwargs: unused kwargs. 
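Editor's note: the normalization in `LayerNormalization.call` above differs from a textbook LayerNorm in that there is no output gain and the bias is added before the variance is taken. A minimal NumPy restatement (a sketch; the shapes and the random input are illustrative):

```python
import numpy as np

def layer_norm_no_gain(x, bias, axis=1, epsilon=1e-10):
    """Mirror of LayerNormalization.call above: center, add bias, then divide
    by the RMS along `axis`. No learned output gain."""
    x = x - x.mean(axis=axis, keepdims=True)
    x = x + bias                                    # broadcast [1, 1, units] bias
    variance = np.square(x).mean(axis=axis, keepdims=True)
    return x / np.sqrt(variance + epsilon)

x = np.random.randn(2, 8, 4)        # [batch, length, units]
bias = np.zeros((1, 1, 4))
print(layer_norm_no_gain(x, bias).shape)   # (2, 8, 4)
```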
+ + Returns: + tf.Tensor: New candidate value + """ + del kwargs + input_shape = tf.shape(inputs) + batch_size = input_shape[0] + length = input_shape[1] + num_units = inputs.shape.as_list()[2] + + n_bits = tf.log(tf.cast(length - 1, tf.float32)) / tf.log(2.0) + n_bits = tf.floor(n_bits) + 1 + + reshape_shape = [batch_size, length // 2, num_units * 2] + reshaped_inputs = tf.reshape(inputs, reshape_shape) + + first_linear = self.first_linear(reshaped_inputs) + first_linear = self.layer_norm(first_linear) + first_linear = gelu(first_linear) + candidate = self.second_linear(first_linear) + + residual = tf.sigmoid(self.residual_scale) * reshaped_inputs + candidate = residual + candidate * self.candidate_weight + candidate = tf.reshape(candidate, input_shape) + + if self.dropout > 0: + candidate = tf.nn.dropout(candidate, rate=self.dropout / n_bits) + if self.dropout != 0.0 and self.mode == tf_estimator.ModeKeys.TRAIN: + noise = tf.random_normal(tf.shape(candidate), mean=1.0, stddev=0.001) + candidate = candidate * noise + + return candidate + + +def residual_shuffle_network(inputs, hparams): + """Residual Shuffle-Exchange network with weight sharing. + + Args: + inputs: inputs to the Shuffle-Exchange network. Should be in length of power + of 2. + hparams: Model configuration + + Returns: + tf.Tensor: Outputs of the Shuffle-Exchange last layer + """ + input_shape = tf.shape(inputs) + n_bits = tf.log(tf.cast(input_shape[1] - 1, tf.float32)) / tf.log(2.0) + n_bits = tf.cast(n_bits, tf.int32) + 1 + + block_out = inputs + + for k in range(hparams.num_hidden_layers): + with tf.variable_scope("benes_block_" + str(k), reuse=tf.AUTO_REUSE): + forward_output = forward_part(block_out, hparams, n_bits) + block_out = reverse_part(forward_output, hparams, n_bits) + + return RSU("last_layer", hparams.dropout, hparams.mode)(block_out) + + +def reverse_part(inputs, hparams, n_bits): + """Reverse part of Benes block. + + Repeatably applies interleaved Residual Switch layer and Reverse Shuffle + Layer. One set of weights used for all Switch layers. + + Args: + inputs: inputs for reverse part. Should be outputs from forward part. + hparams: params of the network. + n_bits: count of repeated layer applications. + + Returns: + tf.Tensor: output of reverse part. + """ + reverse_rsu = RSU("reverse_switch", hparams.dropout, hparams.mode) + + def reverse_step(state, _): + with tf.variable_scope("reverse"): + new_state = reverse_rsu(state) + return reverse_shuffle_layer(new_state) + + reverse_outputs = tf.scan( + reverse_step, + tf.range(n_bits, n_bits * 2), + initializer=inputs, + parallel_iterations=1, + swap_memory=True) + + return reverse_outputs[-1, :, :, :] + + +def forward_part(block_out, hparams, n_bits): + """Forward part of Benes block. + + Repeatably applies interleaved Residual Switch layer and Shuffle + Layer. One set of weights used for all Switch layers. + + Args: + block_out: TODO(authors) document. + hparams: params of the network. + n_bits: count of repeated layer applications. + + Returns: + tf.Tensor: output of forward part. 
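Editor's note: `RSU.call` above pairs adjacent positions (reshaping `[batch, length, u]` to `[batch, length/2, 2u]`), pushes each pair through two dense maps with GELU in between, and mixes the result back with a sigmoid-scaled residual. The sketch below restates that per-pair computation in NumPy with layer normalization omitted for brevity; the weights are random placeholders, and the constants mirror `residual_weight = 0.9` (so `scale = inv_sigmoid(0.9) ≈ 2.197`, `candidate_weight ≈ 0.109`).

```python
import numpy as np

def gelu(x):
    # tanh approximation of GELU, as used by common_layers.gelu
    return 0.5 * x * (1.0 + np.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * x**3)))

def rsu_pair(pair, w1, w2, scale, candidate_weight):
    """One Residual Switch Unit applied to a pair [x_2i ; x_2i+1] of 2*u values."""
    hidden = gelu(pair @ w1)                         # 2u -> 4u, then GELU
    candidate = hidden @ w2                          # 4u -> 2u
    residual = 1.0 / (1.0 + np.exp(-scale)) * pair   # sigmoid-scaled residual copy
    return residual + candidate_weight * candidate

units = 4
pair = np.random.randn(2 * units)
w1 = 0.1 * np.random.randn(2 * units, 4 * units)
w2 = 0.1 * np.random.randn(4 * units, 2 * units)
out = rsu_pair(pair, w1, w2, np.full(2 * units, 2.197), 0.109)
print(out.shape)   # (8,) -- reshaped back into the two positions by RSU.call
```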
+ """ + forward_rsu = RSU("switch", hparams.dropout, hparams.mode) + + def forward_step(state, _): + with tf.variable_scope("forward"): + new_state = forward_rsu(state) + return shuffle_layer(new_state) + + forward_outputs = tf.scan( + forward_step, + tf.range(0, n_bits), + initializer=block_out, + parallel_iterations=1, + swap_memory=True) + + return forward_outputs[-1, :, :, :] + + +@registry.register_model +class ResidualShuffleExchange(ShuffleNetwork): + """T2T implementation of Residual Shuffle-Exchange network.""" + + def body(self, features): + """Body of Residual Shuffle-Exchange network. + + Args: + features: dictionary of inputs and targets + + Returns: + the network output. + """ + + inputs = tf.squeeze(features["inputs"], axis=2) + logits = residual_shuffle_network(inputs, self._hparams) + return tf.expand_dims(logits, axis=2) diff --git a/tensor2tensor/models/research/rl.py b/tensor2tensor/models/research/rl.py new file mode 100644 index 000000000..16ee162c6 --- /dev/null +++ b/tensor2tensor/models/research/rl.py @@ -0,0 +1,881 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Reinforcement learning models and parameters.""" + +import collections +import functools +import operator +import gym +import six + +from tensor2tensor.data_generators import gym_env +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import video_utils +from tensor2tensor.envs import tic_tac_toe_env +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import discretization +from tensor2tensor.layers import modalities +from tensor2tensor.models.video import basic_deterministic_params +from tensor2tensor.models.video import basic_stochastic +from tensor2tensor.rl.envs.py_func_batch_env import PyFuncBatchEnv +from tensor2tensor.rl.envs.simulated_batch_env import SimulatedBatchEnv +from tensor2tensor.rl.envs.simulated_batch_gym_env import SimulatedBatchGymEnv +from tensor2tensor.utils import hparam +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model +from tensor2tensor.utils import trainer_lib + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +import tensorflow_probability as tfp + + +@registry.register_hparams +def ppo_base_v1(): + """Set of hyperparameters.""" + hparams = common_hparams.basic_params1() + hparams.learning_rate_schedule = "constant" + hparams.learning_rate_constant = 1e-4 + hparams.clip_grad_norm = 0.5 + hparams.weight_decay = 0 + # If set, extends the LR warmup to all epochs except the final one. 
+ hparams.add_hparam("lr_decay_in_final_epoch", False) + hparams.add_hparam("init_mean_factor", 0.1) + hparams.add_hparam("init_logstd", 0.1) + hparams.add_hparam("policy_layers", (100, 100)) + hparams.add_hparam("value_layers", (100, 100)) + hparams.add_hparam("clipping_coef", 0.2) + hparams.add_hparam("gae_gamma", 0.99) + hparams.add_hparam("gae_lambda", 0.95) + hparams.add_hparam("entropy_loss_coef", 0.01) + hparams.add_hparam("value_loss_coef", 1) + hparams.add_hparam("optimization_epochs", 15) + hparams.add_hparam("epoch_length", 200) + hparams.add_hparam("epochs_num", 2000) + hparams.add_hparam("eval_every_epochs", 10) + hparams.add_hparam("save_models_every_epochs", 30) + hparams.add_hparam("optimization_batch_size", 50) + hparams.add_hparam("intrinsic_reward_scale", 0.) + hparams.add_hparam("logits_clip", 0.0) + hparams.add_hparam("dropout_ppo", 0.1) + hparams.add_hparam("effective_num_agents", None) + hparams.add_hparam("use_epochs", True) + # TODO(afrozm): Clean this up, this is used in PPO learner to get modalities. + hparams.add_hparam("policy_problem_name", "dummy_policy_problem") + return hparams + + +@registry.register_hparams +def basic_policy_parameters(): + wrappers = None + return hparam.HParams(wrappers=wrappers) + + +@registry.register_hparams +def ppo_discrete_action_base(): + hparams = ppo_base_v1() + hparams.add_hparam("policy_network", "feed_forward_categorical_policy") + return hparams + + +@registry.register_hparams +def discrete_random_action_base(): + hparams = common_hparams.basic_params1() + hparams.add_hparam("policy_network", "random_policy") + return hparams + + +@registry.register_hparams +def ppo_atari_base(): + """Pong base parameters.""" + hparams = ppo_discrete_action_base() + hparams.learning_rate_constant = 1e-4 + hparams.epoch_length = 200 + hparams.gae_gamma = 0.985 + hparams.gae_lambda = 0.985 + hparams.entropy_loss_coef = 0.003 + hparams.value_loss_coef = 1 + hparams.optimization_epochs = 3 + hparams.epochs_num = 1000 + hparams.policy_network = "feed_forward_cnn_small_categorical_policy" + hparams.clipping_coef = 0.2 + hparams.optimization_batch_size = 20 + hparams.clip_grad_norm = 0.5 + return hparams + + +@registry.register_hparams +def ppo_original_params(): + """Parameters based on the original PPO paper.""" + hparams = ppo_atari_base() + hparams.learning_rate_constant = 2.5e-4 + hparams.gae_gamma = 0.99 + hparams.gae_lambda = 0.95 + hparams.clipping_coef = 0.1 + hparams.value_loss_coef = 1 + hparams.entropy_loss_coef = 0.01 + hparams.eval_every_epochs = 200 + hparams.dropout_ppo = 0.1 + # The parameters below are modified to accommodate short epoch_length (which + # is needed for model based rollouts). 
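Editor's note: `gae_gamma` and `gae_lambda` above parameterize generalized advantage estimation. The following NumPy sketch (illustrative, not code from `rl.py`; the PPO learner computes this inside the TF graph) shows how the two numbers combine rewards and value estimates into advantages:

```python
import numpy as np

def gae_advantages(rewards, values, gamma=0.99, lam=0.95):
    """GAE for a single rollout; `values` has one extra bootstrap entry for the
    state after the final step."""
    deltas = rewards + gamma * values[1:] - values[:-1]
    advantages = np.zeros_like(rewards)
    acc = 0.0
    for t in reversed(range(len(rewards))):
        acc = deltas[t] + gamma * lam * acc
        advantages[t] = acc
    return advantages

rewards = np.array([0.0, 0.0, 1.0])
values = np.array([0.1, 0.2, 0.5, 0.0])   # last entry is the bootstrap value
print(gae_advantages(rewards, values, gamma=0.985, lam=0.985))
```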
+ hparams.epoch_length = 50 + hparams.optimization_batch_size = 20 + return hparams + + +@registry.register_hparams +def ppo_dist_params(): + """Parameters based on the original paper modified for distributional RL.""" + hparams = ppo_original_params() + hparams.learning_rate_constant = 1e-3 + return hparams + + +@registry.register_hparams +def ppo_original_tiny(): + """Parameters based on the original PPO paper, tiny version.""" + hparams = ppo_original_params() + hparams.epoch_length = 5 + hparams.optimization_batch_size = 1 + return hparams + + +@registry.register_hparams +def ppo_ttt_params(): + """Parameters based on the original PPO paper.""" + hparams = ppo_original_tiny() + hparams.policy_network = "feed_forward_categorical_policy" + hparams.policy_problem_name = "dummy_policy_problem_ttt" + return hparams + + +@registry.register_hparams +def ppo_original_params_gamma95(): + """Parameters based on the original PPO paper, changed gamma.""" + hparams = ppo_original_params() + hparams.gae_gamma = 0.95 + return hparams + + +@registry.register_hparams +def ppo_original_params_gamma90(): + """Parameters based on the original PPO paper, changed gamma.""" + hparams = ppo_original_params() + hparams.gae_gamma = 0.90 + return hparams + + +@registry.register_hparams +def ppo_original_world_model(): + """Atari parameters with world model as policy.""" + hparams = ppo_original_params() + hparams.policy_network = "next_frame_basic_deterministic" + hparams_keys = hparams.values().keys() + video_hparams = basic_deterministic_params.next_frame_basic_deterministic() + for (name, value) in six.iteritems(video_hparams.values()): + if name in hparams_keys: + hparams.set_hparam(name, value) + else: + hparams.add_hparam(name, value) + # Mostly to avoid decaying WM params when training the policy. + hparams.weight_decay = 0 + return hparams + + +@registry.register_hparams +def ppo_tiny_world_model(): + """Atari parameters with world model as policy.""" + hparams = ppo_original_params() + hparams.policy_network = "next_frame_basic_deterministic" + hparams_keys = hparams.values().keys() + video_hparams = basic_deterministic_params.next_frame_tiny() + for (name, value) in six.iteritems(video_hparams.values()): + if name in hparams_keys: + hparams.set_hparam(name, value) + else: + hparams.add_hparam(name, value) + hparams.weight_decay = 0 + return hparams + + +@registry.register_hparams +def ppo_original_world_model_stochastic_discrete(): + """Atari parameters with stochastic discrete world model as policy.""" + hparams = ppo_original_params() + hparams.policy_network = "next_frame_basic_stochastic_discrete" + hparams_keys = hparams.values().keys() + video_hparams = basic_stochastic.next_frame_basic_stochastic_discrete() + for (name, value) in six.iteritems(video_hparams.values()): + if name in hparams_keys: + hparams.set_hparam(name, value) + else: + hparams.add_hparam(name, value) + # To avoid OOM. Probably way to small. + hparams.optimization_batch_size = 1 + hparams.weight_decay = 0 + return hparams + + +def make_real_env_fn(env): + """Creates a function returning a given real env, in or out of graph. + + Args: + env: Environment to return from the function. + + Returns: + Function in_graph -> env. + """ + return lambda in_graph: PyFuncBatchEnv(env) if in_graph else env + + +def make_simulated_env_fn(**env_kwargs): + """Returns a function creating a simulated env, in or out of graph. + + Args: + **env_kwargs: kwargs to pass to the simulated env constructor. + + Returns: + Function in_graph -> env. 
+ """ + def env_fn(in_graph): + class_ = SimulatedBatchEnv if in_graph else SimulatedBatchGymEnv + return class_(**env_kwargs) + return env_fn + + +# TODO(koz4k): Move this and the one below to rl_utils. +def make_simulated_env_kwargs(real_env, hparams, **extra_kwargs): + """Extracts simulated env kwargs from real_env and loop hparams.""" + objs_and_attrs = [ + (real_env, [ + "reward_range", "observation_space", "action_space", "frame_height", + "frame_width" + ]), + (hparams, ["frame_stack_size", "intrinsic_reward_scale"]) + ] + kwargs = { + attr: getattr(obj, attr) # pylint: disable=g-complex-comprehension + for (obj, attrs) in objs_and_attrs for attr in attrs + } + kwargs["model_name"] = hparams.generative_model + kwargs["model_hparams"] = trainer_lib.create_hparams( + hparams.generative_model_params + ) + if hparams.wm_policy_param_sharing: + kwargs["model_hparams"].optimizer_zero_grads = True + kwargs.update(extra_kwargs) + return kwargs + + +def make_simulated_env_fn_from_hparams(real_env, hparams, **extra_kwargs): + """Creates a simulated env_fn.""" + return make_simulated_env_fn( + **make_simulated_env_kwargs(real_env, hparams, **extra_kwargs) + ) + + +def get_policy(observations, hparams, action_space, + distributional_size=1, epoch=-1): + """Get a policy network. + + Args: + observations: observations + hparams: parameters + action_space: action space + distributional_size: optional number of buckets for distributional RL + epoch: optional epoch number + + Returns: + Tuple (action logits, value). + """ + if not isinstance(action_space, gym.spaces.Discrete): + raise ValueError("Expecting discrete action space.") + + obs_shape = common_layers.shape_list(observations) + (frame_height, frame_width) = obs_shape[2:4] + + # TODO(afrozm): We have these dummy problems mainly for hparams, so cleanup + # when possible and do this properly. 
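Editor's note: both `make_real_env_fn` and `make_simulated_env_fn` return a closure keyed on a single `in_graph` flag, so callers can defer the in-graph versus out-of-graph decision. A hypothetical usage sketch (`env` is assumed to be an already-constructed batch environment and `loop_hparams` a loop-hparams set with the fields read by `make_simulated_env_kwargs`):

```python
env_fn = make_real_env_fn(env)
tf_env = env_fn(True)        # PyFuncBatchEnv wrapper, steppable inside the TF graph
py_env = env_fn(False)       # the original environment object, stepped in Python

sim_env_fn = make_simulated_env_fn_from_hparams(env, loop_hparams)
sim_env = sim_env_fn(False)  # SimulatedBatchGymEnv built from the world model
```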
+ if hparams.policy_problem_name == "dummy_policy_problem_ttt": + tf.logging.info("Using DummyPolicyProblemTTT for the policy.") + policy_problem = tic_tac_toe_env.DummyPolicyProblemTTT() + else: + tf.logging.info("Using DummyPolicyProblem for the policy.") + policy_problem = DummyPolicyProblem(action_space, frame_height, frame_width) + + trainer_lib.add_problem_hparams(hparams, policy_problem) + hparams.force_full_predict = True + model = registry.model(hparams.policy_network)( + hparams, tf_estimator.ModeKeys.TRAIN + ) + try: + num_target_frames = hparams.video_num_target_frames + except AttributeError: + num_target_frames = 1 + target_value_shape_suffix = [num_target_frames] + if distributional_size > 1: + target_value_shape_suffix = [num_target_frames, distributional_size] + features = { + "inputs": observations, + "epoch": tf.constant(epoch + 1), + "input_action": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32), + "input_reward": tf.zeros(obs_shape[:2] + [1], dtype=tf.int32), + "targets": tf.zeros(obs_shape[:1] + [num_target_frames] + obs_shape[2:]), + "target_action": tf.zeros( + obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32), + "target_reward": tf.zeros( + obs_shape[:1] + [num_target_frames, 1], dtype=tf.int32), + "target_policy": tf.zeros( + obs_shape[:1] + [num_target_frames] + [action_space.n]), + "target_value": tf.zeros( + obs_shape[:1] + target_value_shape_suffix) + } + model.distributional_value_size = max(distributional_size, 1) + model.use_epochs = hparams.use_epochs + with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): + t2t_model.create_dummy_vars() + (targets, _) = model(features) + target_values = targets["target_value"][:, 0] + if distributional_size > 1: + target_values = targets["target_value"][:, :] + return (targets["target_policy"][:, 0, :], target_values) + + +@registry.register_hparams +def ppo_pong_ae_base(): + """Pong autoencoder base parameters.""" + hparams = ppo_original_params() + hparams.learning_rate_constant = 1e-4 + hparams.network = "dense_bitwise_categorical_policy" + return hparams + + +@registry.register_hparams +def dqn_atari_base(): + # These params are based on agents/dqn/configs/dqn.gin + # with some modifications taking into account our code + return hparam.HParams( + agent_gamma=0.99, + agent_update_horizon=1, + agent_min_replay_history=20000, # agent steps + agent_update_period=4, + agent_target_update_period=8000, # agent steps + agent_epsilon_train=0.01, + agent_epsilon_eval=0.001, + agent_epsilon_decay_period=250000, # agent steps + agent_generates_trainable_dones=True, + agent_type="VanillaDQN", # one of ["Rainbow", "VanillaDQN"] + + optimizer_class="RMSProp", + optimizer_learning_rate=0.00025, + optimizer_decay=0.95, + optimizer_momentum=0.0, + optimizer_epsilon=0.00001, + optimizer_centered=True, + + # TODO(kozak): change names maybe replay_buffer -> agent? + # Also batch_size is now buffer_batch_size in _DQNAgent. 
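Editor's note: a rough usage sketch of the `get_policy` helper defined above. The observation shape `[batch, frame_stack, height, width, channels]` and the 6-action space are assumptions for illustration; additional hparams plumbing may be needed depending on the chosen policy network.

```python
import gym
import tensorflow.compat.v1 as tf

hparams = ppo_original_params()
action_space = gym.spaces.Discrete(6)
observations = tf.zeros([16, 4, 105, 80, 3], dtype=tf.float32)

(policy_logits, value) = get_policy(observations, hparams, action_space)
# policy_logits: [batch, num_actions] action logits; value: state-value estimate
```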
+ replay_buffer_replay_capacity=1000000, + replay_buffer_buffer_batch_size=32, + + time_limit=27000, + save_every_steps=50000, + num_frames=int(20 * 1e6), + + # TODO(konradczechowski) this is not used in trainer_model_free, clean + # this up after evaluation refactor + eval_episodes_num=3, + ) + + +@registry.register_hparams +def dqn_original_params(): + """dqn_original_params.""" + hparams = dqn_atari_base() + hparams.set_hparam("num_frames", int(1e6)) + return hparams + + +@registry.register_hparams +def dqn_guess1_params(): + """Guess 1 for DQN params.""" + hparams = dqn_atari_base() + hparams.set_hparam("num_frames", int(1e6)) + hparams.set_hparam("agent_update_period", 1) + hparams.set_hparam("agent_target_update_period", 400) + # Small replay buffer size was set for mistake, but it seems to work + hparams.set_hparam("replay_buffer_replay_capacity", 10000) + return hparams + + +@registry.register_hparams +def dqn_guess1_params_eval(): + """Params for dqn_guess1 evaluation (with evaluator.py).""" + hparams = dqn_guess1_params() + hparams.set_hparam("eval_episodes_num", 64) + return hparams + + +@registry.register_hparams +def dqn_guess1_rainbow_params(): + """Guess 1 for DQN params.""" + hparams = dqn_guess1_params() + hparams.set_hparam("agent_type", "Rainbow") + return hparams + + +@registry.register_hparams +def dqn_rainbow_params(): + """Rainbow params.""" + hparams = dqn_guess1_params() + hparams.set_hparam("agent_type", "Rainbow") + hparams.set_hparam("replay_buffer_replay_capacity", int(2e6) + int(1e5)) + return hparams + + +@registry.register_hparams +def dqn_2m_replay_buffer_params(): + """Guess 1 for DQN params, 2 milions transitions in replay buffer.""" + hparams = dqn_guess1_params() + hparams.set_hparam("replay_buffer_replay_capacity", int(2e6) + int(1e5)) + return hparams + + +@registry.register_hparams +def dqn_10m_replay_buffer_params(): + """Guess 1 for DQN params, 10 milions transitions in replay buffer.""" + hparams = dqn_guess1_params() + hparams.set_hparam("replay_buffer_replay_capacity", int(10e6)) + return hparams + + +def rlmf_tiny_overrides(): + """Parameters to override for tiny setting excluding agent-related hparams.""" + return dict( + max_num_noops=1, + eval_max_num_noops=1, + rl_env_max_episode_steps=7, + eval_rl_env_max_episode_steps=7, + eval_sampling_temps=[0.0, 1.0], + ) + + +@registry.register_hparams +def rlmf_original(): + return hparam.HParams( + game="pong", + sticky_actions=False, + base_algo="ppo", + base_algo_params="ppo_original_params", + batch_size=16, + eval_batch_size=2, + frame_stack_size=4, + eval_sampling_temps=[0.0, 0.2, 0.5, 0.8, 1.0, 2.0], + max_num_noops=8, + eval_max_num_noops=8, + eval_rl_env_max_episode_steps=1000, + resize_height_factor=2, + resize_width_factor=2, + distributional_size=1, # In distributional RL, number of buckets. + distributional_subscale=0.04, # How to scale values to buckets. + distributional_threshold=0.0, # Optimism threshold for experiments. + grayscale=0, + rl_env_max_episode_steps=-1, + # If set, use this as the gym env name, instead of changing game mode etc. + rl_env_name="", + # Controls whether we should derive observation space, do some + # pre-processing etc. See T2TGymEnv._derive_observation_space. + rl_should_derive_observation_space=True, + aunused=0, # unused param for multi-run settings. 
+ ) + + +@registry.register_hparams +def rlmf_tictactoe(): + """Base set of hparams for model-free PPO.""" + hparams = rlmf_original() + hparams.game = "tictactoe" + hparams.rl_env_name = "T2TEnv-TicTacToeEnv-v0" + # Since we don't have any no-op actions, otherwise we have to have an + # attribute called `get_action_meanings`. + hparams.eval_max_num_noops = 0 + hparams.max_num_noops = 0 + hparams.rl_should_derive_observation_space = False + + hparams.policy_network = "feed_forward_categorical_policy" + hparams.base_algo_params = "ppo_ttt_params" + + # Number of last observations to feed to the agent + hparams.frame_stack_size = 1 + return hparams + + +@registry.register_hparams +def rlmf_base(): + """Base set of hparams for model-free PPO.""" + hparams = rlmf_original() + hparams.add_hparam("ppo_epochs_num", 3000) + hparams.add_hparam("ppo_eval_every_epochs", 100) + return hparams + + +@registry.register_ranged_hparams +def rlmf_5runs(rhp): + rhp.set_discrete("aunused", list(range(5))) + + +@registry.register_ranged_hparams +def rlmf_5runs_atari(rhp): + rhp.set_categorical("game", gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE) + rhp.set_discrete("aunused", list(range(5))) + + +@registry.register_hparams +def rlmf_dist(): + """Distributional set of hparams for model-free PPO.""" + hparams = rlmf_original() + hparams.distributional_size = 1024 + hparams.base_algo_params = "ppo_dist_params" + return hparams + + +@registry.register_hparams +def rlmf_dist_threshold(): + """Distributional set of hparams for model-free PPO.""" + hparams = rlmf_dist() + hparams.distributional_threshold = 0.5 + return hparams + + +@registry.register_hparams +def rlmf_tiny(): + """Tiny set of hparams for model-free PPO.""" + hparams = rlmf_original() + hparams = hparams.override_from_dict(rlmf_tiny_overrides()) + hparams.batch_size = 2 + hparams.base_algo_params = "ppo_original_tiny" + hparams.add_hparam("ppo_epochs_num", 3) + hparams.add_hparam("ppo_epoch_length", 2) + return hparams + + +@registry.register_hparams +def rlmf_dqn_tiny(): + """Tiny DQN params.""" + hparams = rlmf_original() + hparams = hparams.override_from_dict(rlmf_tiny_overrides()) + hparams.batch_size = 1 + hparams.base_algo = "dqn" + hparams.base_algo_params = "dqn_original_params" + hparams.add_hparam("dqn_num_frames", 128) + hparams.add_hparam("dqn_save_every_steps", 128) + hparams.add_hparam("dqn_replay_buffer_replay_capacity", 100) + hparams.add_hparam("dqn_agent_min_replay_history", 10) + return hparams + + +@registry.register_hparams +def rlmf_eval(): + """Eval set of hparams for model-free PPO.""" + hparams = rlmf_original() + hparams.batch_size = 16 + hparams.eval_batch_size = 32 + hparams.eval_episodes_num = 2 + hparams.eval_sampling_temps = [0.5, 0.0, 1.0] + hparams.eval_rl_env_max_episode_steps = 40000 + hparams.add_hparam("ppo_epoch_length", 128) + hparams.add_hparam("ppo_optimization_batch_size", 32) + hparams.add_hparam("ppo_epochs_num", 10000) + hparams.add_hparam("ppo_eval_every_epochs", 500) + hparams.add_hparam("attempt", 0) + hparams.add_hparam("moe_loss_coef", 0) + return hparams + + +@registry.register_hparams +def rlmf_eval_dist(): + """Distributional set of hparams for model-free PPO.""" + hparams = rlmf_eval() + hparams.distributional_size = 4096 + hparams.distributional_subscale = 0.08 + hparams.base_algo_params = "ppo_dist_params" + return hparams + + +@registry.register_hparams +def rlmf_eval_dist_threshold(): + """Distributional set of hparams for model-free PPO.""" + hparams = rlmf_eval_dist() + 
hparams.distributional_threshold = 0.5 + return hparams + + +class PolicyBase(t2t_model.T2TModel): + + def __init__(self, *args, **kwargs): + super(PolicyBase, self).__init__(*args, **kwargs) + self.distributional_value_size = 1 + self.use_epochs = False + + def loss(self, *args, **kwargs): + return 0.0 + + +# TODO(lukaszkaiser): move this class or clean up the whole file. +class DummyPolicyProblem(video_utils.VideoProblem): + """Dummy Problem for running the policy.""" + + def __init__(self, action_space, frame_height, frame_width): + super(DummyPolicyProblem, self).__init__() + self.action_space = action_space + self._frame_height = frame_height + self._frame_width = frame_width + + @property + def frame_height(self): + """Height of each frame.""" + return self._frame_height + + @property + def frame_width(self): + """Width of each frame.""" + return self._frame_width + + @property + def num_actions(self): + return self.action_space.n + + def hparams(self, defaults, unused_model_hparams): + p = defaults + p.modality = { + "inputs": modalities.ModalityType.VIDEO, + "input_action": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, + "input_reward": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, + "targets": modalities.ModalityType.VIDEO, + "target_action": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, + "target_reward": modalities.ModalityType.SYMBOL_WEIGHTS_ALL, + "target_policy": modalities.ModalityType.IDENTITY, + "target_value": modalities.ModalityType.IDENTITY, + } + p.vocab_size = { + "inputs": 256, + "input_action": self.num_actions, + "input_reward": 3, + "targets": 256, + "target_action": self.num_actions, + "target_reward": 3, + "target_policy": None, + "target_value": None, + } + p.input_space_id = problem.SpaceID.IMAGE + p.target_space_id = problem.SpaceID.IMAGE + + +NetworkOutput = collections.namedtuple( + "NetworkOutput", "policy, value, action_postprocessing") + + +# TODO(koz4k): Translate it to T2TModel or remove. 
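Editor's note: the `rlmf_*` hparams sets above are composed by calling one registered set and overriding fields, e.g. `rlmf_tiny` applies `rlmf_tiny_overrides` via `override_from_dict`. The same pattern can be used outside the registry; the override values below are purely illustrative:

```python
hparams = rlmf_original()
hparams = hparams.override_from_dict({
    "game": "breakout",
    "batch_size": 4,
    "base_algo_params": "ppo_original_params_gamma95",
})
```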
+def feed_forward_gaussian_fun(action_space, config, observations): + """Feed-forward Gaussian.""" + if not isinstance(action_space, gym.spaces.box.Box): + raise ValueError("Expecting continuous action space.") + + mean_weights_initializer = tf.initializers.variance_scaling( + scale=config.init_mean_factor) + logstd_initializer = tf.random_normal_initializer(config.init_logstd, 1e-10) + + flat_observations = tf.reshape(observations, [ + tf.shape(observations)[0], tf.shape(observations)[1], + functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)]) + + with tf.variable_scope("network_parameters"): + with tf.variable_scope("policy"): + x = flat_observations + for size in config.policy_layers: + x = tf.layers.dense(x, size, activation=tf.nn.relu) + mean = tf.layers.dense( + x, action_space.shape[0], activation=tf.tanh, + kernel_initializer=mean_weights_initializer) + logstd = tf.get_variable( + "logstd", mean.shape[2:], tf.float32, logstd_initializer) + logstd = tf.tile( + logstd[None, None], + [tf.shape(mean)[0], tf.shape(mean)[1]] + [1] * (mean.shape.ndims - 2)) + with tf.variable_scope("value"): + x = flat_observations + for size in config.value_layers: + x = tf.layers.dense(x, size, activation=tf.nn.relu) + value = tf.layers.dense(x, 1)[..., 0] + mean = tf.check_numerics(mean, "mean") + logstd = tf.check_numerics(logstd, "logstd") + value = tf.check_numerics(value, "value") + + policy = tfp.distributions.MultivariateNormalDiag(mean, tf.exp(logstd)) + + return NetworkOutput(policy, value, lambda a: tf.clip_by_value(a, -2., 2)) + + +def clip_logits(logits, config): + logits_clip = getattr(config, "logits_clip", 0.) + if logits_clip > 0: + min_logit = tf.reduce_min(logits) + return tf.minimum(logits - min_logit, logits_clip) + else: + return logits + + +@registry.register_model +class FeedForwardCategoricalPolicy(PolicyBase): + """Feed-forward categorical.""" + + def body(self, features): + observations = features["inputs_raw"] + observations = tf.cast(observations, tf.float32) + flat_observations = tf.layers.flatten(observations) + with tf.variable_scope("policy"): + x = flat_observations + for size in self.hparams.policy_layers: + x = tf.layers.dense(x, size, activation=tf.nn.relu) + logits = tf.layers.dense(x, self.hparams.problem.num_actions) + logits = tf.expand_dims(logits, axis=1) + with tf.variable_scope("value"): + x = flat_observations + for size in self.hparams.value_layers: + x = tf.layers.dense(x, size, activation=tf.nn.relu) + value = tf.layers.dense(x, 1) + logits = clip_logits(logits, self.hparams) + return {"target_policy": logits, "target_value": value} + + +@registry.register_model +class FeedForwardCnnSmallCategoricalPolicy(PolicyBase): + """Small cnn network with categorical output.""" + + def body(self, features): + observations = features["inputs_raw"] + # Axis 0 - Batch. + # Axis 1 - Input Frames, 4 frames. + # Axis 2, 3 - Height & Width. + # Axis 4 - Channels RGB, 3 colours. 
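Editor's note: the policy bodies above return `"target_policy"` logits of shape `[batch, 1, num_actions]`. A hedged sketch of turning those logits into actions with `tensorflow_probability` (the zero logits and the 0.8 temperature are placeholders for illustration):

```python
import tensorflow.compat.v1 as tf
import tensorflow_probability as tfp

policy_logits = tf.zeros([16, 1, 6])                 # stands in for "target_policy"
logits = tf.squeeze(policy_logits, axis=1)           # [batch, num_actions]
actions = tfp.distributions.Categorical(logits=logits / 0.8).sample()
greedy_actions = tf.argmax(logits, axis=-1)          # deterministic variant
```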
+ x = tf.transpose(observations, [0, 2, 3, 1, 4]) + x_shape = common_layers.shape_list(x) + x = tf.reshape(x, x_shape[:-2] + [-1]) + dropout = getattr(self.hparams, "dropout_ppo", 0.0) + with tf.variable_scope("feed_forward_cnn_small"): + x = tf.cast(x, tf.float32) / 255.0 + x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2), + activation=tf.nn.relu, padding="same") + x = tf.layers.conv2d(x, 32, (5, 5), strides=(2, 2), + activation=tf.nn.relu, padding="same") + + flat_x = tf.layers.flatten(x) + if self.use_epochs: + epoch = features["epoch"] + tf.zeros([x_shape[0]], dtype=tf.int32) + # Randomly set epoch to 0 in some cases as that's the inference value. + rand = tf.random.uniform([x_shape[0]]) + epoch = tf.where(rand < 0.1, tf.zeros_like(epoch), epoch) + # Embed the epoch number. + emb_epoch = common_layers.embedding(epoch, 32, 32) # [batch, 32] + flat_x = tf.concat([flat_x, emb_epoch], axis=1) + flat_x = tf.layers.dropout(flat_x, rate=dropout) + x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu) + + logits = tf.layers.dense( + x, self.hparams.problem.num_actions, name="dense2" + ) + logits = clip_logits(logits, self.hparams) + logits = tf.expand_dims(logits, axis=1) + value = tf.layers.dense(x, self.distributional_value_size) + return {"target_policy": logits, "target_value": value} + + +@registry.register_model +class FeedForwardCnnSmallCategoricalPolicyNew(PolicyBase): + """Small cnn network with categorical output.""" + + def body(self, features): + observations = features["inputs"] + x = tf.transpose(observations, [0, 2, 3, 1, 4]) + x_shape = common_layers.shape_list(x) + x = tf.reshape(x, x_shape[:-2] + [-1]) + dropout = getattr(self.hparams, "dropout_ppo", 0.0) + with tf.variable_scope("feed_forward_cnn_small"): + x = tf.cast(x, tf.float32) / 255.0 + x = tf.nn.dropout(x, rate=dropout) + x = tf.layers.conv2d( + x, 32, (4, 4), strides=(2, 2), name="conv1", + activation=common_layers.belu, padding="SAME") + x = tf.nn.dropout(x, rate=dropout) + x = tf.layers.conv2d( + x, 64, (4, 4), strides=(2, 2), name="conv2", + activation=common_layers.belu, padding="SAME") + x = tf.nn.dropout(x, rate=dropout) + x = tf.layers.conv2d( + x, 128, (4, 4), strides=(2, 2), name="conv3", + activation=common_layers.belu, padding="SAME") + + flat_x = tf.layers.flatten(x) + flat_x = tf.nn.dropout(flat_x, rate=dropout) + x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu, name="dense1") + + logits = tf.layers.dense( + x, self.hparams.problem.num_actions, name="dense2" + ) + logits = tf.expand_dims(logits, axis=1) + logits = clip_logits(logits, self.hparams) + + value = tf.layers.dense(x, 1, name="value") + return {"target_policy": logits, "target_value": value} + + +@registry.register_model +class DenseBitwiseCategoricalPolicy(PolicyBase): + """Dense network with bitwise input and categorical output.""" + + def body(self, features): + observations = features["inputs"] + flat_x = tf.layers.flatten(observations) + with tf.variable_scope("dense_bitwise"): + flat_x = discretization.int_to_bit_embed(flat_x, 8, 32) + + x = tf.layers.dense(flat_x, 256, activation=tf.nn.relu) + x = tf.layers.dense(flat_x, 128, activation=tf.nn.relu) + + logits = tf.layers.dense(x, self.hparams.problem.num_actions) + + value = tf.layers.dense(x, 1)[..., 0] + + return {"target_policy": logits, "target_value": value} + + +@registry.register_model +class RandomPolicy(PolicyBase): + """Random policy with categorical output.""" + + def body(self, features): + observations = features["inputs"] + obs_shape = 
observations.shape.as_list() + # Just so Saver doesn't complain because of no variables. + tf.get_variable("dummy_var", initializer=0.0) + num_actions = self.hparams.problem.num_actions + logits = tf.constant( + 1. / float(num_actions), + shape=(obs_shape[:1] + [1, num_actions]) + ) + value = tf.zeros(obs_shape[:1] + [1]) + return {"target_policy": logits, "target_value": value} diff --git a/tensor2tensor/models/research/shuffle_network.py b/tensor2tensor/models/research/shuffle_network.py new file mode 100644 index 000000000..3b0117010 --- /dev/null +++ b/tensor2tensor/models/research/shuffle_network.py @@ -0,0 +1,500 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Neural Shuffle-Exchange Network. + +Implementation of +"Neural Shuffle-Exchange Networks - Sequence Processing in O(n log n) Time" +paper by K.Freivalds, E.Ozolins, A.Sostaks. + +Paper: https://papers.nips.cc/paper/ +8889-neural-shuffle-exchange-networks-sequence-processing-in-on-log-n-time.pdf + +Original code: https://github.com/LUMII-Syslab/shuffle-exchange +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +from tensor2tensor.layers import common_hparams +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +def ror(x, n, p=1): + """Bitwise right rotation. + + Args: + x: Input tensor + n: Bit count to represent x + p: Bit positions to shift + + Returns: + tf.Tensor: x shifted by p positions in n bits + """ + + a = tf.bitwise.right_shift(x, p) + b = tf.bitwise.left_shift(1, p) - 1 + c = tf.bitwise.bitwise_and(x, b) + d = tf.bitwise.left_shift(c, n - p) + + return a + d + + +def rol(x, n, p=1): + """Bitwise left rotation. + + Args: + x: Input tensor + n: Bit count to represent x + p: Bit positions to shift + + Returns: + tf.Tensor: x shifted by p positions in n bits + """ + a = tf.bitwise.left_shift(x, p) + b = tf.bitwise.left_shift(1, n) - 1 + c = tf.bitwise.bitwise_and(a, b) + d = tf.bitwise.right_shift(x, n - p) + + return tf.bitwise.bitwise_or(c, d) + + +def shuffle_layer(inputs, shuffle_fn=rol): + """Shuffles the elements according to bitwise left or right rotation. + + Args: + inputs: Tensor input from previous layer + shuffle_fn: Shift function rol or ror + + Returns: + tf.Tensor: Inputs shifted according to shuffle_fn + """ + + length = tf.shape(inputs)[1] + n_bits = tf.log(tf.cast(length - 1, tf.float32)) / tf.log(2.0) + n_bits = tf.cast(n_bits, tf.int32) + 1 + + indices = tf.range(0, length) + rev_indices = shuffle_fn(indices, n_bits) + return tf.gather(inputs, rev_indices, axis=1) + + +def reverse_shuffle_layer(inputs): + """Reverse shuffle of inputs. + + Used in the second half of Benes block. 
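Editor's note: `shuffle_layer` above implements the shuffle purely as an index permutation obtained by rotating each index in its n-bit representation. A pure-Python illustration for a length-8 sequence (the `_py` helper is mine, not part of the module):

```python
def rol_py(x, n, p=1):
    """Rotate the n-bit value x left by p bits."""
    mask = (1 << n) - 1
    return ((x << p) | (x >> (n - p))) & mask

n_bits = 3                                   # sequence length 2**3 = 8
perm = [rol_py(i, n_bits) for i in range(8)]
print(perm)                                  # [0, 2, 4, 6, 1, 3, 5, 7]

seq = list("abcdefgh")
print([seq[i] for i in perm])                # ['a', 'c', 'e', 'g', 'b', 'd', 'f', 'h']
```

Gathering with this permutation interleaves the two halves of the sequence, i.e. the classic perfect-shuffle used by shuffle-exchange networks; `ror` undoes it in the reverse half of the Benes block.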
+ + Args: + inputs: Inputs that should be shuffled + + Returns: + tf.Tensor: Inputs shuffled according to bitwise right rotation + """ + + return shuffle_layer(inputs, ror) + + +def conv_linear_map(inputs, nin, nout, bias_start, prefix): + """Convolutional liner map. + + Maps 3D tensor by last dimension. + + Args: + inputs: Inputs that should be shuffled + nin: Input feature map count + nout: Output feature map count + bias_start: Bias start value + prefix: Name prefix + + Returns: + tf.Tensor: Inputs with applied convolution + """ + + with tf.variable_scope(prefix): + inp_shape = tf.shape(inputs) + + initializer = tf.variance_scaling_initializer( + scale=1.0, mode="fan_avg", distribution="uniform") + kernel = tf.get_variable("CvK", [nin, nout], initializer=initializer) + bias_term = tf.get_variable( + "CvB", [nout], initializer=tf.constant_initializer(0.0)) + + mul_shape = [inp_shape[0] * inp_shape[1], nin] + res = tf.matmul(tf.reshape(inputs, mul_shape), kernel) + res = tf.reshape(res, [inp_shape[0], inp_shape[1], nout]) + return res + bias_start + bias_term + + +# pylint: disable=useless-object-inheritance +class SwitchLayer(object): + """Switch layer of Neural Shuffle-Exchange network.""" + + def __init__(self, prefix, dropout, mode): + """Initialize switch layer. + + Args: + prefix: Name prefix for switch layer + dropout: Dropout rate + mode: Training mode + """ + + self.prefix = prefix + self.dropout = dropout + self.mode = mode + self.batch_size = None + self.length = None + self.num_units = None + self.n_bits = None + + def linear_map(self, inputs, suffix, bias_start, in_units, out_units): + """2 input to 2 output linear map. + + Args: + inputs: Input tensor + suffix: Linear map name suffix + bias_start: Bias start value + in_units: Size of input tensor feature map count + out_units: Size of output tensor feature map count + Return: + tf.Tensor: Convolution apply to input tensor + """ + in_shape = [self.batch_size, self.length // 2, in_units * 2] + inputs = tf.reshape(inputs, in_shape) + res = conv_linear_map(inputs, in_units * 2, out_units * 2, bias_start, + self.prefix + "/" + suffix) + return tf.reshape(res, [self.batch_size, self.length, out_units]) + + def gated_linear_map(self, inputs, suffix, bias_start_reset, in_units, + out_units): + """Linear mapping with two reset gates. + + Args: + inputs: Input tensor + suffix: Linear map name suffix + bias_start_reset: Bias start value for reset gate + in_units: Size of input tensor feature map count + out_units: Size of output tensor feature map count + Return: + tf.Tensor: Convolution apply to input tensor + """ + + def reset_gate(name): + prefix = self.prefix + name + suffix + reset = conv_linear_map(inputs, in_units * 2, in_units * 2, + bias_start_reset, prefix) + return tf.nn.sigmoid(reset) + + in_shape = [self.batch_size, self.length // 2, in_units * 2] + inputs = tf.reshape(inputs, in_shape) + + reset1 = reset_gate("/reset1/") + reset2 = reset_gate("/reset2/") + res1 = conv_linear_map(inputs * reset1, in_units * 2, out_units, 0.0, + self.prefix + "/cand1/" + suffix) + res2 = conv_linear_map(inputs * reset2, in_units * 2, out_units, 0.0, + self.prefix + "/cand2/" + suffix) + + res = tf.concat([res1, res2], axis=2) + res = tf.reshape(res, [self.batch_size, self.length, out_units]) + return tf.nn.tanh(res) + + def __call__(self, inputs, residual_inputs): + """Apply SwitchLayer to inputs. 
+ + Args: + inputs: Input tensor + residual_inputs: Residual connections from previous block + + Returns: + tf.Tensor: New candidate value + """ + input_shape = tf.shape(inputs) + self.batch_size = input_shape[0] + self.length = input_shape[1] + self.num_units = inputs.shape.as_list()[2] + + self.n_bits = tf.log(tf.cast(self.length - 1, tf.float32)) / tf.log(2.0) + self.n_bits = tf.floor(self.n_bits) + 1 + + initializer = tf.constant_initializer(0.5) + residual_scale = tf.get_variable( + self.prefix + "/residual_scale", [self.num_units], + initializer=initializer) + + shuffled_input = self.swap_halves(inputs) + mem_all = inputs + residual_inputs * residual_scale + + # calculate the new value + candidate = self.gated_linear_map(mem_all, "c", 0.5, self.num_units, + self.num_units) + gate = tf.nn.sigmoid( + self.linear_map(mem_all, "g", 0.5, self.num_units, self.num_units)) + + candidate = gate * shuffled_input + (1 - gate) * candidate + + if self.dropout > 0: + candidate = tf.nn.dropout(candidate, rate=self.dropout / self.n_bits) + if self.dropout != 0.0 and self.mode == tf_estimator.ModeKeys.TRAIN: + noise = tf.random_normal(tf.shape(candidate), mean=1.0, stddev=0.001) + candidate = candidate * noise + + return candidate + + def swap_halves(self, inputs): + """Split inputs in half and then shuffle them as described in paper. + + Args: + inputs: ShuffleLayer inputs + Return: + tf.Tensor: Inputs with swapped halves + """ + x = tf.range(0, self.length) + xor_indices = tf.bitwise.bitwise_xor(x, 1) + input_xor = tf.gather( + inputs[:, :, :self.num_units // 2], xor_indices, axis=1) + return tf.concat([input_xor, inputs[:, :, self.num_units // 2:]], axis=2) + + +def shuffle_network(inputs, hparams): + """Neural Shuffle-Network with skip connections between blocks. + + Args: + inputs: inputs to the Shuffle-Exchange network. Should be in length of power + of 2. 
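Editor's note: `swap_halves` above pairs positions by XOR-ing each index with 1, so every even position exchanges the first half of its feature maps with its odd neighbour. A one-line illustration of that pairing:

```python
xor_indices = [i ^ 1 for i in range(8)]
print(xor_indices)   # [1, 0, 3, 2, 5, 4, 7, 6] -- neighbours 0<->1, 2<->3, ...
```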
+ hparams: Model configuration + + Returns: + tf.Tensor: Outputs of the Shuffle-Exchange last layer + """ + + def forward_step(state, layer_nr): + with tf.variable_scope("forward"): + last_state, residuals = state + prev = residuals[layer_nr, :, :, :] + switch = SwitchLayer("switch", hparams.dropout, hparams.mode) + cur = switch(last_state, prev) + return shuffle_layer(cur), residuals + + def reverse_step(state, layer_nr): + with tf.variable_scope("reverse"): + last_state, residuals = state + prev = residuals[layer_nr, :, :, :] + switch = SwitchLayer("reverse_switch", hparams.dropout, hparams.mode) + cur = switch(last_state, prev) + return reverse_shuffle_layer(cur), residuals + + input_shape = tf.shape(inputs) + n_bits = tf.log(tf.cast(input_shape[1] - 1, tf.float32)) / tf.log(2.0) + n_bits = tf.cast(n_bits, tf.int32) + 1 + + queue_shape = [n_bits * 2, input_shape[0], input_shape[1], input_shape[2]] + residuals_queue = tf.zeros(queue_shape) + block_out = tf.tanh(inputs) + + for k in range(hparams.num_hidden_layers): + with tf.variable_scope("benes_block_" + str(k), reuse=tf.AUTO_REUSE): + forward_outputs, _ = tf.scan( + forward_step, + tf.range(0, n_bits), + initializer=(block_out, residuals_queue), + parallel_iterations=1, + swap_memory=True) + + forward_tensors = [tf.expand_dims(block_out, axis=0), forward_outputs] + forward_outputs = tf.concat(forward_tensors, axis=0) + forward_last = forward_outputs[-1, :, :, :] + + reverse_outputs, _ = tf.scan( + reverse_step, + tf.range(n_bits, n_bits * 2), + initializer=(forward_last, residuals_queue), + parallel_iterations=1, + swap_memory=True) + + block_out = reverse_outputs[-1, :, :, :] + residuals_queue = tf.concat([forward_outputs, reverse_outputs], axis=0) + + last_layer = SwitchLayer("last_layer", hparams.dropout, hparams.mode) + return last_layer(block_out, residuals_queue[n_bits * 2, :, :, :]) + + +@registry.register_model +class ShuffleNetwork(t2t_model.T2TModel): + """Seq2Seq model for sequence processing in O(n log n) time.""" + + def bottom(self, features): + """We add padding to the input and output so they are the same. + + Length of input and output should be power of 2. + + Args: + features: Dictionary of inputs and targets + + Returns: + dictionary: Inputs and targets padded with 0 to the length of power of 2. + Both are same length. + """ + pad_len = self.max_pad_length(features) + features["inputs"] = self.pad(features["inputs"], pad_len) + + if features.get("targets") is not None: + features["targets"] = self.pad(features["targets"], pad_len) + + return super(ShuffleNetwork, self).bottom(features) + + @staticmethod + def pad(tensor, pad_len): + """Pad tensor on first dimension to pad_len. + + Args: + tensor: input tensor of shape length >= 2 + pad_len: pad length + + Returns: + tf.Tensor: Padded input tensor. + """ + + assert len(tensor.shape) >= 2 # tensor of shape [batch, length, ...] + length = tf.shape(tensor)[1] + + padding = [[0, 0], [0, pad_len - length]] + padding += [[0, 0]] * (len(tensor.shape) - 2) + return tf.pad(tensor, padding) + + def max_pad_length(self, features): + """Finds max padding length. + + If target length not specified use fixed padding + length from hparams.max_length. + + Args: + features: Dictionary with input and target tensors + + Returns: + tf.Tensor: Length of input and output sequence. Length is power of 2. 
+ """ + + if self.hparams.force_max_length or features.get("targets") is None: + assert math.log(self.hparams.max_length, 2).is_integer(), \ + "hparams.max_length should be power of w" + + return self.hparams.max_length + + length = tf.shape(features["inputs"])[1] + targets_length = tf.shape(features["targets"])[1] + length = tf.maximum(length, targets_length) + + p = tf.log(tf.cast(length, tf.float32)) / tf.log(2.0) + p = tf.cast(tf.ceil(p), tf.int32) + return tf.pow(2, p) + + def infer(self, features=None, **kwargs): + """Custom infer method for Shuffle-Exchange network. + + Args: + features: Dictionary of inputs and targets + **kwargs: SE network currently doesn't support auto-regressive output + + Returns: + dict: Dictionary of outputs. + """ + + del kwargs + targets = features.get("targets") + infer_targets = features.get("infer_targets") + + if targets is None and infer_targets is not None: + features["targets"] = infer_targets + + # Run the model + self.hparams.force_full_predict = True + with tf.variable_scope(self.name): + logits, _ = self.model_fn(features) + + assert len(logits.shape) == 5 # [batch, time, 1, 1, vocab] + logits = tf.squeeze(logits, [2, 3]) + outputs = tf.argmax(logits, axis=2) + + return {"outputs": outputs, "logits": logits, "scores": None} + + def loss(self, logits, features): + """Loss function for Neural Shuffle-Exchange network. + + We use custom loss function as default loss function doesn't + use padding for calculating loss. We assume that output string is same + length as the input. If you need other type of output please feel + free to modify this. + + Args: + logits: Logits from model + features: Features, not in one-hot format + + Returns: + tf.Tensor: Loss value + """ + + onehot_labels = tf.one_hot(features["targets"], + self._problem_hparams.vocab_size["targets"]) + cost_vector = tf.nn.softmax_cross_entropy_with_logits_v2( + logits=logits, labels=onehot_labels) + return tf.reduce_mean(cost_vector) + + def body(self, features): + """Body of Neural Shuffle-Exchange network. + + Args: + features: dictionary of inputs and targets + """ + + inputs = tf.squeeze(features["inputs"], axis=2) + logits = shuffle_network(inputs, self._hparams) + return tf.expand_dims(logits, axis=2) + + +@registry.register_hparams +def shuffle_network_baseline(): + """Large Shuffle-Exchange configuration. + + Returns: + dict: Neural Shuffle-Exchange configuration + """ + + hparams = common_hparams.basic_params1() + hparams.hidden_size = 48 * 8 # feature maps + hparams.num_hidden_layers = 2 # block count + + hparams.clip_grad_norm = 0. # no gradient clipping + + hparams.optimizer = "adam" + hparams.optimizer_adam_epsilon = 1e-5 + hparams.learning_rate_schedule = "legacy" + hparams.learning_rate_decay_scheme = "noam" + hparams.learning_rate = 0.1 + hparams.initializer_gain = 1.0 + hparams.initializer = "uniform_unit_scaling" + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.999 + hparams.add_hparam("force_max_length", False) # use fixed max length + hparams.max_length = 256 # use when targets are not known + + hparams.dropout = 0.1 + hparams.label_smoothing = 0. + hparams.weight_decay = 0. + + return hparams diff --git a/tensor2tensor/models/research/similarity_transformer.py b/tensor2tensor/models/research/similarity_transformer.py new file mode 100644 index 000000000..8d596a808 --- /dev/null +++ b/tensor2tensor/models/research/similarity_transformer.py @@ -0,0 +1,119 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
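Editor's note: `max_pad_length` above pads inputs and targets out to the next power of two (the network's shuffle permutations assume a power-of-two length). A plain-Python restatement of that computation, using `math.log2` instead of the in-graph ops:

```python
import math

def pad_length(length):
    """Smallest power of two >= length; mirrors max_pad_length above."""
    return 2 ** math.ceil(math.log2(length))

print(pad_length(100))   # 128
print(pad_length(128))   # 128
```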
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Using Transformer Networks for String similarities.""" +from tensor2tensor.data_generators import problem +from tensor2tensor.layers import common_layers +from tensor2tensor.models import transformer +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +@registry.register_model +class SimilarityTransformer(t2t_model.T2TModel): + """Transformer Model for Similarity between two strings. + + This model defines the architecture using two transformer + networks, each of which embed a string and the loss is + calculated as a Binary Cross-Entropy loss. Normalized + Dot Product is used as the distance measure between two + string embeddings. + """ + + def top(self, body_output, _): + return body_output + + def body(self, features): + if self.hparams.mode != tf_estimator.ModeKeys.PREDICT: + # In training mode we need to embed both the queries and the code + # using the inputs and targets respectively. + with tf.variable_scope('string_embedding'): + string_embedding = self.encode(features, 'inputs') + + with tf.variable_scope('code_embedding'): + code_embedding = self.encode(features, 'targets') + + string_embedding_norm = tf.nn.l2_normalize(string_embedding, axis=1) + code_embedding_norm = tf.nn.l2_normalize(code_embedding, axis=1) + + # All-vs-All cosine distance matrix, reshaped as row-major. + cosine_dist = 1.0 - tf.matmul(string_embedding_norm, code_embedding_norm, + transpose_b=True) + cosine_dist_flat = tf.reshape(cosine_dist, [-1, 1]) + + # Positive samples on the diagonal, reshaped as row-major. + label_matrix = tf.eye(tf.shape(cosine_dist)[0], dtype=tf.int32) + label_matrix_flat = tf.reshape(label_matrix, [-1]) + + logits = tf.concat([1.0 - cosine_dist_flat, cosine_dist_flat], axis=1) + labels = tf.one_hot(label_matrix_flat, 2) + + loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, + logits=logits) + return string_embedding_norm, {'training': loss} + + # In predict mode we conditionally embed either the string query + # or the code based on the embed_code feature. In both cases the + # input will be in the inputs feature but the variable scope will + # be different + # Define predicates to be used with tf.cond + def embed_string(): + with tf.variable_scope('string_embedding'): + string_embedding = self.encode(features, 'inputs') + return string_embedding + + def embed_code(): + with tf.variable_scope('code_embedding'): + code_embedding = self.encode(features, 'inputs') + return code_embedding + + embed_code_feature = features.get('embed_code') + + # embed_code_feature will be a tensor because inputs will be a batch + # of inputs. We need to reduce that down to a single value for use + # with tf.cond; so we simply take the max of all the elements. + # This implicitly assume all inputs have the same value. 
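Editor's note: in training mode the `SimilarityTransformer.body` above builds an all-vs-all cosine-distance matrix between L2-normalized query and code embeddings, with the true (query, code) pairs on the diagonal. A toy NumPy sketch of that construction (batch of 3, 4-dimensional embeddings, random data for illustration):

```python
import numpy as np

rng = np.random.RandomState(0)
q = rng.randn(3, 4); q /= np.linalg.norm(q, axis=1, keepdims=True)   # query embeddings
c = rng.randn(3, 4); c /= np.linalg.norm(c, axis=1, keepdims=True)   # code embeddings

cosine_dist = 1.0 - q @ c.T          # [3, 3] all-vs-all distances
labels = np.eye(3, dtype=np.int32)   # matching pairs sit on the diagonal

# Row-major flattening as in body(): entry k pairs query k // 3 with code k % 3.
print(cosine_dist.reshape(-1, 1).shape, labels.reshape(-1).shape)   # (9, 1) (9,)
```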
+ is_embed_code = tf.reduce_max(embed_code_feature) + result = tf.cond(is_embed_code > 0, embed_code, embed_string) + + result = tf.nn.l2_normalize(result) + return result + + def encode(self, features, input_key): + hparams = self._hparams + inputs = common_layers.flatten4d3d(features[input_key]) + + (encoder_input, encoder_self_attention_bias, _) = ( + transformer.transformer_prepare_encoder(inputs, problem.SpaceID.EN_TOK, + hparams)) + + encoder_input = tf.nn.dropout(encoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + encoder_output = transformer.transformer_encoder( + encoder_input, + encoder_self_attention_bias, + hparams, + nonpadding=transformer.features_to_nonpadding(features, input_key)) + + encoder_output = tf.reduce_mean(encoder_output, axis=1) + + return encoder_output + + def infer(self, features=None, **kwargs): + del kwargs + predictions, _ = self(features) + return predictions diff --git a/tensor2tensor/models/research/super_lm.py b/tensor2tensor/models/research/super_lm.py new file mode 100644 index 000000000..ea45c67b7 --- /dev/null +++ b/tensor2tensor/models/research/super_lm.py @@ -0,0 +1,410 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Supercomputer-based language model. + +Uses model-parallelism. + +Each shard (device) has a similar structure with different weights. +Occasional cross-replica-sum across shards. + +Example problem: languagemodel_lm1b8k_packed + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import modalities +from tensor2tensor.utils import diet +from tensor2tensor.utils import expert_utils +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +ModeKeys = tf_estimator.ModeKeys # pylint: disable=invalid-name + + +@registry.register_model +class SuperLM(t2t_model.T2TModel): + """Attention net. 
See file docstring.""" + + def body(self, features): + # Remove dropout if not training + hparams = self._hparams + ps_devices = self._ps_devices + assert hparams.num_model_shards % len(ps_devices) == 0 + shards_per_device = hparams.num_model_shards // len(ps_devices) + model_devices = [ps_devices[i // shards_per_device] + for i in range(hparams.num_model_shards)] + print("model_devices = %s" % model_devices) + mp = expert_utils.Parallelism(model_devices, reuse=False) + vocab_size = self._problem_hparams.vocabulary["targets"].vocab_size + # squeeze out channels, heights + targets = features["targets_raw"] + targets = tf.squeeze(targets, 3) + targets = tf.squeeze(targets, 2) + shifted_targets = common_layers.shift_right_2d(targets) + # Bypass the symbol modality and use a different embedding on each shard. + decoder_input = mp( + common_layers.embedding, shifted_targets, vocab_size, + hparams.hidden_size, + multiplier=hparams.hidden_size**0.5, + symbol_dropout_rate=hparams.symbol_dropout) + decoder_self_attention_bias = mp( + common_attention.attention_bias_lower_triangle, + tf.shape(targets)[1]) + if "targets_segmentation" in features: + # "Packed" dataset - keep the examples from seeing each other. + targets_segmentation = features["targets_segmentation"] + targets_position = features["targets_position"] + decoder_self_attention_bias = mp( + tf.add, decoder_self_attention_bias, + mp(common_attention.attention_bias_same_segment, + targets_segmentation, targets_segmentation)) + else: + targets_position = None + + if hparams.pos == "timing": + if targets_position is None: + decoder_input = mp(common_attention.add_timing_signal_1d, decoder_input) + else: + decoder_input = mp( + common_attention.add_timing_signal_1d_given_position, + decoder_input, targets_position) + + decoder_input = mp( + tf.nn.dropout, decoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + decoder_output, extra_loss = _super_stack( + decoder_input, decoder_self_attention_bias, hparams, mp) + # Bypass the symbol modality and compute logits directly. + # We compute a different set of logits on each shard, and sum them. + logits = mp(tf.layers.dense, decoder_output, vocab_size, name="logits") + logits = expert_utils.all_reduce_ring(logits, mp) + logits = mp(tf.multiply, logits, mp.n ** -0.5) + # We now have identical logits on all shards. + # Shard 0 gets returned to the estimator. + logits_shard_0 = logits[0] + logits_shard_0 = tf.expand_dims(logits_shard_0, 2) + logits_shard_0 = tf.expand_dims(logits_shard_0, 3) + # On each device, we compute the loss for a part of the batch. + # This is faster than computing the whole loss on one shard. + mp, logits = expert_utils.reduce_by_device(mp, logits, lambda l: l[0]) + def _loss_for_shard(logits, targets, shard): + if mp.n > 1: + logits = common_layers.approximate_split(logits, mp.n, 0)[shard] + targets = common_layers.approximate_split(targets, mp.n, 0)[shard] + return common_layers.padded_cross_entropy( + logits, targets, hparams.label_smoothing) + num, denom = mp(_loss_for_shard, logits, targets, range(mp.n)) + # override training loss so that it is not computed externally. + losses = {"training": tf.add_n(num) / tf.add_n(denom)} + if extra_loss is not None: + losses["extra"] = extra_loss + return logits_shard_0, losses + + +def _super_stack(inputs, + attention_bias, + hparams, + mp, + padding="LEFT"): + """A stack of super_lm layers. 
+ + Args: + inputs: a list of Tensors + attention_bias: list of bias Tensor for self-attention + (see common_attention.attention_bias()) + hparams: hyperparameters for model + mp: a Parallelism object + padding: a string + + Returns: + y: a list of Tensors + extra_loss: an optional scalar + """ + layers = hparams.layers.strip(",").split(",") + moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(",")] + if hparams.diet_experts: + hsize, = moe_hidden_sizes + def _diet_expert(x): + return diet.diet_expert(x, hsize, diet.diet_adam_optimizer_params()) + expert_fn = _diet_expert + else: + expert_fn = expert_utils.ffn_expert_fn( + hparams.hidden_size, moe_hidden_sizes, hparams.hidden_size) + # scaled_dot_product_attention_with_projections uses a 3d attention bias + # (no heads), where multihead_attention uses 4d attention bias. + attention_bias_3d = mp(tf.squeeze, attention_bias, 1) + mix_size = int(hparams.mix_fraction * hparams.hidden_size) + accumulator = inputs + x = inputs + extra_losses = [] + for layer_num, layer_type in enumerate(layers): + with tf.variable_scope("%s_%d" % (layer_type, layer_num)): + tf.logging.info("%s_%d" % (layer_type, layer_num)) + if layer_type == "a": + # accumulate + accumulator = mp(tf.add, x, accumulator) + x = accumulator + elif layer_type == "n": + # normalize + x = mp(common_layers.apply_norm, + x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon) + elif layer_type == "d": + # dropout + x = mp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout) + elif layer_type == "m": + # mix across shards + def _split(t): + return tuple(tf.split( + t, [mix_size, hparams.hidden_size - mix_size], 2)) + to_mix, to_keep = mp(_split, x) + mixed = expert_utils.all_reduce_ring(to_mix, mp) + mixed = mp(tf.multiply, mixed, mp.n ** -0.5) + x = mp(lambda a, b: tf.concat([a, b], 2), mixed, to_keep) + elif layer_type == "att": + # single-head attention + q = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False, + name="q_transform") + x = mp( + common_attention.scaled_dot_product_attention_simple, + q, x, x, attention_bias_3d) + x = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False, + name="o_transform") + elif layer_type == "multihead-att": + # multi-head attention + x = mp( + common_attention.multihead_attention, + x, + None, + attention_bias, # bias + hparams.multihead_attention_key_channels or hparams.hidden_size, + hparams.multihead_attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.multihead_attention_num_heads, + hparams.attention_dropout) + elif layer_type == "ffn": + x = mp( + common_layers.dense_relu_dense, x, + hparams.filter_size, hparams.hidden_size) + elif layer_type == "conv": + # convolution + x = mp( + common_layers.conv1d, + x, + hparams.hidden_size, + hparams.kernel_height, + activation=tf.nn.relu, + padding=padding, + ) + elif layer_type == "moe": + # mixture of experts - each model shard has its own local MoE. 
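+        # local_moe returns both the transformed activations and a
+        # load-balancing loss term; the per-shard losses are gathered into
+        # extra_losses and summed into the extra_loss returned by this stack.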
+ x, loss = mp( + expert_utils.local_moe, + x, + train=hparams.mode == tf_estimator.ModeKeys.TRAIN, + expert_fn=expert_fn, + num_experts=hparams.moe_num_experts, + k=hparams.moe_k, + loss_coef=hparams.moe_loss_coef) + extra_losses.extend(loss) + else: + assert False, "unknown sublayer %s" % layer_type + if extra_losses: + extra_loss = tf.add_n(extra_losses) + else: + extra_loss = None + return x, extra_loss + + +@registry.register_hparams +def super_lm_base(): + """Set of hyperparameters.""" + hparams = common_hparams.basic_params1() + hparams.hidden_size = 512 + hparams.moe_hidden_sizes = "512" + hparams.batch_size = 16384 + hparams.max_length = 0 + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. + hparams.layer_prepostprocess_dropout = 0.0 + hparams.symbol_dropout = 0.1 + hparams.add_hparam("attention_dropout", 0.0) + hparams.label_smoothing = 0.0 + hparams.clip_grad_norm = 0. # i.e. no gradient clipping + hparams.optimizer = "Adafactor" + hparams.learning_rate_decay_scheme = "noam" + hparams.learning_rate = 0.1 + hparams.learning_rate_warmup_steps = 8000 + hparams.initializer_gain = 1.0 + hparams.initializer = "uniform_unit_scaling" + hparams.weight_decay = 0.0 + hparams.shared_embedding_and_softmax_weights = False + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + # we only want one data shard. + hparams.no_data_parallelism = True + # bypass the symbol modality so that we can use model parallelism. + hparams.bottom = { + "inputs": modalities.identity_bottom, + "targets": modalities.identity_bottom, + } + hparams.top = { + "targets": modalities.identity_top, + } + hparams.add_hparam("filter_size", 512) + hparams.add_hparam("mix_fraction", 0.5) + # attention-related flags + hparams.add_hparam("multihead_attention_num_heads", 4) + hparams.add_hparam("multihead_attention_key_channels", 0) + hparams.add_hparam("multihead_attention_value_channels", 0) + hparams.add_hparam("pos", "timing") # timing, none + hparams.add_hparam( + "layers", ("n,att,m,d,a," "n,ffn,m,d,a,") * 4 + "n,ffn,d") + # Number of model shards - each one has separate parameters. + # Changing this number invalidates checkpoints. 
+ hparams.add_hparam("num_model_shards", 8) + hparams.add_hparam("diet_experts", False) + return hparams + + +@registry.register_hparams +def super_lm_conv(): + """Add some convolutions.""" + hparams = super_lm_base() + hparams.layers = ( + ("n,conv,m,d,a," "n,att,m,d,a," "n,ffn,m,d,a,") * 4 + "n,ffn,d") + return hparams + + +@registry.register_hparams +def super_lm_big(): + """Big model.""" + hparams = super_lm_base() + hparams.hidden_size = 1024 + hparams.filter_size = 2048 + return hparams + + +@registry.register_hparams +def super_lm_low_mix(): + """Less mixuing.""" + hparams = super_lm_base() + hparams.mix_fraction = 0.125 + return hparams + + +@registry.register_hparams +def super_lm_high_mix(): + """More mixing.""" + hparams = super_lm_base() + hparams.mix_fraction = 0.875 + return hparams + + +@registry.register_hparams +def super_lm_b8k(): + """Smaller batch.""" + hparams = super_lm_base() + hparams.batch_size = 8192 + return hparams + + +@registry.register_hparams +def super_lm_moe(): + """Add mixture of experts with ~1B params.""" + hparams = super_lm_base() + hparams.layers = ( + ("n,att,m,d,a," "n,moe,m,d,a,") * 4 + "n,ffn,d") + hparams.moe_num_experts = 32 + hparams.moe_hidden_sizes = "1024" + return hparams + + +@registry.register_hparams +def super_lm_moe_h4(): + """Add mixture of experts.""" + hparams = super_lm_moe() + hparams.layers = ( + ("n,multihead-att,m,d,a," "n,moe,m,d,a,") * 4 + "n,ffn,d") + return hparams + + +@registry.register_hparams +def super_lm_moe_4b_diet(): + """Add mixture of experts with ~4B params and diet variables. + + Currently, hangs. See this issue: + https://github.com/tensorflow/tensorflow/issues/13351 + + Returns: + a hparams. + """ + hparams = super_lm_moe() + hparams.moe_num_experts = 128 + hparams.diet_experts = True + return hparams + + +@registry.register_hparams +def super_lm_tpu(): + """Hyperparameters for data-parallel training on TPU. + + This is not the intended usage - we would really like to use model-parallelism + with the model shards mapping to cores and cross_replica_sum used for + communication. Currently, we replicate the entire model on each core. + + Returns: + An hparams object. + """ + hparams = super_lm_base() + hparams.batch_size = 4096 + return hparams + + +@registry.register_hparams +def super_lm_big_tpu(): + hparams = super_lm_big() + hparams.batch_size = 1024 + return hparams + + +@registry.register_hparams +def super_lm_tpu_memtest(): + """Crazy set of hyperparameters to test memory optimizations. + + Quality will be very poor due to lack of attention layers. + 853M parameters + This seems to run on TPU for languagemodel_lm1b8k_packed as of 2018-01-19. + + Returns: + An hparams object. + """ + hparams = super_lm_base() + hparams.num_model_shards = 1 + hparams.layers = "ffn," * 8 + hparams.hidden_size = 4096 + hparams.filter_size = 12000 + hparams.batch_size = 512 + return hparams diff --git a/tensor2tensor/models/research/transformer_aux.py b/tensor2tensor/models/research/transformer_aux.py new file mode 100644 index 000000000..6bb0f48e7 --- /dev/null +++ b/tensor2tensor/models/research/transformer_aux.py @@ -0,0 +1,175 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Transformer with auxiliary losses from https://arxiv.org/abs/1803.00144.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_layers +from tensor2tensor.models import transformer +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +def shift_and_pad(tensor, shift, axis=0): + """Shifts and pads with zero along an axis. + + Example: + shift_and_pad([1, 2, 3, 4], 2) --> [0, 0, 1, 2] + shift_and_pad([1, 2, 3, 4], -2) --> [3, 4, 0, 0] + + Args: + tensor: Tensor; to be shifted and padded. + shift: int; number of positions to shift by. + axis: int; along which axis to shift and pad. + + Returns: + A Tensor with the same shape as the input tensor. + """ + shape = tensor.shape + rank = len(shape) + assert 0 <= abs(axis) < rank + + length = int(shape[axis]) + assert 0 <= abs(shift) < length + + paddings = [(0, 0)] * rank + begin = [0] * rank + size = [-1] * rank + + if shift > 0: + paddings[axis] = (shift, 0) + size[axis] = length - shift + elif shift < 0: + paddings[axis] = (0, -shift) + begin[axis] = -shift + + ret = tf.pad(tf.slice(tensor, begin, size), paddings) + + return ret + + +@registry.register_model +class TransformerAux(transformer.Transformer): + """Attention net. See file docstring.""" + + def _extract_shift_values(self): + """Parses the shift string. + + The hparams should contain the key shift_values, which maps to a + comma-separated string of integers. These integers specify the number of + timesteps to predict/reconstruct to compute auxiliary losses. + + For instance, "-4,2,6" means to reconstruct the target 4 steps before and + predict the targets 2 steps and 6 steps ahead. + + Returns: + List of int != 0 shift values to compute the auxiliary losses. + """ + shift_values_str = self._hparams.get("shift_values", "") + shift_values = [int(x) for x in shift_values_str.split(",")] + + tf.logging.info( + "Computing auxiliary losses for the following shifts: %s", + shift_values) + + return shift_values + + def auxiliary_loss(self, body_output, features, shift): + """Auxiliary predict loss. + + Args: + body_output: Tensor with shape [batch_size, decoder_length, hidden_dim]. + features: Map of features to the model. Must contain the following: + "targets": Target decoder outputs. + [batch_size, decoder_length, 1, hidden_dim] + shift: int != 0, amount to shift/pad the target sequence. + If shift > 0, it represents the number of previous timesteps to + reconstruct; if shift < 0, it represents the number of future timesteps + to predict. + + Returns: + A 2-tuple of the numerator and denominator of the cross-entropy loss. + + Raises: + ValueError: if features does not contain a targets_raw tensor. 
+ """ + assert isinstance(shift, int) and shift != 0 + name = "reconst_%d" % shift if shift > 0 else "predict_%d" % abs(shift) + + if features and "targets_raw" in features: + targets = features["targets_raw"] + targets = common_layers.flatten4d3d(targets) + else: + raise ValueError( + "Feature map must contain a targets_raw tensor.") + + with tf.variable_scope(name): + logits = self.top(body_output, features) + labels = shift_and_pad(targets, shift, axis=1) + return common_layers.padded_cross_entropy( + logits, + labels, + self._hparams.label_smoothing) + + def body(self, features): + """Transformer main model_fn. + + Args: + features: Map of features to the model. Should contain the following: + "inputs": Transformer inputs. + [batch_size, input_length, 1, hidden_dim]. + "targets": Target decoder outputs. + [batch_size, target_length, 1, hidden_dim] + "target_space_id": A scalar int from data_generators.problem.SpaceID. + + Returns: + A 2-tuple containing: + Logit tensor. [batch_size, decoder_length, vocab_size] + Map of keys to loss tensors. Should contain the following: + "training": Training loss (shift == 0). + "auxiliary": Auxiliary loss (shift != 0). + """ + output = super(TransformerAux, self).body(features) + output, losses = self._normalize_body_output(output) + + aux = 0.0 + for shift in self._extract_shift_values(): + loss_num, loss_den = self.auxiliary_loss(output, features, shift) + aux += loss_num / loss_den + losses["auxiliary"] = aux + + return output, losses + + +@registry.register_hparams +def transformer_aux_base(): + """Set of hyperparameters.""" + hparams = transformer.transformer_base() + hparams.shared_embedding_and_softmax_weights = False + hparams.add_hparam("shift_values", "1,2,3,4") + return hparams + + +@registry.register_hparams +def transformer_aux_tiny(): + """Set of hyperparameters.""" + hparams = transformer.transformer_tiny() + hparams.shared_embedding_and_softmax_weights = False + hparams.add_hparam("shift_values", "1,2") + return hparams diff --git a/tensor2tensor/models/research/transformer_aux_test.py b/tensor2tensor/models/research/transformer_aux_test.py new file mode 100644 index 000000000..0d4ae332a --- /dev/null +++ b/tensor2tensor/models/research/transformer_aux_test.py @@ -0,0 +1,119 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for tensor2tensor.models.research.transformer_aux.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized + +import numpy as np +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.models.research import transformer_aux +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class TransformerAuxTest(parameterized.TestCase, tf.test.TestCase): + + @parameterized.parameters( + dict( + tensor=np.array( + [1, 2, 3, 4] + ), + shift=0, + axis=0, + target=np.array( + [1, 2, 3, 4] + ), + ), + dict( + tensor=np.array( + [1, 2, 3, 4] + ), + shift=2, + axis=0, + target=np.array( + [0, 0, 1, 2] + ), + ), + dict( + tensor=np.array( + [1, 2, 3, 4] + ), + shift=-2, + axis=0, + target=np.array( + [3, 4, 0, 0] + ), + ), + dict( + tensor=np.array( + [[1, 2, 3, 4], + [5, 6, 7, 8]] + ), + shift=2, + axis=1, + target=np.array( + [[0, 0, 1, 2], + [0, 0, 5, 6]] + ), + ), + ) + def test_shift_and_pad(self, tensor, shift, axis, target): + with self.test_session() as session: + output = transformer_aux.shift_and_pad(tensor, shift, axis) + output_val = session.run(output) + self.assertAllEqual(output_val, target) + + def test_transformer_aux_body(self): + batch_size = 3 + input_length = 5 + target_length = 16 + vocab_size = 9 + hparams = transformer_aux.transformer_aux_tiny() + hparams.shift_values = "-5,1,2,3" + p_hparams = problem_hparams.test_problem_hparams(vocab_size, + vocab_size, + hparams) + hparams.problem_hparams = p_hparams + inputs = np.random.randint( + vocab_size, size=(batch_size, input_length, 1, 1)) + targets = np.random.randint( + vocab_size, size=(batch_size, target_length, 1, 1)) + features = { + "inputs": tf.constant(inputs, dtype=tf.int32), + "targets": tf.constant(targets, dtype=tf.int32), + "target_space_id": tf.constant(1, dtype=tf.int32), + } + tf.train.create_global_step() + model = transformer_aux.TransformerAux(hparams, tf_estimator.ModeKeys.TRAIN, + p_hparams) + logits, losses = model(features) + + self.assertIn("training", losses) + self.assertIn("auxiliary", losses) + + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + logits_val = session.run(logits) + self.assertEqual(logits_val.shape, + (batch_size, target_length, 1, 1, vocab_size)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/research/transformer_moe.py b/tensor2tensor/models/research/transformer_moe.py new file mode 100644 index 000000000..9e3eadcea --- /dev/null +++ b/tensor2tensor/models/research/transformer_moe.py @@ -0,0 +1,419 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""transformer (attention seq-seq model) with mixtures of experts. 
+ +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.models import transformer +from tensor2tensor.utils import expert_utils +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +# The transformer architecture can be defined using the layer_types hparams. +# If not defined, the default types and num_hidden_layers are used as fallback +# values. +# +# Examples of usage: +# "a/a/a/a/a/a": Original base transformer (6 encoder and decoder layers of +# multihead full attention) +# "a/a/a-moe/a": 4 layers with 1 moe at layer 3 +# "loc/red/loc/red": Alternate between local and memory compressed attention +# "a/a/a#": Encoder only model (3 layers) +# "#a/a/a": Decoder only model (3 layers) +# "a/a-moe#a/a/a": Encoder (2 layers with 1 moe), decoder (3 layers) +# Note that all combinations are not necessarily possibles (some attention +# types are not necessarily compatible with the encoder, or can't accept certain +# types of masking) + +SEP_ENCODEC = "#" +SEP_LAYER = "/" +SEP_FF = "-" + + +@registry.register_model +class TransformerMoe(t2t_model.T2TModel): + """Attention net. See file docstring.""" + + @staticmethod + def use_body_sharded(): + return True + + def body_sharded(self, sharded_features): + # ========= Prepare the input and target ========= + + hparams = self._hparams + dp = self._data_parallelism + + # Process input + inputs = sharded_features["inputs"] + target_space = sharded_features["target_space_id"] + ( + encoder_input, + encoder_self_attention_bias, + encoder_decoder_attention_bias, + ) = dp(self._prepare_encoder, inputs, target_space) + + # Process output + targets = sharded_features["targets"] + decoder_input, decoder_self_attention_bias = dp( + self._prepare_decoder, targets + ) + + def dp_preprocess(x): + return dp(common_layers.layer_preprocess, x, hparams) + + def dp_postprocess(x, y): + return dp(common_layers.layer_postprocess, x, y, hparams) + + cache = dict(extra_loss=0.0) + + def prepostprocess(fct): + """Apply processing and capture the extra loss.""" + @expert_utils.add_var_scope() + def decorated(x, *args, **kwargs): + x_preprocessed = dp_preprocess(x) + y, loss = fct(x_preprocessed, *args, **kwargs) + cache["extra_loss"] += loss + return dp_postprocess(x, y) + return decorated + + # ========= Compute the transformer architecture ========= + + encoder_layers, decoder_layers = self._extract_layer_types() + + layers = common_attention.get_standardized_layers( + hparams=hparams, + dp=dp, + ) + + if hparams.mode == tf_estimator.ModeKeys.TRAIN: + + # Display the encoder-decoder architecture + def print_layer(name, layers): + tf.logging.info("{} architecture:".format(name)) + for i, l in enumerate(layers): + tf.logging.info(" * Layer {}: {}".format(i, " - ".join(l))) + print_layer("Encoder", encoder_layers) + print_layer("Decoder", decoder_layers) + + # ========= Construct the transformer encoder and decoder ========= + + encoder_outputs = [] + + x = encoder_input + with tf.variable_scope("encoder"): + for layer_num, block_types in enumerate(encoder_layers): + # Each encoder layers is composed of two blocks: + # * self-attention block + # * feed-forward block + att_type, ff_type = block_types + with 
tf.variable_scope("layer_{}".format(layer_num)): + x = prepostprocess(layers[att_type])( + x, + bias=encoder_self_attention_bias, + name="att_{}".format(att_type), + ) + x = prepostprocess(layers[ff_type])( + x, + name="ff_{}".format(ff_type) + ) + encoder_outputs.append(x) + if encoder_outputs: + encoder_outputs[-1] = dp_preprocess(x) + + x = decoder_input + with tf.variable_scope("decoder"): + for layer_num, block_types in enumerate(decoder_layers): + # Each decoder layers is composed of three blocks: + # * self-attention block + # * enco-deco attention block (optional) + # * feed-forward block + self_att_type, att_ende_type, ff_type = block_types + with tf.variable_scope("layer_{}".format(layer_num)): + x = prepostprocess(layers[self_att_type])( + x, + bias=decoder_self_attention_bias, + name="self_att_{}".format(self_att_type), + ) + # Only add the enco-deco attention layer if there is an encoder + if encoder_outputs: + x = prepostprocess(layers[att_ende_type])( + x, + memory_antecedent=encoder_outputs[-1], + bias=encoder_decoder_attention_bias, + name="att_ende_{}".format(att_ende_type), + ) + x = prepostprocess(layers[ff_type])( + x, + name="ff_{}".format(ff_type) + ) + # If normalization is done in layer_preprocess, then it should also be + # done on the output, since the output can grow very large, being the sum + # of a whole stack of unnormalized layer outputs. + x = dp_preprocess(x) + + decoder_output = dp(tf.expand_dims, x, 2) + return decoder_output, cache["extra_loss"] + + @expert_utils.add_name_scope() + def _prepare_encoder(self, inputs, target_space): + """Process the transformer encoder inputs.""" + inputs = common_layers.flatten4d3d(inputs) + + output = transformer.transformer_prepare_encoder( + inputs, + target_space, + self._hparams, + features=None, + ) + enco_input, enco_self_att_bias, enco_deco_att_bias = output + + enco_input = tf.nn.dropout( + enco_input, 1.0 - self._hparams.layer_prepostprocess_dropout) + + return enco_input, enco_self_att_bias, enco_deco_att_bias + + @expert_utils.add_name_scope() + def _prepare_decoder(self, targets): + """Process the transformer decoder input.""" + targets = common_layers.flatten4d3d(targets) + + output = transformer.transformer_prepare_decoder( + targets, self._hparams, features=None, + ) + deco_input, deco_self_attention_bias = output + + deco_input = tf.nn.dropout( + deco_input, 1.0 - self._hparams.layer_prepostprocess_dropout + ) + return deco_input, deco_self_attention_bias + + def _extract_layer_types(self): + """Parse the layer string. + + Returns: + list[tuple[str, str]]: Encoder layers: list of (attention, feed-forward) + list[tuple[str, str, str]]: Decoder layers: list of (self-attention, + enc-dec attention, feed-forward) + """ + hparams = self._hparams + layer_types = hparams.layer_types + + # If the architecture has not explicitly been set, we just construct a + # standard transformer with the fallback values + if not layer_types: + layer_types = SEP_LAYER.join( + [hparams.default_att] * hparams.num_hidden_layers) + + # If encoder not explicitly defined, the encoder will have the same + # structure as the decoder + layer_types = layer_types.split(SEP_ENCODEC) + if len(layer_types) == 1: + layer_types *= 2 + + # Some models don't need the encoder (ex: language modeling) + # TODO(epot): What are the other conditions (has_input ?) 
+ if hparams.prepend_mode != "none": + layer_types[0] = "" + + # Extend the blocks and fill them with the default values if not specified + final_layers = ([], []) + for i, blocks_str_joined in enumerate(layer_types): + for blocks_str in blocks_str_joined.split(SEP_LAYER): + if not blocks_str: + continue + blocks_list = blocks_str.split(SEP_FF) + # Eventually use the fallback values for the layer_types. If the + # encoder is empty, do not use the enco-deco attention. + self_att = blocks_list[0] or hparams.default_att + ende_att = hparams.default_att if layer_types[0] else "_" + ff = hparams.default_ff + if len(blocks_list) > 1: + ff = blocks_list[-1] + if len(blocks_list) == 3: + ende_att = blocks_list[1] + if i == 0: # Encoder + blocks_tuple = (self_att, ff) + elif i == 1: # Decoder + blocks_tuple = (self_att, ende_att, ff) + final_layers[i].append(blocks_tuple) + + return final_layers + + +@registry.register_hparams +def transformer_moe_base(): + """Set of hyperparameters.""" + hparams = common_hparams.basic_params1() + hparams.norm_type = "layer" + hparams.hidden_size = 512 + hparams.batch_size = 4096 + hparams.max_length = 2001 + hparams.max_input_seq_length = 2000 + hparams.max_target_seq_length = 2000 + hparams.dropout = 0.0 + hparams.clip_grad_norm = 0. # i.e. no gradient clipping + hparams.optimizer_adam_epsilon = 1e-9 + hparams.learning_rate_decay_scheme = "noam" + hparams.learning_rate = 0.1 + hparams.learning_rate_warmup_steps = 2000 + hparams.initializer_gain = 1.0 + hparams.num_hidden_layers = 5 + hparams.initializer = "uniform_unit_scaling" + hparams.weight_decay = 0.0 + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.98 + hparams.num_sampled_classes = 0 + hparams.label_smoothing = 0.0 + hparams.shared_embedding_and_softmax_weights = True + # According to noam, ("n", "da") seems better for harder-to-learn models + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + + # Hparams used by transformer_prepare_decoder() function + hparams.add_hparam("pos", "timing") # timing, none + hparams.add_hparam("proximity_bias", False) + hparams.add_hparam("causal_decoder_self_attention", True) + + hparams = common_attention.add_standard_attention_hparams(hparams) + + # Decoder layers type. If set, num_decoder_layers parameter will be ignored + # and the number of decoder layer will be deduced from the string + # See top file comment for example of usage + hparams.add_hparam("layer_types", "") + # Default attention type (ex: a, loc, red,...) and feed-forward type (ex: fc, + # sep, moe,...) + hparams.add_hparam("default_att", "a") + hparams.add_hparam("default_ff", "fc") + + return hparams + + +@registry.register_hparams +def transformer_moe_8k(): + """Hyper parameters specifics for long sequence generation.""" + hparams = transformer_moe_base() + + hparams.batch_size = 8192 + hparams.max_length = 0 # max_length == batch_size + hparams.eval_drop_long_sequences = True + hparams.min_length_bucket = 256 # Avoid cyclic problems for big batches + + hparams.default_ff = "sep" + hparams.hidden_size = 1024 + + return hparams + + +@registry.register_hparams +def transformer_moe_8k_lm(): + """Language modeling params. + + Will have the following architecture by default: + * No encoder. 
+ * Decoder architecture: + * Layer 0: a - sepm (masked self-attention/masked separable convolutions) + * Layer 1: a - sepm + * Layer 2: a - moe (mixture of expert layers in the middle) + * Layer 3: a - sepm + * Layer 4: a - sepm + + Returns: + hparams + """ + hparams = transformer_moe_8k() + + # Use masked versions of local attention and separable convolution + hparams.default_ff = "sepm" + + # hparams.layer_types contains the network architecture: + # Start with '#' for decoder only architecture + hparams.layer_types = "#a/a/a-moe/a/a" # 5 full attention layers with 1 moe + # For long sequences, if running out of memory, it's possible to use the + # one of those two optimized versions instead: + # * Memory efficient multihead attention (slow): + # hparams.layer_types = "#mem/mem/mem-moe/mem/mem" + # * Alternate between local/compressed attention layers (faster): + # hparams.layer_types = "#locm/redm/locm-moe/redm/locm" + + return hparams + + +@registry.register_hparams +def transformer_moe_2k(): + """Base transformers model with moe. + + Will have the following architecture: + * No encoder. + * Layer 0: a - sep (self-attention - unmasked separable convolutions) + * Layer 1: a - sep + * Layer 2: a - sep + * Layer 3: a - sep + * Layer 4: a - sep + * Decoder architecture: + * Layer 0: a - a - sepm (self-attention - enco/deco-attention - masked sep) + * Layer 1: a - a - sepm + * Layer 2: a - a - moe (mixture of expert layers in the middle) + * Layer 3: a - a - sepm + * Layer 4: a - a - sepm + + Returns: + hparams + """ + hparams = transformer_moe_8k() + hparams.batch_size = 2048 + + hparams.default_ff = "sep" + + # hparams.layer_types contains the network architecture: + encoder_archi = "a/a/a/a/a" + decoder_archi = "a-sepm/a-sepm/a-moe/a-sepm/a-sepm" + hparams.layer_types = "{}#{}".format(encoder_archi, decoder_archi) + + return hparams + + +@registry.register_hparams +def transformer_moe_12k(): + """Hyper parameters specifics for long sequence generation.""" + hparams = transformer_moe_8k() + hparams.batch_size = 12000 + # At 12k, the softmax become the memory bottleneck + hparams.factored_logit = True + return hparams + + +@registry.register_hparams +def transformer_moe_prepend_8k(): + """Model which formulate a seq2seq problem as language modeling.""" + hparams = transformer_moe_8k() + hparams.prepend_mode = "prepend_inputs_masked_attention" + hparams.eval_drop_long_sequences = False + hparams.max_input_seq_length = 7500 + hparams.default_ff = "sepm" + hparams.layer_types = "locm/redm/locm-moe/redm/locm" + hparams.moe_num_experts = 256 + return hparams diff --git a/tensor2tensor/models/research/transformer_nat.py b/tensor2tensor/models/research/transformer_nat.py new file mode 100644 index 000000000..7ad67c8b1 --- /dev/null +++ b/tensor2tensor/models/research/transformer_nat.py @@ -0,0 +1,430 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
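Before the non-autoregressive model that follows, it may help to spell out how a transformer_moe layer_types string decomposes. The sketch below is an editorial illustration with a made-up helper name (split_layer_types), not part of the patch; it follows the grammar documented at the top of transformer_moe.py, while the real parsing, including the encoder-decoder attention entry of decoder layers and the prepend-mode handling, is done by TransformerMoe._extract_layer_types.

```python
# Illustrative sketch, not part of the patch. A simplified reading of the
# layer_types grammar: "#" splits encoder from decoder, "/" splits layers,
# "-" splits the attention type from the feed-forward type.
def split_layer_types(layer_types, default_att="a", default_ff="fc"):
  halves = layer_types.split("#")
  if len(halves) == 1:      # one string means encoder and decoder share it
    halves = halves * 2
  parsed = []
  for half in halves:
    layers = []
    for block in half.split("/"):
      if not block:         # empty half, e.g. "#a/a/a" for decoder-only
        continue
      parts = block.split("-")
      att = parts[0] or default_att
      ff = parts[-1] if len(parts) > 1 else default_ff
      layers.append((att, ff))
    parsed.append(layers)
  return parsed             # [encoder_layers, decoder_layers]

print(split_layer_types("a/a-moe#a/a/a"))
# [[('a', 'fc'), ('a', 'moe')], [('a', 'fc'), ('a', 'fc'), ('a', 'fc')]]
```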
+ +"""NAT Transformer from https://arxiv.org/abs/1805.11063.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import functools +from six.moves import range +from tensor2tensor.layers import common_layers +from tensor2tensor.models import transformer +from tensor2tensor.utils import beam_search +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +from tensorflow.python.training import moving_averages # pylint: disable=g-direct-tensorflow-import + + +def init_vq_bottleneck(bottleneck_size, hidden_size): + """Get lookup table for VQ bottleneck.""" + means = tf.get_variable( + name="means", + shape=[bottleneck_size, hidden_size], + initializer=tf.uniform_unit_scaling_initializer()) + ema_count = tf.get_variable( + name="ema_count", + shape=[bottleneck_size], + initializer=tf.constant_initializer(0), + trainable=False) + with tf.colocate_with(means): + ema_means = tf.get_variable( + name="ema_means", + initializer=means.initialized_value(), + trainable=False) + + return means, ema_means, ema_count + + +def vq_nearest_neighbor(x, hparams): + """Find the nearest element in means to elements in x.""" + bottleneck_size = 2**hparams.bottleneck_bits + means = hparams.means + x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True) + means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True) + scalar_prod = tf.matmul(x, means, transpose_b=True) + dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod + if hparams.bottleneck_kind == "em": + x_means_idx = tf.multinomial(-dist, num_samples=hparams.num_samples) + x_means_hot = tf.one_hot( + x_means_idx, depth=bottleneck_size) + x_means_hot = tf.reduce_mean(x_means_hot, axis=1) + else: + x_means_idx = tf.argmax(-dist, axis=-1) + x_means_hot = tf.one_hot(x_means_idx, depth=bottleneck_size) + x_means = tf.matmul(x_means_hot, means) + e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means))) + return x_means_hot, e_loss + + +def vq_discrete_bottleneck(x, hparams): + """Simple vector quantized discrete bottleneck.""" + tf.logging.info("Using EMA with beta = {}".format(hparams.beta)) + bottleneck_size = 2**hparams.bottleneck_bits + x_shape = common_layers.shape_list(x) + x = tf.reshape(x, [-1, hparams.hidden_size]) + x_means_hot, e_loss = vq_nearest_neighbor( + x, hparams) + means, ema_means, ema_count = (hparams.means, hparams.ema_means, + hparams.ema_count) + + # Update the ema variables + updated_ema_count = moving_averages.assign_moving_average( + ema_count, + tf.reduce_sum(x_means_hot, axis=0), + hparams.decay, + zero_debias=False) + + dw = tf.matmul(x_means_hot, x, transpose_a=True) + updated_ema_means = moving_averages.assign_moving_average( + ema_means, dw, hparams.decay, zero_debias=False) + n = tf.reduce_sum(updated_ema_count, axis=-1, keepdims=True) + updated_ema_count = ( + (updated_ema_count + hparams.epsilon) / + (n + bottleneck_size * hparams.epsilon) * n) + updated_ema_means = updated_ema_means / tf.expand_dims( + updated_ema_count, axis=-1) + with tf.control_dependencies([e_loss]): + update_means = tf.assign(means, updated_ema_means) + with tf.control_dependencies([update_means]): + loss = hparams.beta * e_loss + + discrete = tf.reshape(x_means_hot, x_shape[:-1] + [bottleneck_size]) + return discrete, loss + + +def vq_discrete_unbottleneck(x, hparams): + """Simple undiscretization from vector quantized 
representation.""" + x_shape = common_layers.shape_list(x) + bottleneck_size = 2**hparams.bottleneck_bits + means = hparams.means + x_flat = tf.reshape(x, [-1, bottleneck_size]) + result = tf.matmul(x_flat, means) + result = tf.reshape(result, x_shape[:-1] + [hparams.hidden_size]) + return result + + +def residual_conv(x, repeat, k, hparams, name, reuse=None): + """A stack of convolution blocks with residual connections.""" + with tf.variable_scope(name, reuse=reuse): + dilations_and_kernels = [((1, 1), k) for _ in range(3)] + for i in range(repeat): + with tf.variable_scope("repeat_%d" % i): + y = common_layers.conv_block( + common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"), + hparams.hidden_size, + dilations_and_kernels, + padding="SAME", + name="residual_conv") + y = tf.nn.dropout(y, 1.0 - hparams.dropout) + x += y + return x + + +def decompress_step(source, hparams, first_relu, name): + """Decompression function.""" + with tf.variable_scope(name): + shape = common_layers.shape_list(source) + multiplier = 2 + kernel = (1, 1) + thicker = common_layers.conv_block( + source, + hparams.hidden_size * multiplier, [((1, 1), kernel)], + first_relu=first_relu, + name="decompress_conv") + return tf.reshape(thicker, [shape[0], shape[1] * 2, 1, hparams.hidden_size]) + + +def compress(x, hparams, name): + """Compress.""" + with tf.variable_scope(name): + # Run compression by strided convs. + cur = x + k1 = (3, 1) + k2 = (2, 1) + cur = residual_conv(cur, hparams.num_compress_steps, k1, hparams, "rc") + for i in range(hparams.num_compress_steps): + cur = common_layers.conv_block( + cur, + hparams.hidden_size, [((1, 1), k2)], + strides=k2, + name="compress_%d" % i) + return cur + + +def encode(x, x_space, hparams, name): + """Transformer preparations and encoder.""" + with tf.variable_scope(name): + (encoder_input, encoder_self_attention_bias, + ed) = transformer.transformer_prepare_encoder(x, x_space, hparams) + encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout) + return transformer.transformer_encoder( + encoder_input, encoder_self_attention_bias, hparams), ed + + +def decode_transformer(encoder_output, encoder_decoder_attention_bias, targets, + hparams, name): + """Original Transformer decoder.""" + with tf.variable_scope(name): + targets = common_layers.flatten4d3d(targets) + + decoder_input, decoder_self_bias = ( + transformer.transformer_prepare_decoder(targets, hparams)) + + decoder_input = tf.nn.dropout(decoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + + decoder_output = transformer.transformer_decoder( + decoder_input, encoder_output, decoder_self_bias, + encoder_decoder_attention_bias, hparams) + decoder_output = tf.expand_dims(decoder_output, axis=2) + decoder_output_shape = common_layers.shape_list(decoder_output) + decoder_output = tf.reshape( + decoder_output, [decoder_output_shape[0], -1, 1, hparams.hidden_size]) + # Expand since t2t expects 4d tensors. 
+ return decoder_output + + +def get_latent_pred_loss(latents_pred, latents_discrete_hot, hparams): + """Latent prediction and loss.""" + latents_logits = tf.layers.dense( + latents_pred, 2**hparams.bottleneck_bits, name="extra_logits") + loss = tf.nn.softmax_cross_entropy_with_logits_v2( + labels=tf.stop_gradient(latents_discrete_hot), logits=latents_logits) + return loss + + +def ae_latent_sample_beam(latents_dense_in, inputs, ed, embed, hparams): + """Sample from the latent space in the autoencoder.""" + def symbols_to_logits_fn(ids): + """Go from ids to logits.""" + ids = tf.expand_dims(ids, axis=2) # Ids start with added all-zeros. + latents_discrete = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0]]) + + with tf.variable_scope(tf.get_variable_scope(), reuse=False): + latents_dense = embed( + tf.one_hot(latents_discrete, depth=2**hparams.bottleneck_bits)) + latents_pred = decode_transformer(inputs, ed, latents_dense, hparams, + "extra") + logits = tf.layers.dense( + latents_pred, 2**hparams.bottleneck_bits, name="extra_logits") + current_output_position = common_layers.shape_list(ids)[1] - 1 + logits = logits[:, current_output_position, :, :] + return tf.squeeze(logits, axis=[1]) + + initial_ids = tf.zeros([tf.shape(latents_dense_in)[0]], dtype=tf.int32) + length = tf.shape(latents_dense_in)[1] + ids, _, _ = beam_search.beam_search( + symbols_to_logits_fn, + initial_ids, + beam_size=1, + decode_length=length, + vocab_size=2**hparams.bottleneck_bits, + alpha=0.0, + eos_id=-1, + stop_early=False) + + res = tf.expand_dims(ids[:, 0, :], axis=2) # Pick first beam. + return res[:, 1:] # Remove the added all-zeros from ids. + + +def ae_transformer_internal(inputs, targets, target_space, hparams, cache=None): + """Main step used for training.""" + # Encoder. + inputs = common_layers.flatten4d3d(inputs) + inputs, ed = encode(inputs, target_space, hparams, "input_enc") + + # Autoencoding. + losses = {"extra": tf.constant(0.0), "latent_pred": tf.constant(0.0)} + + max_targets_len_from_inputs = tf.concat([inputs, inputs], axis=1) + targets, _ = common_layers.pad_to_same_length( + targets, + max_targets_len_from_inputs, + final_length_divisible_by=2**hparams.num_compress_steps) + targets_c = compress(targets, hparams, "compress") + if hparams.mode != tf_estimator.ModeKeys.PREDICT: + # Compress and bottleneck. + latents_discrete_hot, extra_loss = vq_discrete_bottleneck( + x=targets_c, hparams=hparams) + latents_dense = vq_discrete_unbottleneck( + latents_discrete_hot, hparams=hparams) + latents_dense = targets_c + tf.stop_gradient(latents_dense - targets_c) + latents_discrete = tf.argmax(latents_discrete_hot, axis=-1) + tf.summary.histogram("codes", tf.reshape(latents_discrete[:, 0, :], [-1])) + losses["extra"] = extra_loss + + # Extra loss predicting latent code from input. + latents_pred = decode_transformer(inputs, ed, latents_dense, hparams, + "extra") + latent_pred_loss = get_latent_pred_loss(latents_pred, latents_discrete_hot, + hparams) + losses["latent_pred"] = tf.reduce_mean(latent_pred_loss) + else: + latent_len = common_layers.shape_list(targets_c)[1] + embed = functools.partial(vq_discrete_unbottleneck, hparams=hparams) + latents_dense = tf.zeros_like(targets_c[:, :latent_len, :, :]) + if cache is None: + cache = ae_latent_sample_beam(latents_dense, inputs, ed, embed, + hparams) + cache_hot = tf.one_hot(cache, depth=2**hparams.bottleneck_bits) + latents_dense = embed(cache_hot) + + # Postprocess. 
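+  # The dense latents are decompressed back to the target resolution below:
+  # each decompress_step doubles the sequence length, inverting the
+  # num_compress_steps strided convolutions applied by compress().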
+ d = latents_dense + pos = tf.get_variable("pos", [1, 1000, 1, hparams.hidden_size]) + pos = pos[:, :common_layers.shape_list(latents_dense)[1] + 1, :, :] + latents_dense = tf.pad(latents_dense, [[0, 0], [1, 0], [0, 0], [0, 0]]) + pos + + # Decompressing the dense latents + for i in range(hparams.num_compress_steps): + j = hparams.num_compress_steps - i - 1 + d = residual_conv(d, 1, (3, 1), hparams, "decompress_rc_%d" % j) + d = decompress_step(d, hparams, i > 0, "decompress_%d" % j) + + masking = common_layers.inverse_lin_decay(hparams.mask_startup_steps) + masking *= common_layers.inverse_exp_decay( + hparams.mask_startup_steps // 4) # Not much at start. + masking = tf.minimum(tf.maximum(masking, 0.0), 1.0) + if hparams.mode == tf_estimator.ModeKeys.PREDICT: + masking = 1.0 + mask = tf.less(masking, + tf.random_uniform(common_layers.shape_list(targets)[:-1])) + mask = tf.expand_dims(tf.to_float(mask), 3) + + # targets is always [batch, length, 1, depth] + targets = mask * targets + (1.0 - mask) * d + + res = decode_transformer(inputs, ed, targets, hparams, "decoder") + latent_time = tf.less(hparams.mask_startup_steps, + tf.to_int32(tf.train.get_global_step())) + losses["latent_pred"] *= tf.to_float(latent_time) + return res, losses, cache + + +@registry.register_model +class TransformerNAT(t2t_model.T2TModel): + """Nonautoregressive Transformer from https://arxiv.org/abs/1805.11063.""" + + def __init__(self, *args, **kwargs): + super(TransformerNAT, self).__init__(*args, **kwargs) + means, ema_means, ema_count = init_vq_bottleneck( + 2**self._hparams.bottleneck_bits, self._hparams.hidden_size) + self._hparams.means = means + self._hparams.ema_means = ema_means + self._hparams.ema_count = ema_count + + def body(self, features): + inputs = features["inputs"] if "inputs" in features else None + reuse = "cache_raw" in features + with tf.variable_scope(tf.get_variable_scope(), reuse=reuse): + res, loss, _ = ae_transformer_internal( + inputs, features["targets"], features["target_space_id"], + self._hparams, features.get("cache_raw", None)) + return res, loss + + def prepare_features_for_infer(self, features): + batch_size = self._decode_hparams.batch_size + inputs = tf.zeros([batch_size, 1, 1, self._hparams.hidden_size]) + inputs = inputs if "inputs" in features else None + targets = tf.zeros([batch_size, 1, 1, self._hparams.hidden_size]) + with tf.variable_scope("transformer_nat/body"): + _, _, cache = ae_transformer_internal( + inputs, targets, features["target_space_id"], self._hparams) + features["cache_raw"] = cache + + def infer(self, + features=None, + decode_length=50, + beam_size=1, + top_beams=1, + alpha=0.0, + use_tpu=False): + """Produce predictions from the model.""" + if not features: + features = {} + inputs_old = None + if "inputs" in features and len(features["inputs"].shape) < 4: + inputs_old = features["inputs"] + features["inputs"] = tf.expand_dims(features["inputs"], 2) + + # Create an initial targets tensor. 
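+    # The decoder is non-autoregressive, so a zero placeholder (heuristically
+    # twice the input length, unless partial_targets are provided) is fed as
+    # the targets and the argmax over the resulting logits is returned.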
+ if "partial_targets" in features: + initial_output = tf.convert_to_tensor(features["partial_targets"]) + else: + batch_size = common_layers.shape_list(features["inputs"])[0] + length = common_layers.shape_list(features["inputs"])[1] + target_length = tf.to_int32(2.0 * tf.to_float(length)) + initial_output = tf.zeros((batch_size, target_length, 1, 1), + dtype=tf.int64) + + features["targets"] = initial_output + logits, _ = self(features) # pylint: disable=not-callable + samples = tf.argmax(logits, axis=-1) + if inputs_old is not None: # Restore to not confuse Estimator. + features["inputs"] = inputs_old + return samples + + +@registry.register_hparams +def transformer_nat_small(): + """Set of hyperparameters.""" + hparams = transformer.transformer_small() + hparams.batch_size = 2048 + hparams.learning_rate = 0.2 + hparams.learning_rate_warmup_steps = 4000 + hparams.num_hidden_layers = 3 + hparams.hidden_size = 384 + hparams.filter_size = 2048 + hparams.label_smoothing = 0.0 + hparams.force_full_predict = True + hparams.optimizer = "adam" + hparams.optimizer_adam_epsilon = 1e-9 + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.997 + hparams.add_hparam("bottleneck_kind", "vq") + hparams.add_hparam("bottleneck_bits", 12) + hparams.add_hparam("num_compress_steps", 3) + hparams.add_hparam("beta", 0.25) + hparams.add_hparam("epsilon", 1e-5) + hparams.add_hparam("decay", 0.999) + hparams.add_hparam("num_samples", 10) + hparams.add_hparam("mask_startup_steps", 50000) + return hparams + + +@registry.register_hparams +def transformer_nat_base(): + """Set of hyperparameters.""" + hparams = transformer_nat_small() + hparams.batch_size = 2048 + hparams.hidden_size = 512 + hparams.filter_size = 4096 + hparams.num_hidden_layers = 6 + return hparams + + +@registry.register_hparams +def transformer_nat_big(): + """Set of hyperparameters.""" + hparams = transformer_nat_small() + hparams.batch_size = 2048 + hparams.hidden_size = 1024 + hparams.filter_size = 4096 + hparams.num_hidden_layers = 6 + hparams.num_heads = 16 + hparams.layer_prepostprocess_dropout = 0.3 + return hparams diff --git a/tensor2tensor/models/research/transformer_parallel.py b/tensor2tensor/models/research/transformer_parallel.py new file mode 100644 index 000000000..8e73e673c --- /dev/null +++ b/tensor2tensor/models/research/transformer_parallel.py @@ -0,0 +1,327 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Models for semi-parallel and parallel decoding with the transformer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.layers import common_layers +from tensor2tensor.models import transformer +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +@registry.register_model +class TransformerBlockParallel(transformer.Transformer): + """Transformer that predicts blocks of the output in parallel.""" + + def body(self, features): + assert self._hparams.block_size > 0 + assert not common_layers.is_xla_compiled() + assert "targets_segmentation" not in features + + decoder_output = super(TransformerBlockParallel, self).body(features) + assert not isinstance(decoder_output, tuple) + assert len(decoder_output.shape) == 4 + + relu_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(self._hparams, "relu_dropout_broadcast_dims", ""))) + + with tf.variable_scope("block_size_%d" % self._hparams.block_size): + block_output = common_layers.dense_relu_dense( + decoder_output, + self._hparams.block_size * self._hparams.filter_size, + self._hparams.block_size * self._hparams.hidden_size, + dropout=self._hparams.relu_dropout, + dropout_broadcast_dims=relu_dropout_broadcast_dims) + + batch_size, length = common_layers.shape_list(decoder_output)[:2] + block_output = tf.reshape(block_output, [ + batch_size, + length, + self._hparams.block_size, + self._hparams.hidden_size + ]) + + block_output = common_layers.layer_postprocess( + decoder_output, block_output, self._hparams) + + return block_output + + def top(self, body_output, features): + assert self._hparams.block_size > 0 + + if (self._hparams.mode == tf_estimator.ModeKeys.TRAIN or + self._hparams.mode == tf_estimator.ModeKeys.EVAL): + if self._hparams.mode == tf_estimator.ModeKeys.TRAIN: + features["block_index"] = tf.random_uniform( + shape=[], minval=0, maxval=self._hparams.block_size, dtype=tf.int64) + else: + features["block_index"] = 0 + k = features["block_index"] + body_output = body_output[:, :, k:k + 1, :] + + return super(TransformerBlockParallel, self).top(body_output, features) + + def loss(self, logits, features): + assert self._hparams.block_size > 0 + + def shift_left_4d(x, k): + return tf.pad(x, [[0, 0], [0, k], [0, 0], [0, 0]])[:, k:, :, :] + + targets = features["targets"] + assert len(targets.shape) == 4 + + targets = tf.concat([ + shift_left_4d(targets, i) + for i in range(self._hparams.block_size) + ], axis=2) + + if (self._hparams.mode == tf_estimator.ModeKeys.TRAIN or + self._hparams.mode == tf_estimator.ModeKeys.EVAL): + assert "block_index" in features + k = features["block_index"] + targets = targets[:, :, k:k + 1, :] + + features["targets"] = targets + + loss = super(TransformerBlockParallel, self).loss(logits, features) + + if self._hparams.mode == tf_estimator.ModeKeys.TRAIN: + loss_num, loss_den = loss + loss_val = loss_num / loss_den + for i in range(self._hparams.block_size): + # Hack: if you report a loss of NaN, TensorBoard will plot a point at + # the previous value without a connecting line. This is used here to + # separate out the training losses by block index. 
+ one_or_nan = tf.cond(tf.equal(k, i), lambda: 1.0, lambda: float("nan")) + tf.summary.scalar( + "block_index_%d" % i, one_or_nan * loss_val, family="losses") + + return loss + + def _greedy_infer(self, features, decode_length, use_tpu=False): + assert not use_tpu + return self._slow_greedy_infer_guess_and_check(features, decode_length) + + def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha): + raise NotImplementedError + + def _slow_greedy_infer_guess_and_check(self, features, decode_length): + assert self._hparams.block_size > 0 + assert self._hparams.force_full_predict + assert self._hparams.sampling_method == "argmax" + assert self._decode_hparams.batch_size == 1 + assert self._decode_hparams.block_size > 0 + assert self._decode_hparams.block_size <= self._hparams.block_size + assert self._decode_hparams.guess_and_check_top_k > 0 + + inputs_old = features["inputs"] + assert "targets" not in features + + assert len(features["inputs"].shape) in [3, 4] + if len(features["inputs"].shape) < 4: + features["inputs"] = tf.expand_dims(features["inputs"], 2) + + block_size = self._decode_hparams.block_size + decode_length += tf.shape(features["inputs"])[1] + + def while_exit_cond(result, length): # pylint: disable=unused-argument + return tf.logical_and( + length < decode_length, + tf.reduce_all( + tf.not_equal(result[:, :length, :, :], text_encoder.EOS_ID)) + ) + + def infer_step(result, length): + """Inference step.""" + + def print_info(result, length, new_length): + vocab = self.problem_hparams.vocabulary["targets"] + tf.logging.info( + "length=%s new_length=%s length_diff=%s new_suffix=%s", + length, + new_length, + new_length - length, + str([ + vocab._subtoken_id_to_subtoken_string(index) # pylint: disable=protected-access + for index in result[0, -block_size:, 0, 0][:new_length - length] + ]).decode("unicode-escape"), + ) + + features["targets"] = tf.pad(result, [[0, 0], [0, 1], [0, 0], [0, 0]]) + samples, logits, losses = self.sample(features) # pylint: disable=unused-variable + + _, top_k_indices = tf.nn.top_k( + logits[:, :-1, :1, :, :], + k=self._decode_hparams.guess_and_check_top_k) + in_top_k = tf.reduce_any( + tf.equal(tf.to_int64(top_k_indices), tf.expand_dims(result, 4)), + axis=4) + + eos_cumsum = tf.cumsum( + tf.to_int32(tf.equal(result, text_encoder.EOS_ID)), axis=1) + after_eos = tf.greater(common_layers.shift_right(eos_cumsum), 0) + + correct = tf.logical_and(in_top_k, tf.logical_not(after_eos)) + correct_cumsum = tf.cumsum(tf.to_int32(correct), axis=1) + perfect_cumsum = 1 + tf.range(tf.shape(correct)[1]) + for axis in [0, 2, 3]: + perfect_cumsum = tf.expand_dims(perfect_cumsum, axis=axis) + + new_length = tf.reduce_sum( + tf.to_int32(tf.equal(correct_cumsum, perfect_cumsum)), axis=1) + new_length = tf.squeeze(new_length, axis=[0, 1, 2]) + new_length = tf.minimum(new_length, decode_length) + + new_result = tf.concat([ + result[:, :new_length, :, :], + tf.reshape( + samples[:, new_length, :block_size, :], [1, block_size, 1, 1]) + ], axis=1) + + with tf.control_dependencies([ + tf.py_func(print_info, [result, length, new_length], []) + ]): + new_result = tf.identity(new_result) + + return new_result, new_length + + result = tf.zeros((1, 0, 1, 1), dtype=tf.int64) + length = tf.squeeze(tf.zeros(1, dtype=tf.int32)) + + result, length = tf.while_loop( + while_exit_cond, + infer_step, + [result, length], + shape_invariants=[ + tf.TensorShape([1, None, 1, 1]), + tf.TensorShape([]), + ], + back_prop=False, + parallel_iterations=1) + + result = result[:, 
:length, :, :] + + features["inputs"] = inputs_old + + return { + "outputs": result, + "scores": None, + } + + +@registry.register_hparams +def transformer_base_bs1(): + hparams = transformer.transformer_base() + hparams.add_hparam("block_size", 1) + return hparams + + +@registry.register_hparams +def transformer_base_bs2(): + hparams = transformer.transformer_base() + hparams.add_hparam("block_size", 2) + return hparams + + +@registry.register_hparams +def transformer_base_bs3(): + hparams = transformer.transformer_base() + hparams.add_hparam("block_size", 3) + return hparams + + +@registry.register_hparams +def transformer_base_bs4(): + hparams = transformer.transformer_base() + hparams.add_hparam("block_size", 4) + return hparams + + +@registry.register_hparams +def transformer_base_bs5(): + hparams = transformer.transformer_base() + hparams.add_hparam("block_size", 5) + return hparams + + +@registry.register_hparams +def transformer_base_bs6(): + hparams = transformer.transformer_base() + hparams.add_hparam("block_size", 6) + return hparams + + +@registry.register_hparams +def transformer_base_bs7(): + hparams = transformer.transformer_base() + hparams.add_hparam("block_size", 7) + return hparams + + +@registry.register_hparams +def transformer_base_bs8(): + hparams = transformer.transformer_base() + hparams.add_hparam("block_size", 8) + return hparams + + +@registry.register_hparams +def transformer_base_bs9(): + hparams = transformer.transformer_base() + hparams.add_hparam("block_size", 9) + return hparams + + +@registry.register_hparams +def transformer_base_bs10(): + hparams = transformer.transformer_base() + hparams.add_hparam("block_size", 10) + return hparams + + +@registry.register_hparams +def transformer_big_bs1(): + hparams = transformer.transformer_big() + hparams.add_hparam("block_size", 1) + return hparams + + +@registry.register_hparams +def transformer_tiny_bs1(): + hparams = transformer.transformer_tiny() + hparams.add_hparam("block_size", 1) + return hparams + + +@registry.register_hparams +def transformer_tiny_bs2(): + hparams = transformer.transformer_tiny() + hparams.add_hparam("block_size", 2) + return hparams + + +@registry.register_hparams +def transformer_tiny_bs3(): + hparams = transformer.transformer_tiny() + hparams.add_hparam("block_size", 3) + return hparams diff --git a/tensor2tensor/models/research/transformer_revnet.py b/tensor2tensor/models/research/transformer_revnet.py new file mode 100644 index 000000000..7b9ee347d --- /dev/null +++ b/tensor2tensor/models/research/transformer_revnet.py @@ -0,0 +1,242 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
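The `transformer_base_bs1` through `transformer_base_bs10` (and the `transformer_big_bs1` / `transformer_tiny_bs*`) sets above differ only in the `block_size` value added on top of a stock configuration. A minimal sketch of the shared pattern; the helper name is illustrative and not part of this change:

```python
from tensor2tensor.models import transformer


def _block_parallel_hparams(base_fn, block_size):
  """Illustrative helper: any base hparams set plus a block_size hparam."""
  hparams = base_fn()
  hparams.add_hparam("block_size", block_size)
  return hparams


# Equivalent of transformer_base_bs4:
hparams = _block_parallel_hparams(transformer.transformer_base, 4)
```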
+ +"""Reversible Residual Transformer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.models import transformer +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +@registry.register_model +class TransformerRevnet(transformer.Transformer): + """Reversible Residual Transformer. + + Layers are reversible and are recomputed on the backward pass. + + y1 = x1 + f(x2) + y2 = x2 + g(y1) + + f: Attention + g: Feed-forward + """ + + def body(self, features): + hparams = self._hparams + targets = features["targets"] + inputs = features["inputs"] + target_space = features["target_space_id"] + + inputs = common_layers.flatten4d3d(inputs) + targets = common_layers.flatten4d3d(targets) + + (encoder_input, encoder_self_attention_bias, + encoder_decoder_attention_bias) = (transformer.transformer_prepare_encoder( + inputs, target_space, hparams)) + (decoder_input, + decoder_self_attention_bias) = transformer.transformer_prepare_decoder( + targets, hparams) + + encoder_input = tf.nn.dropout(encoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + decoder_input = tf.nn.dropout(decoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + encoder_output = transformer_revnet_encoder( + encoder_input, encoder_self_attention_bias, hparams) + + decoder_output = transformer_revnet_decoder( + decoder_input, encoder_output, decoder_self_attention_bias, + encoder_decoder_attention_bias, hparams) + decoder_output = tf.expand_dims(decoder_output, 2) + + return decoder_output + + +def transformer_revnet_encoder(encoder_input, + encoder_self_attention_bias, + hparams, + name="encoder"): + """A stack of transformer layers. 
+ + Args: + encoder_input: a Tensor + encoder_self_attention_bias: bias Tensor for self-attention + (see common_attention.attention_bias()) + hparams: hyperparameters for model + name: a string + + Returns: + y: a Tensors + """ + + def f(x, side_input): + """f(x) for reversible layer, self-attention layer.""" + encoder_self_attention_bias = side_input[0] + + old_hid_size = hparams.hidden_size + hparams.hidden_size = old_hid_size // 2 + + with tf.variable_scope("self_attention"): + y = common_attention.multihead_attention( + common_layers.layer_preprocess( + x, hparams), None, encoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) + y = common_layers.layer_postprocess(x, y, hparams) + hparams.hidden_size = old_hid_size + return y + + def g(x): + """g(x) for reversible layer, feed-forward layer.""" + old_hid_size = hparams.hidden_size + hparams.hidden_size = old_hid_size // 2 + + with tf.variable_scope("ffn"): + y = transformer.transformer_ffn_layer( + common_layers.layer_preprocess(x, hparams), hparams) + y = common_layers.layer_postprocess(x, y, hparams) + hparams.hidden_size = old_hid_size + return y + + x1, x2 = tf.split(encoder_input, 2, axis=-1) + + with tf.variable_scope(name): + y1, y2 = contrib.layers().rev_block( + x1, + x2, + f, + g, + num_layers=hparams.num_hidden_layers, + f_side_input=[encoder_self_attention_bias], + is_training=hparams.mode == tf_estimator.ModeKeys.TRAIN) + y = tf.concat([y1, y2], axis=-1) + + return common_layers.layer_preprocess(y, hparams) + + +def transformer_revnet_decoder(decoder_input, + encoder_output, + decoder_self_attention_bias, + encoder_decoder_attention_bias, + hparams, + name="decoder"): + """A stack of transformer layers. 
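+
+  Same reversible structure as transformer_revnet_encoder, except that f is
+  self-attention followed by encoder-decoder attention and g is the
+  feed-forward layer.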
+ + Args: + decoder_input: a Tensor + encoder_output: a Tensor + decoder_self_attention_bias: bias Tensor for self-attention + (see common_attention.attention_bias()) + encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention + (see common_attention.attention_bias()) + hparams: hyperparameters for model + name: a string + + Returns: + y: a Tensors + """ + + def f(x, side_input): + """f(x) for reversible layer, self-attention and enc-dec attention.""" + decoder_self_attention_bias = side_input[0] + encoder_decoder_attention_bias = side_input[1] + encoder_output = side_input[2] + + old_hid_size = hparams.hidden_size + hparams.hidden_size = old_hid_size // 2 + + with tf.variable_scope("self_attention"): + y = common_attention.multihead_attention( + common_layers.layer_preprocess( + x, hparams), None, decoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) + y = common_layers.layer_postprocess(x, y, hparams) + if encoder_output is not None: + with tf.variable_scope("encdec_attention"): + y = common_attention.multihead_attention( + common_layers.layer_preprocess( + x, hparams), encoder_output, encoder_decoder_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, hparams.num_heads, hparams.attention_dropout) + y = common_layers.layer_postprocess(x, y, hparams) + hparams.hidden_size = old_hid_size + return y + + def g(x): + """g(x) for reversible layer, feed-forward layer.""" + old_hid_size = hparams.hidden_size + hparams.hidden_size = old_hid_size // 2 + with tf.variable_scope("ffn"): + y = transformer.transformer_ffn_layer( + common_layers.layer_preprocess(x, hparams), hparams) + y = common_layers.layer_postprocess(x, y, hparams) + hparams.hidden_size = old_hid_size + return y + + x1, x2 = tf.split(decoder_input, 2, axis=-1) + + with tf.variable_scope(name): + y1, y2 = contrib.layers().rev_block( + x1, + x2, + f, + g, + num_layers=hparams.num_hidden_layers, + f_side_input=[ + decoder_self_attention_bias, encoder_decoder_attention_bias, + encoder_output + ], + is_training=hparams.mode == tf_estimator.ModeKeys.TRAIN) + y = tf.concat([y1, y2], axis=-1) + return common_layers.layer_preprocess(y, hparams) + + +@registry.register_hparams +def transformer_revnet_base(): + """Base hparams for TransformerRevnet.""" + hparams = transformer.transformer_big() + + # Use settings from transformer_n_da + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + hparams.learning_rate = 0.4 + + return hparams + + +@registry.register_hparams +def transformer_revnet_big(): + """Base hparams for TransformerRevnet.""" + hparams = transformer_revnet_base() + + # The TransformerRevnet uses significantly less memory than the Transformer. + # Increase batch size and model size. + hparams.batch_size *= 2 + hparams.hidden_size *= 2 + hparams.num_heads *= 2 + hparams.num_hidden_layers += 1 + return hparams diff --git a/tensor2tensor/models/research/transformer_revnet_test.py b/tensor2tensor/models/research/transformer_revnet_test.py new file mode 100644 index 000000000..fca42b54e --- /dev/null +++ b/tensor2tensor/models/research/transformer_revnet_test.py @@ -0,0 +1,75 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for TransformerRevnet.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np + +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.models.research import transformer_revnet + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +def transformer_revnet_test(): + hparams = transformer_revnet.transformer_revnet_base() + hparams.num_hidden_layers = 2 + hparams.hidden_size = 128 + hparams.filter_size = 512 + hparams.num_heads = 2 + return hparams + + +class TransformerRevnetTest(tf.test.TestCase): + + def testTransformer(self): + batch_size = 3 + input_length = 5 + target_length = 7 + vocab_size = 9 + hparams = transformer_revnet_test() + p_hparams = problem_hparams.test_problem_hparams(vocab_size, + vocab_size, + hparams) + hparams.problem_hparams = p_hparams + inputs = np.random.randint( + vocab_size, size=(batch_size, input_length, 1, 1)) + targets = np.random.randint( + vocab_size, size=(batch_size, target_length, 1, 1)) + features = { + "inputs": tf.constant(inputs, dtype=tf.int32), + "targets": tf.constant(targets, dtype=tf.int32), + "target_space_id": tf.constant(1, dtype=tf.int32), + } + model = transformer_revnet.TransformerRevnet( + hparams, tf_estimator.ModeKeys.TRAIN, p_hparams) + logits, _ = model(features) + grads = tf.gradients( + tf.reduce_mean(logits), [features["inputs"]] + tf.global_variables()) + grads = [g for g in grads if g is not None] + + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + logits_val, _ = session.run([logits, grads]) + self.assertEqual(logits_val.shape, (batch_size, target_length, 1, 1, + vocab_size)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/research/transformer_seq2edits.py b/tensor2tensor/models/research/transformer_seq2edits.py new file mode 100644 index 000000000..75ce9b02a --- /dev/null +++ b/tensor2tensor/models/research/transformer_seq2edits.py @@ -0,0 +1,543 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The Seq2Edits model. + +Seq2Edits is an adaptation of the Transformer that predicts span level edits +and pairs them with tags. The Seq2Edits model is described in + + Stahlberg, Felix, and Kumar, Shankar. "Seq2Edits: Sequence Transduction Using + Span-level Edit Operations." 
Proceedings of the 2020 Conference on Empirical + Methods in Natural Language Processing (EMNLP). 2020. + https://www.aclweb.org/anthology/2020.emnlp-main.418/ + +T2T problem definitions for Seq2Edits are in data_generators/seq2edits.py. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import transformer_layers +from tensor2tensor.models import transformer +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf + + +def maybe_flatten4d3d(x): + """Flatten if tensor has 4 dimensions. + + Pass through otherwise. + + This is useful since additional dimensions are sometimes removed on the TPU, + see e.g. + https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/models/transformer.py?l=159&rcl=279807999 + + Args: + x: a tensor + + Returns: + A 3D tensor if x is 4D, unmodified x otherwise. + """ + xshape = common_layers.shape_list(x) + return common_layers.flatten4d3d(x) if len(xshape) == 4 else x + + +def maybe_flatten3d2d(x): + """Flatten if tensor has 3 dimensions, similar to maybe_flatten4d3d().""" + xshape = common_layers.shape_list(x) + if len(xshape) != 3: + return x + return tf.reshape(x, [xshape[0], xshape[1] * xshape[2]]) + + +def maybe_flatten4d2d(x): + return maybe_flatten3d2d(maybe_flatten4d3d(x)) + + +def features_to_nonpadding(features, inputs_or_targets="inputs"): + """See transformer.features_to_nonpadding.""" + key = inputs_or_targets + "_segmentation" + if features and key in features: + return tf.minimum(tf.to_float(features[key]), 1.0) + return None + + +def gather_2d(params, indices): + """2D version of tf.gather. + + This is a batched version of tf.gather(), i.e. it applies tf.gather() to + each batch separately. + Example: + params = [[10, 11, 12, 13, 14], + [20, 21, 22, 23, 24]] + indices = [[0, 0, 1, 1, 1, 2], + [1, 3, 0, 0, 2, 2]] + result = [[10, 10, 11, 11, 11, 12], + [21, 23, 20, 20, 22, 22]] + This method is copied from + https://github.com/fstahlberg/tensor2tensor-usr/blob/master/usr/utils.py + which is published under Apache 2. + + Args: + params: A [batch_size, n, ...] tensor with data + indices: A [batch_size, num_indices] int32 tensor with indices into params. + Entries must be smaller than n + + Returns: + The result of tf.gather() on each entry of the batch. + """ + batch_size = tf.shape(params)[0] + num_indices = tf.shape(indices)[1] + batch_indices = tf.tile( + tf.expand_dims(tf.range(batch_size), 1), [1, num_indices]) + # batch_indices is [[0,0,0,0,...],[1,1,1,1,...],...] + gather_nd_indices = tf.stack([batch_indices, indices], axis=2) + return tf.gather_nd(params, gather_nd_indices) + + +@registry.register_model +class TransformerSeq2edits(t2t_model.T2TModel): + """The Seq2Edits model. See file docstring.""" + + def __init__(self, *args, **kwargs): + super(TransformerSeq2edits, self).__init__(*args, **kwargs) + self.attention_weights = {} # For visualizing attention heads. 
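+    # The encoder/decoder stacks are the standard Transformer ones; the
+    # edit-specific heads (error tag, start/end pointers) are added in body().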
+ self._encoder_function = transformer_layers.transformer_encoder + self._decoder_function = transformer.transformer_decoder + self._prepare_encoder_fn = transformer_layers.transformer_prepare_encoder + self._prepare_decoder_fn = transformer.transformer_prepare_decoder + self.loss_num = {} + self.logits = {} + self.loss_den = None + + def encode(self, inputs, target_space, hparams, features=None, losses=None): + """Encodes transformer inputs, see transformer.transformer_encode().""" + return transformer.transformer_encode( + self._encoder_function, + inputs, + target_space, + hparams, + attention_weights=self.attention_weights, + features=features, + losses=losses, + prepare_encoder_fn=self._prepare_encoder_fn) + + def decode(self, + decoder_input, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + cache=None, + decode_loop_step=None, + nonpadding=None, + losses=None, + **kwargs): + """Decodes Transformer outputs, see transformer.transformer_decode().""" + return transformer.transformer_decode( + self._decoder_function, + decoder_input, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + attention_weights=self.attention_weights, + cache=cache, + decode_loop_step=decode_loop_step, + nonpadding=nonpadding, + losses=losses, + **kwargs) + + def body(self, features): + """Seq2Edits main model_fn. + + Args: + features: Feature dictionary. Should contain the following fields: + "inputs": [batch_size, input_length, 1, hidden_dim] float tensor with + input token embeddings. + "targets": [batch_size, target_length, 1, hidden_dim] float tensor + with target token embeddings. + "targets_error_tag": [batch_size, target_length, 1, hidden_dim] float + tensor with target error tag embeddings. + "targets_start_token": [batch_size, target_length] int tensor with + start token positions. + "targets_end_token": [batch_size, target_length] int tensor with end + token positions. + "target_space_id": A scalar int from data_generators.problem.SpaceID. + + Returns: + Final decoder representation. 
Dictionary containing the following fields: + "targets": [batch_size, target_length, hidden_dim] float tensor with + decoder outputs + "targets_error_tag": [batch_size, target_length, hidden_dim] float + tensor with decoder outputs + "targets_start_token": [batch_size, input_length, target_length] float + tensor with start token position logits + "targets_end_token": [batch_size, input_length, target_length] float + tensor with end token position logits + """ + hparams = self._hparams + + losses = [] + + if self.has_input: + target_space = features["target_space_id"] + encoder_output, encoder_decoder_attention_bias = self.encode( + features["inputs"], + target_space, + hparams, + features=features, + losses=losses) + else: + encoder_output, encoder_decoder_attention_bias = (None, None) + + targets = features["targets"] + targets_shape = common_layers.shape_list(targets) + targets = common_layers.flatten4d3d(targets) + decoder_input, decoder_self_attention_bias = self._prepare_decoder_fn( + targets, hparams, features=features) + + nonpadding = features_to_nonpadding(features, "targets") + + # Add edit ops layer to condition on start_token, end_token, and error_tag + decoder_input = transformer_edit_ops_layer( + decoder_input, + hparams, + encoder_output, + features, + nonpadding=nonpadding, + losses=losses) + + if hparams.middle_prediction: + num_decoder_layers = hparams.num_decoder_layers or hparams.num_hidden_layers + hparams.num_decoder_layers = int( + num_decoder_layers / hparams.middle_prediction_layer_factor) + + decode_kwargs = {} + decoder_output = self.decode( + decoder_input, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + nonpadding=nonpadding, + losses=losses, + **decode_kwargs) + + loss_mask = common_layers.weights_nonzero( + maybe_flatten4d2d(features["targets_raw"])) + self.loss_den = tf.reduce_sum(loss_mask) + decoder_output = self._prediction_cascade( + hparams=hparams, + features=features, + losses=losses, + loss_mask=loss_mask, + nonpadding=nonpadding, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + encoder_output=encoder_output, + decoder_output=decoder_output) + + if hparams.middle_prediction: + with tf.variable_scope("after_prediction"): + decoder_output = self.decode( + decoder_input + decoder_output, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + nonpadding=nonpadding, + losses=losses, + **decode_kwargs) + + ret = {"targets": tf.reshape(decoder_output, targets_shape)} + ret.update(self.logits) + if losses: + return ret, {"extra_loss": tf.add_n(losses)} + else: + return ret + + def _prediction_cascade(self, hparams, features, losses, loss_mask, + nonpadding, encoder_decoder_attention_bias, + encoder_output, decoder_output): + if hparams.use_error_tags: + (decoder_output, error_tag_logits, + error_tag_loss) = transformer_error_tag_prediction_layer( + decoder_output, hparams, features, loss_mask=loss_mask) + self.logits["targets_error_tag"] = error_tag_logits + self.loss_num["targets_error_tag"] = error_tag_loss + decoder_output = transformer_between_predictions_layer( + decoder_output, + hparams, + name="post_error_tag", + nonpadding=nonpadding, + losses=losses) + + pos_feat_names = [] + if hparams.use_start_token: + pos_feat_names.append("targets_start_token") + pos_feat_names.append("targets_end_token") + for pos_feat_name in pos_feat_names: + (decoder_output, pos_logits, + pos_loss) = transformer_pointer_prediction_layer( + pos_feat_name, + 
encoder_output, + decoder_output, + encoder_decoder_attention_bias, + hparams, + features, + loss_mask=loss_mask) + self.logits[pos_feat_name] = pos_logits + self.loss_num[pos_feat_name] = pos_loss + decoder_output = transformer_between_predictions_layer( + decoder_output, + hparams, + name="post_%s" % pos_feat_name, + nonpadding=nonpadding, + losses=losses) + return decoder_output + + def _loss_single(self, logits, feature_name, feature, weights=None): + """Prevents modality loss computation for targets_*.""" + if feature_name in [ + "targets_start_token", "targets_end_token", "targets_error_tag" + ]: + loss_num = self.loss_num[feature_name] + loss_num *= self._problem_hparams.loss_multiplier + loss_den = self.loss_den + else: + loss_num, loss_den = super(TransformerSeq2edits, + self)._loss_single(logits, feature_name, + feature, weights) + tf.summary.scalar("loss/%s" % feature_name, loss_num / loss_den) + return loss_num, loss_den + + def top(self, body_output, features): + """Adds additional dimensions and then calls super class implementation.""" + exp_features = features + for feat in body_output.keys(): + while len(body_output[feat].shape) < 4: + logging.warning("Expanding body output %s...", feat) + body_output[feat] = tf.expand_dims(body_output[feat], -2) + if feat in exp_features: + while len(exp_features[feat].shape) < 4: + exp_features[feat] = tf.expand_dims(exp_features[feat], -1) + logging.warning("Expanding feature %s...", feat) + return super(TransformerSeq2edits, self).top(body_output, exp_features) + + +def _pointer_feedback(pointers, encoder_output, shift=True): + """Feedback loop for pointer networks. + + Args: + pointers: [batch_size, target_length] int tensor with pointers into the + source sentence. + encoder_output: [batch_size, input_length, hidden_size] tensor with encoder + outputs. + shift: Whether to shift the pointers to the right. + + Returns: + A [batch_size, target_length, hidden_size] tensor with encoder outputs. 
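+
+  When shift is True the pointers are shifted one position to the right, so
+  the feedback at target position t is the encoder output addressed by the
+  pointer predicted at position t - 1 (standard autoregressive feedback).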
+ """ + if shift: + pointers = common_layers.shift_right_2d(pointers) + return gather_2d(encoder_output, pointers) + + +def transformer_edit_ops_layer(decoder_input, + hparams, + encoder_output, + features, + cache=None, + decode_loop_step=None, + nonpadding=None, + losses=None, + layer_collection=None): + """Layer that conditions on the error tag and start and end token pointers.""" + if isinstance(encoder_output, list): # Select forward encoder + encoder_output = encoder_output[0] + with tf.variable_scope("edit_ops_layer"): + with tf.variable_scope("ffn"): + x = decoder_input + # Shorthand for layer preprocessing + # pylint: disable=g-long-lambda + preproc = lambda z: common_layers.layer_preprocess( + z, hparams, layer_collection=layer_collection) + # pylint: enable=g-long-lambda + + feedback_start_token = (hparams.use_start_token or + not hparams.feedback_end_token) + if feedback_start_token: + start_token = _pointer_feedback( + features["targets_start_token"], + encoder_output, + shift=hparams.feedback_end_token) + if hparams.feedback_end_token: + end_token = _pointer_feedback(features["targets_end_token"], + encoder_output) + layer_inputs = [preproc(x)] + if hparams.use_error_tags: + error_tags = common_layers.shift_right_3d( + common_layers.flatten4d3d(features["targets_error_tag"])) + layer_inputs.append(preproc(error_tags)) + if feedback_start_token: + layer_inputs.append(start_token) + if hparams.feedback_end_token: + layer_inputs.append(end_token) + y = transformer_layers.transformer_ffn_layer( + tf.concat(layer_inputs, axis=2), + hparams, + conv_padding="LEFT", + nonpadding_mask=nonpadding, + losses=losses, + cache=cache, + decode_loop_step=decode_loop_step, + layer_collection=layer_collection) + x = common_layers.layer_postprocess(x, y, hparams) + return x + + +def transformer_between_predictions_layer(x, + hparams, + name, + cache=None, + decode_loop_step=None, + nonpadding=None, + losses=None, + layer_collection=None): + """Stack between prediction layers.""" + with tf.variable_scope(name): + for i in range(hparams.ffn_in_prediction_cascade): + with tf.variable_scope("layer_%d" % i): + y = transformer_layers.transformer_ffn_layer( + common_layers.layer_preprocess( + x, hparams, layer_collection=layer_collection), + hparams, + conv_padding="LEFT", + nonpadding_mask=nonpadding, + losses=losses, + cache=cache, + decode_loop_step=decode_loop_step, + layer_collection=layer_collection) + x = common_layers.layer_postprocess(x, y, hparams) + return x + + +def get_error_tag_embedding_matrix(): + candidates = [ + var for var in tf.global_variables() if "targets_error_tag" in var.op.name + ] + if len(candidates) != 1: + raise ValueError("Could not identify error tag embedding matrix! 
" + "Matching variable names: %s" % candidates) + embed_mat = candidates[0] + return embed_mat + + +def transformer_error_tag_prediction_layer(x, + hparams, + features, + loss_mask, + layer_collection=None): + """Layer that predicts the error tag.""" + with tf.variable_scope("error_tag_prediction"): + x = maybe_flatten4d3d(x) + vocab_size = hparams.problem.feature_info["targets_error_tag"].vocab_size + labels = features["targets_error_tag_raw"] + with tf.variable_scope("projection"): + bottleneck = common_layers.dense( + x, + hparams.error_tag_embed_size, + layer_collection=layer_collection, + name="bottleneck") + logits = common_layers.dense( + bottleneck, + vocab_size, + use_bias=False, + layer_collection=layer_collection, + name="logits") + xent = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=logits, labels=labels) + loss = tf.reduce_sum(xent * loss_mask) + with tf.variable_scope("embedding"): + embed_mat = get_error_tag_embedding_matrix() + y = common_layers.layer_preprocess( + common_layers.embedding( + labels, vocab_size, hparams.hidden_size, embedding_var=embed_mat), + hparams, + layer_collection=layer_collection) + x = common_layers.layer_postprocess(x, y, hparams) + return x, logits, loss + + +def transformer_pointer_prediction_layer(feature_name, + encoder_output, + x, + encoder_decoder_attention_bias, + hparams, + features, + loss_mask, + layer_collection=None): + """Layer that predicts the start or end token position. + + Args: + feature_name: 'targets_start_token' or 'targets_end_token' + encoder_output: [batch_size, input_length, hidden_size] tensor with encoder + outputs + x: [batch_size, target_length, 1, hidden_size] tensor with decoder outputs + encoder_decoder_attention_bias: [batch_size, input_length, target_length] + attention mask + hparams: Hyper parameters + features: Feature dictionary + loss_mask: [batch_size, target_length] mask for loss computation. + layer_collection: Layer collection + + Returns: + (x, logits, loss) + """ + if isinstance(encoder_output, list): + pointer_encoder_output = encoder_output[1] + encoder_output = sum(encoder_output) + else: + pointer_encoder_output = encoder_output + with tf.variable_scope("%s_prediction" % feature_name): + x = maybe_flatten4d3d(x) + encoder_decoder_attention_bias = common_layers.flatten4d3d( + encoder_decoder_attention_bias) + q = common_attention.compute_attention_component(x, hparams.hidden_size) + k = common_attention.compute_attention_component(encoder_output, + hparams.hidden_size) + # Scaled dot-product attention + scalar = tf.rsqrt(tf.to_float(common_layers.shape_list(q)[2])) + logits = tf.matmul(q * scalar, k, transpose_b=True) + + logits += encoder_decoder_attention_bias + + labels = features["%s_raw" % feature_name] + xent = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=logits, labels=labels) + loss = tf.reduce_sum(xent * loss_mask) + + pointer_out = gather_2d(pointer_encoder_output, labels) + y = common_layers.layer_preprocess( + pointer_out, hparams, layer_collection=layer_collection) + x = common_layers.layer_postprocess(x, y, hparams) + return x, logits, loss diff --git a/tensor2tensor/models/research/transformer_sketch.py b/tensor2tensor/models/research/transformer_sketch.py new file mode 100644 index 000000000..79aa21aec --- /dev/null +++ b/tensor2tensor/models/research/transformer_sketch.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Transformer Sketch for im2sketch problems. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_layers +from tensor2tensor.models import transformer +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_model +class TransformerSketch(transformer.Transformer): + """Transformer with strided convolutions.""" + + def encode(self, inputs, target_space, hparams, features=None, losses=None): + """Add layers of strided convolutions on top of encoder.""" + with tf.variable_scope("downstride"): + hparams = self.hparams + kernel, strides = (4, 4), (2, 2) + x = inputs + # Down-convolutions. + for i in range(hparams.num_compress_steps): + x = common_layers.make_even_size(x) + x = tf.layers.conv2d( + x, hparams.hidden_size, kernel, strides=strides, + padding="SAME", activation=common_layers.belu, name="conv_%d" % i) + x = common_layers.layer_norm(x) + + encoder_output, encoder_decoder_attention_bias = super( + TransformerSketch, self).encode( + x, target_space, hparams, features=features, losses=losses) + return encoder_output, encoder_decoder_attention_bias + + +@registry.register_hparams +def transformer_sketch(): + """Basic transformer_sketch hparams.""" + hparams = transformer.transformer_small() + hparams.num_compress_steps = 4 + hparams.batch_size = 32 + hparams.clip_grad_norm = 2. + hparams.sampling_method = "random" + return hparams diff --git a/tensor2tensor/models/research/transformer_symshard.py b/tensor2tensor/models/research/transformer_symshard.py new file mode 100644 index 000000000..aed4b5e96 --- /dev/null +++ b/tensor2tensor/models/research/transformer_symshard.py @@ -0,0 +1,419 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test of the SymShard programming model. + +Symmetric model parallellism. + +Each shard (device) has a similar structure with different weights. +Occasional allreduce (sum) across shards. + +On TPU, we replicate the whole model on each core. This is not the intended +use, but we can test the model quality. 
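+
+Each shard keeps its own copy of every weight matrix; shards exchange
+information only in the "m" (mix) layers, where a mix_fraction share of the
+hidden units is summed across shards with an allreduce.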
+ +Example problem: translate_ende_8k_packed + +Preliminary results on languagemodel_lm1b8k_packed (200k steps 8 cores) + transformer_tpu: 48M params dev-log-ppl=-1.29 dev-BLEU=27.0 + transformer_symshard_sh4: 49M params dev-log-ppl=-1.30 dev-BLEU=26.4 + transformer_symshard_base: 98M params dev-log-ppl=-1.23 dev-BLEU=27.6 + + transformer_symshard_base with different mixing fraction (default=0.5): + mix_fraction=0.0 dev-log-ppl=-1.33 + mix_fraction=0.25 dev-log-ppl=-1.23 + mix_fraction=0.5 dev-log-ppl=-1.23 + mix_fraction=0.75 dev-log-ppl=-1.24 + mix_fraction=1.0 dev-log-ppl=-1.28 + +TODO(noam): Make sure no one is using super_lm, then delete it. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import modalities +from tensor2tensor.utils import expert_utils +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf + + +@registry.register_model +class TransformerSymshard(t2t_model.T2TModel): + """See file docstring.""" + + def body(self, features): + hparams = self._hparams + ps_devices = self._ps_devices + single_device = (len(ps_devices) == 1) + assert hparams.num_model_shards % len(ps_devices) == 0 + shards_per_device = hparams.num_model_shards // len(ps_devices) + model_devices = [ps_devices[i // shards_per_device] + for i in range(hparams.num_model_shards)] + print("model_devices = %s" % model_devices) + mp = expert_utils.Parallelism(model_devices, reuse=False) + targets_vocab_size = self._problem_hparams.vocabulary["targets"].vocab_size + # squeeze out channels, heights + targets = tf.squeeze(features["targets_raw"], [2, 3]) + targets_embedding_var = mp( + tf.get_variable, "embedding", + [[targets_vocab_size, hparams.hidden_size]] * mp.n, + initializer=tf.random_normal_initializer( + 0.0, hparams.hidden_size**-0.5)) + shifted_targets = common_layers.shift_right_2d(targets) + # Bypass the symbol modality and use a different embedding on each shard. + if single_device: + targets_embedding_var_combined = tf.concat(targets_embedding_var, 1) + decoder_input_combined = common_layers.embedding( + shifted_targets, targets_vocab_size, + hparams.hidden_size * mp.n, + multiplier=hparams.hidden_size**0.5, + embedding_var=targets_embedding_var_combined, + ) + decoder_input = tf.split(decoder_input_combined, mp.n, axis=2) + else: + targets_embedding_var_combined = None + decoder_input = mp( + common_layers.embedding, shifted_targets, targets_vocab_size, + hparams.hidden_size, + multiplier=hparams.hidden_size**0.5, + embedding_var=targets_embedding_var, + ) + decoder_self_attention_bias = mp( + common_attention.attention_bias_lower_triangle, + tf.shape(targets)[1]) + if "targets_segmentation" in features: + # "Packed" dataset - keep the examples from seeing each other. 
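+      # In packed datasets several short examples are concatenated into one
+      # sequence; the segmentation ids mask attention across example
+      # boundaries and the position features restart the timing signal.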
+ targets_segmentation = features["targets_segmentation"] + targets_position = features["targets_position"] + decoder_self_attention_bias = mp( + tf.add, decoder_self_attention_bias, + mp(common_attention.attention_bias_same_segment, + targets_segmentation, targets_segmentation)) + decoder_input = mp( + common_attention.add_timing_signal_1d_given_position, + decoder_input, targets_position) + else: + targets_position = None + decoder_self_attention_bias = mp( + common_attention.attention_bias_lower_triangle, + tf.shape(targets)[1]) + decoder_input = mp(common_attention.add_timing_signal_1d, decoder_input) + + if self.has_input: + inputs = tf.squeeze(features["inputs_raw"], [2, 3]) + inputs_vocab_size = self._problem_hparams.vocabulary["inputs"].vocab_size + # share everything for now + share_inputs_and_targets_embedding = True + if share_inputs_and_targets_embedding: + assert inputs_vocab_size == targets_vocab_size + inputs_embedding_var = targets_embedding_var + inputs_embedding_var_combined = targets_embedding_var_combined + if single_device: + encoder_input_combined = common_layers.embedding( + inputs, inputs_vocab_size, + hparams.hidden_size * mp.n, + multiplier=hparams.hidden_size**0.5, + embedding_var=inputs_embedding_var_combined, + ) + encoder_input = tf.split(encoder_input_combined, mp.n, axis=2) + else: + encoder_input = mp( + common_layers.embedding, inputs, inputs_vocab_size, + hparams.hidden_size, + multiplier=hparams.hidden_size**0.5, + embedding_var=inputs_embedding_var, + ) + if "inputs_segmentation" in features: + # "Packed" dataset - keep the examples from seeing each other. + inputs_segmentation = features["inputs_segmentation"] + inputs_position = features["inputs_position"] + encoder_self_attention_bias = mp( + common_attention.attention_bias_same_segment, + inputs_segmentation, inputs_segmentation) + encoder_decoder_attention_bias = mp( + common_attention.attention_bias_same_segment, + targets_segmentation, inputs_segmentation) + encoder_input = mp( + common_attention.add_timing_signal_1d_given_position, + encoder_input, inputs_position) + else: + encoder_padding = tf.to_float(tf.equal(inputs, 0)) + ignore_padding = common_attention.attention_bias_ignore_padding( + encoder_padding) + encoder_self_attention_bias = ignore_padding + encoder_decoder_attention_bias = ignore_padding + inputs_position = None + encoder_input = mp(common_attention.add_timing_signal_1d, encoder_input) + + # encoder stack here + with tf.variable_scope("encoder"): + encoder_input = mp( + tf.nn.dropout, encoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + encoder_output = _layer_stack( + mp, + encoder_input, + encoder_self_attention_bias, + hparams.encoder_layers, + hparams) + else: + encoder_decoder_attention_bias = None + encoder_output = None + + with tf.variable_scope("decoder"): + decoder_input = mp( + tf.nn.dropout, decoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + decoder_output = _layer_stack( + mp, + decoder_input, + decoder_self_attention_bias, + layers=hparams.decoder_layers, + hparams=hparams, + encoder_output=encoder_output, + encoder_decoder_attention_bias=encoder_decoder_attention_bias) + + # Bypass the symbol modality and compute logits directly. + # We compute a different set of logits on each shard, and sum them. + # Share the weights with the target embedding. 
+ output_var = targets_embedding_var + output_var_combined = targets_embedding_var_combined + if single_device: + decoder_output = tf.concat(decoder_output, 2) + logits = tf.tensordot(decoder_output, output_var_combined, [[2], [1]]) + num, denom = common_layers.padded_cross_entropy( + logits, targets, hparams.label_smoothing) + training_loss = num / denom + else: + logits = mp( + tf.tensordot, decoder_output, output_var, [[[2], [1]]] * mp.n) + logits = expert_utils.all_reduce_ring(logits, mp) + # On each device, we compute the loss for a part of the batch. + # This is faster than computing the whole loss on one shard. + mp, logits = expert_utils.reduce_by_device(mp, logits, lambda l: l[0]) + def _loss_for_shard(logits, targets, shard): + logits = common_layers.approximate_split(logits, mp.n, 0)[shard] + targets = common_layers.approximate_split(targets, mp.n, 0)[shard] + return common_layers.padded_cross_entropy( + logits, targets, hparams.label_smoothing) + num, denom = mp(_loss_for_shard, logits, targets, range(mp.n)) + training_loss = tf.add_n(num) / tf.add_n(denom) + logits = logits[0] + logits = tf.expand_dims(tf.expand_dims(logits, 2), 3) + # override training loss so that it is not computed externally. + losses = {"training": training_loss} + return logits, losses + + +def _layer_stack(mp, + inputs, + self_attention_bias, + layers, + hparams, + encoder_output=None, + encoder_decoder_attention_bias=None): + """A stack of layers. + + Args: + mp: a Parallelism object + inputs: a list of Tensors + self_attention_bias: list of bias Tensor for self-attention + (see common_attention.attention_bias()) + layers: a string + hparams: hyperparameters for model + encoder_output: optional list of tensors + encoder_decoder_attention_bias: optional list of tensors + + Returns: + y: a list of Tensors + """ + layers = layers.strip(",").split(",") + + # scaled_dot_product_attention_with_projections uses a 3d attention bias + # (no heads), where multihead_attention uses 4d attention bias. 
+ self_attention_bias_3d = mp(tf.squeeze, self_attention_bias, 1) + if encoder_decoder_attention_bias is not None: + encoder_decoder_attention_bias_3d = mp( + tf.squeeze, encoder_decoder_attention_bias, 1) + relu_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "relu_dropout_broadcast_dims", ""))) + mix_size = int(hparams.mix_fraction * hparams.hidden_size) + accumulator = inputs + x = inputs + for layer_num, layer_type in enumerate(layers): + with tf.variable_scope("%s_%d" % (layer_type, layer_num)): + tf.logging.info("%s_%d" % (layer_type, layer_num)) + if layer_type == "a": + # accumulate + accumulator = mp(tf.add, x, accumulator) + x = accumulator + elif layer_type == "n": + # normalize + x = mp(common_layers.apply_norm, + x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon) + elif layer_type == "d": + # dropout + x = mp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout) + elif layer_type == "m": + if mix_size > 0: + # mix across shards + def _split(t): + return tuple(tf.split( + t, [mix_size, hparams.hidden_size - mix_size], 2)) + to_mix, to_keep = mp(_split, x) + mixed = expert_utils.all_reduce_ring(to_mix, mp) + mixed = mp(tf.multiply, mixed, mp.n ** -0.5) + x = mp(lambda a, b: tf.concat([a, b], 2), mixed, to_keep) + elif layer_type == "att": + # single-head attention + q = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False, + name="q_transform") + x = mp( + common_attention.scaled_dot_product_attention_simple, + q, x, x, self_attention_bias_3d) + x = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False, + name="o_transform") + elif layer_type == "enc-att": + # single-head attention over encoder + q = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False, + name="q_transform") + assert encoder_output is not None + x = mp( + common_attention.scaled_dot_product_attention_simple, + q, encoder_output, encoder_output, + encoder_decoder_attention_bias_3d) + x = mp(tf.layers.dense, x, hparams.hidden_size, use_bias=False, + name="o_transform") + elif layer_type == "multihead-att": + # multi-head attention + x = mp( + common_attention.multihead_attention, + x, + None, + self_attention_bias, # bias + hparams.multihead_attention_key_channels or hparams.hidden_size, + hparams.multihead_attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.multihead_attention_num_heads, + hparams.attention_dropout) + elif layer_type == "enc-multihead-att": + # multi-head attention + x = mp( + common_attention.multihead_attention, + x, + encoder_output, + encoder_decoder_attention_bias, # bias + hparams.multihead_attention_key_channels or hparams.hidden_size, + hparams.multihead_attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.multihead_attention_num_heads, + hparams.attention_dropout) + elif layer_type == "ffn": + x = mp( + common_layers.dense_relu_dense, x, + hparams.filter_size, hparams.hidden_size, + dropout=hparams.relu_dropout, + dropout_broadcast_dims=[relu_dropout_broadcast_dims] * mp.n) + else: + assert False, "unknown sublayer %s" % layer_type + return x + + +@registry.register_hparams +def transformer_symshard_base(): + """Set of hyperparameters.""" + hparams = common_hparams.basic_params1() + hparams.hidden_size = 256 + hparams.batch_size = 2048 + hparams.max_length = 0 + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. 
+ hparams.layer_prepostprocess_dropout = 0.2 + hparams.add_hparam("attention_dropout", 0.1) + hparams.add_hparam("relu_dropout", 0.0) + hparams.add_hparam("relu_dropout_broadcast_dims", "1") + hparams.layer_prepostprocess_dropout = 0.1 + hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length + hparams.label_smoothing = 0.1 + hparams.clip_grad_norm = 0. # i.e. no gradient clipping + hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.learning_rate_warmup_steps = 10000 + hparams.initializer_gain = 1.0 + hparams.initializer = "uniform_unit_scaling" + hparams.weight_decay = 0.0 + # TODO(noam): use this to control sharing. We now share always + hparams.shared_embedding_and_softmax_weights = True + # we only want one data shard. + hparams.no_data_parallelism = True + # bypass the symbol modality so that we can use model parallelism. + hparams.bottom = { + "inputs": modalities.identity_bottom, + "targets": modalities.identity_bottom, + } + hparams.top = { + "targets": modalities.identity_top, + } + hparams.add_hparam("filter_size", 1280) + hparams.add_hparam("mix_fraction", 0.5) + # attention-related flags + hparams.add_hparam("multihead_attention_num_heads", 4) + hparams.add_hparam("multihead_attention_key_channels", 0) + hparams.add_hparam("multihead_attention_value_channels", 0) + hparams.add_hparam("pos", "timing") # timing, none + hparams.add_hparam( + "encoder_layers", ("n,att,m,d,a," "n,ffn,m,d,a,") * 6 + "n,d") + hparams.add_hparam( + "decoder_layers", + ("n,att,m,d,a," "n,enc-att,m,d,a," "n,ffn,m,d,a,") * 6 + "n,d") + # Number of model shards - each one has separate parameters. + # Changing this number invalidates checkpoints. + hparams.add_hparam("num_model_shards", 8) + return hparams + + +@registry.register_hparams +def transformer_symshard_sh4(): + """4 shards instead of 8. Similar model size to transformer_tpu().""" + hparams = transformer_symshard_base() + hparams.num_model_shards = 4 + return hparams + + +@registry.register_hparams +def transformer_symshard_lm_0(): + """For language modeling - suggested problem languagemodel_lm1b8k_packed.""" + hparams = transformer_symshard_base() + hparams.label_smoothing = 0 + return hparams + + +@registry.register_hparams +def transformer_symshard_h4(): + """4 heads per shard.""" + hparams = transformer_symshard_base() + hparams.encoder_layers = ("n,multihead-att,m,d,a," "n,ffn,m,d,a,") * 6 + "n,d" + hparams.decoder_layers = ( + ("n,multihead-att,m,d,a," "n,enc-multihead-att,m,d,a," "n,ffn,m,d,a,") * 6 + + "n,d") + return hparams diff --git a/tensor2tensor/models/research/transformer_vae.py b/tensor2tensor/models/research/transformer_vae.py new file mode 100644 index 000000000..84644446d --- /dev/null +++ b/tensor2tensor/models/research/transformer_vae.py @@ -0,0 +1,1046 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
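The `encoder_layers` / `decoder_layers` hparams above are comma-separated layer specs that `_layer_stack` splits and applies one token at a time: `n` = normalize, `att` / `multihead-att` = self-attention, `enc-att` / `enc-multihead-att` = attention over the encoder, `ffn` = feed-forward, `m` = mix across shards, `d` = dropout, `a` = add to the residual accumulator. A minimal sketch of how such a spec expands, using only the string handling shown in `_layer_stack` (the two-block spec is illustrative):

```python
# Expand and split a layer spec the same way _layer_stack does.
spec = ("n,att,m,d,a," "n,ffn,m,d,a,") * 2 + "n,d"  # two blocks for brevity
layers = spec.strip(",").split(",")
print(layers)
# ['n', 'att', 'm', 'd', 'a', 'n', 'ffn', 'm', 'd', 'a',
#  'n', 'att', 'm', 'd', 'a', 'n', 'ffn', 'm', 'd', 'a', 'n', 'd']
```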
+ +"""AE Transformer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import math +import os +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_image_attention as cia +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import discretization +from tensor2tensor.layers import latent_layers +from tensor2tensor.layers import modalities +from tensor2tensor.models import transformer +from tensor2tensor.utils import beam_search +from tensor2tensor.utils import contrib +from tensor2tensor.utils import expert_utils +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +_DO_SUMMARIES = True + + +def residual_conv(x, repeat, k, hparams, name, reuse=None): + """A stack of convolution blocks with residual connections.""" + with tf.variable_scope(name, reuse=reuse): + dilations_and_kernels = [((1, 1), k) for _ in range(3)] + for i in range(repeat): + with tf.variable_scope("repeat_%d" % i): + y = common_layers.conv_block( + common_layers.layer_norm(x, hparams.hidden_size, name="lnorm"), + hparams.hidden_size, + dilations_and_kernels, + padding="SAME", + name="residual_conv") + y = tf.nn.dropout(y, 1.0 - hparams.dropout) + x += y + return x + + +def attend(x, source, hparams, name): + """Self-attention layer with source as memory antecedent.""" + with tf.variable_scope(name): + x = tf.squeeze(x, axis=2) + if len(source.get_shape()) > 3: + source = tf.squeeze(source, axis=2) + source = common_attention.add_timing_signal_1d(source) + y = common_attention.multihead_attention( + common_layers.layer_preprocess(x, hparams), source, None, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, hparams.num_heads, + hparams.attention_dropout) + res = common_layers.layer_postprocess(x, y, hparams) + return tf.expand_dims(res, axis=2) + + +def decompress_step(source, hparams, first_relu, is_2d, name): + """Decompression function.""" + with tf.variable_scope(name): + shape = common_layers.shape_list(source) + multiplier = 4 if is_2d else 2 + kernel = (1, 1) if is_2d else (1, 1) + thicker = common_layers.conv_block( + source, hparams.hidden_size * multiplier, [((1, 1), kernel)], + first_relu=first_relu, name="decompress_conv") + if is_2d: + return tf.depth_to_space(thicker, 2) + return tf.reshape(thicker, [shape[0], shape[1] * 2, 1, hparams.hidden_size]) + + +def top_k_softmax(x, k): + """Calculate softmax(x), select top-k and rescale to sum to 1.""" + x = tf.nn.softmax(x) + top_x, _ = tf.nn.top_k(x, k=k+1) + min_top = tf.reduce_min(top_x, axis=-1, keepdims=True) + x = tf.nn.relu((x - min_top) + 1e-12) + x /= tf.reduce_sum(x, axis=-1, keepdims=True) + return x, tf.reduce_max(top_x, axis=-1) + + +def top_k_experts(x, k, hparams): + x_shape = common_layers.shape_list(x) + x_flat = tf.reshape(x, [-1, common_layers.shape_list(x)[-1]]) + is_training = hparams.mode == tf_estimator.ModeKeys.TRAIN + gates, load = expert_utils.noisy_top_k_gating( + x_flat, 2 ** hparams.z_size, is_training, k) + gates_shape = [x_shape[0], x_shape[1], x_shape[2], 2 ** hparams.z_size] + gates = tf.reshape(gates, gates_shape) + load_loss = expert_utils.cv_squared(load) + return gates, load_loss + + +def compress(x, c, 
is_2d, hparams, name): + """Compress.""" + with tf.variable_scope(name): + # Run compression by strided convs. + cur = x + k1 = (3, 3) if is_2d else (3, 1) + k2 = (2, 2) if is_2d else (2, 1) + cur = residual_conv(cur, hparams.num_compress_steps, k1, hparams, "rc") + if c is not None and hparams.do_attend_compress: + cur = attend(cur, c, hparams, "compress_attend") + for i in range(hparams.num_compress_steps): + if hparams.do_residual_compress: + cur = residual_conv(cur, hparams.num_compress_steps, k1, hparams, + "rc_%d" % i) + cur = common_layers.conv_block( + cur, hparams.hidden_size, [((1, 1), k2)], + strides=k2, name="compress_%d" % i) + return cur + + +def encode(x, x_space, hparams, name): + """Transformer preparations and encoder.""" + with tf.variable_scope(name): + (encoder_input, encoder_self_attention_bias, + ed) = transformer.transformer_prepare_encoder(x, x_space, hparams) + encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.dropout) + return transformer.transformer_encoder( + encoder_input, encoder_self_attention_bias, hparams), ed + + +def decode_transformer(encoder_output, + encoder_decoder_attention_bias, + targets, + hparams, + name, + task=None, + causal=True): + """Original Transformer decoder.""" + orig_hparams = hparams + with tf.variable_scope(name): + if task is None: + task = hparams.task + if task == "translate": + targets = common_layers.flatten4d3d(targets) + + decoder_input, decoder_self_bias = ( + transformer.transformer_prepare_decoder(targets, hparams)) + + decoder_input = tf.nn.dropout(decoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + + if not causal: + decoder_self_bias *= 0. + + decoder_output = transformer.transformer_decoder( + decoder_input, + encoder_output, + decoder_self_bias, + encoder_decoder_attention_bias, + hparams) + decoder_output = tf.expand_dims(decoder_output, axis=2) + else: + assert task == "image" + inputs = None + # have to reshape targets as b, 32, 32, 3 * hidden size] beacuse otherwise + # prepare_image will choke + targets = tf.reshape(targets, [tf.shape(targets)[0], hparams.img_len, + hparams.img_len, + hparams.num_channels*hparams.hidden_size]) + + # Prepare decoder inputs and bias. + # TODO(nikip): Make prepare_decoder return bias + decoder_input, _, _ = cia.prepare_decoder(targets, hparams) + bias = None + + # Add class label to decoder input. + if not hparams.drop_inputs: + decoder_input += tf.reshape( + inputs, + [common_layers.shape_list(targets)[0], 1, 1, hparams.hidden_size]) + decoder_output = cia.transformer_decoder_layers( + decoder_input, + encoder_output=None, + num_layers=hparams.num_decoder_layers or hparams.num_hidden_layers, + hparams=hparams, + self_attention_bias=bias, + attention_type=hparams.dec_attention_type, + name="decoder") + decoder_output_shape = common_layers.shape_list(decoder_output) + decoder_output = tf.reshape(decoder_output, [decoder_output_shape[0], -1, 1, + hparams.hidden_size]) + # Expand since t2t expects 4d tensors. 
+ hparams = orig_hparams + return decoder_output + + +def multinomial_sample(x, vocab_size, temperature): + """Multinomial sampling from a n-dimensional tensor.""" + if temperature > 0: + samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]) / temperature, 1) + else: + samples = tf.argmax(x, axis=-1) + reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1]) + return tf.to_int32(reshaped_samples) + + +def ae_latent_softmax(latents_pred, latents_discrete, hparams): + """Latent prediction and loss.""" + vocab_size = 2 ** hparams.z_size + if hparams.num_decode_blocks < 2: + latents_logits = tf.layers.dense(latents_pred, vocab_size, + name="extra_logits") + if hparams.logit_normalization: + latents_logits *= tf.rsqrt(1e-8 + + tf.reduce_mean(tf.square(latents_logits))) + + loss = None + if latents_discrete is not None: + if hparams.soft_em: + # latents_discrete is actually one-hot of multinomial samples + assert hparams.num_decode_blocks == 1 + loss = tf.nn.softmax_cross_entropy_with_logits_v2( + labels=latents_discrete, logits=latents_logits) + else: + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=latents_discrete, logits=latents_logits) + sample = multinomial_sample( + latents_logits, vocab_size, hparams.sampling_temp) + return sample, loss + + # Multi-block case. + vocab_bits = int(math.log(vocab_size, 2)) + assert vocab_size == 2**vocab_bits + assert vocab_bits % hparams.num_decode_blocks == 0 + block_vocab_size = 2**(vocab_bits // hparams.num_decode_blocks) + latents_logits = [ + tf.layers.dense( + latents_pred, block_vocab_size, name="extra_logits_%d" % i) + for i in range(hparams.num_decode_blocks) + ] + loss = None + if latents_discrete is not None: + losses = [] + for i in range(hparams.num_decode_blocks): + d = tf.floormod(tf.floordiv(latents_discrete, + block_vocab_size**i), block_vocab_size) + losses.append(tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=d, logits=latents_logits[i])) + loss = sum(losses) + samples = [multinomial_sample(l, block_vocab_size, hparams.sampling_temp) + for l in latents_logits] + sample = sum([s * block_vocab_size**i for i, s in enumerate(samples)]) + return sample, loss + + +def ae_latent_sample_beam(latents_dense_in, inputs, ed, embed, hparams): + """Sample from the latent space in the autoencoder.""" + vocab_size = 2**hparams.z_size + beam_size = 1 # TODO(lukaszkaiser): larger beam sizes seem to work bad. + inputs = tf.tile(inputs, [beam_size, 1, 1]) + ed = tf.tile(ed, [beam_size, 1, 1, 1]) + + def symbols_to_logits_fn(ids): + """Go from ids to logits.""" + ids = tf.expand_dims(ids, axis=2) # Ids start with added all-zeros. + latents_discrete = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0]]) + + with tf.variable_scope(tf.get_variable_scope(), reuse=False): + latents_dense = embed(latents_discrete) + latents_pred = decode_transformer( + inputs, ed, latents_dense, hparams, "extra") + logits = tf.layers.dense(latents_pred, vocab_size, name="extra_logits") + current_output_position = common_layers.shape_list(ids)[1] - 1 + logits = logits[:, current_output_position, :, :] + return tf.squeeze(logits, axis=[1]) + + initial_ids = tf.zeros([tf.shape(latents_dense_in)[0]], dtype=tf.int32) + length = tf.shape(latents_dense_in)[1] + ids, _, _ = beam_search.beam_search( + symbols_to_logits_fn, initial_ids, beam_size, length, + vocab_size, alpha=0.0, eos_id=-1, stop_early=False) + + res = tf.expand_dims(ids[:, 0, :], axis=2) # Pick first beam. + return res[:, 1:] # Remove the added all-zeros from ids. 
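+
+
+# Illustrative sketch (exposition only, not part of the model code): how the
+# multi-block case of ae_latent_softmax above factorizes a latent id into
+# base-`block_vocab_size` digits and recombines the per-block samples. The
+# numbers assume z_size=4 and num_decode_blocks=2, so vocab_size=16 and
+# block_vocab_size=4; they are only for illustration.
+#
+#   latent_id = 13                                 # in [0, vocab_size)
+#   digit_0 = (latent_id // 4**0) % 4              # == 1, from extra_logits_0
+#   digit_1 = (latent_id // 4**1) % 4              # == 3, from extra_logits_1
+#   recombined = digit_0 * 4**0 + digit_1 * 4**1   # == 13
+#
+# Each block predicts one digit with its own softmax; the per-digit
+# cross-entropy losses are summed, mirroring the tf.floordiv/tf.floormod
+# logic in ae_latent_softmax.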
+ + +def ae_latent_sample(latents_dense, inputs, ed, embed, iters, hparams): + """Sample from the latent space in the autoencoder.""" + if hparams.num_decode_blocks < 2 and hparams.sampling_temp == 0.0: + # TODO(lukaszkaiser): beam-search only works in non-blocked mode for now. + tf.logging.info("Running beam-search for latents with beam size 1.") + return ae_latent_sample_beam(latents_dense, inputs, ed, embed, hparams) + latents_pred = decode_transformer(inputs, ed, latents_dense, hparams, "extra") + latents_discrete, _ = ae_latent_softmax(latents_pred, None, hparams) + + def next_bit(latents_discrete, i): + latents_discrete_prev = latents_discrete + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + latents_dense = embed(latents_discrete) + latents_pred = decode_transformer( + inputs, ed, latents_dense, hparams, "extra") + latents_discrete, _ = ae_latent_softmax(latents_pred, None, hparams) + return tf.concat([latents_discrete_prev[:, :(i+1), :], + latents_discrete[:, (i+1):, :]], axis=1) + + for i in range(iters): + latents_discrete = next_bit(latents_discrete, i) + return latents_discrete + + +def ae_transformer_internal(inputs, + targets, + target_space, + hparams, + cache=None, + predict_mask=1.0): + """AE Transformer, main step used for training.""" + # Summaries break with the do_refine cond, turn them off in that case. + global _DO_SUMMARIES + if hparams.do_refine: + _DO_SUMMARIES = False + + # Prepare. + if inputs is not None: + batch_size = common_layers.shape_list(inputs)[0] + else: + batch_size = common_layers.shape_list(targets)[0] + targets = tf.reshape(targets, [batch_size, -1, 1, hparams.hidden_size]) + + # Encoder. + if inputs is not None: + inputs = common_layers.flatten4d3d(inputs) + inputs, ed = encode(inputs, target_space, hparams, "input_enc") + inputs_ex, ed_ex = inputs, ed + else: + ed, inputs_ex, ed_ex = None, None, None + + # Autoencoding. 
+ losses = {"extra": tf.constant(0.0), "latent_pred": tf.constant(0.0), + "neg_q_entropy": tf.constant(0.0)} + if hparams.do_ae: + # flatten here + original_targets = targets + original_targets_shape = tf.shape(original_targets) + if hparams.task == "image": + cia.maybe_reshape_4d_to_3d(targets) + if hparams.task == "translate": + if inputs is not None: + max_targets_len_from_inputs = tf.concat([inputs, inputs], axis=1) + else: + max_targets_len_from_inputs = targets + else: + assert hparams.task == "image" + max_targets_len_from_inputs = targets + if hparams.word_shuffle: + tf.logging.info("Using word shuffle with rate = {}".format( + hparams.word_shuffle)) + targets_idx = tf.range(start=0, + limit=common_layers.shape_list(targets)[1], + delta=1) + targets_idx = tf.to_float(targets_idx) + noise = tf.random_uniform(shape=common_layers.shape_list(targets_idx), + minval=0, + maxval=1 + hparams.word_shuffle) + targets_idx += noise + permutation = contrib.framework().argsort(targets_idx) + targets_permuted = tf.gather(targets, indices=permutation, axis=1) + targets = targets_permuted + targets, _ = common_layers.pad_to_same_length( + targets, max_targets_len_from_inputs, + final_length_divisible_by=2**hparams.num_compress_steps) + # Add positional information + targets_shape = common_layers.shape_list(targets) + targets = tf.reshape(targets, [targets_shape[0], targets_shape[1], + targets_shape[3]]) + targets = common_attention.add_positional_embedding( + targets, hparams.max_length, name="targets_position") + targets = tf.reshape(targets, shape=targets_shape) + if hparams.word_dropout: + mask = tf.random_uniform(shape=common_layers.shape_list(targets), + minval=0.0, maxval=1.0) + targets_noisy = tf.where(mask > hparams.word_dropout, targets, + tf.zeros_like(targets)) + else: + targets_noisy = targets + + targets_c = compress(targets_noisy, inputs, False, hparams, "compress") + if hparams.mode != tf_estimator.ModeKeys.PREDICT: + # Compress and bottleneck. + latents_dense, latents_discrete, extra_loss, embed, neg_q_entropy = ( + hparams.bottleneck(inputs=targets_c, + filter_size=hparams.compress_filter_size, + mode=hparams.mode, + name="vc")) + if _DO_SUMMARIES: + tf.summary.histogram("b0", tf.reshape(latents_discrete[:, 0, :], [-1])) + pc = common_layers.inverse_exp_decay(hparams.startup_steps) + pc = pc if hparams.mode == tf_estimator.ModeKeys.TRAIN else 1.0 + cond = tf.less(tf.random_uniform([batch_size]), pc) + latents_dense = tf.where(cond, latents_dense, targets_c) + # TODO(lukaszkaiser): return extra losses batchwise, multiply before mean. + losses["extra"] = extra_loss * tf.reduce_mean(tf.to_float(cond)) + # Extra loss predicting latent code from input. Discrete only. + if hparams.bottleneck_kind not in ["dense", "vae"]: + latents_pred = decode_transformer( + inputs_ex, ed_ex, + embed(latents_discrete), hparams, "extra", + task="translate") + _, latent_pred_loss = ae_latent_softmax( + latents_pred, tf.stop_gradient(latents_discrete), hparams) + + # Scale by latent dimension for summary so we can compare across + # batches. 
+ if _DO_SUMMARIES: + tf.summary.scalar("latent_pred_loss_mean", + tf.reduce_mean(latent_pred_loss)) + if hparams.sum_over_latents: + latent_pred_loss = tf.reduce_sum(latent_pred_loss, [1, 2]) + + losses["latent_pred"] = tf.reduce_mean( + latent_pred_loss * tf.to_float(cond)) * hparams.prior_scale + losses["neg_q_entropy"] = neg_q_entropy * hparams.entropy_scale + else: + inputs_c = decode_transformer(inputs, ed, targets_c, hparams, "dec_c") + losses["latent_pred"] = tf.reduce_mean( + tf.squared_difference(inputs_c, targets_c)) * 20 + def bn_inputs(): + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + bn, _, _, _, _ = hparams.bottleneck( + inputs=inputs_c, + filter_size=hparams.compress_filter_size, + mode=hparams.mode, + name="vc") + return bn + inputs_c = bn_inputs() + ptc = 1.0 - common_layers.inverse_lin_decay(200000) * 0.5 + ptc = ptc if hparams.mode == tf_estimator.ModeKeys.TRAIN else 1.0 + latents_dense = tf.where(tf.less(tf.random_uniform([batch_size]), ptc), + latents_dense, inputs_c) + else: + if hparams.bottleneck_kind in ["dense", "vae"]: + inputs_c = decode_transformer(inputs, ed, targets_c, hparams, "dec_c") + latents_dense, _, _, _, _ = hparams.bottleneck( + inputs=inputs_c, + filter_size=hparams.compress_filter_size, + mode=hparams.mode, + name="vc") + else: + latent_len = common_layers.shape_list(targets_c)[1] + _, _, _, embed, _ = hparams.bottleneck( + inputs=targets_c, + filter_size=hparams.compress_filter_size, + name="vc") + latents_dense = tf.zeros_like(targets_c[:, :latent_len, :, :]) + if cache is None: + cache = ae_latent_sample( + latents_dense, inputs_ex, ed_ex, embed, 16, hparams) + latents_dense = embed(cache) + # Postprocess. + d = latents_dense + d_shape = common_layers.shape_list(d) + d = tf.reshape(d, [d_shape[0], d_shape[1], d_shape[3]]) + d = common_attention.add_positional_embedding( + d, hparams.max_length, name="latents_position") + d = tf.reshape(d, shape=d_shape) + + # decompressing the dense latents + for i in range(hparams.num_compress_steps): + j = hparams.num_compress_steps - i - 1 + d = residual_conv(d, 1, (3, 1), hparams, "decompress_rc_%d" % j) + if inputs is not None and hparams.do_attend_decompress: + d = attend(d, inputs, hparams, "decompress_attend_%d" % j) + d = decompress_step(d, hparams, i > 0, False, "decompress_%d" % j) + + # Masking. + if hparams.do_mask: + masking = common_layers.inverse_lin_decay(hparams.mask_startup_steps) + masking *= common_layers.inverse_exp_decay( + hparams.mask_startup_steps // 4) # Not much at start. 
+ if not hparams.do_refine: + masking -= tf.random_uniform([]) * hparams.unmasked_percentage + masking = tf.minimum(tf.maximum(masking, 0.0), 1.0) + if hparams.use_predict_mask: + masking = predict_mask + if hparams.mode == tf_estimator.ModeKeys.PREDICT: + masking = predict_mask + mask = tf.less(masking, tf.random_uniform( + common_layers.shape_list(targets)[:-1])) + mask = tf.expand_dims(tf.to_float(mask), 3) + + # targets is always [batch, length, 1, depth] + targets = mask * targets + (1.0 - mask) * d + # reshape back to 4d here + if hparams.task == "image": + targets = tf.reshape(targets, original_targets_shape) + else: + targets = d + + res = decode_transformer(inputs, ed, targets, hparams, "decoder", + causal=hparams.causal) + if hparams.do_ae: + if hparams.do_mask and hparams.do_refine: + def refine_res(): + # return residual_conv(res, 1, (5, 1), hparams, "refine") + r, _ = encode(tf.squeeze(res, axis=[2]), + target_space, hparams, "refine_enc") + return tf.expand_dims(r, axis=2) + masked_batches = tf.reduce_sum(mask, axis=[1, 2, 3]) + all_masked = tf.less(masked_batches, 0.1) + res = tf.where(all_masked, refine_res(), res) + # We'll start training the extra model of latents after mask_startup_steps. + nonlatent_steps = hparams.mask_startup_steps + latent_time = tf.less(nonlatent_steps, + tf.to_int32(tf.train.get_global_step())) + losses["latent_pred"] *= tf.to_float(latent_time) + + # res was generated from padded targets, which means it has some extra + # elements. These can cause shape problems when computing loss with respect to + # the original (unpadded) targets. So we remove their extra elements here. + res = res[:, :original_targets_shape[1], :, :] + + data_dim = common_layers.shape_list(res)[1] + latent_dim = common_layers.shape_list(targets_c)[1] + return res, losses, cache, data_dim, latent_dim + + +@registry.register_model +class TransformerAE(t2t_model.T2TModel): + """Autoencoder-augmented Transformer.""" + + def __init__(self, *args, **kwargs): + super(TransformerAE, self).__init__(*args, **kwargs) + self.predict_mask = 1.0 + + # Define bottleneck function + self._hparams.bottleneck = functools.partial( + discretization.discrete_bottleneck, + hidden_size=self._hparams.hidden_size, + z_size=self._hparams.z_size, + filter_size=self._hparams.filter_size, + bottleneck_kind=self._hparams.bottleneck_kind, + num_blocks=self._hparams.num_blocks, + num_residuals=self.hparams.num_residuals, + reshape_method=self._hparams.reshape_method, + beta=self._hparams.beta, + ema=self._hparams.ema, + epsilon=self._hparams.epsilon, + decay=self._hparams.decay, + random_top_k=self._hparams.random_top_k, + soft_em=self.hparams.soft_em, + num_samples=self.hparams.num_samples, + softmax_k=self._hparams.softmax_k, + temperature_warmup_steps=self._hparams.temperature_warmup_steps, + do_hard_gumbel_softmax=self._hparams.do_hard_gumbel_softmax, + num_flows=self._hparams.num_flows, + approximate_gs_entropy=self._hparams.approximate_gs_entropy, + discrete_mix=self._hparams.d_mix, + noise_dev=self._hparams.noise_dev, + startup_steps=self.hparams.startup_steps, + summary=_DO_SUMMARIES) + # Set the discretization bottleneck specific things here + if self._hparams.bottleneck_kind in ["dvq", "gumbel-softmax-dvq"]: + z_size_per_residual = self._hparams.z_size / self._hparams.num_residuals + block_dim = int(self._hparams.hidden_size // self._hparams.num_blocks) + block_v_size = 2**(z_size_per_residual / self._hparams.num_blocks) + block_v_size = int(block_v_size) + + if self._hparams.reshape_method == 
"project": + tf.logging.info("Using projections for DVQ") + tf.logging.info("Trainable projections = {}".format( + self._hparams.trainable_projections)) + + projection_tensors = tf.get_variable( + name="projection", + shape=[ + self._hparams.num_residuals, self._hparams.num_blocks, + self._hparams.hidden_size, block_dim + ], + initializer=tf.initializers.glorot_uniform(), + trainable=self._hparams.trainable_projections) + + self._hparams.bottleneck = functools.partial( + self._hparams.bottleneck, projection_tensors=projection_tensors) + elif self._hparams.reshape_method == "slice": + tf.logging.info("Using slices for DVQ") + else: + raise ValueError("Unknown reshape method") + + means = tf.get_variable( + name="means", + shape=[ + self._hparams.num_residuals, self._hparams.num_blocks, + block_v_size, block_dim + ], + initializer=tf.uniform_unit_scaling_initializer()) + + # Create the shadow variables if we are using EMA + ema_count = None + ema_means = None + if self._hparams.ema: + ema_count = [] + for i in range(self._hparams.num_residuals): + ema_count_i = tf.get_variable( + "ema_count_{}".format(i), + [self._hparams.num_blocks, block_v_size], + initializer=tf.constant_initializer(0), + trainable=False) + ema_count.append(ema_count_i) + with tf.colocate_with(means): + ema_means = [] + for i in range(self._hparams.num_residuals): + ema_means_i = tf.get_variable( + "ema_means_{}".format(i), + [self._hparams.num_blocks, block_v_size, block_dim], + initializer=(lambda shape, dtype=None, partition_info=None, # pylint: disable=g-long-lambda + verify_shape=None: + means.initialized_value()[i]), + trainable=False) + ema_means.append(ema_means_i) + + # Update bottleneck + self._hparams.bottleneck = functools.partial( + self._hparams.bottleneck, + means=means, + ema_count=ema_count, + ema_means=ema_means) + + def body(self, features): + inputs = features["inputs"] if "inputs" in features else None + if self._hparams.drop_inputs: + inputs = None + reuse = "cache_raw" in features + with tf.variable_scope(tf.get_variable_scope(), reuse=reuse): + res, loss, _, self._data_dim, self._latent_dim = ae_transformer_internal( + inputs, + features["targets"], + features["target_space_id"], + self._hparams, + features.get("cache_raw", None), + predict_mask=self.predict_mask) + return res, loss + + def prepare_features_for_infer(self, features): + if self._hparams.do_mask or not self._hparams.do_ae: + return features + beam_batch_size = self._decode_hparams.beam_size + beam_batch_size *= self._decode_hparams.batch_size + inputs = tf.zeros([beam_batch_size, 1, 1, self._hparams.hidden_size]) + inputs = inputs if "inputs" in features else None + if self._hparams.drop_inputs or not self.has_input: + inputs = None + targets = tf.zeros([beam_batch_size, 1, 1, self._hparams.hidden_size]) + with tf.variable_scope("body"): + _, _, cache, _, _ = ae_transformer_internal( + inputs, targets, features["target_space_id"], self._hparams) + features["cache_raw"] = cache + + def infer(self, features=None, decode_length=50, beam_size=1, top_beams=1, + alpha=0.0, use_tpu=False): + """Produce predictions from the model.""" + if not self._hparams.do_mask: + infer_out = super(TransformerAE, self).infer( + features, decode_length, beam_size, top_beams, alpha, use_tpu=use_tpu) + return infer_out["outputs"] + if not features: + features = {} + inputs_old = None + if "inputs" in features and len(features["inputs"].shape) < 4: + inputs_old = features["inputs"] + features["inputs"] = tf.expand_dims(features["inputs"], 2) + + # Create an 
initial targets tensor. + if "partial_targets" in features: + initial_output = tf.convert_to_tensor(features["partial_targets"]) + else: + # inputs might not be present in features (e.g.: language modeling), + # in which case we fallback to 'infer_targets' for calculating initial + # input shape, type, etc. + inputs_or_targets = features.get("inputs", features.get("infer_targets")) + batch_size = common_layers.shape_list(inputs_or_targets)[0] + length = common_layers.shape_list(inputs_or_targets)[1] + hidden_dim = common_layers.shape_list(inputs_or_targets)[-1] + target_length = tf.to_int32(2.0 * tf.to_float(length)) + initial_output = tf.zeros((batch_size, target_length, 1, hidden_dim), + dtype=inputs_or_targets.dtype) + + features["targets"] = initial_output + logits, _ = self(features) # pylint: disable=not-callable + # this should only happen if we're doing target_modality not real + if inputs_or_targets.dtype == tf.float32: + samples = logits + else: + samples = tf.argmax(logits, axis=-1) + + # More steps. + self.predict_mask = 0.0 # Use the provided targets this time. + how_many_more_steps = 0 # Set to 1 or more for Gibbs-like sampling. + for _ in range(how_many_more_steps): + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + features["targets"] = samples + logits, _ = self(features) # pylint: disable=not-callable + if inputs_or_targets.dtype == tf.float32: + # When target_modality is real, the last axis does not represent + # classes, so it should not be argmax'ed + samples = logits + else: + samples = tf.argmax(logits, axis=-1) + + self.predict_mask = 1.0 + if inputs_old is not None: # Restore to not confuse Estimator. + features["inputs"] = inputs_old + return samples + + def estimator_spec_eval(self, features, logits, labels, loss, losses_dict): + """Constructs `tf.estimator.EstimatorSpec` for EVAL (evaluation) mode.""" + estimator_spec = super(TransformerAE, self).estimator_spec_eval( + features, logits, labels, loss, losses_dict) + if common_layers.is_xla_compiled(): + # For TPUs (and XLA more broadly?), do not add summary hooks that depend + # on losses; they are not supported. 
+ return estimator_spec + + summary_op = tf.get_collection(tf.GraphKeys.SUMMARIES, scope="losses") + summary_op.extend(tf.get_collection(tf.GraphKeys.SUMMARIES, scope="loss")) + summary_op.append(tf.summary.scalar("loss", loss)) + summary_saver_hook = tf.train.SummarySaverHook( + save_steps=100, + summary_op=summary_op, + output_dir=os.path.join(self.hparams.model_dir, "eval")) + + hooks = list(estimator_spec.evaluation_hooks) + hooks.append(summary_saver_hook) + return estimator_spec._replace(evaluation_hooks=hooks) + + def _summarize_losses(self, losses_dict): + """Adds `tf.summary`s to all terms in the losses dictionary.""" + super(TransformerAE, self)._summarize_losses(losses_dict) + nats_per_dim, bits_per_dim = latent_layers.compute_nats_and_bits_per_dim( + data_dim=self._data_dim, + latent_dim=self._latent_dim, + average_reconstruction=losses_dict["training"], + average_prior=losses_dict["latent_pred"]) + tf.summary.scalar("loss/nats_per_dim", nats_per_dim) + tf.summary.scalar("loss/bits_per_dim", bits_per_dim) + + +@registry.register_hparams +def transformer_ae_small(): + """Set of hyperparameters.""" + hparams = transformer.transformer_small() + hparams.batch_size = 2048 + hparams.learning_rate = 0.2 + hparams.learning_rate_warmup_steps = 4000 + hparams.num_hidden_layers = 3 + hparams.hidden_size = 384 + hparams.filter_size = 2048 + hparams.add_hparam("compress_filter_size", 2048 * 2) + hparams.label_smoothing = 0.0 + hparams.optimizer = "adam" # Can be unstable, maybe try Adam. + hparams.optimizer_adam_epsilon = 1e-9 + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.997 # Needs tuning, try 0.98 to 0.999. + hparams.add_hparam("z_size", 14) + hparams.add_hparam("noise_dev", 0.5) + hparams.add_hparam("d_mix", 0.5) + hparams.add_hparam("logit_normalization", True) + hparams.add_hparam("word_dropout", 0.) + # Bottleneck kinds supported: dense, vae, semhash, gumbel-softmax, dvq. 
+  hparams.add_hparam("bottleneck_kind", "semhash")
+  hparams.add_hparam("num_blocks", 1)
+  hparams.add_hparam("num_decode_blocks", 1)
+  # Add an hparam for the number of residuals.
+  hparams.add_hparam("num_residuals", 1)
+  hparams.add_hparam("word_shuffle", 0.5)
+  hparams.add_hparam("causal", True)
+  # Reshape method for DVQ: slice, project.
+  hparams.add_hparam("reshape_method", "slice")
+  hparams.add_hparam("trainable_projections", False)
+  hparams.add_hparam("unmasked_percentage", 0.1)
+  hparams.add_hparam("do_ae", True)
+  hparams.add_hparam("do_mask", True)
+  hparams.add_hparam("use_predict_mask", True)
+  hparams.add_hparam("do_refine", False)
+  hparams.add_hparam("do_attend_compress", False)
+  hparams.add_hparam("do_attend_decompress", True)
+  hparams.add_hparam("do_residual_compress", False)
+  hparams.add_hparam("drop_inputs", False)
+  hparams.add_hparam("v_size", 1024*64)
+  hparams.add_hparam("max_context_length", 64)
+  hparams.add_hparam("num_compress_steps", 3)
+  hparams.add_hparam("startup_steps", 10000)
+  hparams.add_hparam("mask_startup_steps", 50000)
+  hparams.add_hparam("z_dropout", 0.1)
+  hparams.add_hparam("is_2d", 0)
+  hparams.add_hparam("softmax_k", 0)
+  hparams.add_hparam("decode_autoregressive", True)
+  hparams.add_hparam("do_vae", True)
+  hparams.add_hparam("bit_vae", True)
+  hparams.add_hparam("beta", 0.25)
+  hparams.add_hparam("epsilon", 1e-5)
+  hparams.add_hparam("decay", 0.999)
+  hparams.add_hparam("ema", True)
+  hparams.add_hparam("random_top_k", 1)
+  hparams.add_hparam("soft_em", False)
+  hparams.add_hparam("num_samples", 10)
+  hparams.add_hparam("inv_temp", 1.0)
+  hparams.add_hparam("entropy_scale", 0.0)
+  hparams.add_hparam("prior_scale", 1.0)
+  hparams.add_hparam("do_hard_gumbel_softmax", False)
+  hparams.add_hparam("num_flows", 0)
+  hparams.add_hparam("approximate_gs_entropy", False)
+  hparams.add_hparam("temperature_warmup_steps", 150000)
+  hparams.add_hparam("sum_over_latents", False)
+  hparams.force_full_predict = True
+
+  # task params
+  hparams.add_hparam("task", "translate")  # translate or image tasks supported
+  return hparams
+
+
+@registry.register_hparams
+def imagetransformer_ae_cifar():
+  """Hyperparameters for CIFAR-10 experiments."""
+  hparams = transformer_ae_small()
+  hparams.filter_size = 512
+  hparams.num_compress_steps = 3
+  hparams.startup_steps = 10000
+  hparams.is_2d = 0
+  hparams.learning_rate_warmup_steps = 8000
+  hparams.learning_rate = 0.2
+  hparams.hidden_size = 512
+  hparams.batch_size = 1
+  hparams.max_length = 256
+  hparams.dropout = 0.0
+  hparams.clip_grad_norm = 0.  # i.e. no gradient clipping
+  hparams.optimizer_adam_epsilon = 1e-9
+  hparams.learning_rate_decay_scheme = "noam"
+  hparams.learning_rate = 0.1
+  hparams.initializer_gain = 0.2
+  hparams.num_hidden_layers = 6
+  hparams.initializer = "uniform_unit_scaling"
+  hparams.weight_decay = 0.0
+  hparams.optimizer_adam_beta1 = 0.9
+  hparams.optimizer_adam_beta2 = 0.98
+  hparams.label_smoothing = 0.0
+  hparams.norm_type = "layer"
+  hparams.layer_prepostprocess_dropout = 0.0
+  hparams.num_heads = 8
+  hparams.task = "image"
+  hparams.ffn_layer = "conv_hidden_relu"
+  # All hyperparameters ending in "dropout" are automatically set to 0.0
+  # when not in training mode.
+  hparams.attention_dropout = 0.0
+  hparams.relu_dropout = 0.
+  hparams.pos = "timing"  # timing, none
+  hparams.nbr_decoder_problems = 1
+  hparams.num_output_layers = 3
+  # TODO(trandustin): semhash doesn't work if filter_size != hidden_size. For
+  # now, set default to dvq.
+  hparams.bottleneck_kind = "dvq"
+  hparams.add_hparam("block_size", 1)
+
+  # dilated attention based flags
+  hparams.add_hparam("gap_sizes", [2, 4, 8, 16, 32, 64, 2, 4, 8, 16, 32, 64])
+  hparams.add_hparam("dilated_attention", False)
+
+  # image size related flags
+  # assuming that the image has the same height and width
+  hparams.add_hparam("img_len", 32)
+  hparams.add_hparam("num_channels", 3)
+  # Local attention params
+  hparams.add_hparam("local_and_global_att", False)
+  hparams.add_hparam("block_length", 256)
+  hparams.add_hparam("block_width", 128)
+  hparams.num_encoder_layers = 4
+  hparams.num_decoder_layers = 12
+  hparams.add_hparam("dec_attention_type", cia.AttentionType.LOCAL_1D)
+  hparams.add_hparam("block_raster_scan", False)
+  hparams.add_hparam("shared_rel", False)
+
+  # multipos attention params
+  hparams.add_hparam("q_filter_width", 1)
+  hparams.add_hparam("kv_filter_width", 1)
+
+  hparams.add_hparam("unconditional", False)  # unconditional generation
+
+  hparams.bottom["targets"] = modalities.image_channel_embeddings_bottom
+  hparams.top["targets"] = modalities.image_channel_embeddings_top
+  hparams.drop_inputs = True
+  hparams.do_attend_compress = False
+  hparams.do_attend_decompress = False
+  return hparams
+
+
+@registry.register_hparams
+def imagetransformer_ae_imagenet():
+  """For 64x64 ImageNet. ~56M trainable variables."""
+  hparams = imagetransformer_ae_cifar()
+  hparams.max_length = int(64 * 64 * 3)
+  hparams.img_len = 64
+  hparams.num_heads = 4  # Heads are expensive on TPUs.
+  # Reduce architecture from 32x32 CIFAR-10 in order to fit in memory.
+  hparams.num_decoder_layers = 8
+  hparams.num_compress_steps = 2
+  return hparams
+
+
+@registry.register_hparams
+def transformer_ae_base():
+  """Set of hyperparameters."""
+  hparams = transformer_ae_small()
+  hparams.batch_size = 2048
+  hparams.hidden_size = 512
+  hparams.filter_size = 4096
+  hparams.num_hidden_layers = 6
+  return hparams
+
+
+@registry.register_hparams
+def transformer_ae_a3():
+  """Set of hyperparameters."""
+  hparams = transformer_ae_base()
+  hparams.batch_size = 4096
+  hparams.layer_prepostprocess_dropout = 0.3
+  hparams.optimizer = "Adafactor"
+  hparams.learning_rate = 0.25
+  hparams.learning_rate_warmup_steps = 10000
+  return hparams
+
+
+@registry.register_hparams
+def transformer_ae_a6():
+  """Best hparams for transformer with semhash."""
+  hparams = transformer_ae_a3()
+  hparams.optimizer = "adam"
+  hparams.noise_dev = 0.5
+  return hparams
+
+
+@registry.register_hparams
+def transformer_ae_a8():
+  """Set of hyperparameters."""
+  hparams = transformer_ae_a3()
+  hparams.optimizer = "Adafactor"
+  hparams.noise_dev = 0.5
+  return hparams
+
+
+@registry.register_hparams
+def transformer_ae_base_tpu():
+  """Base config adjusted for TPU."""
+  hparams = transformer_ae_base()
+  transformer.update_hparams_for_tpu(hparams)
+  hparams.batch_size = 512
+  return hparams
+
+
+@registry.register_hparams
+def transformer_ae_base_noatt():
+  """Set of hyperparameters."""
+  hparams = transformer_ae_base()
+  hparams.reshape_method = "slice"
+  hparams.bottleneck_kind = "dvq"
+  hparams.hidden_size = 512
+  hparams.num_blocks = 1
+  hparams.num_decode_blocks = 1
+  hparams.z_size = 12
+  hparams.do_attend_decompress = False
+  return hparams
+
+
+@registry.register_hparams
+def transformer_ae_small_noatt():
+  """Set of hyperparameters."""
+  hparams = transformer_ae_small()
+  hparams.reshape_method = "slice"
+  hparams.bottleneck_kind = "dvq"
+  hparams.hidden_size = 512
+  hparams.num_blocks = 1
+  hparams.num_decode_blocks = 1
+  hparams.z_size = 12
+
hparams.do_attend_decompress = False + return hparams + + +@registry.register_hparams +def transformer_ae_base_ablation_1(): + hparams = transformer_ae_base_noatt() + hparams.soft_em = True + return hparams + + +@registry.register_hparams +def transformer_ae_base_ablation_2(): + hparams = transformer_ae_base_ablation_1() + hparams.entropy_scale = 0.1 + return hparams + + +@registry.register_hparams +def transformer_ae_base_ablation_3(): + hparams = transformer_ae_base_ablation_2() + hparams.prior_scale = 0.1 + hparams.entropy_scale = 0.1 + return hparams + + +@registry.register_hparams +def transformer_ae_base_ablation_4(): + hparams = transformer_ae_base_ablation_3() + hparams.entropy_scale = 0.0 + hparams.prior_scale = 1.0 + hparams.bottleneck_kind = "gumbel-softmax-dvq" + hparams.do_hard_gumbel_softmax = True + hparams.approximate_gs_entropy = True + return hparams + + +@registry.register_hparams +def transformer_ae_base_ablation_5(): + hparams = transformer_ae_base_ablation_4() + hparams.do_hard_gumbel_softmax = False + return hparams + + +@registry.register_hparams +def transformer_ae_base_iaf(): + hparams = transformer_ae_base_ablation_5() + hparams.num_flows = 1 + hparams.num_samples = 1 + return hparams diff --git a/tensor2tensor/models/research/transformer_vae_flow_prior.py b/tensor2tensor/models/research/transformer_vae_flow_prior.py new file mode 100644 index 000000000..343a457bc --- /dev/null +++ b/tensor2tensor/models/research/transformer_vae_flow_prior.py @@ -0,0 +1,1136 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
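+
+# Illustrative note (exposition only, not part of the original module): the
+# model below is a latent-variable model for non-autoregressive translation.
+# Writing x for the source, y for the target and z for the per-position
+# latents, training optimizes an ELBO-style objective of the form
+#
+#   log p(y|x) >= E_{z ~ q(z|y, x)}[log p(y|z, x)] - KL(q(z|y, x) || p(z|x)),
+#
+# where the prior p(z|x) is either a diagonal Gaussian or a normalizing flow
+# whose log-density is log p_base(f(z)) + log|det df/dz|, as computed in
+# compute_prior_log_prob. compute_iw_marginal instead estimates log p(y|x)
+# from K importance-weighted samples, roughly
+#
+#   log p(y|x) ~ logsumexp_k [log p(y|z_k, x) + log w_k],
+#   log w_k = log p(z_k|x) - log q(z_k|y, x), normalized over k,
+#
+# up to the length normalization applied in that method.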
+
+"""Transformer VAE with Flow Priors for Non-Autoregressive MT."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import copy
+import inspect
+import math
+import six
+
+from tensor2tensor.data_generators import multi_problem
+from tensor2tensor.layers import common_attention
+from tensor2tensor.layers import common_layers
+from tensor2tensor.layers import modalities
+from tensor2tensor.layers import transformer_glow_layers as glow
+from tensor2tensor.layers import transformer_glow_layers_ops as gops
+from tensor2tensor.models import transformer
+from tensor2tensor.models.research import transformer_vae_flow_prior_ops as ops
+from tensor2tensor.utils import contrib
+from tensor2tensor.utils import optimize
+from tensor2tensor.utils import registry
+from tensor2tensor.utils import t2t_model
+import tensorflow.compat.v1 as tf
+from tensorflow.compat.v1 import estimator as tf_estimator
+
+
+@registry.register_model
+class TransformerVaeFlowPrior(t2t_model.T2TModel):
+  """Transformer VAE using flow priors."""
+
+  def __init__(self, *args, **kwargs):
+    super(TransformerVaeFlowPrior, self).__init__(*args, **kwargs)
+    hparams = self._hparams
+    if hparams.prior_type in ["affine", "additive", "rq"]:
+      self._fparams = contrib.training.HParams(**hparams.values())
+      for key, value in self._fparams.values().items():
+        if key.startswith("flow_"):
+          setattr(self._fparams, key[5:], value)
+
+  @property
+  def is_training(self):
+    return self.hparams.mode == tf_estimator.ModeKeys.TRAIN
+
+  @property
+  def is_evaluating(self):
+    return self._hparams.mode == tf_estimator.ModeKeys.EVAL
+
+  @property
+  def is_predicting(self):
+    return self._hparams.mode == tf_estimator.ModeKeys.PREDICT
+
+  def loss_iw(self, logits, features):
+    if isinstance(logits, dict):
+      losses = {}
+      for k, v in six.iteritems(logits):
+        losses[k] = self._loss_single_iw(
+            v,
+            k,
+            features[k],
+            weights=features.get(k + "_mask"))
+
+        n, d = losses[k]
+        if common_layers.should_generate_summaries():
+          tf.summary.scalar(k + "_loss", n / d)
+          tf.summary.scalar(k + "_loss_num", n)
+          tf.summary.scalar(k + "_loss_den", d)
+          if getattr(self.hparams, "visualize_logits_histogram", False):
+            hist = tf.summary.histogram
+            hist(k + "_predict", tf.argmax(tf.squeeze(v), axis=-1))
+            hist(k + "_targets", features[k])
+
+      return tf.add_n([n / d for n, d in losses.values()])
+    else:
+      return self._loss_single_iw(
+          logits,
+          "targets",
+          features["targets"],
+          weights=features.get("targets_mask"))
+
+  def _loss_single_iw(self, logits, feature_name, feature, weights=None):
+    # The current bfloat16 version still uses float32 for most parts of backward
+    # propagation to keep model quality, so cast back before computing the loss
+    # value.
+    no_problem_err_str = (
+        "The default implementation of %s requires that the "
+        "model be used with a Problem. If using a Problem, augment the "
+        "hparams object with trainer_lib.add_problem_hparams. If not, "
+        "override %s.")
+    no_problem_err = (
+        lambda method_name: no_problem_err_str % (method_name, method_name))
+    if not self._problem_hparams:
+      t2t_model.log_warn(no_problem_err("loss"))
+      return (tf.constant(0., dtype=tf.float32),
+              tf.constant(1., dtype=tf.float32))
+
+    # Calculate loss contribution.
+ modality = self._problem_hparams.modality[feature_name] + vocab_size = self._problem_hparams.vocab_size[feature_name] + if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % self._hparams.vocab_divisor + # loss = self._hparams.loss.get(feature_name, modalities.get_loss(modality)) + loss = ops.generic_loss + targets_weights_fn = self._hparams.weights_fn.get( + "targets", modalities.get_weights_fn(modality)) + if weights is None: + loss_num, loss_den = loss(logits, feature, self._hparams, vocab_size, + weights_fn=targets_weights_fn) + else: + + def weights_fn(labels): + """Per-token weights for loss.""" + # Use target_weights_fn() given by modality as well as explicitly given + # weights. + modality_weights = targets_weights_fn(labels) + + # Broadcast 'weights' along minor dimensions (TF's default is major). + explicit_weights = weights + if len(explicit_weights.shape) < len(modality_weights.shape): + explicit_weights = common_layers.expand_squeeze_to_nd( + weights, modality_weights.shape.ndims) + + return explicit_weights * modality_weights + + # Ensure that target.modality_loss() supports "weights_fn" keyword + # argument. If it doesn't and "weights" is specified, raise an exception. + argument_names = inspect.getargspec(loss).args + if "weights_fn" not in argument_names: + raise ValueError( + "Explicit 'weights' given but default loss for modality doesn't " + "support 'weights_fn' keyword argument: %s.loss(%s)." % + (modality, ", ".join(argument_names))) + + loss_num, loss_den = loss( + logits, feature, self._hparams, vocab_size, weights_fn=weights_fn) + + loss_num *= self._problem_hparams.loss_multiplier + + if hasattr(self.hparams, "problem") and hasattr( + self.hparams.problem, "task_list"): + if weights is not None: + raise NotImplementedError("weights not yet implemented in " + "multitask setting.") + loss_num, loss_den, summaries = multi_problem.aggregate_task_losses( + self.hparams, + self._problem_hparams, + logits, + feature_name, + feature + ) + + for key, val in summaries: + tf.summary.scalar(key, val) + + return loss_num, loss_den + + def internal(self, features, real_features): + """Main procedure for both training and inference.""" + inputs = common_layers.flatten4d3d(features["inputs"]) + targets = common_layers.flatten4d3d(features["targets"]) + target_space = features["target_space_id"] + hparams = self._hparams + inputs_mask = ops.embedding_to_non_padding(inputs) + inputs_length = tf.reduce_sum(inputs_mask, axis=-1) + + encoder_output, encoder_decoder_attention_bias = ( + ops.encoder("encoder", hparams, inputs, target_space)) + kwargs = {"encoder_output": encoder_output, + "encoder_decoder_attention_bias": encoder_decoder_attention_bias} + losses, monitor = {}, {} + log_abs_det = tf.constant(0.0) + + if not self.is_predicting: + # Training + targets_mask = ops.embedding_to_non_padding(targets) + targets_length = tf.reduce_sum(targets_mask, axis=-1) + length_diff = targets_length - inputs_length + decoder_self_attention_bias = ( + common_attention.attention_bias_ignore_padding(1.0 - targets_mask)) + z_q, log_q_z, q_dist = self.sample_q( + targets, targets_mask, decoder_self_attention_bias, n_samples=1, + temp=1.0, **kwargs) + + body_output = ops.decoder( + "decoder", z_q, hparams, decoder_self_attention_bias, **kwargs) + logits = self.top(body_output, real_features) + numerator, denominator = self.loss(logits, real_features) + + if not (self.is_evaluating and ( + hparams.compute_kl_refinement or 
hparams.compute_iw_marginal)): + targets_length_pred, lenpred_loss = ops.predict_target_lengths( + encoder_output, inputs_mask, hparams, length_diff) + log_p_z_base, log_abs_det = self.compute_prior_log_prob( + z_q, targets_mask, decoder_self_attention_bias, + check_invertibility=False, **kwargs) + losses, monitor = ops.save_log_loss( + hparams, targets_mask, numerator, denominator, log_q_z, log_abs_det, + log_p_z_base, z_q, lenpred_loss, targets_length_pred, + targets_length) + + if self.is_evaluating: + if hparams.compute_kl_refinement: + z_p, _ = self.sample_p( + targets_length, temp=self._decode_hparams.temp, + check_invertibility=False, targets_mask=targets_mask, **kwargs) + z_dq = self.delta_posterior( + z_p, targets_mask, decoder_self_attention_bias, + self._decode_hparams.n_gibbs_steps, **kwargs) + log_q_z_ = q_dist.log_prob(z_dq) + log_q_z_ = gops.reduce_mean_over_bl_sum_over_c(log_q_z_, targets_mask) + losses = {"training": log_q_z_} + + if hparams.compute_iw_marginal: + # if True: + log_p_y_x = self.compute_iw_marginal( + targets, targets_mask, decoder_self_attention_bias, + real_features, self._decode_hparams.n_samples, **kwargs) + # real_features, 1, **kwargs) + losses = {"training": log_p_y_x} + + return logits, losses, monitor, targets_mask + + else: + # Inference + targets_length, _ = ops.predict_target_lengths( + encoder_output, inputs_mask, hparams) + targets_mask = ops.sequence_mask(targets_length, hparams) + decoder_self_attention_bias = ( + common_attention.attention_bias_ignore_padding(1.0 - targets_mask)) + z_p, _ = self.sample_p( + targets_length, temp=self._decode_hparams.temp, + check_invertibility=False, **kwargs) + z_q = self.delta_posterior( + z_p, targets_mask, decoder_self_attention_bias, + self._decode_hparams.n_gibbs_steps, **kwargs) + # 0, **kwargs) + + body_output = ops.decoder( + "decoder", z_q, hparams, decoder_self_attention_bias, **kwargs) + return body_output, losses, monitor, targets_mask + + def sample_q( + self, targets, targets_mask, decoder_self_attention_bias, n_samples, + temp, **kwargs): + hparams = self._hparams + batch_size, targets_max_length = common_layers.shape_list(targets_mask)[:2] + q_params = ops.posterior("posterior", hparams, targets, targets_mask, + decoder_self_attention_bias, **kwargs) + q_dist = gops.diagonal_normal(q_params, "posterior") + loc, scale = q_dist.loc, q_dist.scale + z_shape = [batch_size, targets_max_length, hparams.latent_size] + iw_z_shape = [n_samples*batch_size, targets_max_length, hparams.latent_size] + if n_samples == 1: + noise = tf.random_normal(z_shape, stddev=temp) + z_q = loc + scale * noise + log_q_z = q_dist.log_prob(z_q) # [B, L, C] + else: + noise = tf.random_normal([n_samples] + z_shape, stddev=temp) + z_q = loc[tf.newaxis, ...] + scale[tf.newaxis, ...] 
* noise + log_q_z = q_dist.log_prob(z_q) # [K, B, L, C] + z_q = tf.reshape(z_q, iw_z_shape) + log_q_z = tf.reshape(log_q_z, iw_z_shape) + return z_q, log_q_z, q_dist + + def compute_iw_marginal( + self, targets, targets_mask, decoder_self_attention_bias, features, + n_samples, reduce_mean=True, **kwargs): + hparams = self._hparams + z_q, log_q_z, _ = self.sample_q( + targets, targets_mask, decoder_self_attention_bias, + n_samples=n_samples, temp=1.0, **kwargs) # [K*B, L, C] + iw_kwargs = {key: ops.prepare_for_iw(value, n_samples) for ( + key, value) in kwargs.items()} + iw_targets_mask = ops.prepare_for_iw(targets_mask, n_samples) + iw_decoder_self_attention_bias = ( + common_attention.attention_bias_ignore_padding(1.0 - iw_targets_mask)) + iw_features = copy.copy(features) + iw_features["targets"] = ops.prepare_for_iw( + features["targets"], n_samples) + + log_p_z_base, log_abs_det = self.compute_prior_log_prob( + z_q, iw_targets_mask, iw_decoder_self_attention_bias, + check_invertibility=False, **iw_kwargs) + log_p_z = log_p_z_base + log_abs_det + + body_output = ops.decoder( + "decoder", z_q, hparams, iw_decoder_self_attention_bias, **iw_kwargs) + logits = self.top(body_output, iw_features) + numerator, denominator = self.loss_iw(logits, iw_features) + numerator = tf.reduce_sum(numerator[..., 0, 0], 1) # [K*B] + denominator = tf.reduce_sum(denominator[..., 0, 0], 1) # [K*B] + log_p_x = -1 * numerator / denominator + log_q_z = gops.reduce_mean_over_l_sum_over_c(log_q_z, iw_targets_mask) + log_p_z = log_p_z / tf.reduce_sum(iw_targets_mask, 1) + + log_p_x, log_q_z, log_p_z = [ops.unprepare_for_iw(ii, n_samples) for ii in [ + log_p_x, log_q_z, log_p_z]] + + log_w_n = log_p_z - log_q_z + log_w_n = tf.nn.log_softmax(log_w_n, axis=0) # [K, B] + + iw_marginal = log_p_x + log_w_n + iw_marginal = tf.reduce_logsumexp(iw_marginal, 0) # [B] + + if reduce_mean: + iw_marginal = tf.cast(tf.reduce_mean(iw_marginal, 0), tf.float32) # [1] + else: + iw_marginal = tf.cast(iw_marginal, tf.float32) # [1] + return iw_marginal + + def argmax_decode(self, z, decoder_self_attention_bias, **kwargs): + hparams = self._hparams + body_output = ops.decoder( + "decoder", z, hparams, decoder_self_attention_bias, **kwargs) + logits = self.top(body_output, {"targets": None}) + targets = tf.argmax(logits, axis=-1) + targets_emb = self.bottom({"targets": targets})["targets"][..., 0, :] + return targets, targets_emb + + def delta_posterior( + self, z, targets_mask, decoder_self_attention_bias, n_gibbs_steps, + **kwargs): + hparams = self._hparams + for _ in range(n_gibbs_steps): + _, targets_emb = self.argmax_decode( + z, decoder_self_attention_bias, **kwargs) + q_params = ops.posterior( + "posterior", hparams, targets_emb, targets_mask, + decoder_self_attention_bias, **kwargs) + q_dist = gops.diagonal_normal(q_params, "posterior") + z = q_dist.loc # [B, L, C] + return z + + def compute_prior_log_prob( + self, z_q, targets_mask, decoder_self_attention_bias, + check_invertibility=False, **kwargs): + hparams = self._hparams + batch_size, targets_max_length = ( + common_layers.shape_list(targets_mask)[:2]) + prior_shape = [batch_size, targets_max_length, hparams.latent_size] + log_abs_det = tf.zeros([batch_size]) + + if hparams.prior_type == "standard_normal": + log_p_z_base = gops.standard_normal_density(z_q, targets_mask) + elif hparams.prior_type == "diagonal_normal": + diag_prior_params = ops.cond_prior( + "diag_prior", hparams, tf.zeros(prior_shape), targets_mask, + hparams.latent_size*2, decoder_self_attention_bias, 
**kwargs) + p_dist = gops.diagonal_normal(diag_prior_params, "diag_prior") + log_p_z_base = p_dist.log_prob(z_q) # [B, L, C] + log_p_z_base = gops.reduce_sum_over_lc(log_p_z_base, targets_mask) # [B] + elif hparams.prior_type in ["affine", "additive", "rq"]: + if self.is_evaluating: + disable_dropout = True + init = False + elif self.is_training: + disable_dropout = False + init = tf.equal(hparams.kl_startup_steps, + tf.cast(tf.train.get_global_step(), tf.int32)) + else: + raise ValueError("compute_prior shouldn't be used in decoding.") + + z_inv, log_abs_det, log_p_z_base, zs = glow.glow( + "glow", z_q, targets_mask, decoder_self_attention_bias, + inverse=False, init=init, hparams=self._fparams, + disable_dropout=disable_dropout, **kwargs) + if self.is_evaluating and check_invertibility: + z_inv_inv, _, _, _ = glow.glow( + "glow", z_inv, targets_mask, decoder_self_attention_bias, + inverse=True, split_zs=zs, init=False, hparams=self._fparams, + disable_dropout=True, **kwargs) + z_diff = z_q - z_inv_inv + tf.summary.scalar("flow_recon_forward", tf.reduce_max(tf.abs(z_diff))) + return log_p_z_base, log_abs_det + + def sample_p( + self, targets_length, temp, check_invertibility=False, targets_mask=None, + **kwargs): + hparams = self._hparams + if targets_mask is None: + targets_mask = ops.sequence_mask(targets_length, hparams) + decoder_self_attention_bias = ( + common_attention.attention_bias_ignore_padding(1.0 - targets_mask)) + batch_size, targets_max_length = ( + common_layers.shape_list(targets_mask)[:2]) + prior_shape = [batch_size, targets_max_length, hparams.latent_size] + noise = tf.random.normal(prior_shape, stddev=temp) + p_dist = None + + if hparams.prior_type == "standard_normal": + z_p = noise + elif hparams.prior_type == "diagonal_normal": + diag_prior_params = ops.cond_prior( + "diag_prior", hparams, tf.zeros(prior_shape), targets_mask, + hparams.latent_size*2, decoder_self_attention_bias, **kwargs) + p_dist = gops.diagonal_normal(diag_prior_params, "diag_prior") + z_p = p_dist.loc + p_dist.scale * noise + elif hparams.prior_type in ["affine", "additive", "rq"]: + n_levels = len(hparams.depths.split("/")) + divi = max(1, hparams.factor**(n_levels-1)) + flow_prior_shape = [ + batch_size, targets_max_length//divi, hparams.latent_size] + noise = tf.random_normal(flow_prior_shape, stddev=temp) + z_p, _, _, _ = glow.glow( + "glow", noise, targets_mask, decoder_self_attention_bias, + inverse=True, init=False, hparams=self._fparams, + disable_dropout=True, temp=temp, **kwargs) + if self.is_evaluating and check_invertibility: + noise_inv, _, _, _ = glow.glow( + "glow", z_p, targets_mask, decoder_self_attention_bias, + inverse=False, init=False, hparams=self._fparams, + disable_dropout=True, **kwargs) + z_diff = noise - noise_inv + tf.summary.scalar("flow_recon_inverse", tf.reduce_max(tf.abs(z_diff))) + return z_p, p_dist + + def optimize(self, loss, num_async_replicas=1, use_tpu=False, variables=None): + """Return a training op minimizing loss.""" + lr = ops.learning_rate_schedule(self.hparams) + if num_async_replicas > 1: + t2t_model.log_info("Dividing learning rate by num_async_replicas: %d", + num_async_replicas) + lr /= math.sqrt(float(num_async_replicas)) + train_op = optimize.optimize( + loss, lr, self.hparams, use_tpu=use_tpu, variables=variables) + return train_op + + def body(self, features, real_features): + return self.internal(features, real_features) + + def infer(self, + features, + *args, + **kwargs): + """Produce predictions from the model.""" + del args, kwargs + 
inputs_old = None + if "inputs" in features and len(features["inputs"].shape) < 4: + inputs_old = features["inputs"] + features["inputs"] = tf.expand_dims(features["inputs"], 2) + features["targets"] = tf.identity(features["inputs"]) + + # logits, _ = self(features) + t2t_model.set_custom_getter_compose(self._custom_getter) + tf.get_variable_scope().set_initializer( + optimize.get_variable_initializer(self.hparams)) + with self._eager_var_store.as_default(): + self._fill_problem_hparams_features(features) + # intentionally disable sharding during inference (in multi GPU) + with tf.variable_scope(self.name): + logits, _, _, targets_mask = self.model_fn(features) + + samples = tf.argmax(logits, axis=-1) + samples = tf.where( + tf.cast(targets_mask[..., tf.newaxis, tf.newaxis], tf.bool), + samples, tf.ones_like(samples)) + if inputs_old is not None: # Restore to not confuse Estimator. + features["inputs"] = inputs_old + return samples + + def model_fn(self, features): + with tf.variable_scope( + tf.get_variable_scope(), use_resource=True, reuse=tf.AUTO_REUSE): + transformed_features = self.bottom(features) + + if self.hparams.activation_dtype == "bfloat16": + for k, v in sorted(six.iteritems(transformed_features)): + if v.dtype == tf.float32: + transformed_features[k] = tf.cast(v, tf.bfloat16) + + t2t_model.log_info("Building model body") + output, losses, monitor, targets_mask = self.body( + transformed_features, features) + output, losses = self._normalize_body_output((output, losses)) + + if "training" in losses: + t2t_model.log_info( + "Skipping T2TModel top and loss because training loss " + "returned from body") + logits = output + else: + logits = self.top(output, features) + losses["training"] = 0.0 + if (self._hparams.mode != tf_estimator.ModeKeys.PREDICT and + self._hparams.mode != "attack"): + losses["training"] = self.loss(logits, features) + + return logits, losses, monitor, targets_mask + + def model_fn_sharded(self, sharded_features): + """Estimator model_fn sharded along batch dimension. + + Args: + sharded_features: {str: [Tensor]}. Features sharded along batch dimension. + Each list is the same length (== number of shards). + + Returns: + sharded_logits: [Tensor]. Logits for each shard of examples. + losses: {str: 0-D Tensor}. Loss averaged across shards. + """ + dp = self._data_parallelism + + # [{str: Tensor}]. Transpose of 'sharded_features'. 
+    datashard_to_features = self._to_features_per_datashard(sharded_features)
+    sharded_logits, sharded_losses, sharded_monitors, _ = (
+        dp(self.model_fn, datashard_to_features))
+    sharded_logits, sharded_losses = dp(
+        self.maybe_scheduled_sampling,
+        datashard_to_features, sharded_logits, sharded_losses)
+    if isinstance(sharded_logits[0], dict):
+      temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])}
+      for k, _ in six.iteritems(sharded_logits[0]):
+        for l in sharded_logits:
+          temp_dict[k].append(l[k])
+      sharded_logits = temp_dict
+    losses = t2t_model.average_sharded_losses(sharded_losses)
+    monitor = {}
+    for key in list(sharded_monitors[0].keys()):
+      monitor[key] = (
+          tf.add_n([m[key] for m in sharded_monitors]) / len(sharded_monitors))
+    ops.save_summary(monitor, "monitor")
+
+    return sharded_logits, losses
+
+
+@registry.register_hparams
+def wmt_enro_tpu():
+  """HParams for Transformer model on TPU."""
+  hparams = transformer.transformer_base()
+  hparams = transformer.update_hparams_for_tpu(hparams)
+  hparams.batch_size = 512
+  return hparams
+
+
+@registry.register_hparams
+def iwslt_baseline_gpu():
+  """HParams for the Transformer baseline on GPU."""
+  hparams = transformer.transformer_base()
+  hparams.hidden_size = 256
+  hparams.filter_size = 1024
+  hparams.num_hidden_layers = 5
+  hparams.num_heads = 2
+  hparams.layer_prepostprocess_dropout = 0.1
+  hparams.attention_dropout = 0.1
+  hparams.relu_dropout = 0.1
+  hparams.dropout = 0.1
+  return hparams
+
+
+@registry.register_hparams
+def iwslt_baseline_single_gpu():
+  """HParams for the Transformer baseline on a single GPU."""
+  hparams = iwslt_baseline_gpu()
+  hparams.batch_size = 1024
+  hparams.learning_rate_schedule = "constant*linear_warmup*rsqrt_decay"
+  hparams.learning_rate_constant = 0.1
+  hparams.learning_rate_warmup_steps = 16000
+  return hparams
+
+
+@registry.register_hparams
+def iwslt_baseline_tpu():
+  """HParams for the Transformer baseline on TPU."""
+  hparams = transformer.transformer_base()
+  transformer.update_hparams_for_tpu(hparams)
+  hparams.hidden_size = 256
+  hparams.filter_size = 1024
+  hparams.num_hidden_layers = 5
+  hparams.num_heads = 2
+  hparams.layer_prepostprocess_dropout = 0.1
+  hparams.attention_dropout = 0.1
+  hparams.relu_dropout = 0.1
+  hparams.dropout = 0.1
+  hparams.add_hparam("pos_attn", False)
+  return hparams
+
+
+@registry.register_hparams
+def iwslt_base():
+  """Set of hyperparameters."""
+  # Model architecture flags.
+  hparams = transformer.transformer_base()
+  hparams.num_hidden_layers = 5
+  hparams.hidden_size = 256
+  hparams.filter_size = 1024
+  hparams.num_heads = 4
+  # Other flags.
+  hparams.summarize_grads = False
+  hparams.summarize_vars = False
+  # Optimization-related flags.
+  hparams.clip_grad_norm = 1.0
+  hparams.learning_rate_decay_scheme = "noam"
+  hparams.learning_rate_warmup_steps = 8000
+  hparams.learning_rate = 0.2
+  hparams.learning_rate_schedule = (
+      "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size")
+  hparams.learning_rate_constant = 2.0
+  hparams.add_hparam("predict_target_length", True)
+  hparams.add_hparam("lendiff_bound", 30)
+  hparams = transformer.update_hparams_for_tpu(hparams)
+  hparams.add_hparam("pos_attn", False)
+  return hparams
+
+
+@registry.register_hparams
+def iwslt_diag():
+  """Set of hyperparameters."""
+  hparams = iwslt_base()
+  hparams.batch_size = 4096
+  # Other flags.
+  hparams.force_full_predict = True
+  hparams.causal_decoder_self_attention = False
+  # VAE-related flags.
+ hparams.add_hparam("latent_size", 256) + hparams.add_hparam("anneal_min_value", 0.0) + hparams.add_hparam("kl_startup_steps", 5000) + hparams.add_hparam("kl_anneal_steps", 20000) + hparams.add_hparam("n_posterior_layers", 3) + hparams.add_hparam("n_decoder_layers", 3) + hparams.add_hparam("posterior_2d_dropout", 0.20) + # diagonal_normal / affine / additive / rq + hparams.add_hparam("posterior_type", "diagonal_normal") + # standard_normal / diagonal_normal + hparams.add_hparam("prior_type", "diagonal_normal") + hparams.add_hparam("decoder_2d_dropout", 0.00) + # Optimization-related flags. + hparams.learning_rate_warmup_steps = 8000 + hparams.learning_rate_constant = 2.0 + hparams.layer_prepostprocess_dropout = 0.2 + hparams.attention_dropout = 0.2 + hparams.relu_dropout = 0.2 + hparams.dropout = 0.2 + # Optimization-related flags. + hparams.add_hparam("kl_reg", 0.0) + hparams.add_hparam("n_gibbs_steps", 0) + hparams.add_hparam("compute_kl_refinement", False) + hparams.add_hparam("compute_iw_marginal", False) + hparams.add_hparam("n_samples", 1) + return hparams + + +@registry.register_hparams +def wmt_diag_base(): + """Set of hyperparameters.""" + hparams = iwslt_diag() + hparams.batch_size = 4096 + hparams.num_hidden_layers = 6 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.num_heads = 8 + # VAE-related flags. + hparams.latent_size = 512 + hparams.n_posterior_layers = 4 + hparams.n_decoder_layers = 6 + hparams.dropout = 0.1 + hparams.layer_prepostprocess_dropout = 0.1 + hparams.attention_dropout = 0.1 + hparams.relu_dropout = 0.1 + return hparams + + +@registry.register_hparams +def wmt_diag_small(): + """Set of hyperparameters.""" + hparams = wmt_diag_base() + hparams.n_posterior_layers = 3 + hparams.n_decoder_layers = 3 + hparams.kl_reg = 1e-4 + return hparams + + +@registry.register_hparams +def wmt_diag_small_trueadam(): + """Set of hyperparameters.""" + hparams = wmt_diag_small() + hparams.optimizer = "true_adam" + return hparams + + +@registry.register_hparams +def wmt_diag_small_trueadam_longer(): + """Set of hyperparameters.""" + hparams = wmt_diag_small_trueadam() + hparams.learning_rate_constant = 4.0 + hparams.learning_rate_warmup_steps = 20000 + return hparams + + +@registry.register_hparams +def wmt_diag_small_trueadam_shorter(): + """Set of hyperparameters.""" + hparams = wmt_diag_small_trueadam() + hparams.learning_rate_constant = 2.0 + hparams.learning_rate_warmup_steps = 4000 + return hparams + + +@registry.register_hparams +def wmt_diag_base_trueadam_1e4(): + """Set of hyperparameters.""" + hparams = wmt_diag_base() + hparams.kl_reg = 1e-4 + hparams.optimizer = "true_adam" + hparams.learning_rate_constant = 2.0 + hparams.learning_rate_warmup_steps = 8000 + return hparams + + +@registry.register_hparams +def wmt_diag_base_trueadam_longer_1e4(): + """Set of hyperparameters.""" + hparams = wmt_diag_base_trueadam_1e4() + hparams.learning_rate_constant = 4.0 + hparams.learning_rate_warmup_steps = 20000 + return hparams + + +@registry.register_hparams +def wmt_diag_base_trueadam_shorter_1e4(): + """Set of hyperparameters.""" + hparams = wmt_diag_base_trueadam_1e4() + hparams.learning_rate_constant = 2.0 + hparams.learning_rate_warmup_steps = 4000 + return hparams + + +@registry.register_hparams +def wmt_diag_base_1e4_trueadam(): + """Set of hyperparameters.""" + hparams = wmt_diag_base() + hparams.kl_reg = 1e-4 + hparams.optimizer = "true_adam" + return hparams + + +@registry.register_hparams +def wmt_diag_base_1e4_trueadam_longer(): + """Set of 
hyperparameters.""" + hparams = wmt_diag_base_1e4_trueadam() + hparams.learning_rate_constant = 4.0 + hparams.learning_rate_warmup_steps = 20000 + return hparams + + +@registry.register_hparams +def wmt_diag_base_1e4_trueadam_shorter(): + """Set of hyperparameters.""" + hparams = wmt_diag_base_1e4_trueadam() + hparams.learning_rate_constant = 2.0 + hparams.learning_rate_warmup_steps = 4000 + return hparams + + +@registry.register_hparams +def wmt_diag_base_1e4(): + """Set of hyperparameters.""" + hparams = wmt_diag_base() + hparams.kl_reg = 1e-4 + return hparams + + +@registry.register_hparams +def wmt_diag_base_longer_1e4(): + """Set of hyperparameters.""" + hparams = wmt_diag_base_1e4() + hparams.learning_rate_constant = 4.0 + hparams.learning_rate_warmup_steps = 20000 + return hparams + + +@registry.register_hparams +def wmt_diag_base_shorter_1e4(): + """Set of hyperparameters.""" + hparams = wmt_diag_base_1e4() + hparams.learning_rate_constant = 2.0 + hparams.learning_rate_warmup_steps = 4000 + return hparams + + +@registry.register_hparams +def iwslt_diag_1e5(): + """Set of hyperparameters.""" + hparams = iwslt_diag() + hparams.kl_reg = 1e-5 + return hparams + + +@registry.register_hparams +def iwslt_diag_1e4(): + """Set of hyperparameters.""" + hparams = iwslt_diag() + hparams.kl_reg = 1e-4 + return hparams + + +@registry.register_hparams +def iwslt_affine(): + """Set of hyperparameters.""" + hparams = iwslt_diag() + hparams.prior_type = "affine" + hparams.batch_size = 2048 + hparams.latent_size = 256 + # Glow-related flags. + hparams.add_hparam("depths", "4/8/8") # infer n_levels from depths + hparams.add_hparam("step_fn", "glow") # glow / chunting + hparams.add_hparam("affine_scale", "glow") # glow / jason + hparams.add_hparam("conv_fn", "np") # np / tf + hparams.add_hparam("split_plans", "cat/cat/ca") + hparams.add_hparam("factor", 2) # squeezing factor + hparams.add_hparam("n_layers_transform_params", 1) + hparams.add_hparam("n_1x1_heads", 4) + hparams.add_hparam("flow_num_heads", 4) + hparams.add_hparam("flow_hidden_size", 256) + hparams.add_hparam("flow_filter_size", 512) + # Control max scale change. + hparams.add_hparam("scale_width", 0.999) + # Optimization-related flags. + # hparams.learning_rate_warmup_steps = 20000 + hparams.add_hparam("flow_layer_prepostprocess_dropout", 0.0) + hparams.add_hparam("flow_attention_dropout", 0.0) + hparams.add_hparam("flow_relu_dropout", 0.0) + # hparams.optimizer_adam_beta1 = 0.9 + # hparams.optimizer_adam_beta2 = 0.999 + # hparams.optimizer_adam_epsilon = 1e-8 + # Precision-related flags. + hparams.activation_dtype = "float32" + hparams.weight_dtype = "float32" + + return hparams + + +@registry.register_hparams +def wmt_affine(): + """Set of hyperparameters.""" + hparams = iwslt_affine() + hparams.batch_size = 2048 # TODO(jason) : address this later. + hparams.num_hidden_layers = 6 + hparams.hidden_size = 256 + hparams.filter_size = 1024 + hparams.num_heads = 8 + # VAE-related flags. + hparams.latent_size = 256 + hparams.n_posterior_layers = 4 + hparams.n_decoder_layers = 4 + hparams.layer_prepostprocess_dropout = 0.1 + hparams.attention_dropout = 0.1 + hparams.relu_dropout = 0.1 + # Glow-related flags. 
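+ # Relative to iwslt_affine, the flow keeps the same structure here (depths,
+ # step_fn, split_plans are inherited) and only gets more attention heads
+ # (8 instead of 4).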
+ hparams.flow_num_heads = 8 + hparams.flow_filter_size = 512 + return hparams + + +@registry.register_hparams +def wmt_affine_base(): + """Set of hyperparameters.""" + hparams = wmt_affine() + hparams.batch_size = 2048 + hparams.hidden_size = 320 + hparams.latent_size = 320 + hparams.flow_filter_size = 640 + return hparams + + +@registry.register_hparams +def wmt_affine_base_small(): + """Set of hyperparameters.""" + hparams = wmt_affine_base() + hparams.depths = "4/4/4" + hparams.kl_reg = 1e-4 + hparams.learning_rate_constant = 2.0 + hparams.learning_rate_warmup_steps = 8000 + return hparams + + +@registry.register_hparams +def wmt_affine_base_trueadam_small(): + """Set of hyperparameters.""" + hparams = wmt_affine_base_small() + hparams.optimizer = "true_adam" + return hparams + + +@registry.register_hparams +def wmt_affine_base_trueadam_longer_small(): + """Set of hyperparameters.""" + hparams = wmt_affine_base_trueadam_small() + hparams.learning_rate_constant = 4.0 + hparams.learning_rate_warmup_steps = 20000 + return hparams + + +@registry.register_hparams +def wmt_affine_base_trueadam_shorter_small(): + """Set of hyperparameters.""" + hparams = wmt_affine_base_trueadam_small() + hparams.learning_rate_constant = 2.0 + hparams.learning_rate_warmup_steps = 4000 + return hparams + + +@registry.register_hparams +def wmt_affine_base_trueadam(): + """Set of hyperparameters.""" + hparams = wmt_affine_base() + hparams.optimizer = "true_adam" + # hparams.optimizer_adam_beta1 = 0.9 + # hparams.optimizer_adam_beta2 = 0.999 + # hparams.optimizer_adam_epsilon = 1e-8 + hparams.kl_reg = 1e-4 + hparams.learning_rate_constant = 2.0 + hparams.learning_rate_warmup_steps = 8000 + return hparams + + +@registry.register_hparams +def wmt_affine_base_trueadam_longer(): + """Set of hyperparameters.""" + hparams = wmt_affine_base_trueadam() + hparams.learning_rate_constant = 4.0 + hparams.learning_rate_warmup_steps = 20000 + return hparams + + +@registry.register_hparams +def wmt_affine_base_trueadam_shorter(): + """Set of hyperparameters.""" + hparams = wmt_affine_base_trueadam() + hparams.learning_rate_constant = 2.0 + hparams.learning_rate_warmup_steps = 4000 + return hparams + + +@registry.register_hparams +def wmt_affine_base_1e4(): + """Set of hyperparameters.""" + hparams = wmt_affine_base() + hparams.kl_reg = 1e-4 + hparams.learning_rate_constant = 2.0 + hparams.learning_rate_warmup_steps = 8000 + return hparams + + +@registry.register_hparams +def wmt_affine_base_longer_1e4(): + """Set of hyperparameters.""" + hparams = wmt_affine_base_1e4() + hparams.learning_rate_constant = 4.0 + hparams.learning_rate_warmup_steps = 20000 + return hparams + + +@registry.register_hparams +def wmt_affine_base_shorter_1e4(): + """Set of hyperparameters.""" + hparams = wmt_affine_base_1e4() + hparams.learning_rate_constant = 2.0 + hparams.learning_rate_warmup_steps = 4000 + return hparams + + +@registry.register_hparams +def wmt_affine_1e4(): + """Set of hyperparameters.""" + hparams = wmt_affine() + hparams.kl_reg = 1e-4 + return hparams + + +@registry.register_hparams +def wmt_affine_large(): + """Set of hyperparameters.""" + hparams = iwslt_affine() + hparams.batch_size = 2048 + hparams.num_hidden_layers = 6 + hparams.hidden_size = 512 + hparams.filter_size = 1024 + hparams.num_heads = 8 + # VAE-related flags. 
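+ # The latent width below is scaled up together with the model width (both
+ # are 512 in this config).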
+ hparams.latent_size = 512 + hparams.n_posterior_layers = 4 + hparams.n_decoder_layers = 4 + hparams.layer_prepostprocess_dropout = 0.1 + hparams.attention_dropout = 0.1 + hparams.relu_dropout = 0.1 + # Glow-related flags. + hparams.flow_num_heads = 8 + hparams.flow_filter_size = 1024 + return hparams + + +@registry.register_hparams +def wmt_affine_large_1e4(): + """Set of hyperparameters.""" + hparams = wmt_affine_large() + hparams.kl_reg = 1e-4 + return hparams + + +@registry.register_hparams +def iwslt_affine_tiny(): + """Set of hyperparameters.""" + hparams = iwslt_affine() + hparams.depths = "1" + hparams.split_plans = "c" + return hparams + + +@registry.register_hparams +def iwslt_affine_small(): + """Set of hyperparameters.""" + hparams = iwslt_affine() + hparams.depths = "4/4/4" + return hparams + + +@registry.register_hparams +def iwslt_affine_small_1e4_trueadam(): + """Set of hyperparameters.""" + hparams = iwslt_affine_small_1e4() + hparams.optimizer = "true_adam" + return hparams + + +@registry.register_hparams +def iwslt_affine_small_1e4_trueadam_longer(): + """Set of hyperparameters.""" + hparams = iwslt_affine_small_1e4_trueadam() + hparams.learning_rate_constant = 4.0 + hparams.learning_rate_warmup_steps = 20000 + return hparams + + +@registry.register_hparams +def iwslt_affine_small_1e4_trueadam_shorter(): + """Set of hyperparameters.""" + hparams = iwslt_affine_small_1e4_trueadam() + hparams.learning_rate_constant = 2.0 + hparams.learning_rate_warmup_steps = 4000 + return hparams + + +@registry.register_hparams +def iwslt_affine_small_1e4(): + """Set of hyperparameters.""" + hparams = iwslt_affine_small() + hparams.kl_reg = 1e-4 + return hparams + + +@registry.register_hparams +def iwslt_affine_tpu_glow_glow_np_1e4_trueadam(): + """Set of hyperparameters.""" + hparams = iwslt_affine_tpu_glow_glow_np_1e4() + hparams.optimizer = "true_adam" + return hparams + + +@registry.register_hparams +def iwslt_affine_tpu_glow_glow_np_1e4_trueadam_longer(): + """Set of hyperparameters.""" + hparams = iwslt_affine_tpu_glow_glow_np_1e4_trueadam() + hparams.learning_rate_constant = 4.0 + hparams.learning_rate_warmup_steps = 20000 + return hparams + + +@registry.register_hparams +def iwslt_affine_tpu_glow_glow_np_1e4_trueadam_shorter(): + """Set of hyperparameters.""" + hparams = iwslt_affine_tpu_glow_glow_np_1e4_trueadam() + hparams.learning_rate_constant = 2.0 + hparams.learning_rate_warmup_steps = 4000 + return hparams + + +@registry.register_hparams +def iwslt_affine_tpu_glow_glow_np_1e4(): + """Set of hyperparameters.""" + hparams = iwslt_affine() + hparams.conv_fn = "np" + hparams.kl_reg = 1e-4 + return hparams + + +def update_hparams_for_tpu(hparams): + """Change hparams to be compatible with TPU training.""" + + # Adafactor uses less memory than Adam. + # switch to Adafactor with its recommended learning rate scheme. + # hparams.optimizer = "Adafactor" + # hparams.learning_rate_schedule = "rsqrt_decay" + # hparams.learning_rate_warmup_steps = 10000 + + # Avoid an expensive concat on TPU. + # >1 shards helps with faster parameter distribution on multi-GPU machines + hparams.symbol_modality_num_shards = 1 + + # Adaptive batch sizes and sequence lengths are not supported on TPU. + # Instead, every batch has the same sequence length and the same batch size. + # Longer sequences are dropped and shorter ones are padded. + # + # It is therefore suggested to use a problem where examples have been combined + # to a longer length, e.g. the "_packed" problems. 
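+ # (A "_packed" problem concatenates several short examples into a single
+ # fixed-length example, so little of the fixed-shape batch is spent on
+ # padding.)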
+ # + # For problems with variable sequence lengths, this parameter controls the + # maximum sequence length. Shorter sequences are dropped and longer ones + # are padded. + # + # For problems with fixed sequence lengths - e.g. the "_packed" problems, + # this hyperparameter is ignored. + hparams.max_length = 64 + + # TPUs have less memory than GPUs, so decrease the batch size if it's too high + if hparams.batch_size > 2048: + hparams.batch_size = 2048 + + # Using noise broadcast in the dropout layers saves memory during training. + hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads + hparams.relu_dropout_broadcast_dims = "1" # length + hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length + return hparams diff --git a/tensor2tensor/models/research/transformer_vae_flow_prior_ops.py b/tensor2tensor/models/research/transformer_vae_flow_prior_ops.py new file mode 100644 index 000000000..ad176f174 --- /dev/null +++ b/tensor2tensor/models/research/transformer_vae_flow_prior_ops.py @@ -0,0 +1,366 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Various ops for TransformerVaeFlowPrior.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import transformer_glow_layers_ops as gops +from tensor2tensor.models.transformer import transformer_decoder_layer +from tensor2tensor.models.transformer import transformer_encoder +from tensor2tensor.models.transformer import transformer_prepare_encoder +from tensor2tensor.utils import learning_rate as lr +from tensor2tensor.utils import mlperf_log +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +def _mixed_precision_is_enabled(hparams): + """Should be the same as in common_attention, avoiding import.""" + activation_dtype = hparams.activation_dtype + weight_dtype = hparams.weight_dtype + return activation_dtype == tf.float16 and weight_dtype == tf.float32 + + +def encoder(name, hparams, inputs, target_space): + """Compute encoder outputs and attention bias.""" + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + (encoder_input, + encoder_self_attention_bias, + encoder_decoder_attention_bias) = ( + transformer_prepare_encoder(inputs, target_space, hparams)) + encoder_input = tf.nn.dropout(encoder_input, + rate=hparams.layer_prepostprocess_dropout) + encoder_output = transformer_encoder(encoder_input, + encoder_self_attention_bias, + hparams) + return encoder_output, encoder_decoder_attention_bias + + +def transformer_decoder_layers(name, + n_layers, + decoder_input, + **kwargs): + """A transformation block composed of transformer decoder layers.""" + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + hparams = kwargs["hparams"] + outputs = decoder_input + with tf.variable_scope("decoder", 
reuse=tf.AUTO_REUSE): + for layer_idx in range(n_layers): + outputs = transformer_decoder_layer( + decoder_input=outputs, + layer_idx=layer_idx, + **kwargs) + outputs = common_layers.layer_preprocess(outputs, hparams) + return outputs + + +def posterior( + name, hparams, targets, targets_mask, decoder_self_attention_bias, + **kwargs): + """Compute mu and sigma for diagonal normal posterior q(z|x,y).""" + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + decoder_input = drop_2d(targets, hparams.mode, hparams.posterior_2d_dropout) + decoder_input = common_attention.add_timing_signal_1d(decoder_input) + decoder_input = tf.nn.dropout(decoder_input, + rate=hparams.layer_prepostprocess_dropout) + decoder_output = transformer_decoder_layers( + "block", + n_layers=hparams.n_posterior_layers, + decoder_input=decoder_input, + hparams=hparams, + decoder_self_attention_bias=decoder_self_attention_bias, + **kwargs) + decoder_output = gops.dense_weightnorm( + "h2o_out", decoder_output, hparams.latent_size * 2, targets_mask, + init_scale=0.0, init=False) + return decoder_output + + +def cond_prior( + name, hparams, decoder_input, targets_mask, output_size, + decoder_self_attention_bias, init_scale=0.0, **kwargs): + """Compute hidden states for parameters for conditional prior.""" + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + decoder_input = common_attention.add_timing_signal_1d(decoder_input) + decoder_input = tf.nn.dropout(decoder_input, + rate=hparams.layer_prepostprocess_dropout) + decoder_output = transformer_decoder_layers( + "block", + n_layers=hparams.n_posterior_layers, + decoder_input=decoder_input, + hparams=hparams, + decoder_self_attention_bias=decoder_self_attention_bias, + **kwargs) + decoder_output = gops.dense_weightnorm( + "h2o_out", decoder_output, output_size, targets_mask, + init_scale=init_scale, init=False) + return decoder_output + + +def decoder(name, latents, hparams, decoder_self_attention_bias, **kwargs): + """Compute final hidden states for p(y|z,x).""" + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + decoder_input = drop_2d(latents, hparams.mode, hparams.decoder_2d_dropout) + if hparams.pos_attn: + decoder_input = gops.positional_attention( + "pos_attn", decoder_input, decoder_self_attention_bias, hparams) + else: + decoder_input = common_attention.add_timing_signal_1d(decoder_input) + if common_layers.shape_list(latents)[-1] != hparams.hidden_size: + decoder_input = gops.dense("lat2hid", latents, hparams.hidden_size) + decoder_output = transformer_decoder_layers( + "block", + n_layers=hparams.n_decoder_layers, + decoder_input=decoder_input, + hparams=hparams, + decoder_self_attention_bias=decoder_self_attention_bias, + **kwargs) + batch_size, targets_length = common_layers.shape_list(decoder_output)[:2] + decoder_output = tf.reshape( + decoder_output, [batch_size, targets_length, 1, hparams.hidden_size]) + # Expand since t2t expects 4d tensors. 
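+ # I.e. [batch_size, targets_length, hidden_size] ->
+ # [batch_size, targets_length, 1, hidden_size]; the rest of T2T assumes a
+ # 4-D body output when computing logits and losses.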
+ return decoder_output + + +def drop_2d(targets, mode, dropout_p): + """Dropout in 2D.""" + if dropout_p > 0 and mode == tf_estimator.ModeKeys.TRAIN: + batch_size, targets_length, hidden_size = common_layers.shape_list(targets) + mask_prob = tf.random_uniform( + shape=(batch_size, targets_length), minval=0.0, maxval=1.0) + mask_prob = tf.tile(mask_prob[..., tf.newaxis], [1, 1, hidden_size]) + scale = 1 / (1 - dropout_p) + targets_noisy = tf.where( + mask_prob > dropout_p, targets * scale, tf.zeros_like(targets)) + return targets_noisy + return targets + + +def sequence_mask(length, hparams): + dtype = get_dtype(hparams) + return tf.sequence_mask(length, dtype=dtype) + + +def get_padding(mask, hparams): + dtype = get_dtype(hparams) + return tf.cast(tf.equal(mask, 0.0), dtype=dtype) + + +def get_dtype(hparams): + if hparams.activation_dtype == "float32": + return tf.float32 + elif hparams.activation_dtype == "float64": + return tf.float64 + elif hparams.activation_dtype == "bfloat16": + return tf.bfloat16 + else: + return None + + +def lenpred_mlp(name, logits, hidden_size, bound): + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + logits = tf.layers.dense(logits, hidden_size) + logits = tf.nn.elu(logits) + logits = tf.layers.dense(logits, hidden_size) + logits = tf.nn.elu(logits) + logits = tf.layers.dense(logits, bound * 2 + 1) + return logits + + +def predict_target_lengths( + encoder_output, inputs_mask, hparams, length_diff=None): + """Predict target lengths.""" + bound = hparams.lendiff_bound + inputs_length = tf.cast(tf.reduce_sum(inputs_mask, 1), tf.int32) + targets_length = inputs_length + loss = None + if hparams.predict_target_length: + encoder_output = gops.reduce_mean_over_l(encoder_output, inputs_mask) + logits = tf.stop_gradient(encoder_output) + logits = lenpred_mlp("lenpred", logits, hparams.hidden_size, bound) + if length_diff is not None: + labels = tf.maximum(tf.minimum(length_diff, bound), -bound) + labels = tf.cast(labels + bound, tf.int32) + labels = tf.stop_gradient(labels) + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + labels=labels, logits=logits) + loss = tf.reduce_mean(loss) + diff_pred = tf.argmax(logits, 1) + diff_pred = tf.cast(diff_pred - bound, tf.int32) + targets_length = inputs_length + diff_pred + targets_length = tf.maximum(targets_length, 1) + divi = 4 + targets_length = tf.ceil(targets_length / divi) * divi + targets_length = tf.cast(targets_length, tf.int32) + return targets_length, loss + + +def lenpred_stats(targets_length_pred, targets_length): + lenpred_diff = tf.abs(targets_length_pred - tf.cast(targets_length, tf.int32)) + lenpred_acc = tf.cast(tf.equal(lenpred_diff, 0), tf.float32) + lenpred_acc = tf.reduce_mean(lenpred_acc) + lenpred_acc5 = tf.cast(tf.less_equal(lenpred_diff, 5), tf.float32) + lenpred_acc5 = tf.reduce_mean(lenpred_acc5) + return lenpred_acc, lenpred_acc5 + + +def save_log_loss( + hparams, targets_mask, numerator, denominator, log_q_z, log_abs_det, + log_p_z_base, z_q, lenpred_loss, targets_length_pred, targets_length): + """Populate loss dictionary and summary.""" + anneal, kl_mask = get_anneal_mask(hparams) + lenpred_acc, lenpred_acc5 = ( + lenpred_stats(targets_length_pred, targets_length)) + batch_length = tf.reduce_sum(targets_mask) + + z_q_norm = gops.reduce_mean_over_bl( + tf.norm(z_q, axis=2, keepdims=True), targets_mask)[0] + + log_q_z = gops.reduce_mean_over_bl_sum_over_c(log_q_z, targets_mask) + log_p_z_base = tf.reduce_sum(log_p_z_base, axis=0) / batch_length + log_abs_det = 
tf.reduce_sum(log_abs_det, axis=0) / batch_length + log_p_z_reg = gops.standard_normal_density(z_q, targets_mask, reduce_sum=True) + + log_p_x = -1 * numerator / denominator + log_p_z = log_p_z_base + log_abs_det + kl = log_q_z - log_p_z + kl_reg = log_p_z - log_p_z_reg + elbo = log_p_x - kl + monitor = { + "elbo": elbo, + "kl": kl, + "kl_reg": kl_reg, + "log_p_x": log_p_x, + "log_q_z": log_q_z, + "log_p_z": log_p_z, + "log_p_z_base": log_p_z_base, + "log_abs_det": log_abs_det, + "anneal": anneal, + "z_q_norm": z_q_norm, + "lenpred_acc": lenpred_acc, + "lenpred_acc5": lenpred_acc5, + } + + kl = kl * anneal + kl_reg = hparams.kl_reg * kl_reg * anneal + loss_dict = { + "training": -1 * log_p_x, + "kl": kl * kl_mask, + "kl_reg": kl_reg * kl_mask, + } + if lenpred_loss is not None: + monitor["lenpred_loss"] = lenpred_loss + loss_dict["lenpred_loss"] = lenpred_loss + return loss_dict, monitor + + +def get_anneal_mask(hparams): + """Get anneal and kl mask.""" + startup = hparams.kl_startup_steps + anneal = hparams.kl_anneal_steps + global_step = tf.train.get_global_step() + min_value = hparams.anneal_min_value + step = tf.maximum(global_step - startup, 0) + anneal = common_layers.inverse_lin_decay( + anneal, min_value=min_value, step=step) + kl_mask = tf.less(startup, tf.to_int32(global_step)) + kl_mask = tf.cast(kl_mask, tf.float32) + return anneal, kl_mask + + +def embedding_to_non_padding(emb, dtype=tf.float32): + """Calculates the padding mask based on which embeddings are not zero.""" + emb_sum = tf.reduce_sum(tf.abs(emb), axis=-1) + return tf.cast(tf.not_equal(emb_sum, 0.0), dtype=dtype) + + +def save_summary(monitor, name): + with tf.name_scope(name): + for key in list(monitor.keys()): + tf.summary.scalar(key, monitor[key]) + + +def _global_step(hparams): + """Adjust global step if a multi-step optimizer is used.""" + step = tf.cast(tf.train.get_or_create_global_step(), tf.float32) + multiplier = hparams.optimizer_multistep_accumulate_steps + if not multiplier: + return step + + tf.logging.info("Dividing global step by %d for multi-step optimizer." + % multiplier) + return step / tf.cast(multiplier, tf.float32) + + +def learning_rate_schedule(hparams): + """Learning rate schedule based on hparams.""" + mlperf_log.transformer_print(key=mlperf_log.OPT_LR, deferred=True) + mlperf_log.transformer_print( + key=mlperf_log.OPT_LR_WARMUP_STEPS, + value=hparams.learning_rate_warmup_steps) + step_num = _global_step(hparams) + # Simulate pretraining the encoder, decoder and posterior with the same + # learning rate schedule, and then restoring the parameters. + # using `warm_start_from` is not compatible with actnorm DDI on TPUs. 
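+ # The shift below restarts the schedule when the KL term is switched on:
+ # e.g. with kl_startup_steps=5000, global steps 0..4999 are used as-is and
+ # step 5000 maps back to 0, so warmup is effectively re-run for the phase in
+ # which the KL term (see get_anneal_mask) is active.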
+ step_num = tf.where( + step_num < hparams.kl_startup_steps, + step_num, + step_num - hparams.kl_startup_steps) + schedule_string = hparams.learning_rate_schedule + names = schedule_string.split("*") + names = [name.strip() for name in names if name.strip()] + ret = tf.constant(1.0) + for name in names: + ret *= lr.learning_rate_factor(name, step_num, hparams) + return ret + + +def prepare_for_iw(x, k): + """Prepare feature for importance sampling.""" + batch_size = common_layers.shape_list(x)[0] + remaining_shape = common_layers.shape_list(x)[1:] + + multiplier = [1] * x.shape.rank + x = tf.tile(x[tf.newaxis, ...], [k] + multiplier) + x = tf.reshape(x, [k * batch_size] + remaining_shape) + return x + + +def unprepare_for_iw(x, k): + """Unprepare feature for importance sampling.""" + batch_size_times_k = common_layers.shape_list(x)[0] + remaining_shape = common_layers.shape_list(x)[1:] + x = tf.reshape(x, [k, batch_size_times_k // k] + remaining_shape) + return x + + +def generic_loss(top_out, targets, model_hparams, vocab_size, weights_fn): + """Compute loss numerator and denominator for one shard of output.""" + del vocab_size # unused arg + logits = top_out + logits = common_attention.maybe_upcast(logits, hparams=model_hparams) + cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.0) + return common_layers.padded_cross_entropy( + logits, + targets, + model_hparams.label_smoothing, + cutoff=cutoff, + weights_fn=weights_fn, + reduce_sum=False) diff --git a/tensor2tensor/models/research/transformer_vae_test.py b/tensor2tensor/models/research/transformer_vae_test.py new file mode 100644 index 000000000..8ac40854d --- /dev/null +++ b/tensor2tensor/models/research/transformer_vae_test.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for tensor2tensor.models.research.transformer_vae.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.models.research import transformer_vae +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class TransformerVaeTest(tf.test.TestCase): + + def testTransformerAEOnDVQ(self): + batch_size = 3 + input_length = 5 + target_length = 16 + vocab_size = 9 + hparams = transformer_vae.transformer_ae_small() + hparams.bottleneck_kind = "dvq" + hparams.dp_strength = 0 + p_hparams = problem_hparams.test_problem_hparams(vocab_size, + vocab_size, + hparams) + hparams.problem_hparams = p_hparams + inputs = np.random.randint( + vocab_size, size=(batch_size, input_length, 1, 1)) + targets = np.random.randint( + vocab_size, size=(batch_size, target_length, 1, 1)) + features = { + "inputs": tf.constant(inputs, dtype=tf.int32), + "targets": tf.constant(targets, dtype=tf.int32), + "target_space_id": tf.constant(1, dtype=tf.int32), + } + tf.train.create_global_step() + model = transformer_vae.TransformerAE(hparams, tf_estimator.ModeKeys.TRAIN, + p_hparams) + logits, _ = model(features) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + logits_val = session.run(logits) + self.assertEqual(logits_val.shape, + (batch_size, target_length, 1, 1, vocab_size)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/research/universal_transformer.py b/tensor2tensor/models/research/universal_transformer.py new file mode 100644 index 000000000..f480bf2da --- /dev/null +++ b/tensor2tensor/models/research/universal_transformer.py @@ -0,0 +1,829 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Universal Transformers. + +Universal Transformer is described in https://arxiv.org/abs/1807.03819. + +Universal Transformer is recurrent in depth while employing self-attention +to combine information from different parts of sequences. +In contrast to the Transformer, given enough memory its recurrence in depth +makes the Universal Transformer computationally universal. 
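+
+The model and hparams sets defined here are registered with the T2T registry,
+so they can be selected with the usual flags, for example (standard
+t2t-trainer flags; data/output paths and the problem below are placeholders):
+
+  t2t-trainer \
+    --problem=translate_ende_wmt32k \
+    --model=universal_transformer \
+    --hparams_set=universal_transformer_base \
+    --data_dir=$DATA_DIR --output_dir=$TRAIN_DIR
+
+Use --hparams_set=adaptive_universal_transformer_base for the adaptive (ACT)
+variant.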
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.models import transformer +from tensor2tensor.models.research import universal_transformer_util +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_model +class UniversalTransformer(transformer.Transformer): + """Universal Transformer: Depth-wise recurrent transformer model.""" + + def encode(self, inputs, target_space, hparams, features=None, losses=None, + **kwargs): + """Encode Universal Transformer inputs. + + It is similar to "transformer.encode", but it uses + "universal_transformer_util.universal_transformer_encoder" instead of + "transformer.transformer_encoder". + + Args: + inputs: Transformer inputs [batch_size, input_length, input_height, + hidden_dim] which will be flattened along the two spatial dimensions. + target_space: scalar, target space ID. + hparams: hyperparmeters for model. + features: optionally pass the entire features dictionary as well. + This is needed now for "packed" datasets. + losses: Unused. + **kwargs: additional arguments to pass to encoder_function + + Returns: + Tuple of: + encoder_output: Encoder representation. + [batch_size, input_length, hidden_dim] + encoder_decoder_attention_bias: Bias and mask weights for + encoder-decoder attention. [batch_size, input_length] + encoder_extra_output: which is extra encoder output used in some + variants of the model (e.g. in ACT, to pass the ponder-time to body) + """ + del losses + + inputs = common_layers.flatten4d3d(inputs) + + encoder_input, self_attention_bias, encoder_decoder_attention_bias = ( + transformer.transformer_prepare_encoder( + inputs, target_space, hparams, features=features)) + + encoder_input = tf.nn.dropout(encoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + + (encoder_output, encoder_extra_output) = ( + universal_transformer_util.universal_transformer_encoder( + encoder_input, + self_attention_bias, + hparams, + nonpadding=transformer.features_to_nonpadding(features, "inputs"), + save_weights_to=self.attention_weights)) + + return encoder_output, encoder_decoder_attention_bias, encoder_extra_output + + def decode(self, + decoder_input, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + cache=None, + decode_loop_step=None, + nonpadding=None, + losses=None, + ** kwargs): + """Decode Universal Transformer outputs from encoder representation. + + It is similar to "transformer.decode", but it uses + "universal_transformer_util.universal_transformer_decoder" instead of + "transformer.transformer_decoder". + + Args: + decoder_input: inputs to bottom of the model. [batch_size, decoder_length, + hidden_dim] + encoder_output: Encoder representation. [batch_size, input_length, + hidden_dim] + encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder + attention. [batch_size, input_length] + decoder_self_attention_bias: Bias and mask weights for decoder + self-attention. [batch_size, decoder_length] + hparams: hyperparmeters for model. + cache: Unimplemented. + decode_loop_step: Unused. + nonpadding: optional Tensor with shape [batch_size, decoder_length] + losses: Unused. + **kwargs: additional arguments to pass to decoder_function + + Returns: + Tuple of: + Final decoder representation. 
[batch_size, decoder_length, + hidden_dim] + encoder_extra_output: which is extra encoder output used in some + variants of the model (e.g. in ACT, to pass the ponder-time to body) + + """ + del decode_loop_step + del losses + # TODO(dehghani): enable caching. + del cache + + decoder_input = tf.nn.dropout(decoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + + # No caching in Universal Transformers! + (decoder_output, dec_extra_output) = ( + universal_transformer_util.universal_transformer_decoder( + decoder_input, + encoder_output, + decoder_self_attention_bias, + encoder_decoder_attention_bias, + hparams, + nonpadding=nonpadding, + save_weights_to=self.attention_weights)) + + # Expand since t2t expects 4d tensors. + return tf.expand_dims(decoder_output, axis=2), dec_extra_output + + def body(self, features): + """Universal Transformer main model_fn. + + + Args: + features: Map of features to the model. Should contain the following: + "inputs": Transformer inputs [batch_size, input_length, hidden_dim] + "targets": Target decoder outputs. + [batch_size, decoder_length, hidden_dim] + "target_space_id" + + Returns: + Final decoder representation. [batch_size, decoder_length, hidden_dim] + """ + hparams = self._hparams + if hparams.add_position_timing_signal: + # Turning off addition of positional embedding in the encoder/decoder + # preparation as we do it in the beginning of each step. + hparams.pos = None + + if self.has_input: + inputs = features["inputs"] + target_space = features["target_space_id"] + (encoder_output, encoder_decoder_attention_bias, + enc_extra_output) = self.encode( + inputs, target_space, hparams, features=features) + else: + (encoder_output, encoder_decoder_attention_bias, + enc_extra_output) = (None, None, (None, None)) + + targets = features["targets"] + targets = common_layers.flatten4d3d(targets) + + (decoder_input, + decoder_self_attention_bias) = transformer.transformer_prepare_decoder( + targets, hparams, features=features) + + decoder_output, dec_extra_output = self.decode( + decoder_input, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + nonpadding=transformer.features_to_nonpadding(features, "targets")) + + expected_attentions = features.get("expected_attentions") + if expected_attentions is not None: + attention_loss = common_attention.encoder_decoder_attention_loss( + expected_attentions, self.attention_weights, + hparams.expected_attention_loss_type, + hparams.expected_attention_loss_multiplier) + return decoder_output, {"attention_loss": attention_loss} + + if hparams.recurrence_type == "act" and hparams.act_loss_weight != 0: + if self.has_input: + enc_ponder_times, enc_remainders = enc_extra_output + enc_act_loss = ( + hparams.act_loss_weight * + tf.reduce_mean(enc_ponder_times + enc_remainders)) + else: + enc_act_loss = 0.0 + + (dec_ponder_times, dec_remainders) = dec_extra_output + dec_act_loss = ( + hparams.act_loss_weight * + tf.reduce_mean(dec_ponder_times + dec_remainders)) + act_loss = enc_act_loss + dec_act_loss + contrib.summary().scalar("act_loss", act_loss) + return decoder_output, {"act_loss": act_loss} + + return decoder_output + + def _greedy_infer(self, features, decode_length, use_tpu=False): + """Fast version of greedy decoding. + + Args: + features: an map of string to `Tensor` + decode_length: an integer. How many additional timesteps to decode. + use_tpu: bool, whether to use the TPU codepath. 
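+
+ Note: caching is not implemented for the Universal Transformer, so this
+ falls back to the slow greedy decoding path.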
+ + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } + + Raises: + NotImplementedError: If there are multiple data shards. + """ + if use_tpu: + return self._slow_greedy_infer_tpu(features, decode_length) + return self._slow_greedy_infer(features, decode_length) + + def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha, + use_tpu=False): + """Beam search decoding. + + Args: + features: an map of string to `Tensor` + decode_length: an integer. How many additional timesteps to decode. + beam_size: number of beams. + top_beams: an integer. How many of the beams to return. + alpha: Float that controls the length penalty. larger the alpha, stronger + the preference for longer translations. + use_tpu: Whether we should use TPU or not. + + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } + """ + # Caching is not ebabled in Universal Transformer + # TODO(dehghani): Support fast decoding for Universal Transformer + return self._beam_decode_slow(features, decode_length, beam_size, + top_beams, alpha, use_tpu) + + +@registry.register_model +class UniversalTransformerEncoder(transformer.Transformer): + """Universal Transformer Encoder: Has no decoder (e.g.for classification).""" + + def encode(self, inputs, target_space, hparams, features=None, losses=None): + """Encode transformer inputs. + + Args: + inputs: Transformer inputs [batch_size, input_length, input_height, + hidden_dim] which will be flattened along the two spatial dimensions. + target_space: scalar, target space ID. + hparams: hyperparmeters for model. + features: optionally pass the entire features dictionary as well. + This is needed now for "packed" datasets. + losses: Unused. + + Returns: + Tuple of: + encoder_output: Encoder representation. + [batch_size, input_length, hidden_dim] + encoder_extra_output: which is extra encoder output used in some + variants of the model (e.g. in ACT, to pass the ponder-time to body) + """ + del losses + inputs = common_layers.flatten4d3d(inputs) + + (encoder_input, self_attention_bias, _) = ( + transformer.transformer_prepare_encoder(inputs, target_space, hparams)) + + encoder_input = tf.nn.dropout(encoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + + (encoder_output, encoder_extra_output) = ( + universal_transformer_util.universal_transformer_encoder( + encoder_input, + self_attention_bias, + hparams, + nonpadding=transformer.features_to_nonpadding(features, "inputs"), + save_weights_to=self.attention_weights)) + + return encoder_output, encoder_extra_output + + def body(self, features): + """Universal Transformer main model_fn. + + Args: + features: Map of features to the model. Should contain the following: + "inputs": Transformer inputs [batch_size, input_length, hidden_dim] + "targets": Target decoder outputs. + [batch_size, decoder_length, hidden_dim] + "target_space_id" + + Returns: + Final decoder representation. 
[batch_size, decoder_length, hidden_dim] + """ + hparams = self._hparams + + assert self.has_input, ("universal_transformer_encoder is applicable on " + "problems with inputs") + + inputs = features["inputs"] + target_space = features["target_space_id"] + encoder_output, enc_extra_output = self.encode( + inputs, target_space, hparams, features=features) + + encoder_output = tf.expand_dims(encoder_output, 2) + + if hparams.recurrence_type == "act" and hparams.act_loss_weight != 0: + ponder_times, remainders = enc_extra_output + act_loss = hparams.act_loss_weight * tf.reduce_mean(ponder_times + + remainders) + contrib.summary().scalar("act_loss", act_loss) + + return encoder_output, {"act_loss": act_loss} + return encoder_output + + +def update_hparams_for_universal_transformer(hparams): + """Adds default hparams for all of the variants of the Universal Transformer. + + Args: + hparams: default hparams (usually one of the standard hparams from + transformer model (like "transformer_base") + + Returns: + hparams with default values for Universal Transformers hyper-parameters + + """ + hparams.daisy_chain_variables = False # Breaks multi-gpu in while loops. + + # If not None, mixes vanilla transformer with Universal Transformer. + # Options: None, "before_ut", and "after_ut". + hparams.add_hparam("mix_with_transformer", None) + + # Number of vanilla transformer layers used to be mixed with u-transofmer. + hparams.add_hparam("num_mixedin_layers", 2) + # Number of transformer layers within the recurrent block (default is 1). + hparams.add_hparam("num_inrecurrence_layers", 1) + + # Type of recurrency: + # basic, highway, skip, dwa, act, rnn, gru, lstm. + hparams.add_hparam("recurrence_type", "basic") + + # Number of steps (which is equivalent to num layer in transformer). + hparams.add_hparam("num_rec_steps", hparams.num_hidden_layers) + + # Add the positional mebedding at each step(horisontal timing) + hparams.add_hparam("add_position_timing_signal", True) + if hparams.add_position_timing_signal: + hparams.pos = None + # Logic of position shifting when using timing signal: + # None, "random", "step" + hparams.add_hparam("position_start_index", None) + + # Add an step embedding at each step (vertical timing) + hparams.add_hparam("add_step_timing_signal", True) + # Either "learned" or "sinusoid" + hparams.add_hparam("step_timing_signal_type", "learned") + + # Add or concat the timing signal (applied both on position and step timing). + # Options: "add" and "concat". + hparams.add_hparam("add_or_concat_timing_signal", "add") + + # Add SRU at the beginning of each Universal Transformer step. + # This can be considered as a position timing signal + hparams.add_hparam("add_sru", False) + + # Default ffn layer is separable convolution. + # Options: "fc" and "sepconv". + hparams.add_hparam("transformer_ffn_type", "fc") + + # Transform bias (in models with highway or skip connection). + hparams.add_hparam("transform_bias_init", -1.0) + hparams.add_hparam("couple_carry_transform_gates", True) + + # Depth-wise attention (grid-transformer!) hparams: + # Adds depth embedding, if true. + hparams.add_hparam("depth_embedding", True) + # Learns attention weights for elements (instead of positions), if true. + hparams.add_hparam("dwa_elements", True) + + # Type of ffn_layer used for gate in skip, highway, etc. + # "dense" or "dense_dropconnect". + # With dense_relu_dense, the bias/kernel initializations will not be applied. 
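+ # "dense_dropconnect" is exercised by universal_transformer_small_dropconnect
+ # below, which also adds a dropconnect_dropout hparam.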
+ hparams.add_hparam("gate_ffn_layer", "dense") + + # LSTM forget bias for lstm style recurrence. + hparams.add_hparam("lstm_forget_bias", 1.0) + # Uses the memory at the last step as the final output, if true. + hparams.add_hparam("use_memory_as_final_state", False) + # if also add a ffn unit to the transition function when using gru/lstm + hparams.add_hparam("add_ffn_unit_to_the_transition_function", False) + + # Type of act: basic/accumulated/global (instead of position-wise!)/random. + hparams.add_hparam("act_type", "basic") + # Max number of steps (forces halting at this step). + hparams.add_hparam("act_max_steps", 2 * hparams.num_hidden_layers) + hparams.add_hparam("act_halting_bias_init", 1.0) + hparams.add_hparam("act_epsilon", 0.01) + hparams.add_hparam("act_loss_weight", 0.01) + + return hparams + + +@registry.register_hparams +def universal_transformer_base(): + """Base parameters for Universal Transformer.""" + hparams = transformer.transformer_base() + # To have a similar capacity to the transformer_base with 6 layers, + # we need to increase the size of the UT's layer + # since, in fact, UT has a single layer repeating multiple times. + hparams.hidden_size = 1024 + hparams.filter_size = 4096 + hparams.num_heads = 16 + hparams.layer_prepostprocess_dropout = 0.3 + hparams = update_hparams_for_universal_transformer(hparams) + return hparams + + +@registry.register_hparams +def universal_transformer_base_tpu(): + hparams = universal_transformer_base() + transformer.update_hparams_for_tpu(hparams) + hparams.add_step_timing_signal = False + return hparams + + +@registry.register_hparams +def universal_transformer_big(): + hparams = universal_transformer_base() + hparams.hidden_size = 2048 + hparams.filter_size = 8192 + return hparams + + +@registry.register_hparams +def universal_transformer_base_fp16(): + hparams = transformer.transformer_base() + hparams = update_hparams_for_universal_transformer(hparams) + hparams.activation_dtype = "float16" + return hparams + + +@registry.register_hparams +def universal_transformer_small(): + hparams = transformer.transformer_base() + hparams = update_hparams_for_universal_transformer(hparams) + return hparams + + +@registry.register_hparams +def universal_transformer_tiny(): + hparams = transformer.transformer_tiny() + hparams = update_hparams_for_universal_transformer(hparams) + hparams.num_rec_steps = 8 + return hparams + + +@registry.register_hparams +def transformer_teeny(): + hparams = transformer.transformer_base() + hparams.hidden_size = 128 + hparams.filter_size = 128 + hparams.num_heads = 2 + return hparams + + +@registry.register_hparams +def universal_transformer_teeny(): + hparams = transformer_teeny() + hparams = update_hparams_for_universal_transformer(hparams) + hparams.num_rec_steps = 10 + return hparams + + +@registry.register_hparams +def universal_transformer_tall(): + hparams = universal_transformer_small() + hparams.num_rec_steps = 16 + return hparams + + +@registry.register_hparams +def universal_transformer_small_dropconnect(): + hparams = universal_transformer_small() + hparams.gate_ffn_layer = "dense_dropconnect" + hparams.add_hparam("dropconnect_dropout", 0.5) + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_base(): + hparams = universal_transformer_base() + hparams.recurrence_type = "act" + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_base_tpu(): + hparams = adaptive_universal_transformer_base() + transformer.update_hparams_for_tpu(hparams) + 
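+ # The step timing signal is disabled on TPU; per the TODO in
+ # adaptive_universal_transformer_multilayer_tpu below, it does not work
+ # there yet.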
hparams.add_step_timing_signal = False + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_multilayer_tpu(): + """Multi-layer config for adaptive Transformer on TPU.""" + hparams = adaptive_universal_transformer_base_tpu() + hparams.num_inrecurrence_layers = 2 + hparams.mix_with_transformer = "before_ut,after_ut" + hparams.num_mixedin_layers = 1 + hparams.transformer_ffn_type = "sepconv" + # TODO(lukaszkaiser): the options below don't work on TPU yet, make them work. + # hparams.add_step_timing_signal = True + # hparams.add_sru = True + # hparams.self_attention_type = "dot_product_relative_v2" + # hparams.max_relative_position = 256 + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_multilayer_hard(): + """Multi-layer config for adaptive Transformer with hard attention.""" + hparams = adaptive_universal_transformer_multilayer_tpu() + hparams.batch_size = 256 + hparams.hard_attention_k = 8 + hparams.add_step_timing_signal = True + # hparams.add_sru = True # This is very slow on GPUs, does it help? + hparams.self_attention_type = "dot_product_relative_v2" + hparams.max_relative_position = 256 + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_small(): + hparams = universal_transformer_small() + hparams.recurrence_type = "act" + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_tiny(): + hparams = universal_transformer_tiny() + hparams.recurrence_type = "act" + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_sepconv_tiny(): + hparams = universal_transformer_tiny() + hparams.recurrence_type = "act" + hparams.transformer_ffn_type = "sepconv" + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_global_base(): + hparams = universal_transformer_base() + hparams.recurrence_type = "act" + hparams.act_type = "global" + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_global_base_tpu(): + hparams = adaptive_universal_transformer_global_base() + transformer.update_hparams_for_tpu(hparams) + hparams.add_step_timing_signal = False + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_tall(): + hparams = universal_transformer_small() + hparams.recurrence_type = "act" + hparams.num_hidden_layers = 16 + hparams.batch_size = 1024 + hparams.act_max_steps = 24 + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_tall_actlossw0(): + hparams = universal_transformer_small() + hparams.recurrence_type = "act" + hparams.num_hidden_layers = 16 + hparams.batch_size = 1024 + hparams.act_max_steps = 24 + hparams.act_loss_weight = 0.0 + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_tall_actlossw001(): + hparams = universal_transformer_small() + hparams.recurrence_type = "act" + hparams.num_hidden_layers = 16 + hparams.batch_size = 1024 + hparams.act_max_steps = 24 + hparams.act_loss_weight = 0.001 + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_base_dropout03(): + hparams = universal_transformer_base() + hparams.recurrence_type = "act" + hparams.layer_prepostprocess_dropout = 0.3 + hparams.attention_dropout = 0.3 + hparams.relu_dropout = 0.3 + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_base_dropout05(): + hparams = universal_transformer_base() + hparams.recurrence_type = "act" + hparams.layer_prepostprocess_dropout = 
0.5 + hparams.attention_dropout = 0.5 + hparams.relu_dropout = 0.5 + return hparams + + +@registry.register_hparams +def universal_transformer_skip_base(): + hparams = universal_transformer_base() + hparams.recurrence_type = "skip" + return hparams + + +@registry.register_hparams +def universal_transformer_highway_base(): + hparams = universal_transformer_base() + hparams.recurrence_type = "highway" + return hparams + + +@registry.register_hparams +def universal_transformer_dwa_base(): + hparams = universal_transformer_base() + hparams.recurrence_type = "dwa" + return hparams + + +@registry.register_hparams +def universal_transformer_lstm_base(): + hparams = universal_transformer_base() + hparams.recurrence_type = "lstm" + hparams.add_step_timing_signal = False # Let lstm count in depth for us! + return hparams + + +@registry.register_hparams +def universal_transformer_gru_base(): + hparams = universal_transformer_base() + hparams.recurrence_type = "gru" + hparams.add_step_timing_signal = False # Let gru count in depth for us! + return hparams + + +@registry.register_hparams +def universal_transformer_lstm_tall(): + hparams = universal_transformer_tall() + hparams.recurrence_type = "lstm" + hparams.add_step_timing_signal = False # Let lstm count in depth for us! + return hparams + + +@registry.register_hparams +def universal_transformer_position_random_timing_tiny(): + hparams = universal_transformer_tiny() + hparams.position_start_index = "random" + return hparams + + +@registry.register_hparams +def universal_transformer_position_step_timing_tiny(): + hparams = universal_transformer_tiny() + hparams.position_start_index = "step" + return hparams + + +@registry.register_hparams +def universal_transformer_step_sinusoid_timing_tiny(): + hparams = universal_transformer_tiny() + hparams.step_timing_signal_type = "sinusoid" + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_position_random_timing_tiny(): + hparams = universal_transformer_tiny() + hparams.recurrence_type = "act" + hparams.position_start_index = "random" + return hparams + + +@registry.register_hparams +def universal_transformer_mix_before_ut_base(): + hparams = universal_transformer_base() + hparams.mix_with_transformer = "before_ut" + return hparams + + +@registry.register_hparams +def universal_transformer_mix_after_ut_base(): + hparams = universal_transformer_base() + hparams.mix_with_transformer = "after_ut" + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_mix_before_ut_base(): + hparams = universal_transformer_base() + hparams.mix_with_transformer = "before_ut" + hparams.recurrence_type = "act" + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_mix_after_ut_base(): + hparams = universal_transformer_base() + hparams.mix_with_transformer = "after_ut" + hparams.recurrence_type = "act" + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_concat_tiny(): + hparams = universal_transformer_tiny() + hparams.recurrence_type = "act" + hparams.add_or_concat_timing_signal = "concat" + return hparams + + +@registry.register_hparams +def adaptive_universal_transformer_with_sru_base(): + hparams = universal_transformer_base() + hparams.recurrence_type = "act" + hparams.add_sru = True + return hparams + + +@registry.register_hparams +def universal_transformer_sepconv_big(): + hparams = universal_transformer_big() + hparams.transformer_ffn_type = "sepconv" + return hparams + + +@registry.register_hparams 
+def universal_transformer_sepconv_base(): + hparams = universal_transformer_base() + hparams.transformer_ffn_type = "sepconv" + return hparams + + +@registry.register_hparams +def universal_transformer_sepconv_tiny(): + hparams = universal_transformer_tiny() + hparams.transformer_ffn_type = "sepconv" + return hparams + + +@registry.register_ranged_hparams +def universal_transformer_base_range(rhp): + """Range of hyperparameters.""" + # After starting from base, set intervals for some parameters. + rhp.set_discrete("num_rec_steps", [6, 8, 10]) + rhp.set_discrete("hidden_size", [1024, 2048, 4096]) + rhp.set_discrete("filter_size", [2048, 4096, 8192]) + rhp.set_discrete("num_heads", [8, 16, 32]) + rhp.set_categorical("transformer_ffn_type", ["sepconv", "fc"]) + rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE) + rhp.set_float("weight_decay", 0.0, 2.0) + + +@registry.register_ranged_hparams +def adaptive_universal_transformer_base_range(rhp): + """Range of hyperparameters.""" + # After starting from base, set intervals for some parameters. + rhp.set_discrete("act_max_steps", [8, 16, 32]) + rhp.set_float("act_loss_weight", 0.0, 0.5) + rhp.set_discrete("hidden_size", [1024, 2048, 4096]) + rhp.set_discrete("filter_size", [2048, 4096, 8192]) + rhp.set_discrete("num_heads", [8, 16, 32]) + rhp.set_categorical("transformer_ffn_type", ["sepconv", "fc"]) + rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE) + rhp.set_float("weight_decay", 0.0, 2.0) diff --git a/tensor2tensor/models/research/universal_transformer_test.py b/tensor2tensor/models/research/universal_transformer_test.py new file mode 100644 index 000000000..143d756fe --- /dev/null +++ b/tensor2tensor/models/research/universal_transformer_test.py @@ -0,0 +1,79 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +"""Tests for Transformer.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.models.research import universal_transformer + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +BATCH_SIZE = 3 +INPUT_LENGTH = 5 +TARGET_LENGTH = 7 +VOCAB_SIZE = 10 + + +class UniversalTransformerTest(tf.test.TestCase): + + def get_model(self, + hparams, mode=tf_estimator.ModeKeys.TRAIN, has_input=True): + hparams.hidden_size = 8 + hparams.filter_size = 32 + hparams.num_heads = 1 + hparams.layer_prepostprocess_dropout = 0.0 + hparams.mix_with_transformer = "" + + p_hparams = problem_hparams.test_problem_hparams(VOCAB_SIZE, + VOCAB_SIZE, + hparams) + if not has_input: + del p_hparams.modality["inputs"] + hparams.problems = [p_hparams] + + inputs = np.random.randint( + VOCAB_SIZE, size=(BATCH_SIZE, INPUT_LENGTH, 1, 1)) + targets = np.random.randint( + VOCAB_SIZE, size=(BATCH_SIZE, TARGET_LENGTH, 1, 1)) + features = { + "targets": tf.constant(targets, dtype=tf.int32, name="targets"), + "target_space_id": tf.constant(1, dtype=tf.int32) + } + if has_input: + features["inputs"] = tf.constant(inputs, dtype=tf.int32, name="inputs") + + return universal_transformer.UniversalTransformer( + hparams, mode, p_hparams), features + + def testTransformer(self): + model, features = self.get_model( + universal_transformer.universal_transformer_base()) + logits, _ = model(features) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + res = session.run(logits) + self.assertEqual(res.shape, (BATCH_SIZE, TARGET_LENGTH, 1, 1, VOCAB_SIZE)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/research/universal_transformer_util.py b/tensor2tensor/models/research/universal_transformer_util.py new file mode 100644 index 000000000..daa5ab578 --- /dev/null +++ b/tensor2tensor/models/research/universal_transformer_util.py @@ -0,0 +1,1482 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for Universal Transformer. + +The Universal Transformer is based on the popular encoder-decoder architecture. +However, as opposed to a fixed stack of distinct layers (as is usually the case +for most popular neural sequence models), the Universal Transformer is +recurrent "in depth", and repeatedly applies the same series of functions with +the same parameters to all elements of the sequence in parallel, revising their +representations with every step. The encoder and decoder have the same +recurrent structure, but the decoder additionally consumes the final encoder +representations for each position. Like the Transformer, the Universal +Transformer is autoregressive. 
Trained using teacher-forcing, at generation +time it produces its output one position at a time, with the decoder consuming +the previously produced output positions. + +Given an input sequence of length m, we start with a matrix whose rows are the +d-dimensional embeddings of the symbols at each position of the sequence. +The Universal Transformer then iteratively computes representation of the input +at each step by applying the multiheaded dot-product self-attention mechanism, +followed by a recurrent transition function. We also add residual connections +around each of these function blocks and apply dropout and layer normalization. + +The recurrent transition function in fact controls how steps communicate with +each other in depth. For instance, the recurrent transition, can be a simple +identity function which passes the output of a step as the input to next step. +Or it can be an LSTM (flipped vertically) next to the transformer which +controls how state of the model changes in depth. + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import functools + +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.models import transformer +from tensor2tensor.utils import contrib +from tensor2tensor.utils import expert_utils + +import tensorflow.compat.v1 as tf + + +def universal_transformer_encoder(encoder_input, + encoder_self_attention_bias, + hparams, + name="encoder", + nonpadding=None, + save_weights_to=None, + make_image_summary=True): + """Universal Transformer encoder function. + + Prepares all the arguments and the inputs and passes it to a + universal_transformer_layer to encode the encoder_input. + + Args: + encoder_input: a Tensor + encoder_self_attention_bias: bias Tensor for self-attention + (see common_attention.attention_bias()) + hparams: hyperparameters for model + name: a string + nonpadding: optional Tensor with shape [batch_size, encoder_length] + indicating what positions are not padding. This must either be + passed in, which we do for "packed" datasets, or inferred from + encoder_self_attention_bias. The knowledge about padding is used + for pad_remover(efficiency) and to mask out padding in convoltutional + layers. + save_weights_to: an optional dictionary to capture attention weights + for vizualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. 
+ + Returns: + y: a Tensors as the output of the encoder + extra_output: which can be used to pass extra information to the body + """ + + x = encoder_input + attention_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "attention_dropout_broadcast_dims", ""))) + with tf.variable_scope(name): + if nonpadding is not None: + padding = 1.0 - nonpadding + else: + padding = common_attention.attention_bias_to_padding( + encoder_self_attention_bias) + nonpadding = 1.0 - padding + pad_remover = None + if hparams.use_pad_remover and not common_layers.is_xla_compiled(): + pad_remover = expert_utils.PadRemover(padding) + + ffn_unit = functools.partial( + transformer_encoder_ffn_unit, + hparams=hparams, + nonpadding_mask=nonpadding, + pad_remover=pad_remover) + + attention_unit = functools.partial( + transformer_encoder_attention_unit, + hparams=hparams, + encoder_self_attention_bias=encoder_self_attention_bias, + attention_dropout_broadcast_dims=attention_dropout_broadcast_dims, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary) + + x, extra_output = universal_transformer_layer( + x, hparams, ffn_unit, attention_unit, pad_remover=pad_remover) + + return common_layers.layer_preprocess(x, hparams), extra_output + + +def universal_transformer_decoder(decoder_input, + encoder_output, + decoder_self_attention_bias, + encoder_decoder_attention_bias, + hparams, + name="decoder", + nonpadding=None, + save_weights_to=None, + make_image_summary=True): + """Universal Transformer decoder function. + + Prepares all the arguments and the inputs and passes it to a + core_universal_transformer_layer to decoder. + + Args: + decoder_input: a Tensor + encoder_output: a Tensor + decoder_self_attention_bias: bias Tensor for self-attention + (see common_attention.attention_bias()) + encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention + (see common_attention.attention_bias()) + hparams: hyperparameters for model + name: a string + nonpadding: optional Tensor with shape [batch_size, encoder_length] + indicating what positions are not padding. This is used + to mask out padding in convoltutional layers. We generally only + need this mask for "packed" datasets, because for ordinary datasets, + no padding is ever followed by nonpadding. + save_weights_to: an optional dictionary to capture attention weights + for vizualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. 
+ + Returns: + y: the output Tensors + extra_output: which can be used to pass extra information to the body + """ + x = decoder_input + attention_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "attention_dropout_broadcast_dims", ""))) + with tf.variable_scope(name): + ffn_unit = functools.partial( + transformer_decoder_ffn_unit, + hparams=hparams, + nonpadding_mask=nonpadding) + + attention_unit = functools.partial( + transformer_decoder_attention_unit, + hparams=hparams, + encoder_output=encoder_output, + decoder_self_attention_bias=decoder_self_attention_bias, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + attention_dropout_broadcast_dims=attention_dropout_broadcast_dims, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary) + + x, extra_output = universal_transformer_layer( + x, hparams, ffn_unit, attention_unit) + + return common_layers.layer_preprocess(x, hparams), extra_output + + +def universal_transformer_layer(x, + hparams, + ffn_unit, + attention_unit, + pad_remover=None): + """Core function applying the universal transformer layer. + + Args: + x: input + hparams: model hyper-parameters + ffn_unit: feed-forward unit + attention_unit: multi-head attention unit + pad_remover: to mask out padding in convolutional layers (efficiency). + + Returns: + the output tensor, extra output (can be memory, ponder time, etc.) + + Raises: + ValueError: Unknown recurrence type + """ + + def add_vanilla_transformer_layer(x, num_layers, name): + """Passes the input through num_layers of vanilla transformer layers. + + Args: + x: input + num_layers: number of layers + name: string, prefix of layer names + + Returns: + output of vanilla_transformer_layer + """ + if hparams.add_position_timing_signal: + # In case of add_position_timing_signal=true, we set hparams.pos=None + # and add position timing signal at the beginning of each step, so for + # the vanilla transformer, we need to add timing signal here. + x = common_attention.add_timing_signal_1d(x) + for layer in range(num_layers): + with tf.variable_scope(name + "layer_%d" % layer): + x = ffn_unit(attention_unit(x)) + return x + + with tf.variable_scope("universal_transformer_%s" % hparams.recurrence_type): + if (hparams.mix_with_transformer and + "before_ut" in hparams.mix_with_transformer): + x = add_vanilla_transformer_layer(x, hparams.num_mixedin_layers, + "before_ut_") + + if hparams.recurrence_type == "act": + output, extra_output = universal_transformer_act( + x, hparams, ffn_unit, attention_unit) + + else: # for all the other recurrency types with fixed number of steps + + ut_function, initializer = get_ut_layer(x, hparams, ffn_unit, + attention_unit, pad_remover) + + output, _, extra_output = tf.foldl( + ut_function, tf.range(hparams.num_rec_steps), + initializer=initializer) + + # Right now, this is only possible when the transition function is an lstm + if (hparams.recurrence_type == "lstm" and + hparams.get("use_memory_as_final_state", False)): + output = extra_output + + if (hparams.mix_with_transformer and + "after_ut" in hparams.mix_with_transformer): + output = add_vanilla_transformer_layer(output, hparams.num_mixedin_layers, + "after_ut_") + + return output, extra_output + + +def get_ut_layer(x, + hparams, + ffn_unit, + attention_unit, + pad_remover=None): + """Provides the function that is used in universal transforemr steps. 
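+
+  Each supported recurrence_type ("basic", "highway", "skip", "dwa", "gru" and
+  "lstm") is mapped to a step function together with its (state, input, memory)
+  initializer; the step function is then driven by the tf.foldl loop in
+  universal_transformer_layer.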
+ + Args: + x: input + hparams: model hyper-parameters + ffn_unit: feed-forward unit + attention_unit: multi-head attention unit + pad_remover: to mask out padding in convolutional layers (efficiency). + + Returns: + ut_function and the ut_initializer + + Raises: + ValueError: Unknown recurrence type + """ + + if hparams.recurrence_type == "basic": + ut_initializer = (x, x, x) # (state, input, memory) + ut_function = functools.partial( + universal_transformer_basic, + hparams=hparams, + ffn_unit=ffn_unit, + attention_unit=attention_unit) + + elif hparams.recurrence_type == "highway": + ut_initializer = (x, x, x) # (state, input, memory) + ut_function = functools.partial( + universal_transformer_highway, + hparams=hparams, + ffn_unit=ffn_unit, + attention_unit=attention_unit, + pad_remover=pad_remover) + + elif hparams.recurrence_type == "skip": + ut_initializer = (x, x, x) # (state, input, memory) + ut_function = functools.partial( + universal_transformer_skip, + hparams=hparams, + ffn_unit=ffn_unit, + attention_unit=attention_unit, + pad_remover=pad_remover) + + elif hparams.recurrence_type == "dwa": + # memory contains the original input + all the states + memory_size = hparams.num_rec_steps + 1 + + # prepare initializer: + memory_empty = tf.zeros([memory_size] + common_layers.shape_list(x)) + + # filling the first slot with the original input + memory = fill_memory_slot(memory_empty, x, 0) + + ut_initializer = (x, x, memory) # (state, input, memory) + ut_function = functools.partial( + universal_transformer_depthwise_attention, + hparams=hparams, + ffn_unit=ffn_unit, + attention_unit=attention_unit) + + elif hparams.recurrence_type == "gru": + ut_initializer = (x, x, x) # (state, input, memory) + ut_function = functools.partial( + universal_transformer_with_gru_as_transition_function, + hparams=hparams, + ffn_unit=ffn_unit, + attention_unit=attention_unit, + pad_remover=pad_remover) + + elif hparams.recurrence_type == "lstm": + memory = tf.zeros(common_layers.shape_list(x)) + ut_initializer = (x, x, memory) # (state, input, memory) + ut_function = functools.partial( + universal_transformer_with_lstm_as_transition_function, + hparams=hparams, + ffn_unit=ffn_unit, + attention_unit=attention_unit, + pad_remover=pad_remover) + + else: + raise ValueError("Unknown recurrence type: %s" % hparams.recurrence_type) + + return ut_function, ut_initializer + + +def transformer_encoder_ffn_unit(x, + hparams, + nonpadding_mask=None, + pad_remover=None): + """Applies a feed-forward function which is parametrised for encoding. + + Args: + x: input + hparams: model hyper-parameters + nonpadding_mask: optional Tensor with shape [batch_size, encoder_length] + indicating what positions are not padding. This is used + to mask out padding in convoltutional layers. We generally only + need this mask for "packed" datasets, because for ordinary datasets, + no padding is ever followed by nonpadding. + pad_remover: to mask out padding in convolutional layers (efficiency). 
+ + Returns: + the output tensor + """ + + with tf.variable_scope("ffn"): + if hparams.transformer_ffn_type == "fc": + y = transformer.transformer_ffn_layer( + common_layers.layer_preprocess(x, hparams), + hparams, + pad_remover, + conv_padding="SAME", + nonpadding_mask=nonpadding_mask) + + if hparams.transformer_ffn_type == "sepconv": + assert nonpadding_mask is not None, ( + "The nonpadding_mask should be provided, otherwise the model uses " + "the leaked padding information to estimate the length!") + y = common_layers.sepconv_relu_sepconv( + common_layers.layer_preprocess(x, hparams), + filter_size=hparams.filter_size, + output_size=hparams.hidden_size, + first_kernel_size=(3, 1), + second_kernel_size=(5, 1), + padding="SAME", + nonpadding_mask=nonpadding_mask, + dropout=hparams.relu_dropout) + + x = common_layers.layer_postprocess(x, y, hparams) + + return x + + +def transformer_encoder_attention_unit(x, + hparams, + encoder_self_attention_bias, + attention_dropout_broadcast_dims, + save_weights_to=None, + make_image_summary=True): + """Applies multihead attention function which is parametrised for encoding. + + Args: + x: input + hparams: model hyper-parameters + encoder_self_attention_bias: a bias tensor for use in encoder self-attention + attention_dropout_broadcast_dims: Fpr noise broadcasting in the dropout + layers to save memory during training + save_weights_to: an optional dictionary to capture attention weights for + visualization; the weights tensor will be appended there under a string + key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. + + Returns: + the output tensor + + """ + + with tf.variable_scope("self_attention"): + y = common_attention.multihead_attention( + common_layers.layer_preprocess(x, hparams), + None, + encoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + save_weights_to=save_weights_to, + max_relative_position=hparams.max_relative_position, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + hard_attention_k=hparams.hard_attention_k) + x = common_layers.layer_postprocess(x, y, hparams) + return x + + +def transformer_decoder_ffn_unit(x, + hparams, + nonpadding_mask=None): + """Applies a feed-forward function which is parametrised for decoding. + + Args: + x: input + hparams: model hyper-parameters + nonpadding_mask: optional Tensor with shape [batch_size, encoder_length] + indicating what positions are not padding. This is used + to mask out padding in convoltutional layers. We generally only + need this mask for "packed" datasets, because for ordinary datasets, + no padding is ever followed by nonpadding. 
+ + Returns: + the output tensor + + """ + + with tf.variable_scope("ffn"): + if hparams.transformer_ffn_type == "fc": + y = transformer.transformer_ffn_layer( + common_layers.layer_preprocess(x, hparams), + hparams, + conv_padding="LEFT", + nonpadding_mask=nonpadding_mask) + + if hparams.transformer_ffn_type == "sepconv": + y = common_layers.sepconv_relu_sepconv( + common_layers.layer_preprocess(x, hparams), + filter_size=hparams.filter_size, + output_size=hparams.hidden_size, + first_kernel_size=(3, 1), + second_kernel_size=(5, 1), + padding="LEFT", + nonpadding_mask=nonpadding_mask, + dropout=hparams.relu_dropout) + + x = common_layers.layer_postprocess(x, y, hparams) + + return x + + +def transformer_decoder_attention_unit(x, + hparams, + encoder_output, + decoder_self_attention_bias, + encoder_decoder_attention_bias, + attention_dropout_broadcast_dims, + save_weights_to=None, + make_image_summary=True): + """Applies multihead attention function which is parametrised for decoding. + + Args: + x: input (decoder input) + hparams: model hyper-parameters + encoder_output: Encoder representation. [batch_size, input_length, + hidden_dim] + decoder_self_attention_bias: Bias and mask weights for decoder + self-attention. [batch_size, decoder_length] + encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder + attention. [batch_size, input_length] + attention_dropout_broadcast_dims: Fpr noise broadcasting in the dropout + layers to save memory during training + save_weights_to: an optional dictionary to capture attention weights for + visualization; the weights tensor will be appended there under a string + key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. + + Returns: + The output tensor + """ + + with tf.variable_scope("self_attention"): + y = common_attention.multihead_attention( + common_layers.layer_preprocess(x, hparams), + None, + decoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + save_weights_to=save_weights_to, + max_relative_position=hparams.max_relative_position, + cache=None, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + hard_attention_k=hparams.hard_attention_k) + x = common_layers.layer_postprocess(x, y, hparams) + if encoder_output is not None: + with tf.variable_scope("encdec_attention"): + y = common_attention.multihead_attention( + common_layers.layer_preprocess(x, hparams), + encoder_output, + encoder_decoder_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + hard_attention_k=hparams.hard_attention_k) + x = common_layers.layer_postprocess(x, y, hparams) + return x + + +def universal_transformer_basic(layer_inputs, + step, hparams, + ffn_unit, + attention_unit): + """Basic Universal Transformer. + + This model is pretty similar to the vanilla transformer in which weights are + shared between layers. 
For some tasks, this simple idea brings a + generalization that is not achievable by playing with the size of the model + or drop_out parameters in the vanilla transformer. + + Args: + layer_inputs: + - state: state + step: indicates number of steps taken so far + hparams: model hyper-parameters + ffn_unit: feed-forward unit + attention_unit: multi-head attention unit + + Returns: + layer_output: + new_state: new state + """ + state, inputs, memory = tf.unstack(layer_inputs, num=None, axis=0, + name="unstack") + new_state = step_preprocess(state, step, hparams) + + for i in range(hparams.num_inrecurrence_layers): + with tf.variable_scope("rec_layer_%d" % i): + new_state = ffn_unit(attention_unit(new_state)) + + return new_state, inputs, memory + + +def universal_transformer_highway(layer_inputs, + step, hparams, + ffn_unit, + attention_unit, + pad_remover=None): + """Universal Transformer with highway connection. + + + It transforms the state using a block containing self-attention and transition + function and wrap the whole block with a highway connection. + (the new state is a combination of the state and the transformed-state + based on cary/transform gates.) + + Interesting observation: + Controlling the cary/transform gate with the original inputs works usually + better (i.e. hparams.gates_inputs="i") + + Args: + layer_inputs: + - state: state + - inputs: the original embedded inputs (= inputs to the first step) + step: indicates number of steps taken so far + hparams: model hyper-parameters. + ffn_unit: feed-forward unit + attention_unit: multi-head attention unit + pad_remover: to mask out padding in convolutional layers (efficiency). + + Returns: + layer_output: + new_state: new state + inputs: the original embedded inputs (= inputs to the first step) + + """ + + state, inputs, memory = layer_inputs + new_state = step_preprocess(state, step, hparams) + + for i in range(hparams.num_inrecurrence_layers): + with tf.variable_scope("rec_layer_%d" % i): + new_state = ffn_unit(attention_unit(new_state)) + + transformed_state = new_state + + gate_inputs = [] + if "s" in hparams.gates_inputs: + gate_inputs.append(state) + + if "t" in hparams.gates_inputs: + gate_inputs.append(transformed_state) + + if "i" in hparams.gates_inputs: + gate_inputs.append(inputs) + + gate_ffn_layer = hparams.gate_ffn_layer + + transform_gate = _ffn_layer_multi_inputs( + gate_inputs, + hparams, + ffn_layer_type=gate_ffn_layer, + name="transform", + bias_initializer=tf.constant_initializer(hparams.transform_bias_init), + activation=tf.sigmoid, + pad_remover=pad_remover, + preprocess=True) + + if hparams.couple_carry_transform_gates: + carry_gate = tf.subtract(1.0, transform_gate, name="carry") + + else: + carry_gate = _ffn_layer_multi_inputs( + gate_inputs, + hparams, + ffn_layer_type=gate_ffn_layer, + name="carry", + bias_initializer=tf.constant_initializer(-hparams.transform_bias_init), + activation=tf.sigmoid, + pad_remover=pad_remover, + preprocess=True) + + new_state = state * carry_gate + transformed_state * transform_gate + + contrib.summary().scalar("highway_transform_gate_layer", + tf.reduce_mean(transform_gate)) + + contrib.summary().scalar("highway_carry_gate_layer", + tf.reduce_mean(carry_gate)) + + return new_state, inputs, memory + + +def universal_transformer_skip(layer_inputs, + step, + hparams, + ffn_unit, + attention_unit, + pad_remover=None): + """Universal Transformer with highway connection. 
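+  (This is the skip variant: the carry gate mixes the transformed state with
+  the original embedded inputs rather than with the previous state, as the
+  highway variant above does.)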
+ + + It transforms the state using attention and ffn and wrap this transformation + with a skip-all connection. (the new state is a combination of the state and + the inputs (original inputs) based on cary/transform gates.) + + Observation: + Controlling the cary/transform gate with the original inputs works usually + better (i.e. hparams.gates_inputs="i") + + Args: + layer_inputs: + - state: state + - inputs: the original embedded inputs (= inputs to the first step) + step: indicates number of steps taken so far + hparams: model hyper-parameters. + ffn_unit: feed-forward unit + attention_unit: multi-head attention unit + pad_remover: to mask out padding in convolutional layers (efficiency). + + + Returns: + layer_output: + new_state: new state + inputs: the original embedded inputs (= inputs to the first step) + """ + + state, inputs, memory = layer_inputs + new_state = step_preprocess(state, step, hparams) + + for i in range(hparams.num_inrecurrence_layers): + with tf.variable_scope("rec_layer_%d" % i): + new_state = ffn_unit(attention_unit(new_state)) + + transformed_state = new_state + + inputs.get_shape().assert_is_compatible_with(state.get_shape()) + + gate_inputs = [] + if "s" in hparams.gates_inputs: + gate_inputs.append(state) + + if "t" in hparams.gates_inputs: + gate_inputs.append(transformed_state) + + if "i" in hparams.gates_inputs: + gate_inputs.append(inputs) + + gate_ffn_layer = hparams.gate_ffn_layer + + transform_gate = _ffn_layer_multi_inputs( + gate_inputs, + hparams, + ffn_layer_type=gate_ffn_layer, + name="transform", + bias_initializer=tf.constant_initializer(hparams.transform_bias_init), + activation=tf.sigmoid, + pad_remover=pad_remover, + preprocess=True) + + if hparams.couple_carry_transform_gates: + carry_gate = tf.subtract(1.0, transform_gate, name="carry") + + else: + carry_gate = _ffn_layer_multi_inputs( + gate_inputs, + hparams, + ffn_layer_type=gate_ffn_layer, + name="carry", + bias_initializer=tf.constant_initializer(-hparams.transform_bias_init), + activation=tf.sigmoid, + pad_remover=pad_remover, + preprocess=True) + + contrib.summary().scalar("skip_transform_gate_layer", + tf.reduce_mean(transform_gate)) + + contrib.summary().scalar("skip_carry_gate_layer", tf.reduce_mean(carry_gate)) + + new_state = inputs * carry_gate + transformed_state * transform_gate + return new_state, inputs, memory + + +def universal_transformer_depthwise_attention(layer_inputs, + step, hparams, + ffn_unit, + attention_unit): + """universal_transformer with depth-wise attention. + + It uses an attention mechanism-flipped vertically- + over all the states from previous steps to generate the new_state. + + Args: + layer_inputs: + - state: state + - memory: contains states from all the previous steps. + step: indicating number of steps take so far + hparams: model hyper-parameters. + ffn_unit: feed-forward unit + attention_unit: multi-head attention unit + + + Returns: + layer_output: + new_state: new state + memory: contains states from all the previous steps. 
+ + """ + _, inputs, memory = layer_inputs + all_states = memory + + # add depth signal + if hparams.depth_embedding: + all_states = add_depth_embedding(all_states) + + # get the states up to the current step (non-zero part of the memory) + states_so_far = all_states[:step, :, :, :] + + states_so_far_weights = tf.nn.softmax( + common_layers.dense( + states_so_far, (hparams.hidden_size if hparams.dwa_elements else 1), + activation=None, + use_bias=True), + axis=-1) + + # prepare the state tensor that will be transformed + state_to_be_transformed = tf.reduce_sum( + (states_so_far * states_so_far_weights), axis=0) + + new_state = step_preprocess(state_to_be_transformed, step, hparams) + + for i in range(hparams.num_inrecurrence_layers): + with tf.variable_scope("rec_layer_%d" % i): + new_state = ffn_unit(attention_unit(new_state)) + + # add the new state to the memory + memory = fill_memory_slot(memory, new_state, step + 1) + + return new_state, inputs, memory + + +def universal_transformer_with_gru_as_transition_function( + layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None): + """Universal Transformer which uses a gru as transition function. + + It's kind of like having a gru, filliped vertically next to the Universal + Transformer that controls the flow of the information in depth, + over different steps of the Universal Transformer. + + Args: + layer_inputs: + - state: state + - inputs: not used here + - memory: not used here + step: indicates number of steps taken so far + hparams: model hyper-parameters. + ffn_unit: feed-forward unit + attention_unit: multi-head attention unit + pad_remover: to mask out padding in convolutional layers (efficiency). + Returns: + layer_output: + new_state: new state + inputs: not uesed + memory: not used + """ + + state, unused_inputs, unused_memory = tf.unstack( + layer_inputs, num=None, axis=0, name="unstack") + + # state (ut_state): output of the gru in the previous step + + # Multi_head_attention: + assert not hparams.add_step_timing_signal # Let gru count for us! 
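+  # Summary of the transition implemented below: with x_t the attention output
+  # computed from the previous state h_{t-1}, the new state is
+  #   (1 - z_t) * x_t + z_t * tanh(W x_t + U (r_t * h_{t-1}))
+  # with update gate z_t and reset gate r_t, i.e. a GRU cell applied over
+  # depth rather than over time.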
+ mh_attention_input = step_preprocess(state, step, hparams) + transition_function_input = attention_unit(mh_attention_input) + + # Transition Function: + if hparams.add_ffn_unit_to_the_transition_function: + transition_function_input = ffn_unit(transition_function_input) + + transition_function_input = common_layers.layer_preprocess( + transition_function_input, hparams) + with tf.variable_scope("gru"): + # gru update gate: z_t = sigmoid(W_z.x_t + U_z.h_{t-1}) + transition_function_update_gate = _ffn_layer_multi_inputs( + [transition_function_input, state], + hparams, + name="update", + bias_initializer=tf.constant_initializer(1.0), + activation=tf.sigmoid, + pad_remover=pad_remover) + + contrib.summary().scalar("gru_update_gate", + tf.reduce_mean(transition_function_update_gate)) + + # gru reset gate: r_t = sigmoid(W_r.x_t + U_r.h_{t-1}) + transition_function_reset_gate = _ffn_layer_multi_inputs( + [transition_function_input, state], + hparams, + name="reset", + bias_initializer=tf.constant_initializer(1.0), + activation=tf.sigmoid, + pad_remover=pad_remover) + + contrib.summary().scalar("gru_reset_gate", + tf.reduce_mean(transition_function_reset_gate)) + reset_state = transition_function_reset_gate * state + + # gru_candidate_activation: h' = tanh(W_{x_t} + U (r_t h_{t-1}) + transition_function_candidate = _ffn_layer_multi_inputs( + [transition_function_input, reset_state], + hparams, + name="candidate", + bias_initializer=tf.zeros_initializer(), + activation=tf.tanh, + pad_remover=pad_remover) + + transition_function_output = ( + (1 - transition_function_update_gate) * transition_function_input + + transition_function_update_gate * transition_function_candidate) + + transition_function_output = common_layers.layer_preprocess( + transition_function_output, hparams) + + return transition_function_output, unused_inputs, unused_memory + + +def universal_transformer_with_lstm_as_transition_function( + layer_inputs, step, hparams, ffn_unit, attention_unit, pad_remover=None): + """Universal Transformer which uses a lstm as transition function. + + It's kind of like having a lstm, filliped vertically next to the Universal + Transformer that controls the flow of the information in depth, + over different steps of the Universal Transformer. + + Args: + layer_inputs: + - state: state + - inputs: the original embedded inputs (= inputs to the first step) + - memory: memory used in lstm. + step: indicates number of steps taken so far + hparams: model hyper-parameters. + ffn_unit: feed-forward unit + attention_unit: multi-head attention unit + pad_remover: to mask out padding in convolutional layers (efficiency). + Returns: + layer_output: + new_state: new state + inputs: the original embedded inputs (= inputs to the first step) + memory: contains information of state from all the previous steps. + """ + + state, unused_inputs, memory = tf.unstack( + layer_inputs, num=None, axis=0, name="unstack") + # NOTE: + # state (ut_state): output of the lstm in the previous step + # inputs (ut_input): original input --> we don't use it here + # memory: lstm memory + + # Multi_head_attention: + assert not hparams.add_step_timing_signal # Let lstm count for us! 
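+  # Summary of the transition implemented below: with x_t the attention output
+  # computed from the previous state h_{t-1}, and c_{t-1} the carried memory,
+  #   c_t = f_t * c_{t-1} + i_t * tanh(W x_t + U h_{t-1})
+  #   h_t = o_t * tanh(c_t)
+  # with input/forget/output gates i_t, f_t, o_t, i.e. an LSTM cell applied
+  # over depth rather than over time.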
+ mh_attention_input = step_preprocess(state, step, hparams) + transition_function_input = attention_unit(mh_attention_input) + + # Transition Function: + if hparams.add_ffn_unit_to_the_transition_function: + transition_function_input = ffn_unit(transition_function_input) + + transition_function_input = common_layers.layer_preprocess( + transition_function_input, hparams) + with tf.variable_scope("lstm"): + # lstm input gate: i_t = sigmoid(W_i.x_t + U_i.h_{t-1}) + transition_function_input_gate = _ffn_layer_multi_inputs( + [transition_function_input, state], + hparams, + name="input", + bias_initializer=tf.zeros_initializer(), + activation=tf.sigmoid, + pad_remover=pad_remover) + + contrib.summary().scalar("lstm_input_gate", + tf.reduce_mean(transition_function_input_gate)) + + # lstm forget gate: f_t = sigmoid(W_f.x_t + U_f.h_{t-1}) + transition_function_forget_gate = _ffn_layer_multi_inputs( + [transition_function_input, state], + hparams, + name="forget", + bias_initializer=tf.zeros_initializer(), + activation=None, + pad_remover=pad_remover) + forget_bias_tensor = tf.constant(hparams.lstm_forget_bias) + transition_function_forget_gate = tf.sigmoid( + transition_function_forget_gate + forget_bias_tensor) + + contrib.summary().scalar("lstm_forget_gate", + tf.reduce_mean(transition_function_forget_gate)) + + # lstm output gate: o_t = sigmoid(W_o.x_t + U_o.h_{t-1}) + transition_function_output_gate = _ffn_layer_multi_inputs( + [transition_function_input, state], + hparams, + name="output", + bias_initializer=tf.zeros_initializer(), + activation=tf.sigmoid, + pad_remover=pad_remover) + + contrib.summary().scalar("lstm_output_gate", + tf.reduce_mean(transition_function_output_gate)) + + # lstm input modulation + transition_function_input_modulation = _ffn_layer_multi_inputs( + [transition_function_input, state], + hparams, + name="input_modulation", + bias_initializer=tf.zeros_initializer(), + activation=tf.tanh, + pad_remover=pad_remover) + + transition_function_memory = ( + memory * transition_function_forget_gate + + transition_function_input_gate * transition_function_input_modulation) + + transition_function_output = ( + tf.tanh(transition_function_memory) * transition_function_output_gate) + + transition_function_output = common_layers.layer_preprocess( + transition_function_output, hparams) + + return transition_function_output, unused_inputs, transition_function_memory + + +def universal_transformer_act(x, hparams, ffn_unit, attention_unit): + """ACT based models. + + Implementations of all act models are based on craffel@'s cl/160711592. + + (1) Basic AUT based on remainder-distribution ACT (position-wise). + (2) AUT with global halting probability (not position-wise). + (3) AUT with random halting probability (not position-wise). + (4) AUT with final state as accumulation of all states. 
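+
+  In all of these, a halting probability is predicted (or, for "random",
+  sampled) at every step and accumulated; once the accumulated value would
+  exceed 1 - act_epsilon, the corresponding position (or the whole example,
+  for the global variant) stops being updated, and its remainder, one minus
+  the accumulated probability, is used as the weight of its final update,
+  following Graves-style Adaptive Computation Time.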
+ + Args: + x: input + hparams: model hyper-parameters + ffn_unit: feed-forward unit + attention_unit: multi-head attention unit + + Returns: + the output tensor, (ponder_times, remainders) + + Raises: + ValueError: Unknown act type + """ + if hparams.act_type not in ["basic", "global", "random", "accumulated"]: + raise ValueError("Unknown act type: %s" % hparams.act_type) + + state = x + act_max_steps = hparams.act_max_steps + threshold = 1.0 - hparams.act_epsilon + state_shape_static = state.get_shape() + + state_slice = slice(0, 2) + if hparams.act_type == "global": + state_slice = slice(0, 1) + + # Dynamic shape for update tensors below + update_shape = tf.shape(state)[state_slice] + + # Halting probabilities (p_t^n in the paper) + halting_probability = tf.zeros(update_shape, name="halting_probability") + + # Remainders (R(t) in the paper) + remainders = tf.zeros(update_shape, name="remainder") + + # Number of updates performed (N(t) in the paper) + n_updates = tf.zeros(update_shape, name="n_updates") + + # Previous cell states (s_t in the paper) + previous_state = tf.zeros_like(state, name="previous_state") + step = tf.constant(0, dtype=tf.int32) + + def ut_function(state, step, halting_probability, remainders, n_updates, + previous_state): + """implements act (position-wise halting). + + Args: + state: 3-D Tensor: [batch_size, length, channel] + step: indicates number of steps taken so far + halting_probability: halting probability + remainders: act remainders + n_updates: act n_updates + previous_state: previous state + + Returns: + transformed_state: transformed state + step: step+1 + halting_probability: halting probability + remainders: act remainders + n_updates: act n_updates + new_state: new state + """ + state = step_preprocess(state, step, hparams) + + if hparams.act_type == "random": + # random as halting probability + p = tf.random_uniform( + shape=common_layers.shape_list(halting_probability)) + else: + with tf.variable_scope("sigmoid_activation_for_pondering"): + p = common_layers.dense( + state, + 1, + activation=tf.nn.sigmoid, + use_bias=True, + bias_initializer=tf.constant_initializer( + hparams.act_halting_bias_init)) + + if hparams.act_type == "global": + # average over all positions (as a global halting prob) + p = tf.reduce_mean(p, axis=1) + p = tf.squeeze(p) + else: + # maintain position-wise probabilities + p = tf.squeeze(p, axis=-1) + + # Mask for inputs which have not halted yet + still_running = tf.cast(tf.less(halting_probability, 1.0), tf.float32) + + # Mask of inputs which halted at this step + new_halted = tf.cast( + tf.greater(halting_probability + p * still_running, threshold), + tf.float32) * still_running + + # Mask of inputs which haven't halted, and didn't halt this step + still_running = tf.cast( + tf.less_equal(halting_probability + p * still_running, threshold), + tf.float32) * still_running + + # Add the halting probability for this step to the halting + # probabilities for those input which haven't halted yet + halting_probability += p * still_running + + # Compute remainders for the inputs which halted at this step + remainders += new_halted * (1 - halting_probability) + + # Add the remainders to those inputs which halted at this step + halting_probability += new_halted * remainders + + # Increment n_updates for all inputs which are still running + n_updates += still_running + new_halted + + # Compute the weight to be applied to the new state and output + # 0 when the input has already halted + # p when the input hasn't halted yet + # the 
remainders when it halted this step + update_weights = tf.expand_dims( + p * still_running + new_halted * remainders, -1) + if hparams.act_type == "global": + update_weights = tf.expand_dims(update_weights, -1) + + # apply transformation on the state + transformed_state = state + for i in range(hparams.num_inrecurrence_layers): + with tf.variable_scope("rec_layer_%d" % i): + transformed_state = ffn_unit(attention_unit(transformed_state)) + + # update running part in the weighted state and keep the rest + new_state = ((transformed_state * update_weights) + + (previous_state * (1 - update_weights))) + + if hparams.act_type == "accumulated": + # Add in the weighted state + new_state = (transformed_state * update_weights) + previous_state + + # remind TensorFlow of everything's shape + transformed_state.set_shape(state_shape_static) + for x in [halting_probability, remainders, n_updates]: + x.set_shape(state_shape_static[state_slice]) + new_state.set_shape(state_shape_static) + step += 1 + return (transformed_state, step, halting_probability, remainders, n_updates, + new_state) + + # While loop stops when this predicate is FALSE. + # Ie all (probability < 1-eps AND counter < N) are false. + def should_continue(u0, u1, halting_probability, u2, n_updates, u3): + del u0, u1, u2, u3 + return tf.reduce_any( + tf.logical_and( + tf.less(halting_probability, threshold), + tf.less(n_updates, act_max_steps))) + + # Do while loop iterations until predicate above is false. + (_, _, _, remainder, n_updates, new_state) = tf.while_loop( + should_continue, ut_function, + (state, step, halting_probability, remainders, n_updates, previous_state), + maximum_iterations=act_max_steps + 1) + + ponder_times = n_updates + remainders = remainder + + contrib.summary().scalar("ponder_times", tf.reduce_mean(ponder_times)) + + return new_state, (ponder_times, remainders) + + +def _ffn_layer_multi_inputs(inputs_list, + hparams, + output_size=None, + ffn_layer_type="dense", + name="ffn", + kernel_initializer=None, + bias_initializer=None, + activation=None, + pad_remover=None, + preprocess=False, + postprocess=False): + """Implements a Feed-forward layer with multiple inputs, pad-removing, etc. + + Args: + inputs_list: list of input tensors + hparams: hyper-parameters + output_size: dimentionality of the output + ffn_layer_type: dense / dense_dropconnect/ dense_relu_dense + name: name + kernel_initializer: kernel initializer + bias_initializer: bias initializer + activation: activation function + pad_remover: pad remover + preprocess: if preprocess the input --> default: layer-norm + postprocess: if postprocess the output --> default: drop-out and residual + + Returns: + a tensor + Raises: + ValueError: Unknown ffn_layer type. 
+ + """ + + # need at least one inputs + num_inputs = len(inputs_list) + assert num_inputs > 0 + + if preprocess: + # In case of having more than one input to the ffn, + # we just apply layer norm on them independently as preprocessing + for i, inputs in enumerate(inputs_list): + inputs_list[i] = common_layers.layer_preprocess(inputs_list[i], hparams) + + # for the residual connection + if postprocess and num_inputs == 1: + original_inputs = inputs_list[0] + + # the output size is the hidden size of the main inputs + main_input = inputs_list[0] + original_shape = common_layers.shape_list(main_input) + assert hparams.hidden_size == common_layers.shape_list(main_input)[-1] + + # all the inputs are in the same shape with main inputs + for inputs in inputs_list: + main_input.get_shape().assert_is_compatible_with(inputs.get_shape()) + + def remove_pads(x): + original_shape = common_layers.shape_list(x) + # Collapse `x` across examples, and remove padding positions. + x = tf.reshape(x, tf.concat([[-1], original_shape[2:]], axis=0)) + x = tf.expand_dims(pad_remover.remove(x), axis=0) + return x + + if pad_remover: + for i, inputs in enumerate(inputs_list): + inputs_list[i] = remove_pads(inputs) + + ffn_inputs = inputs_list[0] + if len(inputs_list) != 1: + ffn_inputs = tf.concat(inputs_list, axis=-1) + + if ffn_layer_type == "dense": + output = common_layers.dense( + ffn_inputs, + hparams.hidden_size if output_size is None else output_size, + name=name, + activation=activation, + use_bias=True, + kernel_initializer=kernel_initializer, + bias_initializer=bias_initializer) + + elif ffn_layer_type == "dense_dropconnect": + output = common_layers.dense_dropconnect( + ffn_inputs, + hparams.hidden_size if output_size is None else output_size, + name=name, + dropconnect_dropout=hparams.dropconnect_dropout, + output_activation=activation) + postprocess = False # no dropout on the output unit + + elif ffn_layer_type == "dense_relu_dense": + output = common_layers.dense_relu_dense( + ffn_inputs, + hparams.filter_size, + hparams.hidden_size if output_size is None else output_size, + name=name, + dropout=hparams.relu_dropout, + output_activation=activation, + ) + + else: + raise ValueError("Unknown ffn_layer type: %s" % ffn_layer_type) + + if pad_remover: + # Restore `output` to the original shape of `x`, including padding. + output = tf.reshape( + pad_remover.restore(tf.squeeze(output, axis=0)), original_shape) + + if postprocess: + if num_inputs == 1: + output = common_layers.layer_postprocess(original_inputs, output, hparams) + else: # only dropout (no residual) + hp = copy.copy(hparams) + hp.layer_postprocess_sequence = hp.layer_postprocess_sequence.replace( + "a", "") + output = common_layers.layer_postprocess(None, output, hp) + + return output + + +def fill_memory_slot(memory, value, index): + """Fills the memory slot at a particular index with the given value. + + Args: + memory: a 4-d tensor [memory_size, batch, length, channel] containing + the state of all steps + value: a 3-d tensor [batch, length, channel] as the sate + index: integer in [0, memory_size) + + Returns: + filled memory + + """ + mask = tf.to_float( + tf.one_hot(index, + tf.shape(memory)[0])[:, None, None, None]) + fill_memory = (1 - mask) * memory + mask * value[None, ...] + return fill_memory + + +def add_depth_embedding(x): + """Add n-dimensional embedding as the depth embedding (timing signal). + + Adds embeddings to represent the position of the step in the recurrent + tower. 
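+  The embedding is a learned variable of shape [num_steps, 1, 1, depth],
+  scaled by sqrt(depth) and broadcast-added to the stack of per-step states.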
+ + Args: + x: a tensor with shape [max_step, batch, length, depth] + + Returns: + a Tensor the same shape as x. + """ + x_shape = common_layers.shape_list(x) + depth = x_shape[-1] + num_steps = x_shape[0] + shape = [num_steps, 1, 1, depth] + depth_embedding = ( + tf.get_variable( + "depth_embedding", + shape, + initializer=tf.random_normal_initializer(0, depth**-0.5)) * (depth** + 0.5)) + + x += depth_embedding + return x + + +def step_preprocess(x, step, hparams): + """Preprocess the input at the beginning of each step. + + Args: + x: input tensor + step: step + hparams: model hyper-parameters + + Returns: + preprocessed input. + + """ + original_channel_size = common_layers.shape_list(x)[-1] + + if hparams.add_position_timing_signal: + x = add_position_timing_signal(x, step, hparams) + + if hparams.add_step_timing_signal: + x = add_step_timing_signal(x, step, hparams) + + if ((hparams.add_position_timing_signal or hparams.add_position_timing_signal) + and hparams.add_or_concat_timing_signal == "concat"): + # linear projection to the original dimension of x + x = common_layers.dense( + x, original_channel_size, activation=None, use_bias=False) + + if hparams.add_sru: + x = common_layers.sru(x) + + return x + + +def add_position_timing_signal(x, step, hparams): + """Add n-dimensional embedding as the position (horizontal) timing signal. + + Args: + x: a tensor with shape [batch, length, depth] + step: step + hparams: model hyper parameters + + Returns: + a Tensor with the same shape as x. + + """ + + if not hparams.position_start_index: + index = 0 + + elif hparams.position_start_index == "random": + # Shift all positions randomly + # TODO(dehghani): What would be reasonable for max number of shift? + index = tf.random_uniform( + [], maxval=common_layers.shape_list(x)[1], dtype=tf.int32) + + elif hparams.position_start_index == "step": + # Shift positions based on the step + if hparams.recurrence_type == "act": + num_steps = hparams.act_max_steps + else: + num_steps = hparams.num_rec_steps + index = tf.cast( + common_layers.shape_list(x)[1] * step / num_steps, dtype=tf.int32) + + # No need for the timing signal in the encoder/decoder input preparation + assert hparams.pos is None + + length = common_layers.shape_list(x)[1] + channels = common_layers.shape_list(x)[2] + signal = common_attention.get_timing_signal_1d( + length, channels, start_index=index) + + if hparams.add_or_concat_timing_signal == "add": + x_with_timing = x + common_layers.cast_like(signal, x) + + elif hparams.add_or_concat_timing_signal == "concat": + batch_size = common_layers.shape_list(x)[0] + signal_tiled = tf.tile(signal, [batch_size, 1, 1]) + x_with_timing = tf.concat((x, signal_tiled), axis=-1) + + return x_with_timing + + +def add_step_timing_signal(x, step, hparams): + """Add n-dimensional embedding as the step (vertical) timing signal. + + Args: + x: a tensor with shape [batch, length, depth] + step: step + hparams: model hyper parameters + + Returns: + a Tensor with the same shape as x. 
+ + """ + if hparams.recurrence_type == "act": + num_steps = hparams.act_max_steps + else: + num_steps = hparams.num_rec_steps + channels = common_layers.shape_list(x)[-1] + + if hparams.step_timing_signal_type == "learned": + signal = common_attention.get_layer_timing_signal_learned_1d( + channels, step, num_steps) + + elif hparams.step_timing_signal_type == "sinusoid": + signal = common_attention.get_layer_timing_signal_sinusoid_1d( + channels, step, num_steps) + + if hparams.add_or_concat_timing_signal == "add": + x_with_timing = x + common_layers.cast_like(signal, x) + + elif hparams.add_or_concat_timing_signal == "concat": + batch_size = common_layers.shape_list(x)[0] + length = common_layers.shape_list(x)[1] + signal_tiled = tf.tile(signal, [batch_size, length, 1]) + x_with_timing = tf.concat((x, signal_tiled), axis=-1) + + return x_with_timing diff --git a/tensor2tensor/models/research/vqa_attention.py b/tensor2tensor/models/research/vqa_attention.py new file mode 100644 index 000000000..7a006e1e1 --- /dev/null +++ b/tensor2tensor/models/research/vqa_attention.py @@ -0,0 +1,591 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Attention models for VQA.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import vqa_layers +from tensor2tensor.utils import registry +# from tensor2tensor.utils import restore_hook +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +from tensorflow.contrib import rnn as contrib_rnn + +# pylint: disable=unused-import +from tensorflow.contrib.layers.python.layers import utils +from tensorflow.contrib.slim.python.slim.nets.resnet_v1 import resnet_v1_152 +from tensorflow.contrib.slim.python.slim.nets.resnet_v2 import resnet_v2_152 + + +@registry.register_model +class VqaAttentionBaseline(t2t_model.T2TModel): + """Attention baseline model for VQA.""" + + # @staticmethod + # def train_hooks(): + # restore_resnet_hook = restore_hook.RestoreHook( + # # TODO(zichaoy): hard code the path given static function. 
+ # checkpoint_path="/home/zichaoy/resnet_v1_152.ckpt", + # new_model_scope="vqa_attention_baseline/body/", + # old_model_scope="resnet_v1_152/", + # ) + # return [restore_resnet_hook] + + def body(self, features): + hp = self.hparams + model_fn = resnet_v1_152 + if hp.image_model_fn != "resnet_v1_152": + model_fn = eval(hp.image_model_fn) # pylint: disable=eval-used + if hp.image_input_type == "image": + image_feat = vqa_layers.image_embedding( + features["inputs"], + model_fn=model_fn, + trainable=hp.train_resnet, + is_training=hp.mode == tf_estimator.ModeKeys.TRAIN) + else: + image_feat = features["inputs"] + + if hp.image_feat_size: + image_feat = common_layers.dense(image_feat, hp.image_feat_size) + + # apply layer normalization and dropout on image_feature + utils.collect_named_outputs("norms", "image_feat_before_l2", + tf.norm(image_feat, axis=-1)) + image_feat = common_layers.l2_norm(image_feat) + utils.collect_named_outputs("norms", "image_feat_after_l2", + tf.norm(image_feat, axis=-1)) + + image_feat = tf.nn.dropout(image_feat, keep_prob=1.-hp.dropout) + + query = question_encoder(features["question"], hp) + utils.collect_named_outputs("norms", "query", + tf.norm(query, axis=-1)) + + image_ave = attn(image_feat, query, hp) + utils.collect_named_outputs("norms", "image_ave", + tf.norm(image_ave, axis=-1)) + + image_question = tf.concat([image_ave, query], axis=1) + utils.collect_named_outputs("norms", "image_question", + tf.norm(image_question, axis=-1)) + + image_question = tf.nn.dropout(image_question, 1. - hp.dropout) + + output = mlp(image_question, hp) + utils.collect_named_outputs("norms", "output", + tf.norm(output, axis=-1)) + + norm_tensors = utils.convert_collection_to_dict("norms") + vqa_layers.summarize_tensors(norm_tensors, tag="norms/") + + # Expand dimension 1 and 2 + return tf.expand_dims(tf.expand_dims(output, axis=1), axis=2) + + def infer(self, + features=None, + decode_length=1, + beam_size=1, + top_beams=1, + alpha=0.0, + use_tpu=False): + """Predict.""" + del decode_length, beam_size, top_beams, alpha, use_tpu + assert features is not None + logits, _ = self(features) + assert len(logits.get_shape()) == 5 + logits = tf.squeeze(logits, [1, 2, 3]) + log_probs = common_layers.log_prob_from_logits(logits) + predictions, scores = common_layers.argmax_with_score(log_probs) + return { + "outputs": predictions, + "scores": scores, + } + + +@registry.register_model +class VqaSimpleImageSelfAttention(VqaAttentionBaseline): + """Attention baseline model for VQA.""" + + def body(self, features): + hp = self.hparams + # pylint: disable=eval-used + if hp.image_input_type == "image": + image_feat = vqa_layers.image_embedding( + features["inputs"], + model_fn=eval(hp.image_model_fn), + trainable=hp.train_resnet, + is_training=hp.mode == tf_estimator.ModeKeys.TRAIN) + else: + image_feat = features["inputs"] + + image_feat = common_layers.flatten4d3d(image_feat) + # image feature self attention + # image_feat = tf.nn.dropout( + # image_feat, keep_prob=1.-hp.layer_prepostprocess_dropout) + + # image_feat = image_feat - tf.reduce_mean( + # image_feat, axis=-1, keepdims=True) + # image_feat = tf.nn.l2_normalize(image_feat, -1) + # utils.collect_named_outputs("norms", "image_feat_after_l2", + # tf.norm(image_feat, axis=-1)) + + image_feat = tf.nn.dropout(image_feat, keep_prob=1.-hp.dropout) + + image_feat = image_encoder(image_feat, hp) + utils.collect_named_outputs("norms", "image_feat_encoded", + tf.norm(image_feat, axis=-1)) + image_feat = common_layers.l2_norm(image_feat) + 
utils.collect_named_outputs("norms", "image_feat_encoded_l2", + tf.norm(image_feat, axis=-1)) + + query = question_encoder(features["question"], hp) + utils.collect_named_outputs("norms", "query", + tf.norm(query, axis=-1)) + + image_ave = attn(image_feat, query, hp) + utils.collect_named_outputs("norms", "image_ave", + tf.norm(image_ave, axis=-1)) + + image_question = tf.concat([image_ave, query], axis=1) + utils.collect_named_outputs("norms", "image_question", + tf.norm(image_question, axis=-1)) + + image_question = tf.nn.dropout(image_question, 1. - hp.dropout) + + output = mlp(image_question, hp) + utils.collect_named_outputs("norms", "output", + tf.norm(output, axis=-1)) + + norm_tensors = utils.convert_collection_to_dict("norms") + vqa_layers.summarize_tensors(norm_tensors, tag="norms/") + + # Expand dimension 1 and 2 + return tf.expand_dims(tf.expand_dims(output, axis=1), axis=2) + + +def image_encoder(image_feat, + hparams, + name="image_encoder", + save_weights_to=None, + make_image_summary=True): + """A stack of self attention layers.""" + + x = image_feat + with tf.variable_scope(name): + for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): + with tf.variable_scope("layer_%d" % layer): + with tf.variable_scope("self_attention"): + y = vqa_layers.multihead_attention( + common_layers.layer_preprocess(x, hparams), + None, + None, + hparams.attention_key_channels or hparams.image_hidden_size, + hparams.attention_value_channels or hparams.image_hidden_size, + hparams.image_hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + save_weights_to=save_weights_to, + max_relative_position=None, + make_image_summary=make_image_summary, + dropout_broadcast_dims=None, + max_length=None, + vars_3d=False, + scale_otproduct=hparams.scale_dotproduct) + utils.collect_named_outputs("norms", "image_feat_self_attention", + tf.norm(y, axis=-1)) + x = common_layers.layer_postprocess(x, y, hparams) + utils.collect_named_outputs( + "norms", "image_feat_self_attention_zero_add", + tf.norm(x, axis=-1)) + with tf.variable_scope("ffn"): + y = common_layers.dense_relu_dense( + common_layers.layer_preprocess(x, hparams), + hparams.image_filter_size, + hparams.image_hidden_size, + dropout=hparams.relu_dropout, + dropout_broadcast_dims=None) + utils.collect_named_outputs("norms", "image_feat_ffn", + tf.norm(y, axis=-1)) + x = common_layers.layer_postprocess(x, y, hparams) + utils.collect_named_outputs("norms", "image_feat_ffn_zero_add", + tf.norm(x, axis=-1)) + # if normalization is done in layer_preprocess, then it should also be done + # on the output, since the output can grow very large, being the sum of + # a whole stack of unnormalized layer outputs. 
+ return common_layers.layer_preprocess(x, hparams) + + +def _get_rnn_cell(hparams): + if hparams.rnn_type == "lstm": + rnn_cell = tf.nn.rnn_cell.BasicLSTMCell + elif hparams.rnn_type == "lstm_layernorm": + rnn_cell = contrib_rnn.LayerNormBasicLSTMCell + return tf.nn.rnn_cell.DropoutWrapper( + rnn_cell(hparams.hidden_size), + output_keep_prob=1.0-hparams.dropout) + + +def question_encoder(question, hparams, name="encoder"): + """Question encoder, run LSTM encoder and get the last output as encoding.""" + with tf.variable_scope(name, "encoder", values=[question]): + question = common_layers.flatten4d3d(question) + padding = common_attention.embedding_to_padding(question) + length = common_attention.padding_to_length(padding) + + max_question_length = hparams.max_question_length + question = question[:, :max_question_length, :] + actual_question_length = common_layers.shape_list(question)[1] + length = tf.minimum(length, max_question_length) + padding = [[0, 0], + [0, max_question_length-actual_question_length], + [0, 0]] + question = tf.pad(question, padding) + question_shape = question.get_shape().as_list() + question_shape[1] = max_question_length + question.set_shape(question_shape) + + # apply tanh dropout on question embedding + question = tf.tanh(question) + question = tf.nn.dropout(question, keep_prob=1.-hparams.dropout) + + question = [question[:, i, :] for i in range(max_question_length)] + + # rnn_layers = [_get_rnn_cell(hparams) + # for _ in range(hparams.num_rnn_layers)] + # rnn_multi_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers) + rnn_cell = _get_rnn_cell(hparams) + # outputs, _ = tf.nn.dynamic_rnn( + # rnn_cell, question, length, dtype=tf.float32) + _, state = tf.nn.static_rnn(rnn_cell, question, sequence_length=length, + dtype=tf.float32) + # outputs = [tf.expand_dims(output, axis=1) for output in outputs] + # outputs = tf.concat(outputs, axis=1) + + # utils.collect_named_outputs("vqa_attention_debug", "question_output", + # outputs) + # utils.collect_named_outputs("vqa_attention_debug", "question_state", + # state.h) + + # batch_size = common_layers.shape_list(outputs)[0] + # row_indices = tf.range(batch_size) + # # length - 1 as index + # indices = tf.transpose([row_indices, tf.maximum(length-1, 0)]) + # last_output = tf.gather_nd(outputs, indices) + + # utils.collect_named_outputs("vqa_attention_debug", + # "question_final_output", last_output) + + return state.h + + +def attn(image_feat, query, hparams, name="attn"): + """Attention on image feature with question as query.""" + with tf.variable_scope(name, "attn", values=[image_feat, query]): + attn_dim = hparams.attn_dim + num_glimps = hparams.num_glimps + num_channels = common_layers.shape_list(image_feat)[-1] + if len(common_layers.shape_list(image_feat)) == 4: + image_feat = common_layers.flatten4d3d(image_feat) + query = tf.expand_dims(query, 1) + image_proj = common_attention.compute_attention_component( + image_feat, attn_dim, name="image_proj") + query_proj = common_attention.compute_attention_component( + query, attn_dim, name="query_proj") + h = tf.nn.relu(image_proj + query_proj) + h_proj = common_attention.compute_attention_component( + h, num_glimps, name="h_proj") + p = tf.nn.softmax(h_proj, axis=1) + image_ave = tf.matmul(image_feat, p, transpose_a=True) + image_ave = tf.reshape(image_ave, [-1, num_channels*num_glimps]) + + return image_ave + + +def mlp(feature, hparams, name="mlp"): + """Multi layer perceptron with dropout and relu activation.""" + with tf.variable_scope(name, "mlp", values=[feature]): + 
num_mlp_layers = hparams.num_mlp_layers
+    mlp_dim = hparams.mlp_dim
+    for _ in range(num_mlp_layers):
+      feature = common_layers.dense(feature, mlp_dim, activation=tf.nn.relu)
+      feature = tf.nn.dropout(feature, keep_prob=1.-hparams.dropout)
+    return feature
+
+
+@registry.register_hparams
+def vqa_attention_base():
+  """VQA attention baseline hparams."""
+  hparams = common_hparams.basic_params1()
+  hparams.batch_size = 128
+  hparams.use_fixed_batch_size = True
+  hparams.optimizer = "adam"
+  hparams.optimizer_adam_beta1 = 0.9
+  hparams.optimizer_adam_beta2 = 0.999
+  hparams.optimizer_adam_epsilon = 1e-8
+  hparams.weight_decay = 0.
+  hparams.clip_grad_norm = 0.
+  hparams.initializer = "xavier"
+  hparams.learning_rate = 0.5
+  hparams.learning_rate_schedule = "legacy"
+  hparams.learning_rate_warmup_steps = 0
+  hparams.learning_rate_decay_scheme = "exp"
+  hparams.learning_rate_decay_rate = 0.5
+  hparams.learning_rate_decay_steps = 50000
+  hparams.dropout = 0.5
+  hparams.summarize_grads = True
+  hparams.summarize_vars = True
+
+  # not used hparams
+  hparams.label_smoothing = 0.
+  hparams.multiply_embedding_mode = ""
+
+  # add new hparams
+  # preprocess
+  hparams.add_hparam("resize_side", 512)
+  hparams.add_hparam("height", 448)
+  hparams.add_hparam("width", 448)
+  hparams.add_hparam("distort", True)
+
+  hparams.add_hparam("train_resnet", False)
+  hparams.add_hparam("rnn_type", "lstm")
+  hparams.add_hparam("num_rnn_layers", 1)
+  hparams.add_hparam("max_question_length", 15)
+  # lstm hidden size
+  hparams.hidden_size = 512
+
+  hparams.add_hparam("attn_dim", 512)
+  hparams.add_hparam("num_glimps", 2)
+
+  hparams.add_hparam("num_mlp_layers", 1)
+  hparams.add_hparam("mlp_dim", 1024)
+
+  hparams.add_hparam("image_input_type", "image")
+  hparams.add_hparam("image_model_fn", "resnet_v1_152")
+  hparams.add_hparam("image_feat_size", 0)
+
+  # self attention parts
+  hparams.norm_type = "layer"
+  hparams.layer_preprocess_sequence = "n"
+  hparams.layer_postprocess_sequence = "da"
+  hparams.layer_prepostprocess_dropout = 0.3
+  hparams.attention_dropout = 0.1
+  hparams.relu_dropout = 0.1
+  hparams.image_hidden_size = 2048
+  hparams.add_hparam("num_encoder_layers", 1)
+  # Attention-related flags.
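+  # Note: a value of 0 for the key/value channel hparams below means
+  # "fall back to image_hidden_size" inside image_encoder.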
+ hparams.add_hparam("num_heads", 8) + hparams.add_hparam("attention_key_channels", 0) + hparams.add_hparam("attention_value_channels", 0) + hparams.add_hparam("image_filter_size", 1024) + hparams.add_hparam("self_attention_type", "dot_product") + hparams.add_hparam("scale_dotproduct", True) + + return hparams + + +@registry.register_hparams +def vqa_attention_feature_base(): + hparams = vqa_attention_base() + hparams.image_input_type = "feature" + return hparams + + +@registry.register_hparams +def vqa_attention_feature_lstmlayernorm(): + hparams = vqa_attention_feature_base() + hparams.rnn_type = "lstm_layernorm" + return hparams + + +@registry.register_hparams +def vqa_attention_feature_initializer(): + hparams = vqa_attention_feature_base() + hparams.initializer = "uniform_unit_scaling" + hparams.initializer_gain = 1.0 + return hparams + + +@registry.register_hparams +def vqa_attention_feature_batch512(): + hparams = vqa_attention_feature_base() + hparams.batch_size = 512 + return hparams + + +@registry.register_hparams +def vqa_attention_feature_hidden1024(): + hparams = vqa_attention_feature_base() + hparams.hidden_size = 1024 + return hparams + + +@registry.register_hparams +def vqa_attention_feature_imagefeat512(): + hparams = vqa_attention_feature_base() + hparams.image_feat_size = 512 + return hparams + + +@registry.register_hparams +def vqa_attention_feature_imagefeat1024(): + hparams = vqa_attention_feature_base() + hparams.image_feat_size = 1024 + return hparams + + +@registry.register_hparams +def vqa_attention_feature_batch1024_lstmlayernorm(): + hparams = vqa_attention_feature_lstmlayernorm() + hparams.batch_size = 1024 + return hparams + + +@registry.register_hparams +def vqa_attention_numglimps1(): + hparams = vqa_attention_base() + hparams.num_glimps = 1 + return hparams + + +@registry.register_hparams +def vqa_attention_feature_numglimps1(): + hparams = vqa_attention_feature_base() + hparams.num_glimps = 1 + return hparams + + +@registry.register_hparams +def vqa_attention_feature_batch1024_numglimps1(): + hparams = vqa_attention_feature_numglimps1() + hparams.batch_size = 1024 + return hparams + + +@registry.register_hparams +def vqa_attention_feature_batch1024(): + hparams = vqa_attention_feature_base() + hparams.batch_size = 1024 + return hparams + + +@registry.register_hparams +def vqa_attention_feature_batch1024_dnz(): + hparams = vqa_attention_feature_batch1024() + hparams.layer_preprocess_sequence = "" + hparams.layer_postprocess_sequence = "dnz" + return hparams + + +@registry.register_hparams +def vqa_attention_feature_batch1024_dnz_l2(): + hparams = vqa_attention_feature_batch1024_dnz() + hparams.norm_type = "l2" + return hparams + + +@registry.register_hparams +def vqa_attention_feature_dnz(): + hparams = vqa_attention_feature_base() + hparams.layer_preprocess_sequence = "" + hparams.layer_postprocess_sequence = "dnz" + return hparams + + +@registry.register_hparams +def vqa_attention_feature_dna(): + hparams = vqa_attention_feature_base() + hparams.layer_preprocess_sequence = "" + hparams.layer_postprocess_sequence = "dna" + return hparams + + +@registry.register_hparams +def vqa_attention_feature_dnz_noscaledp(): + hparams = vqa_attention_feature_dnz() + hparams.scale_dotproduct = False + return hparams + + +@registry.register_hparams +def vqa_attention_feature_dnz_l2(): + hparams = vqa_attention_feature_dnz() + hparams.norm_type = "l2" + return hparams + + +@registry.register_hparams +def vqa_attention_feature_batch1024_dnz_noscaledp(): + hparams = 
vqa_attention_feature_batch1024_dnz() + hparams.scale_dotproduct = False + return hparams + + +@registry.register_hparams +def vqa_attention_feature_batch1024_drop01(): + hparams = vqa_attention_feature_batch1024() + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def vqa_attention_feature_batch1024_drop01_dna(): + hparams = vqa_attention_feature_batch1024_drop01() + hparams.layer_preprocess_sequence = "" + hparams.layer_postprocess_sequence = "dna" + return hparams + + +@registry.register_hparams +def vqa_attention_drop01_dna(): + hparams = vqa_attention_feature_batch1024_drop01_dna() + hparams.batch_size = 128 + hparams.image_input_type = "image" + return hparams + + +@registry.register_hparams +def vqa_attention_feature_batch1024_drop01_dna_concat(): + hparams = vqa_attention_feature_batch1024_drop01() + hparams.layer_preprocess_sequence = "" + hparams.layer_postprocess_sequence = "dna" + hparams.num_glimps = 1 + return hparams + + +@registry.register_hparams +def vqa_attention_feature_nonormalization(): + hparams = vqa_attention_feature_base() + hparams.layer_preprocess_sequence = "" + return hparams + + +@registry.register_ranged_hparams +def vqa_attention_base_range(rhp): + """Small range of hyperparameters.""" + # After starting from base, set intervals for some parameters. + rhp.set_float("learning_rate", 0.1, 1.0, scale=rhp.LOG_SCALE) + rhp.set_float("clip_grad_norm", 0.1, 10, scale=rhp.LOG_SCALE) + rhp.set_discrete("batch_size", [128, 256, 512, 1024]) + rhp.set_float("weight_decay", 0.0, 1e-4) + rhp.set_categorical("rnn_type", ["lstm", "lstm_layernorm"]) diff --git a/tensor2tensor/models/research/vqa_attention_test.py b/tensor2tensor/models/research/vqa_attention_test.py new file mode 100644 index 000000000..87646fdd3 --- /dev/null +++ b/tensor2tensor/models/research/vqa_attention_test.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Vqa_attention_baseline tests.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.layers import modalities +from tensor2tensor.models.research import vqa_attention + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class VqaAttentionBaselineTest(tf.test.TestCase): + + def testVqaAttentionBaseline(self): + + batch_size = 3 + image_size = 448 + vocab_size = 100 + num_classes = 10 + question_length = 5 + answer_length = 10 + x = 2 * np.random.rand(batch_size, image_size, image_size, 3) - 1 + q = np.random.randint( + 1, high=vocab_size, size=(batch_size, question_length, 1, 1)) + a = np.random.randint( + num_classes + 1, size=(batch_size, answer_length, 1, 1)) + hparams = vqa_attention.vqa_attention_base() + p_hparams = problem_hparams.test_problem_hparams(vocab_size, + num_classes + 1, + hparams) + p_hparams.modality["inputs"] = modalities.ModalityType.IMAGE + p_hparams.modality["targets"] = modalities.ModalityType.MULTI_LABEL + p_hparams.modality["question"] = modalities.ModalityType.SYMBOL + p_hparams.vocab_size["question"] = vocab_size + with self.test_session() as session: + features = { + "inputs": tf.constant(x, dtype=tf.float32), + "question": tf.constant(q, dtype=tf.int32), + "targets": tf.constant(a, dtype=tf.int32), + } + model = vqa_attention.VqaAttentionBaseline( + hparams, tf_estimator.ModeKeys.TRAIN, p_hparams) + logits, losses = model(features) + session.run(tf.global_variables_initializer()) + logits_, losses_ = session.run([logits, losses]) + + self.assertEqual(logits_.shape, (batch_size, 1, 1, 1, num_classes + 1)) + self.assertEqual(losses_["training"].shape, ()) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/research/vqa_recurrent_self_attention.py b/tensor2tensor/models/research/vqa_recurrent_self_attention.py new file mode 100644 index 000000000..175e76d15 --- /dev/null +++ b/tensor2tensor/models/research/vqa_recurrent_self_attention.py @@ -0,0 +1,318 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Recurrent self attention models for VQA.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import vqa_layers +from tensor2tensor.models.research import universal_transformer +from tensor2tensor.models.research import universal_transformer_util +from tensor2tensor.models.research import vqa_attention +from tensor2tensor.utils import registry +# from tensor2tensor.utils import restore_hook + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +from tensorflow.contrib.layers.python.layers import utils + + +@registry.register_model +class VqaRecurrentSelfAttention(vqa_attention.VqaAttentionBaseline): + """Recurrent Self attention both on image and question.""" + + # @staticmethod + # def train_hooks(): + # restore_resnet_hook = restore_hook.RestoreHook( + # # TODO(zichaoy): hard code the path given static function. + # checkpoint_path="/home/zichaoy/resnet_v1_152.ckpt", + # new_model_scope="vqa_recurrent_self_attention/body/", + # old_model_scope="resnet_v1_152/", + # ) + # return [restore_resnet_hook] + + def body(self, features): + hp = self.hparams + # pylint: disable=eval-used + if hp.image_input_type == "image": + image_feat = vqa_layers.image_embedding( + features["inputs"], + model_fn=eval(hp.image_model_fn), + trainable=hp.train_resnet, + is_training=hp.mode == tf_estimator.ModeKeys.TRAIN) + else: + image_feat = features["inputs"] + + image_feat = common_layers.flatten4d3d(image_feat) + image_feat = common_layers.dense(image_feat, hp.hidden_size) + utils.collect_named_outputs("norms", "image_feat_after_proj", + tf.norm(image_feat, axis=-1)) + + question = common_layers.flatten4d3d(features["question"]) + utils.collect_named_outputs("norms", "question_embedding", + tf.norm(question, axis=-1)) + (encoder_input, encoder_self_attention_bias, + encoder_decoder_attention_bias) = prepare_image_question_encoder( + image_feat, question, hp) + + encoder_input = tf.nn.dropout( + encoder_input, keep_prob=1.-hp.layer_prepostprocess_dropout) + + encoder_output, _ = recurrent_transformer_decoder( + encoder_input, None, encoder_self_attention_bias, None, + hp, name="encoder") + utils.collect_named_outputs( + "norms", "encoder_output", tf.norm(encoder_output, axis=-1)) + + # scale query by sqrt(hidden_size) + query = tf.get_variable("query", [hp.hidden_size]) * hp.hidden_size **0.5 + query = tf.expand_dims(tf.expand_dims(query, axis=0), axis=0) + batch_size = common_layers.shape_list(encoder_input)[0] + query = tf.tile(query, [batch_size, 1, 1]) + query = tf.nn.dropout( + query, keep_prob=1.-hp.layer_prepostprocess_dropout) + + decoder_output, _ = recurrent_transformer_decoder( + query, encoder_output, None, encoder_decoder_attention_bias, + hp, name="decoder") + utils.collect_named_outputs("norms", "decoder_output", + tf.norm(decoder_output, axis=-1)) + + norm_tensors = utils.convert_collection_to_dict("norms") + vqa_layers.summarize_tensors(norm_tensors, tag="norms/") + + # Expand dimension 1 and 2 + return tf.expand_dims(decoder_output, axis=1) + + +def prepare_image_question_encoder(image_feat, question, hparams): + """Prepare encoder. + + Args: + image_feat: a Tensor. + question: a Tensor. 
+ hparams: run hyperparameters + + Returns: + encoder_input: a Tensor, bottom of encoder stack + encoder_self_attention_bias: a bias tensor for use in encoder self-attention + """ + + encoder_input = tf.concat([image_feat, question], axis=1) + encoder_padding = common_attention.embedding_to_padding(encoder_input) + ignore_padding = common_attention.attention_bias_ignore_padding( + encoder_padding) + encoder_self_attention_bias = ignore_padding + encoder_decoder_attention_bias = ignore_padding + # Usual case - not a packed dataset. + if hparams.pos == "timing": + question = common_attention.add_timing_signal_1d(question) + elif hparams.pos == "emb": + question = common_attention.add_positional_embedding( + question, hparams.max_length, "inputs_positional_embedding", + None) + encoder_input = tf.concat([image_feat, question], axis=1) + + return (encoder_input, encoder_self_attention_bias, + encoder_decoder_attention_bias) + + +def recurrent_transformer_decoder( + decoder_input, + encoder_output, + decoder_self_attention_bias, + encoder_decoder_attention_bias, + hparams, + name="decoder", + nonpadding=None, + save_weights_to=None, + make_image_summary=True): + """Recurrent decoder function.""" + x = decoder_input + attention_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "attention_dropout_broadcast_dims", ""))) + with tf.variable_scope(name): + ffn_unit = functools.partial( + # use encoder ffn, since decoder ffn use left padding + universal_transformer_util.transformer_encoder_ffn_unit, + hparams=hparams, + nonpadding_mask=nonpadding) + + attention_unit = functools.partial( + universal_transformer_util.transformer_decoder_attention_unit, + hparams=hparams, + encoder_output=encoder_output, + decoder_self_attention_bias=decoder_self_attention_bias, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + attention_dropout_broadcast_dims=attention_dropout_broadcast_dims, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary) + + x, extra_output = universal_transformer_util.universal_transformer_layer( + x, hparams, ffn_unit, attention_unit) + + return common_layers.layer_preprocess(x, hparams), extra_output + + +@registry.register_hparams +def vqa_recurrent_self_attention_base(): + """VQA attention baseline hparams.""" + hparams = universal_transformer.universal_transformer_base() + hparams.batch_size = 1024 + hparams.use_fixed_batch_size = True + hparams.weight_decay = 0. + hparams.clip_grad_norm = 0. 
+ # use default initializer + # hparams.initializer = "xavier" + hparams.learning_rate_schedule = ( + "constant*linear_warmup*rsqrt_normalized_decay") + hparams.learning_rate_warmup_steps = 8000 + hparams.learning_rate_constant = 7e-4 + hparams.learning_rate_decay_rate = 0.5 + hparams.learning_rate_decay_steps = 50000 + # hparams.dropout = 0.5 + hparams.summarize_grads = True + hparams.summarize_vars = True + + # not used hparams + hparams.label_smoothing = 0.1 + hparams.multiply_embedding_mode = "sqrt_depth" + + # add new hparams + # use raw image as input + hparams.add_hparam("image_input_type", "feature") + hparams.add_hparam("image_model_fn", "resnet_v1_152") + hparams.add_hparam("resize_side", 512) + hparams.add_hparam("height", 448) + hparams.add_hparam("width", 448) + hparams.add_hparam("distort", True) + hparams.add_hparam("train_resnet", False) + + # question hidden size + # hparams.hidden_size = 512 + # hparams.filter_size = 1024 + # hparams.num_hidden_layers = 4 + + # self attention parts + # hparams.norm_type = "layer" + # hparams.layer_preprocess_sequence = "n" + # hparams.layer_postprocess_sequence = "da" + # hparams.layer_prepostprocess_dropout = 0.1 + # hparams.attention_dropout = 0.1 + # hparams.relu_dropout = 0.1 + # hparams.add_hparam("pos", "timing") + # hparams.add_hparam("num_encoder_layers", 0) + # hparams.add_hparam("num_decoder_layers", 0) + # hparams.add_hparam("num_heads", 8) + # hparams.add_hparam("attention_key_channels", 0) + # hparams.add_hparam("attention_value_channels", 0) + # hparams.add_hparam("self_attention_type", "dot_product") + + # iterative part + hparams.transformer_ffn_type = "fc" + + return hparams + + +@registry.register_hparams +def vqa_recurrent_self_attention_small(): + hparams = vqa_recurrent_self_attention_base() + hparams.learning_rate_constant = 1e-3 + hparams.hidden_size = 512 + hparams.filter_size = 2048 + hparams.num_heads = 8 + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def vqa_recurrent_self_attention_big(): + hparams = vqa_recurrent_self_attention_base() + hparams.learning_rate_constant = 5e-4 + hparams.hidden_size = 2048 + hparams.filter_size = 8192 + return hparams + + +@registry.register_hparams +def vqa_recurrent_self_attention_big_l4(): + hparams = vqa_recurrent_self_attention_big() + hparams.num_rec_steps = 4 + return hparams + + +@registry.register_hparams +def vqa_recurrent_self_attention_highway(): + hparams = vqa_recurrent_self_attention_base() + hparams.recurrence_type = "highway" + return hparams + + +@registry.register_hparams +def vqa_recurrent_self_attention_gru(): + hparams = vqa_recurrent_self_attention_base() + hparams.recurrence_type = "gru" + return hparams + + +@registry.register_hparams +def vqa_recurrent_self_attention_l8(): + hparams = vqa_recurrent_self_attention_base() + hparams.num_rec_steps = 8 + return hparams + + +@registry.register_hparams +def vqa_recurrent_self_attention_mix_before_ut(): + hparams = vqa_recurrent_self_attention_base() + hparams.mix_with_transformer = "before_ut" + return hparams + + +@registry.register_hparams +def vqa_recurrent_self_attention_l4(): + hparams = vqa_recurrent_self_attention_base() + hparams.num_rec_steps = 4 + return hparams + + +@registry.register_hparams +def vqa_recurrent_self_attention_ls2(): + hparams = vqa_recurrent_self_attention_base() + hparams.label_smoothing = 0.2 + return hparams + + +@registry.register_hparams +def vqa_recurrent_self_attention_drop1(): + hparams = vqa_recurrent_self_attention_base() + 
hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def vqa_recurrent_self_attention_drop3(): + hparams = vqa_recurrent_self_attention_base() + hparams.relu_dropout = 0.3 + hparams.attention_dropout = 0.3 + return hparams diff --git a/tensor2tensor/models/research/vqa_self_attention.py b/tensor2tensor/models/research/vqa_self_attention.py new file mode 100644 index 000000000..eb1b86948 --- /dev/null +++ b/tensor2tensor/models/research/vqa_self_attention.py @@ -0,0 +1,827 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Self attention models for VQA.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import vqa_layers +from tensor2tensor.models.research import vqa_attention +from tensor2tensor.utils import registry +# from tensor2tensor.utils import restore_hook + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +from tensorflow.contrib.layers.python.layers import utils + + +@registry.register_model +class VqaSelfAttention(vqa_attention.VqaAttentionBaseline): + """Self attention both on image and question.""" + + # @staticmethod + # def train_hooks(): + # restore_resnet_hook = restore_hook.RestoreHook( + # # TODO(zichaoy): hard code the path given static function. 
+ # checkpoint_path="/home/zichaoy/resnet_v1_152.ckpt", + # new_model_scope="vqa_self_attention/body/", + # old_model_scope="resnet_v1_152/", + # ) + # return [restore_resnet_hook] + + def body(self, features): + hp = self.hparams + # pylint: disable=eval-used + if hp.image_input_type == "image": + image_feat = vqa_layers.image_embedding( + features["inputs"], + model_fn=eval(hp.image_model_fn), + trainable=hp.train_resnet, + is_training=hp.mode == tf_estimator.ModeKeys.TRAIN) + else: + image_feat = features["inputs"] + + image_feat = common_layers.flatten4d3d(image_feat) + image_hidden_size = hp.image_hidden_size or hp.hidden_size + if hp.image_feat_preprocess_proj: + image_feat = common_layers.dense(image_feat, image_hidden_size) + utils.collect_named_outputs("norms", "image_feat_after_proj", + tf.norm(image_feat, axis=-1)) + else: + assert image_hidden_size == 2048 + + image_feat = tf.nn.dropout( + image_feat, keep_prob=1.-hp.layer_prepostprocess_dropout) + + if hp.image_feat_encode: + image_feat = image_encoder(image_feat, hp) + utils.collect_named_outputs("norms", "image_feat_encoded", + tf.norm(image_feat, axis=-1)) + else: + image_feat = common_layers.layer_norm(image_feat) + utils.collect_named_outputs("norms", "image_feat_after_layer", + tf.norm(image_feat, axis=-1)) + + question = common_layers.flatten4d3d(features["question"]) + utils.collect_named_outputs("norms", "question_embedding", + tf.norm(question, axis=-1)) + question, question_self_attention_bias = prepare_question_encoder( + question, hp) + question = tf.nn.dropout( + question, keep_prob=1.-hp.layer_prepostprocess_dropout) + query = question_encoder(question, question_self_attention_bias, hp) + utils.collect_named_outputs( + "norms", "query_encode", tf.norm(query, axis=-1)) + query = (query + tf.expand_dims( + tf.squeeze(question_self_attention_bias, [1, 2]), axis=2)) + query = tf.reduce_max(query, axis=1) + utils.collect_named_outputs( + "norms", "query_maxpool", tf.norm(query, axis=-1)) + + # query = common_layers.l2_norm(query) + # utils.collect_named_outputs("norms", "query_after_l2", + # tf.norm(query, axis=-1)) + + image_ave = attn(image_feat, query, hp) + utils.collect_named_outputs("norms", "image_ave", + tf.norm(image_ave, axis=-1)) + + if hp.multimodal_combine == "concat": + image_question = tf.concat([image_ave, query], axis=1) + elif hp.multimodal_combine == "sum": + image_question = image_ave + query + elif hp.multimodal_combine == "product": + image_question = image_ave * query + + utils.collect_named_outputs("norms", "image_question", + tf.norm(image_question, axis=-1)) + + image_question = tf.nn.dropout(image_question, 1. - hp.dropout) + + output = mlp(image_question, hp) + utils.collect_named_outputs("norms", "output", + tf.norm(output, axis=-1)) + + norm_tensors = utils.convert_collection_to_dict("norms") + vqa_layers.summarize_tensors(norm_tensors, tag="norms/") + + # Expand dimension 1 and 2 + return tf.expand_dims(tf.expand_dims(output, axis=1), axis=2) + + +@registry.register_model +class VqaCombinedSelfAttention(VqaSelfAttention): + """Combined Self attention both on image and question.""" + + # @staticmethod + # def train_hooks(): + # restore_resnet_hook = restore_hook.RestoreHook( + # # TODO(zichaoy): hard code the path given static function. 
+ # checkpoint_path="/home/zichaoy/resnet_v1_152.ckpt", + # new_model_scope="vqa_combined_self_attention/body/", + # old_model_scope="resnet_v1_152/", + # ) + # return [restore_resnet_hook] + + def body(self, features): + hp = self.hparams + # pylint: disable=eval-used + if hp.image_input_type == "image": + image_feat = vqa_layers.image_embedding( + features["inputs"], + model_fn=eval(hp.image_model_fn), + trainable=hp.train_resnet, + is_training=hp.mode == tf_estimator.ModeKeys.TRAIN) + else: + image_feat = features["inputs"] + + image_feat = common_layers.flatten4d3d(image_feat) + image_hidden_size = hp.hidden_size + image_feat = common_layers.dense(image_feat, image_hidden_size) + utils.collect_named_outputs("norms", "image_feat_after_proj", + tf.norm(image_feat, axis=-1)) + + question = common_layers.flatten4d3d(features["question"]) + utils.collect_named_outputs("norms", "question_embedding", + tf.norm(question, axis=-1)) + (encoder_input, encoder_self_attention_bias, + encoder_decoder_attention_bias) = prepare_image_question_encoder( + image_feat, question, hp) + encoder_input = tf.nn.dropout( + encoder_input, keep_prob=1.-hp.layer_prepostprocess_dropout) + encoder_output = image_question_encoder( + encoder_input, encoder_self_attention_bias, hp) + utils.collect_named_outputs( + "norms", "encoder_output", tf.norm(encoder_output, axis=-1)) + + # scale query by sqrt(hidden_size) + query = tf.get_variable("query", [hp.hidden_size]) * hp.hidden_size **0.5 + query = tf.expand_dims(tf.expand_dims(query, axis=0), axis=0) + batch_size = common_layers.shape_list(encoder_input)[0] + query = tf.tile(query, [batch_size, 1, 1]) + query = tf.nn.dropout( + query, keep_prob=1.-hp.layer_prepostprocess_dropout) + + decoder_output = decoder( + query, encoder_output, None, encoder_decoder_attention_bias, hp) + utils.collect_named_outputs("norms", "decoder_output", + tf.norm(decoder_output, axis=-1)) + + norm_tensors = utils.convert_collection_to_dict("norms") + vqa_layers.summarize_tensors(norm_tensors, tag="norms/") + + # Expand dimension 1 and 2 + return tf.expand_dims(decoder_output, axis=1) + + +@registry.register_model +class VqaIterativeCombinedSelfAttention(VqaSelfAttention): + """Combined Self attention both on image and question.""" + + # @staticmethod + # def train_hooks(): + # restore_resnet_hook = restore_hook.RestoreHook( + # # TODO(zichaoy): hard code the path given static function. 
+ # checkpoint_path="/home/zichaoy/resnet_v1_152.ckpt", + # new_model_scope="vqa_combined_self_attention/body/", + # old_model_scope="resnet_v1_152/", + # ) + # return [restore_resnet_hook] + + def body(self, features): + hp = self.hparams + # pylint: disable=eval-used + if hp.image_input_type == "image": + image_feat = vqa_layers.image_embedding( + features["inputs"], + model_fn=eval(hp.image_model_fn), + trainable=hp.train_resnet, + is_training=hp.mode == tf_estimator.ModeKeys.TRAIN) + else: + image_feat = features["inputs"] + + image_feat = common_layers.flatten4d3d(image_feat) + image_hidden_size = hp.hidden_size + image_feat = common_layers.dense(image_feat, image_hidden_size) + utils.collect_named_outputs("norms", "image_feat_after_proj", + tf.norm(image_feat, axis=-1)) + + question = common_layers.flatten4d3d(features["question"]) + utils.collect_named_outputs("norms", "question_embedding", + tf.norm(question, axis=-1)) + (encoder_input, encoder_self_attention_bias, + encoder_decoder_attention_bias) = prepare_image_question_encoder( + image_feat, question, hp) + encoder_input = tf.nn.dropout( + encoder_input, keep_prob=1.-hp.layer_prepostprocess_dropout) + + # scale query by sqrt(hidden_size) + query = tf.get_variable("query", [hp.hidden_size]) * hp.hidden_size **0.5 + query = tf.expand_dims(tf.expand_dims(query, axis=0), axis=0) + batch_size = common_layers.shape_list(encoder_input)[0] + query = tf.tile(query, [batch_size, 1, 1]) + query = tf.nn.dropout( + query, keep_prob=1.-hp.layer_prepostprocess_dropout) + + decoder_output = iterative_encoder_decoder( + encoder_input, + encoder_self_attention_bias, + encoder_decoder_attention_bias, + query, + hp) + + utils.collect_named_outputs("norms", "decoder_output", + tf.norm(decoder_output, axis=-1)) + + norm_tensors = utils.convert_collection_to_dict("norms") + vqa_layers.summarize_tensors(norm_tensors, tag="norms/") + + # Expand dimension 1 and 2 + return tf.expand_dims(decoder_output, axis=1) + + +def image_encoder(image_feat, + hparams, + name="image_encoder", + save_weights_to=None, + make_image_summary=True): + """A stack of self attention layers.""" + + x = image_feat + image_hidden_size = hparams.image_hidden_size or hparams.hidden_size + image_filter_size = hparams.image_filter_size or hparams.filter_size + with tf.variable_scope(name): + for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): + with tf.variable_scope("layer_%d" % layer): + with tf.variable_scope("self_attention"): + y = vqa_layers.multihead_attention( + common_layers.layer_preprocess(x, hparams), + None, + None, + hparams.attention_key_channels or image_hidden_size, + hparams.attention_value_channels or image_hidden_size, + image_hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.image_self_attention_type, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + scale_dotproduct=hparams.scale_dotproduct, + ) + utils.collect_named_outputs( + "norms", "image_feat_self_attention_%d"%(layer), + tf.norm(y, axis=-1)) + x = common_layers.layer_postprocess(x, y, hparams) + utils.collect_named_outputs( + "norms", "image_feat_self_attention_postprocess_%d"%(layer), + tf.norm(x, axis=-1)) + with tf.variable_scope("ffn"): + y = common_layers.dense_relu_dense( + common_layers.layer_preprocess(x, hparams), + image_filter_size, + image_hidden_size, + dropout=hparams.relu_dropout, + ) + utils.collect_named_outputs( + "norms", "image_feat_ffn_%d"%(layer), tf.norm(y, axis=-1)) + x = 
common_layers.layer_postprocess(x, y, hparams) + utils.collect_named_outputs( + "norms", "image_feat_ffn_postprocess_%d"%(layer), + tf.norm(x, axis=-1)) + # if normalization is done in layer_preprocess, then it should also be done + # on the output, since the output can grow very large, being the sum of + # a whole stack of unnormalized layer outputs. + return common_layers.layer_preprocess(x, hparams) + + +def prepare_question_encoder(inputs, hparams): + """Prepare question encoder. + + Args: + inputs: a Tensor. + hparams: run hyperparameters + + Returns: + encoder_input: a Tensor, bottom of encoder stack + encoder_self_attention_bias: a bias tensor for use in encoder self-attention + """ + encoder_input = inputs + # Usual case - not a packed dataset. + encoder_padding = common_attention.embedding_to_padding(encoder_input) + ignore_padding = common_attention.attention_bias_ignore_padding( + encoder_padding) + encoder_self_attention_bias = ignore_padding + if hparams.pos == "timing": + encoder_input = common_attention.add_timing_signal_1d(encoder_input) + elif hparams.pos == "emb": + encoder_input = common_attention.add_positional_embedding( + encoder_input, hparams.max_length, "inputs_positional_embedding", + None) + return (encoder_input, encoder_self_attention_bias) + + +def question_encoder(question, + question_self_attention_bias, + hparams, + name="question_encoder", + save_weights_to=None, + make_image_summary=True): + """A stack of self attention layers.""" + x = question + with tf.variable_scope(name): + for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): + with tf.variable_scope("layer_%d" % layer): + with tf.variable_scope("self_attention"): + y = vqa_layers.multihead_attention( + common_layers.layer_preprocess(x, hparams), + None, + question_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.question_self_attention_type, + block_length=hparams.block_length, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + scale_dotproduct=hparams.scale_dotproduct, + ) + utils.collect_named_outputs( + "norms", "query_self_attention_%d"%(layer), + tf.norm(y, axis=-1)) + x = common_layers.layer_postprocess(x, y, hparams) + utils.collect_named_outputs( + "norms", "query_self_attention_postprocess_%d"%(layer), + tf.norm(x, axis=-1)) + with tf.variable_scope("ffn"): + y = common_layers.dense_relu_dense( + common_layers.layer_preprocess(x, hparams), + hparams.filter_size, + hparams.hidden_size, + dropout=hparams.relu_dropout, + ) + utils.collect_named_outputs( + "norms", "query_ffn_%d"%(layer), tf.norm(y, axis=-1)) + x = common_layers.layer_postprocess(x, y, hparams) + utils.collect_named_outputs( + "norms", "query_ffn_postprocess_%d"%(layer), + tf.norm(x, axis=-1)) + # if normalization is done in layer_preprocess, then it should also be done + # on the output, since the output can grow very large, being the sum of + # a whole stack of unnormalized layer outputs. 
+ return common_layers.layer_preprocess(x, hparams) + + +def attn(image_feat, + query, + hparams, + name="attn", + save_weights_to=None, + make_image_summary=True): + """Attention on image feature with question as query.""" + with tf.variable_scope(name, "attn", values=[image_feat, query]): + total_key_depth = hparams.attention_key_channels or hparams.hidden_size + total_value_depth = hparams.attention_value_channels or hparams.hidden_size + num_heads = hparams.num_heads + query = tf.expand_dims(query, 1) + q, k, v = common_attention.compute_qkv( + query, + image_feat, + total_key_depth, + total_value_depth, + ) + q = common_attention.split_heads(q, num_heads) + k = common_attention.split_heads(k, num_heads) + v = common_attention.split_heads(v, num_heads) + + if hparams.scale_dotproduct: + key_depth_per_head = total_key_depth // num_heads + q *= key_depth_per_head**-0.5 + + # image_feat is input as v + x = common_attention.dot_product_attention( + q, k, v, None, + dropout_rate=hparams.attention_dropout, + image_shapes=None, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary) + x = common_attention.combine_heads(x) + + return tf.squeeze(x, axis=1) + + +def mlp(feature, hparams, name="mlp"): + """Multi layer perceptron with dropout and relu activation.""" + with tf.variable_scope(name, "mlp", values=[feature]): + num_mlp_layers = hparams.num_mlp_layers + mlp_size = hparams.mlp_size + for _ in range(num_mlp_layers): + feature = common_layers.dense(feature, mlp_size, activation=None) + utils.collect_named_outputs("norms", "mlp_feature", + tf.norm(feature, axis=-1)) + feature = common_layers.layer_norm(feature) + feature = tf.nn.relu(feature) + feature = tf.nn.dropout(feature, keep_prob=1.-hparams.dropout) + return feature + + +def prepare_image_question_encoder(image_feat, question, hparams): + """Prepare encoder. + + Args: + image_feat: a Tensor. + question: a Tensor. + hparams: run hyperparameters + + Returns: + encoder_input: a Tensor, bottom of encoder stack + encoder_self_attention_bias: a bias tensor for use in encoder self-attention + """ + + encoder_input = tf.concat([image_feat, question], axis=1) + encoder_padding = common_attention.embedding_to_padding(encoder_input) + ignore_padding = common_attention.attention_bias_ignore_padding( + encoder_padding) + encoder_self_attention_bias = ignore_padding + encoder_decoder_attention_bias = ignore_padding + # Usual case - not a packed dataset. 
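+  # Positional signals are added to the question tokens only; the image
+  # features carry no sequential order and are concatenated unchanged.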
+ if hparams.pos == "timing": + question = common_attention.add_timing_signal_1d(question) + elif hparams.pos == "emb": + question = common_attention.add_positional_embedding( + question, hparams.max_length, "inputs_positional_embedding", + None) + encoder_input = tf.concat([image_feat, question], axis=1) + + return (encoder_input, encoder_self_attention_bias, + encoder_decoder_attention_bias) + + +def image_question_encoder(encoder_inputs, + encoder_self_attention_bias, + hparams, + query=None, + name="image_question_encoder", + save_weights_to=None, + make_image_summary=True): + """A stack of self attention layers.""" + x = encoder_inputs + with tf.variable_scope(name): + for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers): + with tf.variable_scope("layer_%d" % layer): + with tf.variable_scope("self_attention"): + y = vqa_layers.multihead_attention( + common_layers.layer_preprocess(x, hparams), + None, + encoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + block_length=hparams.block_length, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + scale_dotproduct=hparams.scale_dotproduct, + ) + utils.collect_named_outputs( + "norms", "encoder_self_attention_%d"%(layer), + tf.norm(y, axis=-1)) + x = common_layers.layer_postprocess(x, y, hparams) + utils.collect_named_outputs( + "norms", "encoder_self_attention_postprocess_%d"%(layer), + tf.norm(x, axis=-1)) + if query is not None: + with tf.variable_scope("encdec_attention"): + y = common_attention.multihead_attention( + common_layers.layer_preprocess(x, hparams), + query, + None, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + block_length=hparams.block_length, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + scale_dotproduct=hparams.scale_dotproduct, + ) + utils.collect_named_outputs( + "norms", + "encoder_decoder_attention_%d"%(layer), + tf.norm(y, axis=-1)) + x = common_layers.layer_postprocess(x, y, hparams) + utils.collect_named_outputs( + "norms", + "encoder_decoder_attention_post_%d"%(layer), + tf.norm(x, axis=-1)) + with tf.variable_scope("ffn"): + y = common_layers.dense_relu_dense( + common_layers.layer_preprocess(x, hparams), + hparams.filter_size, + hparams.hidden_size, + dropout=hparams.relu_dropout, + ) + utils.collect_named_outputs( + "norms", "encoder_ffn_%d"%(layer), tf.norm(y, axis=-1)) + x = common_layers.layer_postprocess(x, y, hparams) + utils.collect_named_outputs( + "norms", "encoder_ffn_postprocess_%d"%(layer), + tf.norm(x, axis=-1)) + # if normalization is done in layer_preprocess, then it should also be done + # on the output, since the output can grow very large, being the sum of + # a whole stack of unnormalized layer outputs. + return common_layers.layer_preprocess(x, hparams) + + +def decoder(decoder_input, + encoder_output, + decoder_self_attention_bias, + encoder_decoder_attention_bias, + hparams, + name="decoder", + save_weights_to=None, + make_image_summary=True,): + """A stack of transformer layers. 
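+
+  Each layer applies self-attention, optional attention over encoder_output,
+  and a feed-forward block, each wrapped in layer_preprocess/layer_postprocess.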
+ + Args: + decoder_input: a Tensor + encoder_output: a Tensor + decoder_self_attention_bias: bias Tensor for self-attention + (see common_attention.attention_bias()) + encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention + (see common_attention.attention_bias()) + hparams: hyperparameters for model + name: a string + save_weights_to: an optional dictionary to capture attention weights + for visualization; the weights tensor will be appended there under + a string key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. + + Returns: + y: a Tensors + """ + x = decoder_input + with tf.variable_scope(name): + for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers): + layer_name = "layer_%d" % layer + with tf.variable_scope(layer_name): + with tf.variable_scope("self_attention"): + y = common_attention.multihead_attention( + common_layers.layer_preprocess(x, hparams), + None, + decoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + ) + utils.collect_named_outputs("norms", + "decoder_self_attention_%d"%(layer), + tf.norm(y, axis=-1)) + x = common_layers.layer_postprocess(x, y, hparams) + utils.collect_named_outputs("norms", + "decoder_self_attention_post_%d"%(layer), + tf.norm(x, axis=-1)) + if encoder_output is not None: + with tf.variable_scope("encdec_attention"): + y = common_attention.multihead_attention( + common_layers.layer_preprocess(x, hparams), + encoder_output, + encoder_decoder_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + ) + utils.collect_named_outputs( + "norms", + "decoder_encoder_attention_%d"%(layer), + tf.norm(y, axis=-1)) + x = common_layers.layer_postprocess(x, y, hparams) + utils.collect_named_outputs( + "norms", + "decoder_encoder_attention_post_%d"%(layer), + tf.norm(x, axis=-1)) + with tf.variable_scope("ffn"): + y = common_layers.dense_relu_dense( + common_layers.layer_preprocess(x, hparams), + hparams.filter_size, + hparams.hidden_size, + dropout=hparams.relu_dropout, + ) + utils.collect_named_outputs("norms", "decoder_ffn_%d"%(layer), + tf.norm(y, axis=-1)) + x = common_layers.layer_postprocess(x, y, hparams) + utils.collect_named_outputs("norms", "decoder_ffn_post_%d"%(layer), + tf.norm(x, axis=-1)) + # if normalization is done in layer_preprocess, then it should also be done + # on the output, since the output can grow very large, being the sum of + # a whole stack of unnormalized layer outputs. 
+ return common_layers.layer_preprocess(x, hparams) + + +def iterative_encoder_decoder(encoder_input, + encoder_self_attention_bias, + encoder_decoder_attention_bias, + query, + hparams): + """Iterative encoder decoder.""" + for _ in range(hparams.num_rec_steps): + with tf.variable_scope("step", reuse=tf.AUTO_REUSE): + encoder_output = image_question_encoder( + encoder_input, + encoder_self_attention_bias, + hparams, + query) + + decoder_output = decoder( + query, + encoder_output, + None, + encoder_decoder_attention_bias, + hparams) + + encoder_input = encoder_output + query = decoder_output + + return decoder_output + + +@registry.register_hparams +def vqa_self_attention_base(): + """VQA attention baseline hparams.""" + hparams = common_hparams.basic_params1() + hparams.batch_size = 128 + hparams.use_fixed_batch_size = True, + hparams.optimizer = "adam" + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.997 + hparams.optimizer_adam_epsilon = 1e-9 + hparams.weight_decay = 0. + hparams.clip_grad_norm = 0. + hparams.initializer = "xavier" + hparams.learning_rate_schedule = ( + "constant*linear_warmup*rsqrt_normalized_decay") + hparams.learning_rate_warmup_steps = 8000 + hparams.learning_rate_constant = 1e-3 + hparams.learning_rate_decay_rate = 0.5 + hparams.learning_rate_decay_steps = 50000 + hparams.dropout = 0.5 + hparams.summarize_grads = True + hparams.summarize_vars = True + + # not used hparams + hparams.label_smoothing = 0. + hparams.multiply_embedding_mode = "sqrt_depth" + + # add new hparams + # use raw image as input + hparams.add_hparam("image_input_type", "image") + hparams.add_hparam("image_model_fn", "resnet_v1_152") + hparams.add_hparam("resize_side", 512) + hparams.add_hparam("height", 448) + hparams.add_hparam("width", 448) + hparams.add_hparam("distort", True) + hparams.add_hparam("train_resnet", False) + + # image parts + hparams.add_hparam("image_feat_preprocess_proj", True) + hparams.add_hparam("image_feat_preprocess_layernorm", True) + hparams.add_hparam("image_feat_encode", True) + hparams.add_hparam("image_hidden_size", 0) # default to hidden_size + hparams.add_hparam("image_filter_size", 0) # defaults to filter_size + + # question hidden size + hparams.hidden_size = 512 + hparams.filter_size = 1024 + hparams.num_hidden_layers = 4 + + hparams.add_hparam("multimodal_combine", "concat") + hparams.add_hparam("num_mlp_layers", 1) + hparams.add_hparam("mlp_size", 1024) + + # self attention parts + hparams.norm_type = "layer" + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + hparams.layer_prepostprocess_dropout = 0.1 + hparams.attention_dropout = 0.1 + hparams.relu_dropout = 0.1 + hparams.add_hparam("pos", "timing") + hparams.add_hparam("num_encoder_layers", 0) + hparams.add_hparam("num_decoder_layers", 0) + hparams.add_hparam("num_heads", 8) + hparams.add_hparam("attention_key_channels", 0) + hparams.add_hparam("attention_value_channels", 0) + hparams.add_hparam("self_attention_type", "dot_product") + hparams.add_hparam("image_self_attention_type", "dot_product") + hparams.add_hparam("question_self_attention_type", "dot_product") + hparams.add_hparam("block_length", 1) + hparams.add_hparam("scale_dotproduct", True) + + # iterative part + hparams.add_hparam("num_rec_steps", 3) + + return hparams + + +@registry.register_hparams +def vqa_self_attention_feature(): + hparams = vqa_self_attention_base() + hparams.image_input_type = "feature" + return hparams + + +@registry.register_hparams +def 
vqa_self_attention_feature_batch1024(): + hparams = vqa_self_attention_feature() + hparams.batch_size = 1024 + return hparams + + +@registry.register_hparams +def vqa_self_attention_feature_batch1024_big(): + """Big model.""" + hparams = vqa_self_attention_feature_batch1024() + hparams.learning_rate_constant = 7e-4 + hparams.batch_size = 256 + hparams.hidden_size = 1024 + hparams.filter_size = 4096 + hparams.num_heads = 16 + hparams.layer_prepostprocess_dropout = 0.3 + hparams.attention_dropout = 0.3 + hparams.relu_dropout = 0.3 + return hparams + + +@registry.register_hparams +def vqa_self_attention_feature_batch1024_exp(): + hparams = vqa_self_attention_feature_batch1024() + hparams.learning_rate_schedule = ( + "constant*linear_warmup*exp_decay") + hparams.learning_rate_decay_steps = 4000 + return hparams + + +@registry.register_hparams +def vqa_self_attention_feature_batch1024_hidden6(): + hparams = vqa_self_attention_feature_batch1024() + hparams.num_hidden_layers = 6 + return hparams + + +@registry.register_hparams +def vqa_self_attention_feature_batch1024_hidden6_big(): + hparams = vqa_self_attention_feature_batch1024_hidden6() + hparams.batch_size = 256 + hparams.hidden_size = 1024 + hparams.filter_size = 4096 + hparams.num_heads = 16 + hparams.layer_prepostprocess_dropout = 0.3 + return hparams + + +@registry.register_hparams +def vqa_self_attention_feature_batch1024_drop03(): + hparams = vqa_self_attention_feature_batch1024() + hparams.layer_prepostprocess_dropout = 0.3 + return hparams + + +@registry.register_hparams +def vqa_self_attention_feature_lr5(): + hparams = vqa_self_attention_feature() + hparams.learning_rate_constant = 5e-4 + return hparams diff --git a/tensor2tensor/models/resnet.py b/tensor2tensor/models/resnet.py new file mode 100644 index 000000000..5eeb4792f --- /dev/null +++ b/tensor2tensor/models/resnet.py @@ -0,0 +1,861 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Resnets.""" +# Copied from cloud_tpu/models/resnet/resnet_model.py and modified + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import hparam +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +BATCH_NORM_DECAY = 0.9 +BATCH_NORM_EPSILON = 1e-5 + + +# TODO(lukaszkaiser): remove or simplify after V2 work is done. +def layers(): + return common_layers.layers() + + +def batch_norm_relu(inputs, + is_training, + relu=True, + init_zero=False, + data_format="channels_first"): + """Performs a batch normalization followed by a ReLU. + + Args: + inputs: `Tensor` of shape `[batch, channels, ...]`. + is_training: `bool` for whether the model is training. + relu: `bool` if False, omits the ReLU operation. 
+ init_zero: `bool` if True, initializes scale parameter of batch + normalization with 0 instead of 1 (default). + data_format: `str` either "channels_first" for `[batch, channels, height, + width]` or "channels_last for `[batch, height, width, channels]`. + + Returns: + A normalized `Tensor` with the same `data_format`. + """ + if init_zero: + gamma_initializer = tf.zeros_initializer() + else: + gamma_initializer = tf.ones_initializer() + + if data_format == "channels_first": + axis = 1 + else: + axis = 3 + + inputs = layers().BatchNormalization( + axis=axis, + momentum=BATCH_NORM_DECAY, + epsilon=BATCH_NORM_EPSILON, + center=True, + scale=True, + fused=True, + gamma_initializer=gamma_initializer)(inputs, training=is_training) + + if relu: + inputs = tf.nn.relu(inputs) + return inputs + + +def fixed_padding(inputs, kernel_size, data_format="channels_first"): + """Pads the input along the spatial dimensions independently of input size. + + Args: + inputs: `Tensor` of size `[batch, channels, height, width]` or + `[batch, height, width, channels]` depending on `data_format`. + kernel_size: `int` kernel size to be used for `conv2d` or max_pool2d` + operations. Should be a positive integer. + data_format: `str` either "channels_first" for `[batch, channels, height, + width]` or "channels_last for `[batch, height, width, channels]`. + + Returns: + A padded `Tensor` of the same `data_format` with size either intact + (if `kernel_size == 1`) or padded (if `kernel_size > 1`). + """ + pad_total = kernel_size - 1 + pad_beg = pad_total // 2 + pad_end = pad_total - pad_beg + if data_format == "channels_first": + padded_inputs = tf.pad( + inputs, [[0, 0], [0, 0], [pad_beg, pad_end], [pad_beg, pad_end]]) + else: + padded_inputs = tf.pad( + inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]) + + return padded_inputs + + +def conv2d_fixed_padding(inputs, + filters, + kernel_size, + strides, + data_format="channels_first", + use_td=False, + targeting_rate=None, + keep_prob=None, + is_training=None): + """Strided 2-D convolution with explicit padding. + + The padding is consistent and is based only on `kernel_size`, not on the + dimensions of `inputs` (as opposed to using `tf.layers.conv2d` alone). + + Args: + inputs: `Tensor` of size `[batch, channels, height_in, width_in]`. + filters: `int` number of filters in the convolution. + kernel_size: `int` size of the kernel to be used in the convolution. + strides: `int` strides of the convolution. + data_format: `str` either "channels_first" for `[batch, channels, height, + width]` or "channels_last for `[batch, height, width, channels]`. + use_td: `str` one of "weight" or "unit". Set to False or "" to disable + targeted dropout. + targeting_rate: `float` proportion of weights to target with targeted + dropout. + keep_prob: `float` keep probability for targeted dropout. + is_training: `bool` for whether the model is in training. + + Returns: + A `Tensor` of shape `[batch, filters, height_out, width_out]`. + + Raises: + Exception: if use_td is not valid. 
+ """ + if strides > 1: + inputs = fixed_padding(inputs, kernel_size, data_format=data_format) + + if use_td: + inputs_shape = common_layers.shape_list(inputs) + if use_td == "weight": + if data_format == "channels_last": + size = kernel_size * kernel_size * inputs_shape[-1] + else: + size = kernel_size * kernel_size * inputs_shape[1] + targeting_count = targeting_rate * tf.to_float(size) + targeting_fn = common_layers.weight_targeting + elif use_td == "unit": + targeting_count = targeting_rate * filters + targeting_fn = common_layers.unit_targeting + else: + raise Exception("Unrecognized targeted dropout type: %s" % use_td) + + y = common_layers.td_conv( + inputs, + filters, + kernel_size, + targeting_count, + targeting_fn, + keep_prob, + is_training, + do_prune=True, + strides=strides, + padding=("SAME" if strides == 1 else "VALID"), + data_format=data_format, + use_bias=False, + kernel_initializer=tf.variance_scaling_initializer()) + else: + y = layers().Conv2D( + filters=filters, + kernel_size=kernel_size, + strides=strides, + padding=("SAME" if strides == 1 else "VALID"), + use_bias=False, + kernel_initializer=tf.variance_scaling_initializer(), + data_format=data_format)(inputs) + + return y + + +def residual_block(inputs, + filters, + is_training, + projection_shortcut, + strides, + final_block, + data_format="channels_first", + use_td=False, + targeting_rate=None, + keep_prob=None, + bottleneck_ratio=None): + """Standard building block for residual networks with BN before convolutions. + + Args: + inputs: `Tensor` of size `[batch, channels, height, width]`. + filters: `int` number of filters for the first two convolutions. Note that + the third and final convolution will use 4 times as many filters. + is_training: `bool` for whether the model is in training. + projection_shortcut: `function` to use for projection shortcuts (typically + a 1x1 convolution to match the filter dimensions). If None, no + projection is used and the input is passed as unchanged through the + shortcut connection. + strides: `int` block stride. If greater than 1, this block will ultimately + downsample the input. + final_block: unused parameter to keep the same function signature as + `bottleneck_block`. + data_format: `str` either "channels_first" for `[batch, channels, height, + width]` or "channels_last for `[batch, height, width, channels]`. + use_td: `str` one of "weight" or "unit". Set to False or "" to disable + targeted dropout. + targeting_rate: `float` proportion of weights to target with targeted + dropout. + keep_prob: `float` keep probability for targeted dropout. + bottleneck_ratio: unused parameter to keep the same function signature as + `bottleneck_block`. + + Returns: + The output `Tensor` of the block. 
+ """ + del final_block + del bottleneck_ratio + shortcut = inputs + inputs = batch_norm_relu(inputs, is_training, data_format=data_format) + + if projection_shortcut is not None: + shortcut = projection_shortcut(inputs) + + inputs = conv2d_fixed_padding( + inputs=inputs, + filters=filters, + kernel_size=3, + strides=strides, + data_format=data_format, + use_td=use_td, + targeting_rate=targeting_rate, + keep_prob=keep_prob, + is_training=is_training) + + inputs = batch_norm_relu(inputs, is_training, data_format=data_format) + inputs = conv2d_fixed_padding( + inputs=inputs, + filters=filters, + kernel_size=3, + strides=1, + data_format=data_format, + use_td=use_td, + targeting_rate=targeting_rate, + keep_prob=keep_prob, + is_training=is_training) + + return inputs + shortcut + + +def bottleneck_block(inputs, + filters, + is_training, + projection_shortcut, + strides, + final_block, + data_format="channels_first", + use_td=False, + targeting_rate=None, + keep_prob=None, + bottleneck_ratio=4): + """Bottleneck block variant for residual networks with BN after convolutions. + + Args: + inputs: `Tensor` of size `[batch, channels, height, width]`. + filters: `int` number of filters for the first two convolutions. Note that + the third and final convolution will use 4 times as many filters. + is_training: `bool` for whether the model is in training. + projection_shortcut: `function` to use for projection shortcuts (typically + a 1x1 convolution to match the filter dimensions). If None, no + projection is used and the input is passed as unchanged through the + shortcut connection. + strides: `int` block stride. If greater than 1, this block will ultimately + downsample the input. + final_block: `bool` set to True if it is this the final block in the group. + This is changes the behavior of batch normalization initialization for + the final batch norm in a block. + data_format: `str` either "channels_first" for `[batch, channels, height, + width]` or "channels_last for `[batch, height, width, channels]`. + use_td: `str` one of "weight" or "unit". Set to False or "" to disable + targeted dropout. + targeting_rate: `float` proportion of weights to target with targeted + dropout. + keep_prob: `float` keep probability for targeted dropout. + bottleneck_ratio: `int`, how much we scale up filters. + + + Returns: + The output `Tensor` of the block. + """ + # TODO(chrisying): this block is technically the post-activation resnet-v1 + # bottleneck unit. Test with v2 (pre-activation) and replace if there is no + # difference for consistency. 
+ shortcut = inputs + if projection_shortcut is not None: + shortcut = projection_shortcut(inputs) + + inputs = conv2d_fixed_padding( + inputs=inputs, + filters=filters, + kernel_size=1, + strides=1, + data_format=data_format, + use_td=use_td, + targeting_rate=targeting_rate, + keep_prob=keep_prob, + is_training=is_training) + + inputs = batch_norm_relu(inputs, is_training, data_format=data_format) + inputs = conv2d_fixed_padding( + inputs=inputs, + filters=filters, + kernel_size=3, + strides=strides, + data_format=data_format, + use_td=use_td, + targeting_rate=targeting_rate, + keep_prob=keep_prob, + is_training=is_training) + + inputs = batch_norm_relu(inputs, is_training, data_format=data_format) + inputs = conv2d_fixed_padding( + inputs=inputs, + filters=bottleneck_ratio * filters, + kernel_size=1, + strides=1, + data_format=data_format, + use_td=use_td, + targeting_rate=targeting_rate, + keep_prob=keep_prob, + is_training=is_training) + inputs = batch_norm_relu( + inputs, + is_training, + relu=False, + init_zero=final_block, + data_format=data_format) + + return tf.nn.relu(inputs + shortcut) + + +def block_layer(inputs, + filters, + block_fn, + blocks, + strides, + is_training, + name, + data_format="channels_first", + use_td=False, + targeting_rate=None, + keep_prob=None, + bottleneck_ratio=4): + """Creates one layer of blocks for the ResNet model. + + Args: + inputs: `Tensor` of size `[batch, channels, height, width]`. + filters: `int` number of filters for the first convolution of the layer. + block_fn: `function` for the block to use within the model + blocks: `int` number of blocks contained in the layer. + strides: `int` stride to use for the first convolution of the layer. If + greater than 1, this layer will downsample the input. + is_training: `bool` for whether the model is training. + name: `str`name for the Tensor output of the block layer. + data_format: `str` either "channels_first" for `[batch, channels, height, + width]` or "channels_last for `[batch, height, width, channels]`. + use_td: `str` one of "weight" or "unit". Set to False or "" to disable + targeted dropout. + targeting_rate: `float` proportion of weights to target with targeted + dropout. + keep_prob: `float` keep probability for targeted dropout. + bottleneck_ratio: `int`, how much we scale up filters in bottleneck block. + + Returns: + The output `Tensor` of the block layer. 
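+
+  Example (a minimal sketch; shapes assume "channels_first" ImageNet-style
+  activations and are illustrative only):
+
+    x = tf.random_uniform([8, 64, 56, 56])
+    y = block_layer(x, filters=64, block_fn=bottleneck_block, blocks=3,
+                    strides=1, is_training=True, name="block_layer1")
+    # With bottleneck_block and bottleneck_ratio=4, y has shape
+    # [8, 256, 56, 56].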
+ """ + # Bottleneck blocks end with bottleneck_ratio x the number of filters + filters_out = filters + if block_fn is bottleneck_block: + filters_out = bottleneck_ratio * filters + + def projection_shortcut(inputs): + """Project identity branch.""" + inputs = conv2d_fixed_padding( + inputs=inputs, + filters=filters_out, + kernel_size=1, + strides=strides, + data_format=data_format, + use_td=use_td, + targeting_rate=targeting_rate, + keep_prob=keep_prob, + is_training=is_training) + return batch_norm_relu( + inputs, is_training, relu=False, data_format=data_format) + + # Only the first block per block_layer uses projection_shortcut and strides + inputs = block_fn( + inputs, + filters, + is_training, + projection_shortcut, + strides, + False, + data_format, + use_td=use_td, + targeting_rate=targeting_rate, + keep_prob=keep_prob, + bottleneck_ratio=bottleneck_ratio) + + for i in range(1, blocks): + inputs = block_fn( + inputs, + filters, + is_training, + None, + 1, (i + 1 == blocks), + data_format, + use_td=use_td, + targeting_rate=targeting_rate, + keep_prob=keep_prob, + bottleneck_ratio=bottleneck_ratio) + + return tf.identity(inputs, name) + + +def resnet_v2(inputs, + block_fn, + layer_blocks, + filters, + data_format="channels_first", + is_training=False, + is_cifar=False, + use_td=False, + targeting_rate=None, + keep_prob=None, + bottleneck_ratios=None): + """Resnet model. + + Args: + inputs: `Tensor` images. + block_fn: `function` for the block to use within the model. Either + `residual_block` or `bottleneck_block`. + layer_blocks: list of 3 or 4 `int`s denoting the number of blocks to include + in each of the 3 or 4 block groups. Each group consists of blocks that + take inputs of the same resolution. + filters: list of 4 or 5 `int`s denoting the number of filter to include in + block. + data_format: `str`, "channels_first" `[batch, channels, height, + width]` or "channels_last" `[batch, height, width, channels]`. + is_training: bool, build in training mode or not. + is_cifar: bool, whether the data is CIFAR or not. + use_td: `str` one of "weight" or "unit". Set to False or "" to disable + targeted dropout. + targeting_rate: `float` proportion of weights to target with targeted + dropout. + keep_prob: `float` keep probability for targeted dropout. + bottleneck_ratios: list of `int`s, how much we scale up filters in + bottleneck blocks. + + Returns: + Pre-logit activations. 
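+
+  Example (a rough sketch of a ResNet-50-style call; assumes the input has
+  already passed through the initial conv/pool as in Resnet.body below):
+
+    x = tf.random_uniform([8, 64, 56, 56])
+    out = resnet_v2(x, bottleneck_block, layer_blocks=[3, 4, 6, 3],
+                    filters=[64, 64, 128, 256, 512], is_training=True,
+                    bottleneck_ratios=[4, 4, 4, 4])
+    # out has shape [8, 2048, 7, 7]: the last three block groups each halve
+    # the spatial size, and the final group outputs 4 * 512 channels.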
+ """ + inputs = block_layer( + inputs=inputs, + filters=filters[1], + block_fn=block_fn, + blocks=layer_blocks[0], + strides=1, + is_training=is_training, + name="block_layer1", + data_format=data_format, + use_td=use_td, + targeting_rate=targeting_rate, + keep_prob=keep_prob, + bottleneck_ratio=bottleneck_ratios[0]) + inputs = block_layer( + inputs=inputs, + filters=filters[2], + block_fn=block_fn, + blocks=layer_blocks[1], + strides=2, + is_training=is_training, + name="block_layer2", + data_format=data_format, + use_td=use_td, + targeting_rate=targeting_rate, + keep_prob=keep_prob, + bottleneck_ratio=bottleneck_ratios[1]) + inputs = block_layer( + inputs=inputs, + filters=filters[3], + block_fn=block_fn, + blocks=layer_blocks[2], + strides=2, + is_training=is_training, + name="block_layer3", + data_format=data_format, + use_td=use_td, + targeting_rate=targeting_rate, + keep_prob=keep_prob, + bottleneck_ratio=bottleneck_ratios[2]) + if not is_cifar: + inputs = block_layer( + inputs=inputs, + filters=filters[4], + block_fn=block_fn, + blocks=layer_blocks[3], + strides=2, + is_training=is_training, + name="block_layer4", + data_format=data_format, + use_td=use_td, + targeting_rate=targeting_rate, + keep_prob=keep_prob, + bottleneck_ratio=bottleneck_ratios[3]) + + return inputs + + +@registry.register_model +class Resnet(t2t_model.T2TModel): + """Residual Network.""" + + def body(self, features): + hp = self.hparams + block_fns = { + "residual": residual_block, + "bottleneck": bottleneck_block, + } + assert hp.block_fn in block_fns + is_training = hp.mode == tf_estimator.ModeKeys.TRAIN + if is_training: + targets = features["targets_raw"] + + inputs = features["inputs"] + + data_format = "channels_last" + if hp.use_nchw: + # Convert from channels_last (NHWC) to channels_first (NCHW). This + # provides a large performance boost on GPU. 
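+      # [batch, height, width, channels] -> [batch, channels, height, width]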
+ inputs = tf.transpose(inputs, [0, 3, 1, 2]) + data_format = "channels_first" + + inputs = conv2d_fixed_padding( + inputs=inputs, + filters=hp.filter_sizes[0], + kernel_size=7, + strides=1 if hp.is_cifar else 2, + data_format=data_format) + inputs = tf.identity(inputs, "initial_conv") + inputs = batch_norm_relu(inputs, is_training, data_format=data_format) + + if not hp.is_cifar: + inputs = layers().MaxPooling2D( + pool_size=3, + strides=2, + padding="SAME", + data_format=data_format)(inputs) + inputs = tf.identity(inputs, "initial_max_pool") + + out = resnet_v2( + inputs, + block_fns[hp.block_fn], + hp.layer_sizes, + hp.filter_sizes, + data_format, + is_training=is_training, + is_cifar=hp.is_cifar, + use_td=hp.use_td, + targeting_rate=hp.targeting_rate, + keep_prob=hp.keep_prob, + bottleneck_ratios=hp.bottleneck_ratios) + + if hp.use_nchw: + out = tf.transpose(out, [0, 2, 3, 1]) + + if not hp.is_cifar: + return out + + out = tf.reduce_mean(out, [1, 2]) + num_classes = self._problem_hparams.vocab_size["targets"] + if hasattr(self._hparams, "vocab_divisor"): + num_classes += (-num_classes) % self._hparams.vocab_divisor + logits = layers().Dense(num_classes, name="logits")(out) + + losses = {"training": 0.0} + if is_training: + loss = tf.losses.sparse_softmax_cross_entropy( + labels=tf.squeeze(targets), logits=logits) + loss = tf.reduce_mean(loss) + + losses = {"training": loss} + + logits = tf.reshape(logits, [-1, 1, 1, 1, logits.shape[1]]) + + return logits, losses + + def infer(self, + features=None, + decode_length=50, + beam_size=1, + top_beams=1, + alpha=0.0, + use_tpu=False): + """Predict.""" + del decode_length, beam_size, top_beams, alpha, use_tpu + assert features is not None + logits, _ = self(features) # pylint: disable=not-callable + assert len(logits.get_shape()) == 5 + logits = tf.squeeze(logits, [1, 2, 3]) + log_probs = common_layers.log_prob_from_logits(logits) + predictions, scores = common_layers.argmax_with_score(log_probs) + return { + "outputs": predictions, + "scores": scores, + } + + +def resnet_base(): + """Set of hyperparameters.""" + # For imagenet on TPU: + # Set train_steps=120000 + # Set eval_steps=48 + + # Base + hparams = common_hparams.basic_params1() + + # Model-specific parameters + hparams.add_hparam("layer_sizes", [3, 4, 6, 3]) + hparams.add_hparam("bottleneck_ratios", [4, 4, 4, 4]) + hparams.add_hparam("filter_sizes", [64, 64, 128, 256, 512]) + hparams.add_hparam("block_fn", "bottleneck") + hparams.add_hparam("use_nchw", True) + hparams.add_hparam("is_cifar", False) + + # Targeted dropout + hparams.add_hparam("use_td", False) + hparams.add_hparam("targeting_rate", None) + hparams.add_hparam("keep_prob", None) + + # Variable init + hparams.initializer = "normal_unit_scaling" + hparams.initializer_gain = 2. + + # Optimization + hparams.optimizer = "Momentum" + hparams.optimizer_momentum_momentum = 0.9 + hparams.optimizer_momentum_nesterov = True + hparams.weight_decay = 1e-4 + hparams.clip_grad_norm = 0.0 + # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.) + hparams.learning_rate = 0.4 + hparams.learning_rate_decay_scheme = "cosine" + # For image_imagenet224, 120k training steps, which effectively makes this a + # cosine decay (i.e. no cycles). 
+ hparams.learning_rate_cosine_cycle_steps = 120000 + + hparams.batch_size = 128 + return hparams + + +@registry.register_hparams +def resnet_50(): + hp = resnet_base() + return hp + + +@registry.register_hparams +def resnet_18(): + hp = resnet_base() + hp.block_fn = "residual" + hp.layer_sizes = [2, 2, 2, 2] + return hp + + +@registry.register_hparams +def resnet_imagenet_34(): + """Set of hyperparameters.""" + hp = resnet_base() + hp.block_fn = "residual" + hp.layer_sizes = [2, 4, 8, 2] + + return hp + + +@registry.register_hparams +def resnet_imagenet_34_td_weight_05_05(): + """Set of hyperparameters.""" + hp = resnet_imagenet_34() + hp.use_td = "weight" + hp.targeting_rate = 0.5 + hp.keep_prob = 0.5 + + return hp + + +@registry.register_hparams +def resnet_imagenet_34_td_unit_05_05(): + """Set of hyperparameters.""" + hp = resnet_imagenet_34() + hp.use_td = "unit" + hp.targeting_rate = 0.5 + hp.keep_prob = 0.5 + + return hp + + +@registry.register_hparams +def resnet_imagenet_34_td_unit_no_drop(): + """Set of hyperparameters.""" + hp = resnet_imagenet_34() + hp.use_td = "unit" + hp.targeting_rate = 0.0 + hp.keep_prob = 1.0 + + return hp + + +@registry.register_hparams +def resnet_imagenet_102(): + hp = resnet_imagenet_34() + hp.layer_sizes = [3, 8, 36, 3] + return hp + + +@registry.register_hparams +def resnet_cifar_15(): + """Set of hyperparameters.""" + hp = resnet_base() + hp.block_fn = "residual" + hp.is_cifar = True + hp.layer_sizes = [2, 2, 2] + hp.filter_sizes = [16, 32, 64, 128] + + return hp + + +@registry.register_hparams +def resnet_cifar_32(): + hp = resnet_cifar_15() + hp.layer_sizes = [5, 5, 5] + return hp + + +@registry.register_hparams +def resnet_cifar_32_td_weight_05_05(): + hp = resnet_cifar_32() + hp.use_td = "weight" + hp.targeting_rate = 0.5 + hp.keep_prob = 0.5 + return hp + + +@registry.register_hparams +def resnet_cifar_32_td_unit_05_05(): + hp = resnet_cifar_32() + hp.use_td = "unit" + hp.targeting_rate = 0.5 + hp.keep_prob = 0.5 + return hp + + +@registry.register_hparams +def resnet_cifar_32_td_unit_no_drop(): + hp = resnet_cifar_32() + hp.use_td = "unit" + hp.targeting_rate = 0.0 + hp.keep_prob = 1.0 + return hp + + +@registry.register_hparams +def resnet_34(): + hp = resnet_base() + hp.block_fn = "residual" + return hp + + +@registry.register_hparams +def resnet_101(): + hp = resnet_base() + hp.layer_sizes = [3, 4, 23, 3] + return hp + + +@registry.register_hparams +def resnet_152(): + hp = resnet_base() + hp.layer_sizes = [3, 8, 36, 3] + return hp + + +@registry.register_hparams +def resnet_200(): + hp = resnet_base() + hp.layer_sizes = [3, 24, 36, 3] + return hp + + +# Pruning parameters +@registry.register_pruning_params +def resnet_weight(): + hp = hparam.HParams() + hp.add_hparam("strategy", "weight") + hp.add_hparam("black_list", ["logits", "bias"]) + hp.add_hparam("white_list", ["td_conv"]) + hp.add_hparam("sparsities", [0.1 * i for i in range(10)]) + return hp + + +@registry.register_pruning_params +def resnet_unit(): + hp = resnet_weight() + hp.strategy = "unit" + return hp + + +# Adversarial attack parameters +@registry.register_attack_params +def resnet_fgsm(): + aparams = hparam.HParams() + aparams.attack = "fgsm" + aparams.epsilon_name = "eps" + aparams.attack_epsilons = [i * 0.8 for i in range(20)] + aparams.add_hparam("clip_min", 0.0) + aparams.add_hparam("clip_max", 255.0) + return aparams + + +@registry.register_attack_params +def resnet_madry(): + aparams = resnet_fgsm() + aparams.attack = "madry" + aparams.add_hparam("nb_iter", 40) + 
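+  # For the Madry (PGD) attack, nb_iter is the number of gradient steps and
+  # eps_iter (below) is the step size taken at each iteration.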
aparams.add_hparam("eps_iter", 1.0) + return aparams + + +@registry.register_attack_params +def resnet_random(): + aparams = resnet_fgsm() + aparams.attack = "random" + aparams.epsilon_name = "eps" + aparams.add_hparam("num_samples", 10) + aparams.add_hparam("num_batches", 100) + return aparams diff --git a/tensor2tensor/models/resnet_test.py b/tensor2tensor/models/resnet_test.py new file mode 100644 index 000000000..3b629fa48 --- /dev/null +++ b/tensor2tensor/models/resnet_test.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Resnet tests.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.layers import modalities +from tensor2tensor.models import resnet + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +def resnet_tiny_cpu(): + hparams = resnet.resnet_base() + hparams.layer_sizes = [2, 2, 2, 2] + hparams.use_nchw = False + return hparams + + +class ResnetTest(tf.test.TestCase): + + def _test_resnet(self, img_size, output_size): + vocab_size = 9 + batch_size = 2 + x = np.random.randint( + 256, size=(batch_size, img_size, img_size, 3)) + y = np.random.randint( + 1, high=vocab_size, size=(batch_size, 1, 1, 1)) + hparams = resnet_tiny_cpu() + p_hparams = problem_hparams.test_problem_hparams(vocab_size, + vocab_size, + hparams) + p_hparams.modality["inputs"] = modalities.ModalityType.IMAGE + p_hparams.modality["targets"] = modalities.ModalityType.CLASS_LABEL + with self.test_session() as session: + features = { + "inputs": tf.constant(x, dtype=tf.int32), + "targets": tf.constant(y, dtype=tf.int32), + } + model = resnet.Resnet(hparams, tf_estimator.ModeKeys.TRAIN, p_hparams) + logits, _ = model(features) + session.run(tf.global_variables_initializer()) + res = session.run(logits) + self.assertEqual(res.shape, (batch_size,) + output_size + (1, vocab_size)) + + def testResnetLarge(self): + self._test_resnet(img_size=224, output_size=(1, 1)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/revnet.py b/tensor2tensor/models/revnet.py new file mode 100644 index 000000000..e841652af --- /dev/null +++ b/tensor2tensor/models/revnet.py @@ -0,0 +1,439 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Creates a RevNet with the bottleneck residual function. + +Implements the following equations described in the RevNet paper: +y1 = x1 + f(x2) +y2 = x2 + g(y1) + +However, in practice, the authors use the following equations to downsample +tensors inside a RevNet block: + +y1 = h(x1) + f(x2) +y2 = h(x2) + g(y1) + +In this case, h is the downsampling function used to change number of channels. + +These modified equations are evident in the authors' code online: +https://github.com/renmengye/revnet-public + +For reference, the original paper can be found here: +https://arxiv.org/pdf/1707.04585.pdf +""" + +import functools +from tensor2tensor.layers import common_hparams +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +def wrapped_partial(fn, *args, **kwargs): + partial = functools.partial(fn, *args, **kwargs) + wrapped = functools.update_wrapper(partial, fn) + return wrapped + + +conv_initializer = tf.initializers.variance_scaling( + scale=2.0, mode='fan_out') + +CONFIG = {'2d': {'conv': wrapped_partial( + tf.layers.conv2d, kernel_initializer=conv_initializer), + 'max_pool': tf.layers.max_pooling2d, + 'avg_pool': tf.layers.average_pooling2d, + 'split_axis': 3, + 'reduction_dimensions': [1, 2] + }, + '3d': {'conv': wrapped_partial( + tf.layers.conv3d, kernel_initializer=conv_initializer), + 'max_pool': tf.layers.max_pooling3d, + 'avg_pool': tf.layers.average_pooling2d, + 'split_axis': 4, + 'reduction_dimensions': [1, 2, 3] + } + } + + +def f(x, depth1, depth2, dim='2d', first_batch_norm=True, stride=1, + training=True, bottleneck=True, padding='SAME'): + """Applies residual function for RevNet. + + Args: + x: input tensor + depth1: Number of output channels for the first and second conv layers. + depth2: Number of output channels for the third conv layer. + dim: '2d' if 2-dimensional, '3d' if 3-dimensional. + first_batch_norm: Whether to keep the first batch norm layer or not. + Typically used in the first RevNet block. + stride: Stride for the first conv filter. Note that this particular + RevNet architecture only varies the stride for the first conv + filter. The stride for the second conv filter is always set to 1. + training: True for train phase, False for eval phase. + bottleneck: If true, apply bottleneck 1x1 down/up sampling. + padding: Padding for each conv layer. + + Returns: + Output tensor after applying residual function for RevNet. 
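+
+  Example (a minimal sketch of the bottleneck variant with stride 1; shapes
+  are illustrative only):
+
+    x = tf.random_uniform([2, 56, 56, 64])
+    y = f(x, depth1=16, depth2=64, training=True)
+    # y has shape [2, 56, 56, 64]; depth2 must match the channel count of
+    # the tensor this residual is added to inside the reversible unit.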
+ """ + conv = CONFIG[dim]['conv'] + with tf.variable_scope('f', reuse=tf.AUTO_REUSE): + if first_batch_norm: + net = tf.layers.batch_normalization(x, training=training) + net = tf.nn.relu(net) + else: + net = x + + if bottleneck: + net = conv(net, depth1, 1, strides=stride, + padding=padding, activation=None) + + net = tf.layers.batch_normalization(net, training=training) + net = tf.nn.relu(net) + net = conv(net, depth1, 3, strides=1, + padding=padding, activation=None) + + net = tf.layers.batch_normalization(net, training=training) + net = tf.nn.relu(net) + net = conv(net, depth2, 1, strides=1, + padding=padding, activation=None) + else: + net = conv(net, depth2, 3, strides=stride, + padding=padding, activation=None) + net = tf.layers.batch_normalization(x, training=training) + net = tf.nn.relu(net) + net = conv(net, depth2, 3, strides=stride, + padding=padding, activation=None) + + return net + + +def downsample_bottleneck(x, output_channels, dim='2d', stride=1, scope='h'): + """Downsamples 'x' by `stride` using a 1x1 convolution filter. + + Args: + x: input tensor of size [N, H, W, C] + output_channels: Desired number of output channels. + dim: '2d' if 2-dimensional, '3d' if 3-dimensional. + stride: What stride to use. Usually 1 or 2. + scope: Optional variable scope. + + Returns: + A downsampled tensor of size [N, H/2, W/2, output_channels] if stride + is 2, else returns a tensor of size [N, H, W, output_channels] if + stride is 1. + """ + conv = CONFIG[dim]['conv'] + with tf.variable_scope(scope): + x = conv(x, output_channels, 1, strides=stride, padding='SAME', + activation=None) + return x + + +def downsample_residual(x, output_channels, dim='2d', stride=1, scope='h'): + """Downsamples 'x' by `stride` using average pooling. + + Args: + x: input tensor of size [N, H, W, C] + output_channels: Desired number of output channels. + dim: '2d' if 2-dimensional, '3d' if 3-dimensional. + stride: What stride to use. Usually 1 or 2. + scope: Optional variable scope. + + Returns: + A downsampled tensor of size [N, H/2, W/2, output_channels] if stride + is 2, else returns a tensor of size [N, H, W, output_channels] if + stride is 1. + """ + with tf.variable_scope(scope): + if stride > 1: + avg_pool = CONFIG[dim]['avg_pool'] + x = avg_pool(x, + pool_size=(stride, stride), + strides=(stride, stride), + padding='VALID') + + input_channels = tf.shape(x)[3] + diff = output_channels - input_channels + x = tf.pad( + x, [[0, 0], [0, 0], [0, 0], + [diff // 2, diff // 2]]) + return x + + +def init(images, num_channels, dim='2d', stride=2, + kernel_size=7, maxpool=True, training=True, scope='init'): + """Standard ResNet initial block used as first RevNet block. + + Args: + images: [N, H, W, 3] tensor of input images to the model. + num_channels: Output depth of convolutional layer in initial block. + dim: '2d' if 2-dimensional, '3d' if 3-dimensional. + stride: stride for the convolution and pool layer. + kernel_size: Size of the initial convolution filter + maxpool: If true, apply a maxpool after the convolution + training: True for train phase, False for eval phase. + scope: Optional scope for the init block. + + Returns: + Two [N, H, W, C] output activations from input images. 
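+
+  Example (a small sketch; the shapes assume 299x299 inputs with the
+  default stride-2 convolution and max pool):
+
+    images = tf.random_uniform([1, 299, 299, 3])
+    x1, x2 = init(images, num_channels=32)
+    # x1 and x2 each have shape [1, 74, 74, 16]: the 32 channels are split
+    # into two halves along the channel axis.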
+ """ + conv = CONFIG[dim]['conv'] + pool = CONFIG[dim]['max_pool'] + with tf.variable_scope(scope): + net = conv(images, num_channels, kernel_size, strides=stride, + padding='SAME', activation=None) + net = tf.layers.batch_normalization(net, training=training) + net = tf.nn.relu(net) + if maxpool: + net = pool(net, pool_size=3, strides=stride) + x1, x2 = tf.split(net, 2, axis=CONFIG[dim]['split_axis']) + return x1, x2 + + +def unit(x1, x2, block_num, depth, num_layers, dim='2d', + bottleneck=True, first_batch_norm=True, stride=1, training=True): + """Implements bottleneck RevNet unit from authors' RevNet architecture. + + Args: + x1: [N, H, W, C] tensor of network activations. + x2: [N, H, W, C] tensor of network activations. + block_num: integer ID of block + depth: First depth in bottleneck residual unit. + num_layers: Number of layers in the RevNet block. + dim: '2d' if 2-dimensional, '3d' if 3-dimensional. + bottleneck: Should a bottleneck layer be used. + first_batch_norm: Whether to keep the first batch norm layer or not. + Typically used in the first RevNet block. + stride: Stride for the residual function. + training: True for train phase, False for eval phase. + + Returns: + Two [N, H, W, C] output activation tensors. + """ + scope_name = 'unit_%d' % block_num + if bottleneck: + depth1 = depth + depth2 = depth * 4 + else: + depth1 = depth2 = depth + + residual = wrapped_partial(f, + depth1=depth1, depth2=depth2, dim=dim, + training=training, bottleneck=bottleneck) + + with tf.variable_scope(scope_name): + downsample = downsample_bottleneck if bottleneck else downsample_residual + # Manual implementation of downsampling + with tf.variable_scope('downsampling'): + with tf.variable_scope('x1'): + hx1 = downsample(x1, depth2, dim=dim, stride=stride) + fx2 = residual(x2, stride=stride, first_batch_norm=first_batch_norm) + x1 = hx1 + fx2 + with tf.variable_scope('x2'): + hx2 = downsample(x2, depth2, dim=dim, stride=stride) + fx1 = residual(x1) + x2 = hx2 + fx1 + + # Full block using memory-efficient rev_block implementation. + with tf.variable_scope('full_block'): + x1, x2 = contrib.layers().rev_block( + x1, x2, residual, residual, num_layers=num_layers) + return x1, x2 + + +def final_block(x1, x2, dim='2d', training=True, scope='final_block'): + """Converts activations from last RevNet block to pre-logits. + + Args: + x1: [NxHxWxC] tensor of network activations. + x2: [NxHxWxC] tensor of network activations. + dim: '2d' if 2-dimensional, '3d' if 3-dimensional. + training: True for train phase, False for eval phase. + scope: Optional variable scope for the final block. + + Returns: + [N, hidden_dim] pre-logits tensor from activations x1 and x2. + """ + + # Final batch norm and relu + with tf.variable_scope(scope): + y = tf.concat([x1, x2], axis=CONFIG[dim]['split_axis']) + y = tf.layers.batch_normalization(y, training=training) + y = tf.nn.relu(y) + + # Global average pooling + net = tf.reduce_mean(y, CONFIG[dim]['reduction_dimensions'], + name='final_pool', keep_dims=True) + + return net + + +def revnet(inputs, hparams, reuse=None): + """Uses Tensor2Tensor memory optimized RevNet block to build a RevNet. + + Args: + inputs: [NxHxWx3] tensor of input images to the model. 
+ hparams: HParams object that contains the following parameters, + in addition to the parameters contained in the basic_params1() object in + the common_hparams module: + num_channels_first - A Python list where each element represents the + depth of the first and third convolutional layers in the bottleneck + residual unit for a given block. + num_channels_second - A Python list where each element represents the + depth of the second convolutional layer in the bottleneck residual + unit for a given block. + num_layers_per_block - A Python list containing the number of RevNet + layers for each block. + first_batch_norm - A Python list containing booleans representing the + presence of a batch norm layer at the beginning of a given block. + strides - A Python list containing integers representing the stride of + the residual function for each block. + num_channels_init_block - An integer representing the number of channels + for the convolutional layer in the initial block. + dimension - A string (either "2d" or "3d") that decides if the RevNet is + 2-dimensional or 3-dimensional. + reuse: Whether to reuse the default variable scope. + + Returns: + [batch_size, hidden_dim] pre-logits tensor from the bottleneck RevNet. + """ + training = hparams.mode == tf_estimator.ModeKeys.TRAIN + with tf.variable_scope('RevNet', reuse=reuse): + x1, x2 = init(inputs, + num_channels=hparams.num_channels_init_block, + dim=hparams.dim, + kernel_size=hparams.init_kernel_size, + maxpool=hparams.init_maxpool, + stride=hparams.init_stride, + training=training) + for block_num in range(len(hparams.num_layers_per_block)): + block = {'depth': hparams.num_channels[block_num], + 'num_layers': hparams.num_layers_per_block[block_num], + 'first_batch_norm': hparams.first_batch_norm[block_num], + 'stride': hparams.strides[block_num], + 'bottleneck': hparams.bottleneck} + x1, x2 = unit(x1, x2, block_num, dim=hparams.dim, training=training, + **block) + pre_logits = final_block(x1, x2, dim=hparams.dim, training=training) + return pre_logits + + +@registry.register_model +class Revnet(t2t_model.T2TModel): + + def body(self, features): + return revnet(features['inputs'], self.hparams) + + +def revnet_base(): + """Default hparams for Revnet.""" + hparams = common_hparams.basic_params1() + hparams.add_hparam('num_channels', [64, 128, 256, 416]) + hparams.add_hparam('num_layers_per_block', [1, 1, 10, 1]) + hparams.add_hparam('bottleneck', True) + hparams.add_hparam('first_batch_norm', [False, True, True, True]) + hparams.add_hparam('init_stride', 2) + hparams.add_hparam('init_kernel_size', 7) + hparams.add_hparam('init_maxpool', True) + hparams.add_hparam('strides', [1, 2, 2, 2]) + hparams.add_hparam('num_channels_init_block', 64) + hparams.add_hparam('dim', '2d') + + # Variable init + hparams.initializer = 'normal_unit_scaling' + hparams.initializer_gain = 2. + + # Optimization + hparams.optimizer = 'Momentum' + hparams.optimizer_momentum_momentum = 0.9 + hparams.optimizer_momentum_nesterov = True + hparams.weight_decay = 1e-4 + hparams.clip_grad_norm = 0.0 + # (base_lr=0.1) * (batch_size=128*8 (on TPU, or 8 GPUs)=1024) / (256.) + hparams.learning_rate = 0.4 + hparams.learning_rate_decay_scheme = 'cosine' + # For image_imagenet224, 120k training steps, which effectively makes this a + # cosine decay (i.e. no cycles). 
+ hparams.learning_rate_cosine_cycle_steps = 120000 + + # Can run with a batch size of 128 with Problem ImageImagenet224 + hparams.batch_size = 128 + return hparams + + +@registry.register_hparams +def revnet_104(): + return revnet_base() + + +def revnet_cifar_base(): + """Tiny hparams suitable for CIFAR/etc.""" + hparams = revnet_base() + hparams.num_channels_init_block = 32 + hparams.first_batch_norm = [False, True, True] + hparams.init_stride = 1 + hparams.init_kernel_size = 3 + hparams.init_maxpool = False + hparams.strides = [1, 2, 2] + hparams.batch_size = 128 + hparams.weight_decay = 1e-4 + + hparams.learning_rate = 0.1 + hparams.learning_rate_cosine_cycle_steps = 5000 + return hparams + + +@registry.register_hparams +def revnet_38_cifar(): + hparams = revnet_cifar_base() + hparams.bottleneck = False + hparams.num_channels = [16, 32, 56] + hparams.num_layers_per_block = [2, 2, 2] + hparams.initializer = 'normal_unit_scaling' + hparams.initializer_gain = 1.5 + return hparams + + +@registry.register_hparams +def revnet_110_cifar(): + """Tiny hparams suitable for CIFAR/etc.""" + hparams = revnet_cifar_base() + hparams.bottleneck = False + hparams.num_channels = [16, 32, 64] + hparams.num_layers_per_block = [8, 8, 8] + return hparams + + +@registry.register_hparams +def revnet_164_cifar(): + """Tiny hparams suitable for CIFAR/etc.""" + hparams = revnet_cifar_base() + hparams.bottleneck = True + hparams.num_channels = [16, 32, 64] + hparams.num_layers_per_block = [8, 8, 8] + return hparams + + +@registry.register_ranged_hparams +def revnet_range(rhp): + """Hyperparameters for tuning revnet.""" + rhp.set_float('learning_rate', 0.05, 0.2, scale=rhp.LOG_SCALE) + rhp.set_float('weight_decay', 1e-5, 1e-3, scale=rhp.LOG_SCALE) + rhp.set_discrete('num_channels_init_block', [64, 128]) + return rhp diff --git a/tensor2tensor/models/revnet_test.py b/tensor2tensor/models/revnet_test.py new file mode 100644 index 000000000..234752514 --- /dev/null +++ b/tensor2tensor/models/revnet_test.py @@ -0,0 +1,116 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for Revnet.""" + +from tensor2tensor.models import revnet +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class RevnetTest(tf.test.TestCase): + + def testH(self): + rev_block_input = tf.random_uniform([1, 299, 299, 3]) + rev_block_output = revnet.downsample_bottleneck(rev_block_input, 256) + self.assertEqual(rev_block_output.get_shape().as_list(), [1, 299, 299, 256]) + + def testHStride(self): + rev_block_input = tf.random_uniform([2, 299, 299, 256]) + rev_block_output = revnet.downsample_bottleneck( + rev_block_input, 512, stride=2, scope='HStride') + self.assertEqual(rev_block_output.get_shape().as_list(), [2, 150, 150, 512]) + + def testInit(self): + images = tf.random_uniform([1, 299, 299, 3]) + x1, x2 = revnet.init(images, 32) + self.assertEqual(x1.get_shape().as_list(), [1, 74, 74, 16]) + self.assertEqual(x2.get_shape().as_list(), [1, 74, 74, 16]) + + def testInit3D(self): + images = tf.random_uniform([1, 299, 299, 299, 3]) + x1, x2 = revnet.init(images, 32, dim='3d', scope='init3d') + self.assertEqual(x1.get_shape().as_list(), [1, 74, 74, 74, 16]) + self.assertEqual(x2.get_shape().as_list(), [1, 74, 74, 74, 16]) + + def testUnit1(self): + x1 = tf.random_uniform([4, 74, 74, 256]) + x2 = tf.random_uniform([4, 74, 74, 256]) + x1, x2 = revnet.unit(x1, x2, block_num=1, depth=64, + first_batch_norm=True, num_layers=1) + self.assertEqual(x1.get_shape().as_list(), [4, 74, 74, 256]) + self.assertEqual(x2.get_shape().as_list(), [4, 74, 74, 256]) + + def testUnit2(self): + x1 = tf.random_uniform([4, 74, 74, 256]) + x2 = tf.random_uniform([4, 74, 74, 256]) + x1, x2 = revnet.unit(x1, x2, block_num=2, depth=128, + num_layers=1, stride=2) + self.assertEqual(x1.get_shape().as_list(), [4, 37, 37, 512]) + self.assertEqual(x2.get_shape().as_list(), [4, 37, 37, 512]) + + def testUnit3(self): + x1 = tf.random_uniform([1, 37, 37, 512]) + x2 = tf.random_uniform([1, 37, 37, 512]) + x1, x2 = revnet.unit(x1, x2, block_num=3, depth=256, + num_layers=10, stride=2) + self.assertEqual(x1.get_shape().as_list(), [1, 19, 19, 1024]) + self.assertEqual(x2.get_shape().as_list(), [1, 19, 19, 1024]) + + def testUnit4(self): + x1 = tf.random_uniform([1, 19, 19, 1024]) + x2 = tf.random_uniform([1, 19, 19, 1024]) + x1, x2 = revnet.unit(x1, x2, block_num=4, depth=416, + num_layers=1, stride=2) + self.assertEqual(x1.get_shape().as_list(), [1, 10, 10, 1664]) + self.assertEqual(x2.get_shape().as_list(), [1, 10, 10, 1664]) + + def testUnit3D(self): + x1 = tf.random_uniform([4, 74, 74, 74, 256]) + x2 = tf.random_uniform([4, 74, 74, 74, 256]) + x1, x2 = revnet.unit(x1, x2, block_num=5, depth=128, + num_layers=1, dim='3d', stride=2) + self.assertEqual(x1.get_shape().as_list(), [4, 37, 37, 37, 512]) + self.assertEqual(x2.get_shape().as_list(), [4, 37, 37, 37, 512]) + + def testFinalBlock(self): + x1 = tf.random_uniform([5, 10, 10, 1024]) + x2 = tf.random_uniform([5, 10, 10, 1024]) + logits = revnet.final_block(x1, x2) + self.assertEqual(logits.shape, [5, 1, 1, 2048]) + + def testFinalBlock3D(self): + x1 = tf.random_uniform([5, 10, 10, 10, 1024]) + x2 = tf.random_uniform([5, 10, 10, 10, 1024]) + logits = revnet.final_block(x1, x2, dim='3d', scope='FinalBlock3D') + self.assertEqual(logits.shape, [5, 1, 1, 1, 2048]) + + def testEndToEnd(self): + images = tf.random_uniform([1, 299, 299, 3]) + hparams = revnet.revnet_base() + hparams.mode = tf_estimator.ModeKeys.TRAIN + logits = revnet.revnet(images, hparams) + self.assertEqual(logits.shape, [1, 1, 1, 3328]) + + def 
testEndToEnd3D(self): + images = tf.random_uniform([1, 299, 299, 299, 3]) + hparams = revnet.revnet_base() + hparams.dim = '3d' + hparams.mode = tf_estimator.ModeKeys.TRAIN + logits = revnet.revnet(images, hparams) + self.assertEqual(logits.shape, [1, 1, 1, 1, 3328]) + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/models/shake_shake.py b/tensor2tensor/models/shake_shake.py new file mode 100644 index 000000000..378f86c97 --- /dev/null +++ b/tensor2tensor/models/shake_shake.py @@ -0,0 +1,224 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Shake-shake model for CIFAR.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import hparam +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +def shake_shake_skip_connection(x, output_filters, stride, is_training): + """Adds a residual connection to the filter x for the shake-shake model.""" + curr_filters = common_layers.shape_list(x)[-1] + if curr_filters == output_filters: + return x + stride_spec = [1, stride, stride, 1] + # Skip path 1. + path1 = tf.nn.avg_pool(x, [1, 1, 1, 1], stride_spec, "VALID") + path1 = tf.layers.conv2d( + path1, int(output_filters / 2), (1, 1), padding="SAME", name="path1_conv") + + # Skip path 2. + pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]] # First pad with 0's then crop. + path2 = tf.pad(x, pad_arr)[:, 1:, 1:, :] + path2 = tf.nn.avg_pool(path2, [1, 1, 1, 1], stride_spec, "VALID") + path2 = tf.layers.conv2d( + path2, int(output_filters / 2), (1, 1), padding="SAME", name="path2_conv") + + # Concat and apply BN. 
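+  # path2 is offset by one pixel relative to path1, so the two pooled paths
+  # sample complementary spatial positions when the stride is greater than 1.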
+ final_path = tf.concat(values=[path1, path2], axis=-1) + final_path = tf.layers.batch_normalization( + final_path, training=is_training, name="final_path_bn") + return final_path + + +def shake_shake_branch(x, output_filters, stride, rand_forward, rand_backward, + hparams): + """Building a 2 branching convnet.""" + is_training = hparams.mode == tf_estimator.ModeKeys.TRAIN + x = tf.nn.relu(x) + x = tf.layers.conv2d( + x, + output_filters, (3, 3), + strides=(stride, stride), + padding="SAME", + name="conv1") + x = tf.layers.batch_normalization(x, training=is_training, name="bn1") + x = tf.nn.relu(x) + x = tf.layers.conv2d(x, output_filters, (3, 3), padding="SAME", name="conv2") + x = tf.layers.batch_normalization(x, training=is_training, name="bn2") + if is_training: + x = x * rand_backward + tf.stop_gradient(x * rand_forward - + x * rand_backward) + else: + x *= 1.0 / hparams.shake_shake_num_branches + return x + + +def shake_shake_block(x, output_filters, stride, hparams): + """Builds a full shake-shake sub layer.""" + is_training = hparams.mode == tf_estimator.ModeKeys.TRAIN + batch_size = common_layers.shape_list(x)[0] + + # Generate random numbers for scaling the branches. + rand_forward = [ + tf.random_uniform( + [batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32) + for _ in range(hparams.shake_shake_num_branches) + ] + rand_backward = [ + tf.random_uniform( + [batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32) + for _ in range(hparams.shake_shake_num_branches) + ] + # Normalize so that all sum to 1. + total_forward = tf.add_n(rand_forward) + total_backward = tf.add_n(rand_backward) + rand_forward = [samp / total_forward for samp in rand_forward] + rand_backward = [samp / total_backward for samp in rand_backward] + zipped_rand = zip(rand_forward, rand_backward) + + branches = [] + for branch, (r_forward, r_backward) in enumerate(zipped_rand): + with tf.variable_scope("branch_{}".format(branch)): + b = shake_shake_branch(x, output_filters, stride, r_forward, r_backward, + hparams) + b = tf.nn.dropout(b, 1.0 - hparams.layer_prepostprocess_dropout) + branches.append(b) + res = shake_shake_skip_connection(x, output_filters, stride, is_training) + if hparams.shake_shake_concat: + concat_values = [res] + branches + concat_output = tf.concat(values=concat_values, axis=-1) + concat_output = tf.nn.relu(concat_output) + concat_output = tf.layers.conv2d( + concat_output, output_filters, (1, 1), name="concat_1x1") + concat_output = tf.layers.batch_normalization( + concat_output, training=is_training, name="concat_bn") + return concat_output + else: + return res + tf.add_n(branches) + + +def shake_shake_layer(x, output_filters, num_blocks, stride, hparams): + """Builds many sub layers into one full layer.""" + for block_num in range(num_blocks): + curr_stride = stride if (block_num == 0) else 1 + with tf.variable_scope("layer_{}".format(block_num)): + x = shake_shake_block(x, output_filters, curr_stride, hparams) + return x + + +@registry.register_model +class ShakeShake(t2t_model.T2TModel): + """Implements the Shake-Shake architecture. + + From + This is intended to match the CIFAR-10 version, and correspond to + "Shake-Shake-Batch" in Table 1. 
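+
+  In shake_shake_branch each branch is scaled with
+  `x * rand_backward + stop_gradient(x * rand_forward - x * rand_backward)`:
+  this evaluates to `x * rand_forward` in the forward pass, while the
+  gradient is computed as if the scale had been `rand_backward`, since the
+  stop_gradient term contributes no gradient. At eval time each branch is
+  simply scaled by `1 / shake_shake_num_branches`.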
+ """ + + def body(self, features): + hparams = self._hparams + is_training = hparams.mode == tf_estimator.ModeKeys.TRAIN + inputs = features["inputs"] + assert (hparams.num_hidden_layers - 2) % 6 == 0 + assert hparams.hidden_size % 16 == 0 + k = hparams.hidden_size // 16 + n = (hparams.num_hidden_layers - 2) // 6 + x = inputs + + x = tf.layers.conv2d(x, 16, (3, 3), padding="SAME", name="init_conv") + x = tf.layers.batch_normalization(x, training=is_training, name="init_bn") + with tf.variable_scope("L1"): + x = shake_shake_layer(x, 16 * k, n, 1, hparams) + with tf.variable_scope("L2"): + x = shake_shake_layer(x, 32 * k, n, 2, hparams) + with tf.variable_scope("L3"): + x = shake_shake_layer(x, 64 * k, n, 2, hparams) + x = tf.nn.relu(x) + + # Global avg on [1, 2] (we're nhwc) and dense to num_classes done by top. + return x + + +@registry.register_hparams +def shakeshake_small(): + """Parameters for CIFAR-10. Gets to about 96% accuracy@700K steps, 1 GPU.""" + hparams = common_hparams.basic_params1() + hparams.batch_size = 128 + hparams.hidden_size = 32 + hparams.layer_prepostprocess_dropout = 0.0 + hparams.dropout = 0 + hparams.label_smoothing = 0.0 + hparams.clip_grad_norm = 0.0 # No clipping for now, one can also try 2.0. + hparams.num_hidden_layers = 26 + hparams.learning_rate_decay_scheme = "cosine" + # Model should be run for 700000 steps with batch size 128 (~1800 epochs) + hparams.learning_rate_cosine_cycle_steps = 700000 + hparams.learning_rate = 0.2 + hparams.learning_rate_warmup_steps = 100 # That's basically unused. + hparams.initializer = "uniform_unit_scaling" + hparams.initializer_gain = 1.0 + hparams.weight_decay = 1e-4 + hparams.optimizer = "Momentum" + hparams.optimizer_momentum_momentum = 0.9 + hparams.add_hparam("shake_shake_num_branches", 2) + hparams.add_hparam("shake_shake_concat", int(False)) + return hparams + + +@registry.register_hparams +def shake_shake_quick(): + hparams = shakeshake_small() + hparams.optimizer = "adam" + hparams.learning_rate_cosine_cycle_steps = 1000 + hparams.learning_rate = 0.5 + hparams.batch_size = 100 + return hparams + + +@registry.register_hparams +def shakeshake_big(): + hparams = shakeshake_small() + hparams.layer_prepostprocess_dropout = 0.0 + hparams.hidden_size = 96 + return hparams + + +@registry.register_hparams +def shakeshake_tpu(): + hparams = shakeshake_big() + hparams.learning_rate_cosine_cycle_steps = 180000 + hparams.learning_rate = 0.6 + return hparams + + +@registry.register_attack_params +def shake_shake_fgsm(): + aparams = hparam.HParams() + aparams.attack = "fgsm" + aparams.attack_epsilons = [(i+1) * 0.1 for i in range(12)] + aparams.add_hparam("clip_min", 0.0) + aparams.add_hparam("clip_max", 255.0) + return aparams diff --git a/tensor2tensor/models/slicenet.py b/tensor2tensor/models/slicenet.py index a7e2623cc..e20786f31 100644 --- a/tensor2tensor/models/slicenet.py +++ b/tensor2tensor/models/slicenet.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -16,38 +17,21 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function - -# Dependency imports - -from six.moves import xrange # pylint: disable=redefined-builtin +from six.moves import range # pylint: disable=redefined-builtin from six.moves import zip # pylint: disable=redefined-builtin -from tensor2tensor.models import common_attention -from tensor2tensor.models import common_hparams -from tensor2tensor.models import common_layers +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import modalities from tensor2tensor.utils import registry from tensor2tensor.utils import t2t_model -import tensorflow as tf - +import tensorflow.compat.v1 as tf -def get_norm(hparams): - """Get the normalizer function.""" - if hparams.normalizer_fn == "layer": - return lambda x, name: common_layers.layer_norm( # pylint: disable=g-long-lambda - x, hparams.hidden_size, name=name) - if hparams.normalizer_fn == "batch": - return tf.layers.batch_normalization - if hparams.normalizer_fn == "noam": - return common_layers.noam_norm - if hparams.normalizer_fn == "none": - return lambda x, name: x - raise ValueError("Parameter normalizer_fn must be one of: 'layer', 'batch'," - "'noam', 'none'.") - -def attention(targets_shifted, inputs_encoded, norm_fn, hparams, train, - bias=None): +# pylint: disable=unused-argument +def attention(targets_shifted, inputs_encoded, norm_fn, hparams, bias=None): """Complete attention layer with preprocessing.""" separabilities = [hparams.separability, hparams.separability] if hparams.separability < 0: @@ -63,15 +47,17 @@ def attention(targets_shifted, inputs_encoded, norm_fn, hparams, train, targets_timed = tf.squeeze(targets_timed, 2) target_shape = tf.shape(targets_timed) targets_segment = tf.zeros([target_shape[0], target_shape[1]]) - target_attention_bias = common_attention.attention_bias( - targets_segment, targets_segment, lower_triangular=True) + target_attention_bias = common_attention.attention_bias_lower_triangle( + target_shape[1]) + inputs_encoded = common_layers.flatten4d3d(inputs_encoded) + # TODO(jbaccash): use input bias parameter. This code seems to assume fixed + # size inputs. 
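+    # A zero bias lets every target position attend to every input position,
+    # so padded input positions are not masked out here.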
inputs_attention_bias = tf.zeros([ tf.shape(inputs_encoded)[0], hparams.num_heads, tf.shape(targets_segment)[1], tf.shape(inputs_encoded)[1] ]) - attention_dropout = hparams.attention_dropout * tf.to_float(train) qv = common_attention.multihead_attention( targets_timed, None, @@ -80,9 +66,8 @@ def attention(targets_shifted, inputs_encoded, norm_fn, hparams, train, hparams.hidden_size, hparams.hidden_size, hparams.num_heads, - attention_dropout, - name="self_attention", - summaries=False) + hparams.attention_dropout, + name="self_attention") qv = common_attention.multihead_attention( qv, inputs_encoded, @@ -91,18 +76,14 @@ def attention(targets_shifted, inputs_encoded, norm_fn, hparams, train, hparams.hidden_size, hparams.hidden_size, hparams.num_heads, - attention_dropout, - name="encdec_attention", - summaries=False) + hparams.attention_dropout, + name="encdec_attention") return tf.expand_dims(qv, 2) - elif hparams.attention_type == "simple": - targets_with_attention = common_layers.simple_attention( - targets_timed, inputs_encoded, bias=bias, summaries=False) - return norm_fn(targets_shifted + targets_with_attention, name="attn_norm") + else: + raise ValueError("Unsupported attention_type: %s" % hparams.attention_type) -def multi_conv_res(x, padding, name, layers, hparams, train, - mask=None, source=None): +def multi_conv_res(x, padding, name, layers, hparams, mask=None, source=None): """A stack of separable convolution blocks with residual connections.""" with tf.variable_scope(name): padding_bias = None @@ -130,8 +111,13 @@ def multi_conv_res(x, padding, name, layers, hparams, train, hparams.separability - i for i in reversed(range(len(dilations_and_kernels2))) ] - norm_fn = get_norm(hparams) - for layer in xrange(layers): + + def norm_fn(x, name): + with tf.variable_scope(name, default_name="norm"): + return common_layers.apply_norm( + x, hparams.norm_type, hparams.hidden_size, hparams.norm_epsilon) + + for layer in range(layers): with tf.variable_scope("layer_%d" % layer): y = common_layers.subseparable_conv_block( x, @@ -152,10 +138,10 @@ def multi_conv_res(x, padding, name, layers, hparams, train, separabilities=separabilities2, name="residual2") + y if source is not None and hparams.attention_type != "none": - x += attention(x, source, norm_fn, hparams, train, bias=padding_bias) + x += attention(x, source, norm_fn, hparams, bias=padding_bias) if mask is not None: x *= mask - return tf.nn.dropout(x, 1.0 - hparams.dropout * tf.to_float(train)) + return tf.nn.dropout(x, 1.0 - hparams.dropout) def rank_loss(sentence_emb, image_emb, margin=0.2): @@ -188,38 +174,31 @@ def similarity_cost(inputs_encoded, targets_encoded): return rank_loss(x, y) -def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, - hparams, train): +def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, hparams): """Middle part of slicenet, connecting encoder and decoder.""" - norm_fn = get_norm(hparams) + + def norm_fn(x, name): + with tf.variable_scope(name, default_name="norm"): + return common_layers.apply_norm(x, hparams.norm_type, hparams.hidden_size, + hparams.norm_epsilon) # Flatten targets and embed target_space_id. targets_flat = tf.expand_dims(common_layers.flatten4d3d(targets), axis=2) target_space_emb = tf.tile(target_space_emb, [tf.shape(targets_flat)[0], 1, 1, 1]) - # Calculate similarity loss (but don't run if not needed). 
- if len(hparams.problems) > 1 and hparams.sim_loss_mult > 0.00001: - targets_timed = common_layers.add_timing_signal(targets_flat) - extra_layers = int(hparams.num_hidden_layers * 1.5) - with tf.variable_scope(tf.get_variable_scope(), reuse=True): - targets_encoded = multi_conv_res(targets_timed, "SAME", "encoder", - extra_layers, hparams, train) - with tf.variable_scope("similarity_loss"): - similarity_loss = similarity_cost(inputs_encoded, targets_encoded) - similarity_loss *= hparams.sim_loss_mult - else: - similarity_loss = 0.0 - # Use attention from each target to look at input and retrieve. - targets_shifted = common_layers.shift_left( + targets_shifted = common_layers.shift_right( targets_flat, pad_value=target_space_emb) if hparams.attention_type == "none": targets_with_attention = tf.zeros_like(targets_shifted) else: inputs_padding_bias = (1.0 - mask) * -1e9 # Bias to not attend to padding. targets_with_attention = attention( - targets_shifted, inputs_encoded, norm_fn, hparams, train, + targets_shifted, + inputs_encoded, + norm_fn, + hparams, bias=inputs_padding_bias) # Positional targets: merge attention and raw. @@ -232,7 +211,7 @@ def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, separability=4, name="targets_merge") - return targets_merged, similarity_loss + return targets_merged, 0.0 def embed_target_space(target_space_id, hidden_size): @@ -247,25 +226,31 @@ def embedding_to_padding(emb): return tf.to_float(tf.equal(emb_sum, 0.0)) -def slicenet_internal(inputs, targets, target_space, - problem_idx, hparams, train): +def slicenet_internal(inputs, targets, target_space, hparams, run_decoder=True): """The slicenet model, main step used for training.""" with tf.variable_scope("slicenet"): + # Project to hidden size if necessary + if inputs.get_shape().as_list()[-1] != hparams.hidden_size: + inputs = common_layers.conv_block( + inputs, + hparams.hidden_size, [((1, 1), (3, 3))], + first_relu=False, + padding="SAME", + force2d=True) + # Flatten inputs and encode. inputs = tf.expand_dims(common_layers.flatten4d3d(inputs), axis=2) inputs_mask = 1.0 - embedding_to_padding(inputs) inputs = common_layers.add_timing_signal(inputs) # Add position info. target_space_emb = embed_target_space(target_space, hparams.hidden_size) extra_layers = int(hparams.num_hidden_layers * 1.5) - inputs_encoded = multi_conv_res(inputs, "SAME", "encoder", extra_layers, - hparams, train, mask=inputs_mask) - target_modality_name = hparams.problems[problem_idx].target_modality.name - if "class_label_modality" in target_modality_name: - # If we're just predicing a class, there is no use for a decoder. + inputs_encoded = multi_conv_res( + inputs, "SAME", "encoder", extra_layers, hparams, mask=inputs_mask) + if not run_decoder: return inputs_encoded # Do the middle part. decoder_start, similarity_loss = slicenet_middle( - inputs_encoded, targets, target_space_emb, inputs_mask, hparams, train) + inputs_encoded, targets, target_space_emb, inputs_mask, hparams) # Decode. 
decoder_final = multi_conv_res( decoder_start, @@ -273,7 +258,6 @@ def slicenet_internal(inputs, targets, target_space, "decoder", hparams.num_hidden_layers, hparams, - train, mask=inputs_mask, source=inputs_encoded) return decoder_final, tf.reduce_mean(similarity_loss) @@ -282,10 +266,17 @@ def slicenet_internal(inputs, targets, target_space, @registry.register_model class SliceNet(t2t_model.T2TModel): - def model_fn_body(self, features, train): - return slicenet_internal(features["inputs"], features["targets"], - features["target_space_id"], self._problem_idx, - self._hparams, train) + def body(self, features): + target_modality = self._problem_hparams.modality["targets"] + # If we're just predicting a class, there is no use for a decoder. + run_decoder = target_modality != modalities.ModalityType.CLASS_LABEL + return slicenet_internal( + features["inputs"], + features["targets"], + features["target_space_id"], + self._hparams, + run_decoder=run_decoder) + _KERNEL_SCHEMES = { "3.3.3.3": [(3, 1), (3, 1), (3, 1), (3, 1)], @@ -303,7 +294,7 @@ def model_fn_body(self, features, train): } -@registry.register_hparams("slicenet1") +@registry.register_hparams("slicenet_1") def slicenet_params1(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() @@ -316,8 +307,8 @@ def slicenet_params1(): hparams.num_hidden_layers = 4 hparams.kernel_height = 3 hparams.kernel_width = 1 - hparams.add_hparam("normalizer_fn", "layer") # New ones are added like this. - hparams.learning_rate_decay_scheme = "exp50k" + hparams.norm_type = "layer" + hparams.learning_rate_decay_scheme = "exp" hparams.learning_rate = 0.05 hparams.learning_rate_warmup_steps = 3000 hparams.initializer_gain = 1.0 @@ -327,29 +318,25 @@ def slicenet_params1(): hparams.optimizer_adam_epsilon = 1e-6 hparams.optimizer_adam_beta1 = 0.85 hparams.optimizer_adam_beta2 = 0.997 - hparams.add_hparam("large_kernel_size", 15) + hparams.add_hparam("large_kernel_size", 15) # New ones are added like this. hparams.add_hparam("separability", -2) # A dilation scheme, one of _DILATION_SCHEMES. hparams.add_hparam("dilation_scheme", "1.1.1.1") # A kernel scheme, one of _KERNEL_SCHEMES; overrides large_kernel_size. hparams.add_hparam("kernel_scheme", "3.7.15.31") hparams.add_hparam("audio_compression", 8) - hparams.add_hparam("moe_n1", 32) - hparams.add_hparam("moe_n2", 0) - hparams.add_hparam("moe_loss_coef", 1e-2) - hparams.add_hparam("imagenet_use_2d", int(True)) # attention-related flags - hparams.add_hparam("attention_type", "simple") + hparams.add_hparam("attention_type", "transformer") hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) hparams.add_hparam("sim_loss_mult", 0.0) # Try 10.0 for experiments. 
hparams.add_hparam("attention_dropout", 0.2) - hparams.shared_embedding_and_softmax_weights = int(True) + hparams.shared_embedding_and_softmax_weights = True return hparams -@registry.register_hparams("slicenet1noam") +@registry.register_hparams("slicenet_1noam") def slicenet_params1_noam(): """Version with Noam's decay scheme.""" hparams = slicenet_params1() @@ -363,15 +350,13 @@ def slicenet_params1_noam(): return hparams -@registry.register_hparams("slicenet1tiny") +@registry.register_hparams("slicenet_1tiny") def slicenet_params1_tiny(): """Version for fast local runs.""" hparams = slicenet_params1() - hparams.attention_type = "simple" hparams.separability = 0 hparams.hidden_size = 128 hparams.num_hidden_layers = 2 - hparams.moe_n1 = 2 hparams.batch_size = 512 hparams.learning_rate_warmup_steps = 200 return hparams @@ -381,10 +366,6 @@ def slicenet_params1_tiny(): def slicenet_range1(ranged_hparams): """Small range of hyperparameters.""" rhp = ranged_hparams - - hparams = slicenet_params1() - common_hparams.fill_ranged_hparams_from_hparams(hparams, rhp) - rhp.set_float("clip_grad_norm", 1.0, 10.0, scale=rhp.LOG_SCALE) rhp.set_float("learning_rate", 0.02, 1.0, scale=rhp.LOG_SCALE) rhp.set_float("optimizer_adam_beta2", 0.995, 0.998) diff --git a/tensor2tensor/models/slicenet_test.py b/tensor2tensor/models/slicenet_test.py index bbeb3a284..944a78234 100644 --- a/tensor2tensor/models/slicenet_test.py +++ b/tensor2tensor/models/slicenet_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,38 +18,62 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function - -# Dependency imports - import numpy as np -from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.data_generators import cifar # pylint: disable=unused-import +from tensor2tensor.data_generators import mscoco # pylint: disable=unused-import +from tensor2tensor.layers import modalities # pylint: disable=unused-import from tensor2tensor.models import slicenet +from tensor2tensor.utils import registry -import tensorflow as tf +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator class SliceNetTest(tf.test.TestCase): def testSliceNet(self): - x = np.random.random_integers(0, high=255, size=(3, 5, 4, 3)) - y = np.random.random_integers(0, high=9, size=(3, 5, 1, 1)) + x = np.random.randint(256, size=(3, 5, 5, 3)) + y = np.random.randint(10, size=(3, 5, 1, 1)) hparams = slicenet.slicenet_params1_tiny() - p_hparams = problem_hparams.image_cifar10(hparams) - hparams.problems = [p_hparams] + hparams.add_hparam("data_dir", "") + problem = registry.problem("image_cifar10") + p_hparams = problem.get_hparams(hparams) + hparams.problem_hparams = p_hparams with self.test_session() as session: features = { "inputs": tf.constant(x, dtype=tf.int32), "targets": tf.constant(y, dtype=tf.int32), "target_space_id": tf.constant(1, dtype=tf.int32), } - model = slicenet.SliceNet(hparams, p_hparams) - sharded_logits, _, _ = model.model_fn(features, True) - logits = tf.concat(sharded_logits, 0) + model = slicenet.SliceNet(hparams, tf_estimator.ModeKeys.TRAIN, + p_hparams) + logits, _ = model(features) session.run(tf.global_variables_initializer()) res = session.run(logits) self.assertEqual(res.shape, (3, 1, 1, 1, 10)) + def 
testSliceNetImageToText(self): + x = np.random.randint(256, size=(3, 5, 5, 3)) + y = np.random.randint(10, size=(3, 5, 1, 1)) + hparams = slicenet.slicenet_params1_tiny() + hparams.add_hparam("data_dir", "") + problem = registry.problem("image_ms_coco_characters") + p_hparams = problem.get_hparams(hparams) + hparams.problem_hparams = p_hparams + with self.test_session() as session: + features = { + "inputs": tf.constant(x, dtype=tf.int32), + "targets": tf.constant(y, dtype=tf.int32), + "target_space_id": tf.constant(1, dtype=tf.int32), + } + model = slicenet.SliceNet(hparams, tf_estimator.ModeKeys.TRAIN, + p_hparams) + logits, _ = model(features) + session.run(tf.global_variables_initializer()) + res = session.run(logits) + self.assertEqual(res.shape, (3, 5, 1, 1, 258)) + if __name__ == "__main__": tf.test.main() diff --git a/tensor2tensor/models/text_cnn.py b/tensor2tensor/models/text_cnn.py new file mode 100644 index 000000000..ee6434d3e --- /dev/null +++ b/tensor2tensor/models/text_cnn.py @@ -0,0 +1,112 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""TextCNN (see Convolutional Neural Networks for Sentence Classification).""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf + + +@registry.register_model +class TextCNN(t2t_model.T2TModel): + """Text CNN.""" + + def body(self, features): + """TextCNN main model_fn. + + Args: + features: Map of features to the model. Should contain the following: + "inputs": Text inputs. + [batch_size, input_length, 1, hidden_dim]. + "targets": Target encoder outputs. + [batch_size, 1, 1, hidden_dim] + Returns: + Final encoder representation. 
[batch_size, 1, 1, hidden_dim] + """ + hparams = self._hparams + inputs = features["inputs"] + + xshape = common_layers.shape_list(inputs) + + vocab_size = xshape[3] + inputs = tf.reshape(inputs, [xshape[0], xshape[1], xshape[3], xshape[2]]) + + pooled_outputs = [] + for _, filter_size in enumerate(hparams.filter_sizes): + with tf.name_scope("conv-maxpool-%s" % filter_size): + filter_shape = [filter_size, vocab_size, 1, hparams.num_filters] + filter_var = tf.Variable( + tf.truncated_normal(filter_shape, stddev=0.1), name="W") + filter_bias = tf.Variable( + tf.constant(0.1, shape=[hparams.num_filters]), name="b") + conv = tf.nn.conv2d( + inputs, + filter_var, + strides=[1, 1, 1, 1], + padding="VALID", + name="conv") + conv_outputs = tf.nn.relu( + tf.nn.bias_add(conv, filter_bias), name="relu") + pooled = tf.math.reduce_max( + conv_outputs, axis=1, keepdims=True, name="max") + pooled_outputs.append(pooled) + + num_filters_total = hparams.num_filters * len(hparams.filter_sizes) + h_pool = tf.concat(pooled_outputs, 3) + h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total]) + + # Add dropout + output = tf.nn.dropout(h_pool_flat, 1 - hparams.output_dropout) + output = tf.reshape(output, [-1, 1, 1, num_filters_total]) + + return output + + +@registry.register_hparams +def text_cnn_base(): + """Set of hyperparameters.""" + hparams = common_hparams.basic_params1() + hparams.batch_size = 4096 + hparams.max_length = 256 + hparams.clip_grad_norm = 0. # i.e. no gradient clipping + hparams.optimizer_adam_epsilon = 1e-9 + hparams.learning_rate_schedule = "legacy" + hparams.learning_rate_decay_scheme = "noam" + hparams.learning_rate = 0.1 + hparams.learning_rate_warmup_steps = 4000 + hparams.initializer_gain = 1.0 + hparams.num_hidden_layers = 6 + hparams.initializer = "uniform_unit_scaling" + hparams.weight_decay = 0.0 + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.98 + hparams.num_sampled_classes = 0 + hparams.label_smoothing = 0.1 + hparams.shared_embedding_and_softmax_weights = True + hparams.symbol_modality_num_shards = 16 + + # Add new ones like this. + hparams.add_hparam("filter_sizes", [2, 3, 4, 5]) + hparams.add_hparam("num_filters", 128) + hparams.add_hparam("output_dropout", 0.4) + return hparams diff --git a/tensor2tensor/models/transformer.py b/tensor2tensor/models/transformer.py index 379210d67..2bc8f33d1 100644 --- a/tensor2tensor/models/transformer.py +++ b/tensor2tensor/models/transformer.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,245 +13,1772 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""transformer (attention). +"""Transformer model from "Attention Is All You Need". -encoder: [Self-Attention, Feed-forward] x n -decoder: [Self-Attention, Source-Target-Attention, Feed-forward] x n +The Transformer model consists of an encoder and a decoder. Both are stacks +of self-attention layers followed by feed-forward layers. This model yields +good results on a number of problems, especially in NLP and machine translation. +See "Attention Is All You Need" (https://arxiv.org/abs/1706.03762) for the full +description of the model and the results obtained with its early version. 
""" from __future__ import absolute_import from __future__ import division from __future__ import print_function +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.data_generators import librispeech +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import modalities +from tensor2tensor.layers import transformer_layers +from tensor2tensor.layers import transformer_memory +from tensor2tensor.utils import beam_search +from tensor2tensor.utils import expert_utils +from tensor2tensor.utils import mlperf_log +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model -import copy +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator -# Dependency imports +# pylint: disable=g-direct-tensorflow-import +from tensorflow.python.ops import inplace_ops +from tensorflow.python.util import nest +# pylint: enable=g-direct-tensorflow-import -from six.moves import xrange # pylint: disable=redefined-builtin +# Alias some commonly reused layers, here and elsewhere. +transformer_prepare_encoder = transformer_layers.transformer_prepare_encoder +transformer_encoder = transformer_layers.transformer_encoder +transformer_ffn_layer = transformer_layers.transformer_ffn_layer -from tensor2tensor.models import common_attention -from tensor2tensor.models import common_hparams -from tensor2tensor.models import common_layers -from tensor2tensor.utils import registry -from tensor2tensor.utils import t2t_model -import tensorflow as tf +def transformer_encode(encoder_function, inputs, target_space, hparams, + attention_weights=None, features=None, losses=None, + prepare_encoder_fn=None, **kwargs): + """Encode transformer inputs. + + Args: + encoder_function: the encoder function + inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which + will be flattened along the two spatial dimensions. + target_space: scalar, target space ID. + hparams: hyperparameters for model. + attention_weights: weight to store attention to. + features: optionally pass the entire features dictionary as well. This is + needed now for "packed" datasets. + losses: optional list onto which to append extra training losses + prepare_encoder_fn: optional, alternative to transformer_prepare_encoder. + **kwargs: additional arguments to pass to encoder_function + + Returns: + Tuple of: + encoder_output: Encoder representation. + [batch_size, input_length, hidden_dim] + encoder_decoder_attention_bias: Bias and mask weights for + encoder-decoder attention. [batch_size, input_length] + """ + inputs = common_layers.flatten4d3d(inputs) + + if not prepare_encoder_fn: + prepare_encoder_fn = transformer_prepare_encoder + encoder_input, self_attention_bias, encoder_decoder_attention_bias = ( + prepare_encoder_fn( + inputs, target_space, hparams, features=features)) + + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT, + value=hparams.layer_prepostprocess_dropout, + hparams=hparams) + + encoder_input = tf.nn.dropout(encoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + + attn_bias_for_padding = None + # Otherwise the encoder will just use encoder_self_attention_bias. 
+ if hparams.unidirectional_encoder: + attn_bias_for_padding = encoder_decoder_attention_bias + + encoder_output = encoder_function( + encoder_input, + self_attention_bias, + hparams, + nonpadding=features_to_nonpadding(features, "inputs"), + save_weights_to=attention_weights, + make_image_summary=not common_layers.is_xla_compiled(), + losses=losses, + attn_bias_for_padding=attn_bias_for_padding, + **kwargs) + + return encoder_output, encoder_decoder_attention_bias + + +def transformer_decode(decoder_function, + decoder_input, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + attention_weights=None, + cache=None, + decode_loop_step=None, + nonpadding=None, + losses=None, + **kwargs): + """Decode Transformer outputs from encoder representation. + + Args: + decoder_function: the decoder function + decoder_input: inputs to bottom of the model. [batch_size, decoder_length, + hidden_dim] + encoder_output: Encoder representation. [batch_size, input_length, + hidden_dim] + encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder + attention. [batch_size, input_length] + decoder_self_attention_bias: Bias and mask weights for decoder + self-attention. [batch_size, decoder_length] + hparams: hyperparameters for model. + attention_weights: weight to store attention to. + cache: dict, containing tensors which are the results of previous + attentions, used for fast decoding. + decode_loop_step: An integer, step number of the decoding loop. Only used + for inference on TPU. + nonpadding: optional Tensor with shape [batch_size, decoder_length] + losses: optional list onto which to append extra training losses + **kwargs: additional arguments to pass to decoder_function + + Returns: + Final decoder representation. [batch_size, decoder_length, hidden_dim] + """ + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT, + value=hparams.layer_prepostprocess_dropout, + hparams=hparams) + decoder_input = tf.nn.dropout(decoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + + decoder_output = decoder_function( + decoder_input, + encoder_output, + decoder_self_attention_bias, + encoder_decoder_attention_bias, + hparams, + cache=cache, + decode_loop_step=decode_loop_step, + nonpadding=nonpadding, + save_weights_to=attention_weights, + losses=losses, + **kwargs) + + if (common_layers.is_xla_compiled() and + hparams.mode == tf_estimator.ModeKeys.TRAIN): + # TPU does not react kindly to extra dimensions. + # TODO(noam): remove this once TPU is more forgiving of extra dims. + return decoder_output + else: + # Expand since t2t expects 4d tensors. + return tf.expand_dims(decoder_output, axis=2) @registry.register_model class Transformer(t2t_model.T2TModel): """Attention net. See file docstring.""" - def model_fn_body(self, features, train): - # Remove dropout if not training - hparams = copy.copy(self._hparams) - if not train: - hparams.attention_dropout = 0. - hparams.relu_dropout = 0. - hparams.residual_dropout = 0. + def __init__(self, *args, **kwargs): + super(Transformer, self).__init__(*args, **kwargs) + self.attention_weights = {} # For visualizing attention heads. 
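A hedged sketch of how the hooks set up in this constructor can be used (the subclass name and `my_decoder_stack` are hypothetical, and `attention_weights` is only populated after a forward pass): the decoder stack is swappable via the `_decoder_function` attribute assigned just below, and the collected attention maps have shape `[batch, num_heads, query_length, memory_length]`.

```python
# Illustrative sketch only: swap in a custom decoder stack and inspect the
# attention maps collected for visualization. `my_decoder_stack` is a
# hypothetical stand-in with the same signature as transformer_decoder.
my_decoder_stack = transformer_decoder  # stand-in; replace with a custom stack


@registry.register_model
class TransformerWithCustomDecoder(Transformer):

  def __init__(self, *args, **kwargs):
    super(TransformerWithCustomDecoder, self).__init__(*args, **kwargs)
    self._decoder_function = my_decoder_stack

# After one forward pass (logits, _ = model(features)), attention maps can be
# read from model.attention_weights, keyed by the variable-scope name of each
# attention op, each [batch, num_heads, query_length, memory_length].
```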
+ self.recurrent_memory_by_layer = None # Override to enable recurrent memory + self._encoder_function = transformer_encoder + self._decoder_function = transformer_decoder + self._init_cache_fn = _init_transformer_cache + self._prepare_encoder_fn = transformer_prepare_encoder + self._prepare_decoder_fn = transformer_prepare_decoder + + def encode(self, inputs, target_space, hparams, features=None, losses=None): + """Encode transformer inputs, see transformer_encode.""" + return transformer_encode( + self._encoder_function, inputs, target_space, hparams, + attention_weights=self.attention_weights, + features=features, losses=losses, + prepare_encoder_fn=self._prepare_encoder_fn) + + def decode(self, + decoder_input, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + cache=None, + decode_loop_step=None, + nonpadding=None, + losses=None, + **kwargs): + """Decode Transformer outputs, see transformer_decode.""" + return transformer_decode( + self._decoder_function, decoder_input, encoder_output, + encoder_decoder_attention_bias, decoder_self_attention_bias, + hparams, attention_weights=self.attention_weights, cache=cache, + decode_loop_step=decode_loop_step, nonpadding=nonpadding, losses=losses, + **kwargs) + + def body(self, features): + """Transformer main model_fn. + + Args: + features: Map of features to the model. Should contain the following: + "inputs": Transformer inputs. [batch_size, input_length, 1, + hidden_dim]. + "targets": Target decoder outputs. [batch_size, decoder_length, 1, + hidden_dim] + "target_space_id": A scalar int from data_generators.problem.SpaceID. + + Returns: + Final decoder representation. [batch_size, decoder_length, hidden_dim] + """ + hparams = self._hparams + + losses = [] + + if self.has_input: + inputs = self._prepare_inputs_for_body(features) + target_space = features["target_space_id"] + encoder_output, encoder_decoder_attention_bias = self.encode( + inputs, target_space, hparams, features=features, losses=losses) + else: + encoder_output, encoder_decoder_attention_bias = (None, None) + targets = features["targets"] - inputs = features.get("inputs") - target_space = features.get("target_space_id") + targets_shape = common_layers.shape_list(targets) + targets = common_layers.flatten4d3d(targets) + decoder_input, decoder_self_attention_bias = self._prepare_decoder_fn( + targets, hparams, features=features) + + # Not all subclasses of Transformer support keyword arguments related to + # recurrent memory, so only pass these arguments if memory is enabled. + decode_kwargs = {} + if self.recurrent_memory_by_layer is not None: + # TODO(kitaev): The chunk_number feature currently has the same shape as + # "targets", but this is only for the purposes of sharing sharding code. + # In fact every token within an example must have the same chunk number. 
+ chunk_number_each_token = tf.squeeze(features["chunk_number"], (-1, -2)) + chunk_number_each_example = chunk_number_each_token[:, 0] + # Uncomment the code below to verify that tokens within a batch share the + # same chunk number: + # with tf.control_dependencies([ + # tf.assert_equal(chunk_number_each_token, + # chunk_number_each_example[:, None]) + # ]): + # chunk_number_each_example = tf.identity(chunk_number_each_example) + decode_kwargs = dict( + recurrent_memory_by_layer=self.recurrent_memory_by_layer, + chunk_number=chunk_number_each_example, + ) + decoder_output = self.decode( + decoder_input, + encoder_output, + encoder_decoder_attention_bias, + decoder_self_attention_bias, + hparams, + nonpadding=features_to_nonpadding(features, "targets"), + losses=losses, + **decode_kwargs + ) + expected_attentions = features.get("expected_attentions") + if expected_attentions is not None: + attention_loss = common_attention.encoder_decoder_attention_loss( + expected_attentions, self.attention_weights, + hparams.expected_attention_loss_type, + hparams.expected_attention_loss_multiplier) + return decoder_output, {"attention_loss": attention_loss} + + ret = tf.reshape(decoder_output, targets_shape) + if losses: + return ret, {"extra_loss": tf.add_n(losses)} + else: + return ret + + def _prepare_inputs_for_body(self, features): + """Prepare inputs for body. + + Args: + features: Map of string to model features. Should contain + "inputs": Transformer inputs. [batch_size, input_length, 1, + hidden_dim]. + + Returns: + Inputs which will be passed to the model. [batch_size, input_length, 1, + hidden_dim] + """ + return features["inputs"] + + def _greedy_infer(self, features, decode_length, use_tpu=False): + """Fast version of greedy decoding. + + Args: + features: an map of string to `Tensor` + decode_length: an integer. How many additional timesteps to decode. + use_tpu: A bool. Whether to build the inference graph for TPU. + + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } + + Raises: + NotImplementedError: If there are multiple data shards. + """ + # For real-valued modalities use the slow decode path for now. + if (self._target_modality_is_real or + self._hparams.self_attention_type != "dot_product"): + return super(Transformer, self)._greedy_infer(features, decode_length) + with tf.variable_scope(self.name): + if use_tpu: + return self._fast_decode_tpu(features, decode_length) + return self._fast_decode(features, decode_length) + + def _beam_decode(self, + features, + decode_length, + beam_size, + top_beams, + alpha, + use_tpu=False): + """Beam search decoding. + + Args: + features: an map of string to `Tensor` + decode_length: an integer. How many additional timesteps to decode. + beam_size: number of beams. + top_beams: an integer. How many of the beams to return. + alpha: Float that controls the length penalty. larger the alpha, stronger + the preference for longer translations. + use_tpu: A bool, whether to do beam decode on TPU. 
+ + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } + """ + if (self._hparams.self_attention_type not in [ + "dot_product", "dot_product_relative" + ]): + # Caching is not guaranteed to work with attention types other than + # dot_product and dot_product_relative. + return self._beam_decode_slow(features, decode_length, beam_size, + top_beams, alpha, use_tpu) + with tf.variable_scope(self.name): + if use_tpu: + return self._fast_decode_tpu(features, decode_length, beam_size, + top_beams, alpha) + return self._fast_decode(features, decode_length, beam_size, top_beams, + alpha) + + def _prepare_inputs_for_decode(self, features): + """Prepare inputs for decoding. + + Args: + features: A map of string to model features. + + Returns: + Inputs after fixing shape and applying modality. + """ + dp = self._data_parallelism + hparams = self._hparams + inputs = features["inputs"] + # TODO(llion): Clean up this reshaping logic. + inputs = tf.expand_dims(inputs, axis=1) + if len(inputs.shape) < 5: + inputs = tf.expand_dims(inputs, axis=4) + s = common_layers.shape_list(inputs) + inputs = tf.reshape(inputs, [s[0] * s[1], s[2], s[3], s[4]]) + # _shard_features called to ensure that the variable names match + inputs = self._shard_features({"inputs": inputs})["inputs"] + input_modality = self._problem_hparams.modality["inputs"] + input_vocab_size = self._problem_hparams.vocab_size["inputs"] + if input_vocab_size is not None and hasattr(hparams, "vocab_divisor"): + input_vocab_size += (-input_vocab_size) % hparams.vocab_divisor + modality_name = hparams.name.get("inputs", + modalities.get_name(input_modality))( + hparams, input_vocab_size) + with tf.variable_scope(modality_name): + bottom = hparams.bottom.get("inputs", + modalities.get_bottom(input_modality)) + inputs = dp(bottom, inputs, hparams, input_vocab_size) + return inputs + + def _fast_decode_tpu(self, + features, + decode_length, + beam_size=1, + top_beams=1, + alpha=1.0): + """Fast decoding. + + Implements both greedy and beam search decoding on TPU, uses beam search + iff beam_size > 1, otherwise beam search related arguments are ignored. + + Args: + features: A map of string to model features. + decode_length: An integer, how many additional timesteps to decode. + beam_size: An integer, number of beams. + top_beams: An integer, how many of the beams to return. + alpha: A float that controls the length penalty. Larger the alpha, + stronger the preference for longer translations. + + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + }. + + Raises: + NotImplementedError: If there are multiple data shards. 
+ """ + if self._num_datashards != 1: + raise NotImplementedError("Fast decoding only supports a single shard.") + if "targets_segmentation" in features: + raise NotImplementedError( + "Decoding not supported on packed datasets " + " If you want to decode from a dataset, use the non-packed version" + " of the dataset when decoding.") + dp = self._data_parallelism + hparams = self._hparams + target_modality = self._problem_hparams.modality["targets"] + target_vocab_size = self._problem_hparams.vocab_size["targets"] + if target_vocab_size is not None and hasattr(hparams, "vocab_divisor"): + target_vocab_size += (-target_vocab_size) % hparams.vocab_divisor + + if self.has_input: + inputs_shape = common_layers.shape_list(features["inputs"]) + if (target_modality == modalities.ModalityType.CLASS_LABEL or + self._problem_hparams.get("regression_targets")): + decode_length = 1 + else: + decode_length = ( + inputs_shape[1] + features.get("decode_length", decode_length)) + batch_size = inputs_shape[0] + inputs = self._prepare_inputs_for_decode(features) + with tf.variable_scope("body"): + encoder_output, encoder_decoder_attention_bias = dp( + self.encode, + inputs, + features["target_space_id"], + hparams, + features=features) + encoder_output = encoder_output[0] + encoder_decoder_attention_bias = encoder_decoder_attention_bias[0] + partial_targets = None + else: + # The problem has no inputs. + encoder_output = None + encoder_decoder_attention_bias = None + + # Prepare partial targets. + # In either features["inputs"] or features["targets"]. + # We force the outputs to begin with these sequences. + partial_targets = features.get("inputs") + if partial_targets is None: + partial_targets = features["targets"] + assert partial_targets is not None + partial_targets = common_layers.expand_squeeze_to_nd(partial_targets, 2) + partial_targets = tf.to_int64(partial_targets) + partial_targets_shape = common_layers.shape_list(partial_targets) + partial_targets_length = partial_targets_shape[1] + decode_length = ( + partial_targets_length + features.get("decode_length", decode_length)) + batch_size = partial_targets_shape[0] + + if hparams.pos == "timing": + positional_encoding = common_attention.get_timing_signal_1d( + decode_length + 1, hparams.hidden_size) + elif hparams.pos == "timing_from_features": + positional_encoding = common_attention.add_timing_signals_from_features( + tf.zeros([1, decode_length + 1, hparams.hidden_size]), features, + hparams.position_features) + elif hparams.pos == "emb": + positional_encoding = common_attention.add_positional_embedding( + tf.zeros([1, decode_length + 1, hparams.hidden_size]), + hparams.max_length, "body/targets_positional_embedding", None) + else: + positional_encoding = None + + def preprocess_targets(targets, i): + """Performs preprocessing steps on the targets to prepare for the decoder. + + This includes: + - Embedding the ids. + - Flattening to 3D tensor. + - Optionally adding timing signals. + + Args: + targets: A tensor, inputs ids to the decoder. [batch_size, 1]. + i: An integer, Step number of the decoding loop. + + Returns: + A tensor, processed targets [batch_size, 1, hidden_dim]. 
+ """ + # _shard_features called to ensure that the variable names match + targets = self._shard_features({"targets": targets})["targets"] + modality_name = hparams.name.get( + "targets", + modalities.get_name(target_modality))(hparams, target_vocab_size) + with tf.variable_scope(modality_name): + bottom = hparams.bottom.get( + "targets", modalities.get_targets_bottom(target_modality)) + targets = dp(bottom, targets, hparams, target_vocab_size)[0] + targets = common_layers.flatten4d3d(targets) + + # GO embeddings are all zero, this is because transformer_prepare_decoder + # Shifts the targets along by one for the input which pads with zeros. + # If the modality already maps GO to the zero embeddings this is not + # needed. + targets = tf.cond( + tf.equal(i, 0), lambda: tf.zeros_like(targets), lambda: targets) + + if positional_encoding is not None: + positional_encoding_shape = positional_encoding.shape.as_list() + targets += tf.slice( + positional_encoding, [0, i, 0], + [positional_encoding_shape[0], 1, positional_encoding_shape[2]]) + return targets + + decoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle(decode_length)) + if hparams.proximity_bias: + decoder_self_attention_bias += common_attention.attention_bias_proximal( + decode_length) + + def symbols_to_logits_tpu_fn(ids, i, cache): + """Go from ids to logits for next symbol on TPU. + + Args: + ids: A tensor, symbol IDs. + i: An integer, step number of the decoding loop. Only used for inference + on TPU. + cache: A dict, containing tensors which are the results of previous + attentions, used for fast decoding. + + Returns: + ret: A tensor, computed logits. + cache: A dict, containing tensors which are the results of previous + attentions, used for fast decoding. + """ + ids = ids[:, -1:] + targets = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3) + targets = preprocess_targets(targets, i) + + bias_shape = decoder_self_attention_bias.shape.as_list() + bias = tf.slice(decoder_self_attention_bias, [0, 0, i, 0], + [bias_shape[0], bias_shape[1], 1, bias_shape[3]]) + + with tf.variable_scope("body"): + body_outputs = dp( + self.decode, + targets, + cache.get("encoder_output"), + cache.get("encoder_decoder_attention_bias"), + bias, + hparams, + cache, + i, + nonpadding=features_to_nonpadding(features, "targets")) + modality_name = hparams.name.get( + "targets", + modalities.get_name(target_modality))(hparams, target_vocab_size) + with tf.variable_scope(modality_name): + top = hparams.top.get("targets", + modalities.get_top(target_modality)) + logits = dp(top, body_outputs, None, hparams, target_vocab_size)[0] + + ret = tf.squeeze(logits, axis=[1, 2, 3]) + if partial_targets is not None: + # If the position is within the given partial targets, we alter the + # logits to always return those values. + # A faster approach would be to process the partial targets in one + # iteration in order to fill the corresponding parts of the cache. + # This would require broader changes, though. 
+ vocab_size = tf.shape(ret)[1] + + def forced_logits(): + return tf.one_hot( + tf.tile( + tf.slice(partial_targets, [0, i], + [partial_targets.shape.as_list()[0], 1]), + [beam_size]), vocab_size, 0.0, -1e9) + + ret = tf.cond( + tf.less(i, partial_targets_length), forced_logits, lambda: ret) + return ret, cache + + eos_id = self.get_decode_end_id() or beam_search.EOS_ID + temperature = features.get("sampling_temp", + getattr(hparams, "sampling_temp", 0.0)) + top_k = features.get("sampling_keep_top_k", + getattr(hparams, "sampling_keep_top_k", -1)) + + ret = fast_decode_tpu( + encoder_output=encoder_output, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + symbols_to_logits_fn=symbols_to_logits_tpu_fn, + hparams=hparams, + decode_length=decode_length, + vocab_size=target_vocab_size, + init_cache_fn=self._init_cache_fn, + beam_size=beam_size, + top_beams=top_beams, + alpha=alpha, + batch_size=batch_size, + force_decode_length=self._decode_hparams.force_decode_length, + eos_id=eos_id, + sampling_temperature=temperature, + top_k=top_k) + if partial_targets is not None: + if beam_size <= 1 or top_beams <= 1: + ret["outputs"] = ret["outputs"][:, partial_targets_length:] + else: + ret["outputs"] = ret["outputs"][:, :, partial_targets_length:] + return ret + + def get_decode_start_id(self): + """Returns the id of the first decoder input symbol. + + The default case maps None to a vector of 0's for transformer. This method + can be overridden to return a different id by a model wanting to use a + different decoder start symbol. The id returned by this method is used to + index the embedding matrix, and retrieve the vector that will be used as the + first input to the decoder + """ + return None + + def get_decode_end_id(self): + """Returns the id of the output symbol that terminates decoding. + + This method can be overridden by a different model. The id returned by this + method is used to check if the generation is complete during decoding. + """ + return None + + def _fast_decode(self, + features, + decode_length, + beam_size=1, + top_beams=1, + alpha=1.0, + preprocess_targets_method=None): + """Fast decoding. + + Implements both greedy and beam search decoding, uses beam search iff + beam_size > 1, otherwise beam search related arguments are ignored. + + Args: + features: a map of string to model features. + decode_length: an integer. How many additional timesteps to decode. + beam_size: number of beams. + top_beams: an integer. How many of the beams to return. + alpha: Float that controls the length penalty. larger the alpha, stronger + the preference for longer translations. + preprocess_targets_method: method used to preprocess targets. If None, + uses method "preprocess_targets" defined inside this method. + + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } + + Raises: + NotImplementedError: If there are multiple data shards. 
+ """ + if self._num_datashards != 1: + raise NotImplementedError("Fast decoding only supports a single shard.") + dp = self._data_parallelism + hparams = self._hparams + target_modality = self._problem_hparams.modality["targets"] + target_vocab_size = self._problem_hparams.vocab_size["targets"] + if target_vocab_size is not None and hasattr(hparams, "vocab_divisor"): + target_vocab_size += (-target_vocab_size) % hparams.vocab_divisor + if "targets_segmentation" in features: + raise NotImplementedError( + "Decoding not supported on packed datasets " + " If you want to decode from a dataset, use the non-packed version" + " of the dataset when decoding.") + if self.has_input: + inputs_shape = common_layers.shape_list(features["inputs"]) + if (target_modality == modalities.ModalityType.CLASS_LABEL or + self._problem_hparams.get("regression_targets")): + decode_length = 1 + else: + decode_length = ( + inputs_shape[1] + features.get("decode_length", decode_length)) + batch_size = inputs_shape[0] + inputs = self._prepare_inputs_for_decode(features) + with tf.variable_scope("body"): + encoder_output, encoder_decoder_attention_bias = dp( + self.encode, + inputs, + features["target_space_id"], + hparams, + features=features) + encoder_output = encoder_output[0] + encoder_decoder_attention_bias = encoder_decoder_attention_bias[0] + partial_targets = features.get("partial_targets") + else: + # The problem has no inputs. + encoder_output = None + encoder_decoder_attention_bias = None + + # Prepare partial targets. + # In either features["inputs"] or features["targets"]. + # We force the outputs to begin with these sequences. + partial_targets = features.get("inputs") + if partial_targets is None: + partial_targets = features["targets"] + assert partial_targets is not None + + if partial_targets is not None: + partial_targets = common_layers.expand_squeeze_to_nd(partial_targets, 2) + partial_targets = tf.to_int64(partial_targets) + partial_targets_shape = common_layers.shape_list(partial_targets) + partial_targets_length = partial_targets_shape[1] + decode_length = ( + partial_targets_length + features.get("decode_length", decode_length)) + batch_size = partial_targets_shape[0] + + if hparams.pos == "timing": + positional_encoding = common_attention.get_timing_signal_1d( + decode_length + 1, hparams.hidden_size) + elif hparams.pos == "timing_from_features": + positional_encoding = common_attention.add_timing_signals_from_features( + tf.zeros([1, decode_length, hparams.hidden_size]), features, + hparams.position_features) + elif hparams.pos == "emb": + positional_encoding = common_attention.add_positional_embedding( + tf.zeros([1, decode_length, hparams.hidden_size]), hparams.max_length, + "body/targets_positional_embedding", None) + else: + positional_encoding = None + + def preprocess_targets(targets, i): + """Performs preprocessing steps on the targets to prepare for the decoder. + + This includes: + - Embedding the ids. + - Flattening to 3D tensor. + - Optionally adding timing signals. + + Args: + targets: inputs ids to the decoder. [batch_size, 1] + i: scalar, Step number of the decoding loop. 
+ + Returns: + Processed targets [batch_size, 1, hidden_dim] + """ + # _shard_features called to ensure that the variable names match + targets = self._shard_features({"targets": targets})["targets"] + modality_name = hparams.name.get( + "targets", + modalities.get_name(target_modality))(hparams, target_vocab_size) + with tf.variable_scope(modality_name): + bottom = hparams.bottom.get( + "targets", modalities.get_targets_bottom(target_modality)) + targets = dp(bottom, targets, hparams, target_vocab_size)[0] + targets = common_layers.flatten4d3d(targets) + + # GO embeddings are all zero, this is because transformer_prepare_decoder + # Shifts the targets along by one for the input which pads with zeros. + # If the modality already maps GO to the zero embeddings this is not + # needed. + if not self.get_decode_start_id(): + targets = tf.cond( + tf.equal(i, 0), lambda: tf.zeros_like(targets), lambda: targets) + + if positional_encoding is not None: + targets += positional_encoding[:, i:i + 1] + return targets + + decoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle(decode_length)) + if hparams.proximity_bias: + decoder_self_attention_bias += common_attention.attention_bias_proximal( + decode_length) + + # Create tensors for encoder-decoder attention history + att_cache = {"attention_history": {}} + num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers + if encoder_output is not None: + att_batch_size, enc_seq_length = common_layers.shape_list( + encoder_output)[0:2] + for layer in range(num_layers): + att_cache["attention_history"]["layer_%d" % layer] = tf.zeros( + [att_batch_size, hparams.num_heads, 0, enc_seq_length]) + + def update_decoder_attention_history(cache): + """Save attention weights in cache, e.g., for vizualization.""" + for k in [x for x in self.attention_weights + if "decoder" in x and "self" not in x and "logits" not in x]: + idx = k.find("layer_") + if idx < 0: + continue + # Get layer number from the string name. + layer_nbr = k[idx + 6:] + idx = 0 + while idx + 1 < len(layer_nbr) and layer_nbr[:idx + 1].isdigit(): + idx += 1 + layer_nbr = "layer_%d" % int(layer_nbr[:idx]) + if layer_nbr in cache["attention_history"]: + cache["attention_history"][layer_nbr] = tf.concat( + [cache["attention_history"][layer_nbr], + self.attention_weights[k]], + axis=2) + if not preprocess_targets_method: + preprocess_targets_method = preprocess_targets + + def symbols_to_logits_fn(ids, i, cache): + """Go from ids to logits for next symbol.""" + ids = ids[:, -1:] + targets = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3) + targets = preprocess_targets_method(targets, i) + + bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1] + with tf.variable_scope("body"): + body_outputs = dp( + self.decode, + targets, + cache.get("encoder_output"), + cache.get("encoder_decoder_attention_bias"), + bias, + hparams, + cache, + nonpadding=features_to_nonpadding(features, "targets")) + + update_decoder_attention_history(cache) + + modality_name = hparams.name.get( + "targets", + modalities.get_name(target_modality))(hparams, target_vocab_size) + with tf.variable_scope(modality_name): + top = hparams.top.get("targets", modalities.get_top(target_modality)) + logits = dp(top, body_outputs, None, hparams, target_vocab_size)[0] + + ret = tf.squeeze(logits, axis=[1, 2, 3]) + if partial_targets is not None: + # If the position is within the given partial targets, we alter the + # logits to always return those values. 
+ # A faster approach would be to process the partial targets in one + # iteration in order to fill the corresponding parts of the cache. + # This would require broader changes, though. + vocab_size = tf.shape(ret)[1] + + def forced_logits(): + return tf.one_hot( + tf.tile(partial_targets[:, i], [beam_size]), vocab_size, 0.0, + -1e9) + + ret = tf.cond( + tf.less(i, partial_targets_length), forced_logits, lambda: ret) + return ret, cache + + sos_id = self.get_decode_start_id() or 0 + eos_id = self.get_decode_end_id() or beam_search.EOS_ID + temperature = features.get("sampling_temp", + getattr(hparams, "sampling_temp", 0.0)) + top_k = features.get("sampling_keep_top_k", + getattr(hparams, "sampling_keep_top_k", -1)) + + ret = fast_decode( + encoder_output=encoder_output, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + symbols_to_logits_fn=symbols_to_logits_fn, + hparams=hparams, + decode_length=decode_length, + vocab_size=target_vocab_size, + init_cache_fn=self._init_cache_fn, + beam_size=beam_size, + top_beams=top_beams, + alpha=alpha, + batch_size=batch_size, + force_decode_length=self._decode_hparams.force_decode_length, + sos_id=sos_id, + eos_id=eos_id, + sampling_temperature=temperature, + top_k=top_k, + cache=att_cache) + if partial_targets is not None: + if beam_size <= 1 or top_beams <= 1: + ret["outputs"] = ret["outputs"][:, partial_targets_length:] + else: + ret["outputs"] = ret["outputs"][:, :, partial_targets_length:] + return ret + + +def _init_transformer_cache(cache, hparams, batch_size, attention_init_length, + encoder_output, encoder_decoder_attention_bias, + scope_prefix): + """Create the initial cache for Transformer fast decoding.""" + key_channels = hparams.attention_key_channels or hparams.hidden_size + value_channels = hparams.attention_value_channels or hparams.hidden_size + num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers + vars_3d_num_heads = ( + hparams.num_heads if hparams.get("attention_variables_3d") else 0) + + if cache is None: + cache = {} + cache.update({ + "layer_%d" % layer: { # pylint: disable=g-complex-comprehension + "k": + common_attention.split_heads( + tf.zeros([batch_size, + attention_init_length, + key_channels]), hparams.num_heads), + "v": + common_attention.split_heads( + tf.zeros([batch_size, + attention_init_length, + value_channels]), hparams.num_heads), + } for layer in range(num_layers) + }) + + # If `ffn_layer` is in `["dense_relu_dense" or "conv_hidden_relu"]`, then the + # cache key "f" won't be used, which means that the` shape of cache["f"]` + # won't be changed to + # `[beamsize*batch_size, decode_length, hparams.hidden_size]` and may cause + # error when applying `nest.map reshape function` on it. 
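For orientation, a toy with made-up sizes (not from the patch) showing the split-heads layout of the per-layer key/value cache built here: the time dimension starts at `attention_init_length` (0 on the non-TPU path) and grows by one position per decoding step, roughly as the cached attention layers append to it.

```python
import tensorflow.compat.v1 as tf

batch, num_heads, depth_per_head = 2, 4, 8
# Split-heads layout [batch, num_heads, time, depth]; time starts empty.
k_cache = tf.zeros([batch, num_heads, 0, depth_per_head])
new_k = tf.ones([batch, num_heads, 1, depth_per_head])  # keys for this step
k_cache = tf.concat([k_cache, new_k], axis=2)  # time dimension grows to 1
```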
+ if hparams.ffn_layer not in ["dense_relu_dense", "conv_hidden_relu"]: + for layer in range(num_layers): + cache["layer_%d" % layer]["f"] = tf.zeros( + [batch_size, 0, hparams.hidden_size]) + + if encoder_output is not None: + for layer in range(num_layers): + layer_name = "layer_%d" % layer + with tf.variable_scope( + "%sdecoder/%s/encdec_attention/multihead_attention" % + (scope_prefix, layer_name)): + k_encdec = common_attention.compute_attention_component( + encoder_output, + key_channels, + name="k", + vars_3d_num_heads=vars_3d_num_heads) + k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads) + v_encdec = common_attention.compute_attention_component( + encoder_output, + value_channels, + name="v", + vars_3d_num_heads=vars_3d_num_heads) + v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads) + cache[layer_name]["k_encdec"] = k_encdec + cache[layer_name]["v_encdec"] = v_encdec + + cache["encoder_output"] = encoder_output + cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias + return cache + + +def fast_decode_tpu(encoder_output, + encoder_decoder_attention_bias, + symbols_to_logits_fn, + hparams, + decode_length, + vocab_size, + init_cache_fn=_init_transformer_cache, + beam_size=1, + top_beams=1, + alpha=1.0, + sos_id=0, + eos_id=beam_search.EOS_ID, + batch_size=None, + force_decode_length=False, + scope_prefix="body/", + use_top_k_with_unique=True, + sampling_temperature=0.0, + top_k=-1): + """Given encoder output and a symbols to logits function, does fast decoding. + + Implements both greedy and beam search decoding for TPU, uses beam search iff + beam_size > 1, otherwise beam search related arguments are ignored. - inputs = tf.squeeze(inputs, 2) - targets = tf.squeeze(targets, 2) + Args: + encoder_output: A tensor, output from encoder. + encoder_decoder_attention_bias: A tensor, bias for use in encoder-decoder + attention. + symbols_to_logits_fn: Incremental decoding, function mapping triple `(ids, + step, cache)` to symbol logits. + hparams: Run hyperparameters. + decode_length: An integer, how many additional timesteps to decode. + vocab_size: Output vocabulary size. + init_cache_fn: Function that returns the initial cache dict. + beam_size: An integer, number of beams. + top_beams: An integer, how many of the beams to return. + alpha: A float that controls the length penalty. Larger the alpha, stronger + the preference for longer translations. + sos_id: Start-of-sequence symbol. + eos_id: End-of-sequence symbol. + batch_size: An integer, must be passed if there is no input. + force_decode_length: A bool, whether to force the full decode length, or if + False, stop when all beams hit eos_id. + scope_prefix: str, prefix for decoder layer variable scopes. + use_top_k_with_unique: bool, whether to use a fast (but decreased precision) + top_k during beam search. + sampling_temperature: scalar, temperature with which to sample. + top_k: scalar, sample only top k. - (encoder_input, encoder_attention_bias, _) = (transformer_prepare_encoder( - inputs, target_space, hparams)) - (decoder_input, decoder_self_attention_bias) = transformer_prepare_decoder( - targets, hparams) + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if top_beams == 1 or + [batch_size, top_beams, <= decode_length] otherwise + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + }. 
+ + Raises: + NotImplementedError: If beam size > 1 with partial targets. + """ + if encoder_output is not None: + batch_size = common_layers.shape_list(encoder_output)[0] + + cache = init_cache_fn(None, hparams, batch_size, decode_length, + encoder_output, encoder_decoder_attention_bias, + scope_prefix) + + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_SEQ_BEAM_SEARCH, + value={ + "vocab_size": vocab_size, + "batch_size": batch_size, + "beam_size": beam_size, + "alpha": alpha, + "max_decode_length": decode_length + }, + hparams=hparams) + if beam_size > 1: # Beam Search + initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32) + decoded_ids, scores, _ = beam_search.beam_search( + symbols_to_logits_fn, + initial_ids, + beam_size, + decode_length, + vocab_size, + alpha, + states=cache, + eos_id=eos_id, + stop_early=(top_beams == 1), + use_tpu=True, + use_top_k_with_unique=use_top_k_with_unique) + + if top_beams == 1: + decoded_ids = decoded_ids[:, 0, 1:] + scores = scores[:, 0] + else: + decoded_ids = decoded_ids[:, :top_beams, 1:] + scores = scores[:, :top_beams] + else: # Greedy + + def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob): + """One step of greedy decoding.""" + logits, cache = symbols_to_logits_fn(next_id, i, cache) + log_probs = common_layers.log_prob_from_logits(logits) + temperature = sampling_temperature + if hparams.sampling_method == "random_per_example": + next_id = common_layers.sample_temperature_per_example( + logits, temperature, top_k) + else: + if hparams.sampling_method == "argmax": + temperature = 0.0 + next_id = common_layers.sample_with_temperature(logits, temperature, + top_k) + + log_prob_indices = tf.stack([tf.range(tf.to_int64(batch_size)), next_id], + axis=1) + log_prob += tf.gather_nd( + log_probs, log_prob_indices) * (1 - tf.to_float(hit_eos)) + # Note(thangluong): we purposely update hit_eos after aggregating log_prob + # There is a subtle detail here that we want to include log_probs up to + # (and inclusive of) the first eos generated, but not subsequent tokens. 
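The "subtle detail" in the note above can be checked with plain Python (a standalone toy, not from the patch): the step log-prob is added while `hit_eos` is still false for the step that produces the first EOS, so the score includes that step but nothing sampled afterwards.

```python
eos_id = 1
steps = [(5, -0.1), (1, -0.2), (7, -0.5)]  # (sampled id, log-prob of that id)
hit_eos, score = False, 0.0
for token, logp in steps:
  score += logp * (1.0 - float(hit_eos))  # aggregate first...
  hit_eos = hit_eos or (token == eos_id)  # ...then record the EOS
assert abs(score - (-0.3)) < 1e-9  # -0.1 + -0.2; the -0.5 after EOS is excluded
```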
+ hit_eos |= tf.equal(next_id, eos_id) + + next_id = tf.expand_dims(next_id, axis=1) + decoded_ids = tf.transpose(decoded_ids) + decoded_ids = inplace_ops.alias_inplace_update( + decoded_ids, i, tf.squeeze(next_id, axis=1)) + decoded_ids = tf.transpose(decoded_ids) + return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob + + def is_not_finished(i, hit_eos, *_): + finished = i >= decode_length + if not force_decode_length: + finished |= tf.reduce_all(hit_eos) + return tf.logical_not(finished) + + decoded_ids = tf.zeros([batch_size, decode_length], dtype=tf.int64) + hit_eos = tf.fill([batch_size], False) + next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64) + initial_log_prob = tf.zeros([batch_size], dtype=tf.float32) + + def compute_cache_shape_invariants(tensor): + return tf.TensorShape(tensor.shape.as_list()) + + _, _, _, decoded_ids, _, log_prob = tf.while_loop( + is_not_finished, + inner_loop, [ + tf.constant(0), hit_eos, next_id, decoded_ids, cache, + initial_log_prob + ], + shape_invariants=[ + tf.TensorShape([]), + tf.TensorShape([batch_size]), + tf.TensorShape([batch_size, 1]), + tf.TensorShape([batch_size, decode_length]), + nest.map_structure(compute_cache_shape_invariants, cache), + tf.TensorShape([batch_size]), + ]) + scores = log_prob + + return {"outputs": decoded_ids, "scores": scores} + + +def fast_decode(encoder_output, + encoder_decoder_attention_bias, + symbols_to_logits_fn, + hparams, + decode_length, + vocab_size, + init_cache_fn=_init_transformer_cache, + beam_size=1, + top_beams=1, + alpha=1.0, + sos_id=0, + eos_id=beam_search.EOS_ID, + batch_size=None, + force_decode_length=False, + scope_prefix="body/", + sampling_temperature=0.0, + top_k=-1, + cache=None): + """Given encoder output and a symbols to logits function, does fast decoding. + + Implements both greedy and beam search decoding, uses beam search iff + beam_size > 1, otherwise beam search related arguments are ignored. - def residual_fn(x, y): - return common_layers.layer_norm(x + tf.nn.dropout( - y, 1.0 - hparams.residual_dropout)) + Args: + encoder_output: Output from encoder. + encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder + attention + symbols_to_logits_fn: Incremental decoding; function mapping triple `(ids, + step, cache)` to symbol logits. + hparams: run hyperparameters + decode_length: an integer. How many additional timesteps to decode. + vocab_size: Output vocabulary size. + init_cache_fn: Function that returns the initial cache dict. + beam_size: number of beams. + top_beams: an integer. How many of the beams to return. + alpha: Float that controls the length penalty. larger the alpha, stronger + the preference for longer translations. + sos_id: End-of-sequence symbol in beam search. + eos_id: End-of-sequence symbol in beam search. + batch_size: an integer scalar - must be passed if there is no input + force_decode_length: bool, whether to force the full decode length, or if + False, stop when all beams hit eos_id. + scope_prefix: str, prefix for decoder layer variable scopes. + sampling_temperature: scalar, temperature with which to sample. + top_k: scalar, sample only top k. + cache: cache dictionary for additional predictions. 
- # encoder_input = tf.squeeze(encoder_input, 2) - # decoder_input = tf.squeeze(decoder_input, 2) - encoder_input = tf.nn.dropout(encoder_input, 1.0 - hparams.residual_dropout) - decoder_input = tf.nn.dropout(decoder_input, 1.0 - hparams.residual_dropout) - encoder_output = transformer_encoder(encoder_input, residual_fn, - encoder_attention_bias, hparams) + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if top_beams == 1 or + [batch_size, top_beams, <= decode_length] otherwise + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } + """ + if encoder_output is not None: + batch_size = common_layers.shape_list(encoder_output)[0] + + cache = init_cache_fn( + cache=cache, + hparams=hparams, + batch_size=batch_size, + attention_init_length=0, + encoder_output=encoder_output, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + scope_prefix=scope_prefix) + + if beam_size > 1: # Beam Search + initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32) + decoded_ids, scores, cache = beam_search.beam_search( + symbols_to_logits_fn, + initial_ids, + beam_size, + decode_length, + vocab_size, + alpha, + states=cache, + eos_id=eos_id, + stop_early=(top_beams == 1)) + + if top_beams == 1: + decoded_ids = decoded_ids[:, 0, 1:] + scores = scores[:, 0] + else: + decoded_ids = decoded_ids[:, :top_beams, 1:] + scores = scores[:, :top_beams] + else: # Greedy + + def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob): + """One step of greedy decoding.""" + logits, cache = symbols_to_logits_fn(next_id, i, cache) + log_probs = common_layers.log_prob_from_logits(logits) + temperature = sampling_temperature + if hparams.sampling_method == "random_per_example": + next_id = common_layers.sample_temperature_per_example( + logits, temperature, top_k) + else: + if hparams.sampling_method == "argmax": + temperature = 0.0 + next_id = common_layers.sample_with_temperature(logits, temperature, + top_k) + + log_prob_indices = tf.stack([tf.range(tf.to_int64(batch_size)), next_id], + axis=1) + log_prob += tf.gather_nd( + log_probs, log_prob_indices) * (1 - tf.to_float(hit_eos)) + # Note(thangluong): we purposely update hit_eos after aggregating log_prob + # There is a subtle detail here that we want to include log_probs up to + # (and inclusive of) the first eos generated, but not subsequent tokens. 
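Unlike the fixed-length, in-place update used in the TPU loop above, the loop that follows grows `decoded_ids` by concatenation, so its `tf.while_loop` shape invariants leave the time dimension unknown. A minimal standalone toy of that pattern (made-up sizes, not from the patch):

```python
import tensorflow.compat.v1 as tf

ids = tf.zeros([2, 0], dtype=tf.int64)  # [batch, time]; time starts empty
_, ids = tf.while_loop(
    lambda i, _: i < 3,
    lambda i, ids: (i + 1,
                    tf.concat([ids, tf.fill([2, 1], tf.cast(i, tf.int64))],
                              axis=1)),
    [tf.constant(0), ids],
    shape_invariants=[tf.TensorShape([]), tf.TensorShape([2, None])])
# `ids` now has static shape [2, None] and runtime shape [2, 3].
```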
+ hit_eos |= tf.equal(next_id, eos_id) + + next_id = tf.expand_dims(next_id, axis=1) + decoded_ids = tf.concat([decoded_ids, next_id], axis=1) + + return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob + + def is_not_finished(i, hit_eos, *_): + finished = i >= decode_length + if not force_decode_length: + finished |= tf.reduce_all(hit_eos) + return tf.logical_not(finished) + + decoded_ids = tf.zeros([batch_size, 0], dtype=tf.int64) + hit_eos = tf.fill([batch_size], False) + next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64) + initial_log_prob = tf.zeros([batch_size], dtype=tf.float32) + _, _, _, decoded_ids, cache, log_prob = tf.while_loop( + is_not_finished, + inner_loop, [ + tf.constant(0), hit_eos, next_id, decoded_ids, cache, + initial_log_prob + ], + shape_invariants=[ + tf.TensorShape([]), + tf.TensorShape([None]), + tf.TensorShape([None, None]), + tf.TensorShape([None, None]), + nest.map_structure(beam_search.get_state_shape_invariants, cache), + tf.TensorShape([None]), + ]) + scores = log_prob + + return {"outputs": decoded_ids, "scores": scores, "cache": cache} - decoder_output = transformer_decoder( - decoder_input, encoder_output, residual_fn, decoder_self_attention_bias, - encoder_attention_bias, hparams) - decoder_output = tf.expand_dims(decoder_output, 2) - return decoder_output +@registry.register_model +class TransformerScorer(Transformer): + """Transformer model, but only scores in PREDICT mode. + Checkpoints between Transformer and TransformerScorer are interchangeable. + """ -def transformer_prepare_encoder(inputs, target_space, hparams): - """Prepare one shard of the model for the encoder. + def __init__(self, *args, **kwargs): + super(TransformerScorer, self).__init__(*args, **kwargs) + self._name = "transformer" + self._base_name = "transformer" + + def infer(self, + features=None, + decode_length=50, + beam_size=1, + top_beams=1, + alpha=0.0, + use_tpu=False): + """Returns the targets and their log probabilities.""" + del decode_length, beam_size, top_beams, alpha, use_tpu + assert features is not None + + # Run the model + self.hparams.force_full_predict = True + with tf.variable_scope(self.name): + logits, _ = self.model_fn(features) + assert len(logits.shape) == 5 # [batch, time, 1, 1, vocab] + logits = tf.squeeze(logits, [2, 3]) + + # Compute the log probabilities + log_probs = common_layers.log_prob_from_logits(logits) - Args: - inputs: a Tensor. - target_space: a Tensor. 
- hparams: run hyperparameters + targets = features["targets"] + assert len(targets.shape) == 4 # [batch, time, 1, 1] + targets = tf.squeeze(targets, [2, 3]) - Returns: - encoder_input: a Tensor, bottom of encoder stack - encoder_self_attention_bias: a Tensor, containing large negative values - to implement masked attention and possibly baises for diagonal - alignments - encoder_padding: a Tensor + # Slice out the log_probs of the targets + log_probs = common_layers.index_last_dim_with_indices(log_probs, targets) + + # Sum over time to get the log_prob of the sequence + scores = tf.reduce_sum(log_probs, axis=1) + + return {"outputs": targets, "scores": scores} + + +@registry.register_model +class TransformerEncoder(t2t_model.T2TModel): + """Transformer, encoder only.""" + + def body(self, features): + hparams = self._hparams + inputs = features["inputs"] + target_space = features["target_space_id"] + + inputs = common_layers.flatten4d3d(inputs) + + (encoder_input, encoder_self_attention_bias, _) = ( + transformer_prepare_encoder(inputs, target_space, hparams)) + + encoder_input = tf.nn.dropout(encoder_input, + 1.0 - hparams.layer_prepostprocess_dropout) + encoder_output = transformer_encoder( + encoder_input, + encoder_self_attention_bias, + hparams, + nonpadding=features_to_nonpadding(features, "inputs")) + encoder_output = tf.expand_dims(encoder_output, 2) + + return encoder_output + + +@registry.register_model +class TransformerRegressor(TransformerEncoder): + """Transformer inheriting from Encoder, for the regression problem. + + Final result is a tensor that has a shape of (?, 1, 1, 1). """ - # Flatten inputs. - ishape_static = inputs.shape.as_list() - encoder_input = inputs - encoder_padding = common_attention.embedding_to_padding(encoder_input) - encoder_self_attention_bias = common_attention.attention_bias_ignore_padding( - encoder_padding) - # Append target_space_id embedding to inputs. - emb_target_space = common_layers.embedding( - target_space, 32, ishape_static[-1], name="target_space_embedding") - emb_target_space = tf.reshape(emb_target_space, [1, 1, -1]) - encoder_input += emb_target_space - if hparams.pos == "timing": - encoder_input = common_attention.add_timing_signal_1d(encoder_input) - return (encoder_input, encoder_self_attention_bias, encoder_padding) + def top(self, body_output, features): + """Computes single scalar value from body_output.""" + + with tf.variable_scope("reg_top_ffn"): + x = body_output + x = tf.reduce_mean(x, axis=[1, 2], keepdims=True) + res = tf.layers.dense(x, 1, name="model_top") + return res + + +def features_to_nonpadding(features, inputs_or_targets="inputs"): + key = inputs_or_targets + "_segmentation" + if features and key in features: + return tf.minimum(tf.to_float(features[key]), 1.0) + return None -def transformer_prepare_decoder(targets, hparams): + +def transformer_prepare_decoder(targets, hparams, features=None, pad=None): """Prepare one shard of the model for the decoder. Args: targets: a Tensor. hparams: run hyperparameters + features: optionally pass the entire features dictionary as well. This is + needed now for "packed" datasets. 
+ pad: vector to use for padding when shifting targets right Returns: decoder_input: a Tensor, bottom of decoder stack - decoder_self_attention_bias: a Tensor, containing large negative values - to implement masked attention and possibly baises for diagonal alignments + decoder_self_attention_bias: a bias tensor for use in decoder self-attention """ - decoder_self_attention_bias = ( - common_attention.attention_bias_lower_triangle(tf.shape(targets)[1])) - decoder_input = common_layers.shift_left_3d(targets) + if hparams.causal_decoder_self_attention: + # Causal attention. + if hparams.prepend_mode == "prepend_inputs_full_attention": + decoder_self_attention_bias = ( + common_attention.attention_bias_prepend_inputs_full_attention( + common_attention.embedding_to_padding(targets))) + else: + decoder_self_attention_bias = ( + common_attention.attention_bias_lower_triangle( + common_layers.shape_list(targets)[1])) + else: + # Full attention. + decoder_padding = common_attention.embedding_to_padding(targets) + decoder_self_attention_bias = ( + common_attention.attention_bias_ignore_padding(decoder_padding)) + + if features and "targets_segmentation" in features: + # "Packed" dataset - keep the examples from seeing each other. + targets_segmentation = features["targets_segmentation"] + targets_position = features["targets_position"] + decoder_self_attention_bias += common_attention.attention_bias_same_segment( + targets_segmentation, targets_segmentation) + else: + targets_position = None + if hparams.proximity_bias: + decoder_self_attention_bias += common_attention.attention_bias_proximal( + common_layers.shape_list(targets)[1]) + decoder_input = common_layers.shift_right_3d(targets, pad) if hparams.pos == "timing": - decoder_input = common_attention.add_timing_signal_1d(decoder_input) + if targets_position is not None: + decoder_input = common_attention.add_timing_signal_1d_given_position( + decoder_input, targets_position) + else: + decoder_input = common_attention.add_timing_signal_1d(decoder_input) + elif hparams.pos == "timing_from_features": + decoder_input = common_attention.add_timing_signals_from_features( + decoder_input, features, hparams.position_features) + elif hparams.pos == "emb": + decoder_input = common_attention.add_positional_embedding( + decoder_input, hparams.max_length, "targets_positional_embedding", + targets_position) + + if hparams.activation_dtype == "bfloat16": + decoder_self_attention_bias = tf.cast(decoder_self_attention_bias, + tf.bfloat16) return (decoder_input, decoder_self_attention_bias) -def transformer_encoder(encoder_input, - residual_fn, - encoder_self_attention_bias, - hparams, - name="encoder"): - """A stack of transformer layers. - - Args: - encoder_input: a Tensor - residual_fn: a function from (layer_input, layer_output) -> combined_output - encoder_self_attention_bias: bias Tensor for self-attention - (see common_attention.attention_bias()) - hparams: hyperparameters for model - name: a string - - Returns: - y: a Tensors - """ - x = encoder_input - # Summaries don't work in multi-problem setting yet. 
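Editor's note: to make the decoder preparation above concrete, the targets are shifted right by one position so that position t only conditions on tokens before t, and the causal self-attention bias adds a large negative value to logits for positions that would look ahead. A minimal numpy sketch with toy values, not library code:

```python
import numpy as np

targets = np.array([[11, 12, 13, 14]])                      # [batch=1, length=4]
decoder_input = np.pad(targets, ((0, 0), (1, 0)))[:, :-1]   # shift right by one
print(decoder_input)                                        # [[ 0 11 12 13]]

length = targets.shape[1]
causal_bias = np.triu(np.full((length, length), -1e9), k=1)
print(causal_bias)  # 0 on and below the diagonal, -1e9 above: no peeking ahead
```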
- summaries = "problems" not in hparams.values() or len(hparams.problems) == 1 - with tf.variable_scope(name): - for layer in xrange(hparams.num_hidden_layers): - with tf.variable_scope("layer_%d" % layer): - x = residual_fn( - x, - common_attention.multihead_attention( - x, - None, - encoder_self_attention_bias, - hparams.attention_key_channels or hparams.hidden_size, - hparams.attention_value_channels or hparams.hidden_size, - hparams.hidden_size, - hparams.num_heads, - hparams.attention_dropout, - summaries=summaries, - name="encoder_self_attention")) - x = residual_fn(x, - common_layers.conv_hidden_relu( - x, - hparams.filter_size, - hparams.hidden_size, - dropout=hparams.relu_dropout)) - return x +def transformer_self_attention_layer(decoder_input, + decoder_self_attention_bias, + layer_idx, + hparams, + encoder_output=None, + encoder_decoder_attention_bias=None, + cache=None, + decode_loop_step=None, + save_weights_to=None, + make_image_summary=False, + layer_collection=None, + recurrent_memory_by_layer=None, + chunk_number=None): + """A single transformer self-attention layer.""" + x = decoder_input + layer = layer_idx + layer_name = "layer_%d" % layer + layer_cache = cache[layer_name] if cache is not None else None + + attention_dropout_broadcast_dims = ( + common_layers.comma_separated_string_to_integer_list( + getattr(hparams, "attention_dropout_broadcast_dims", ""))) + + if recurrent_memory_by_layer is not None: + recurrent_memory = recurrent_memory_by_layer[layer_name] + else: + recurrent_memory = None + + if layer < hparams.get("num_area_layers", 0): + max_area_width = hparams.get("max_area_width", 1) + max_area_height = hparams.get("max_area_height", 1) + memory_height = hparams.get("max_area_height", 1) + else: + max_area_width = 1 + max_area_height = 1 + memory_height = 1 + with tf.variable_scope(layer_name): + with tf.variable_scope("self_attention"): + y = common_attention.multihead_attention( + common_layers.layer_preprocess( + x, hparams, layer_collection=layer_collection), + None, + decoder_self_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + attention_type=hparams.self_attention_type, + max_relative_position=hparams.max_relative_position, + heads_share_relative_embedding=( + hparams.heads_share_relative_embedding), + add_relative_to_values=hparams.add_relative_to_values, + save_weights_to=save_weights_to, + cache=layer_cache, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + max_length=hparams.get("max_length"), + decode_loop_step=decode_loop_step, + vars_3d=hparams.get("attention_variables_3d"), + activation_dtype=hparams.get("activation_dtype", "float32"), + weight_dtype=hparams.get("weight_dtype", "float32"), + layer_collection=layer_collection, + recurrent_memory=recurrent_memory, + chunk_number=chunk_number, + hard_attention_k=hparams.get("hard_attention_k", 0), + gumbel_noise_weight=hparams.get("gumbel_noise_weight", 0.0), + max_area_width=max_area_width, + max_area_height=max_area_height, + memory_height=memory_height, + area_key_mode=hparams.get("area_key_mode", "none"), + area_value_mode=hparams.get("area_value_mode", "none"), + training=(hparams.get( + "mode", + tf_estimator.ModeKeys.TRAIN) == tf_estimator.ModeKeys.TRAIN)) + x = common_layers.layer_postprocess(x, y, hparams) + if encoder_output is not None: + if not isinstance(encoder_output, 
(list,)): + encoder_output = [encoder_output] + with tf.variable_scope("encdec_attention"): + for enc_output in encoder_output: + y = common_attention.multihead_attention( + common_layers.layer_preprocess( + x, hparams, layer_collection=layer_collection), + enc_output, + encoder_decoder_attention_bias, + hparams.attention_key_channels or hparams.hidden_size, + hparams.attention_value_channels or hparams.hidden_size, + hparams.hidden_size, + hparams.num_heads, + hparams.attention_dropout, + max_relative_position=hparams.max_relative_position, + heads_share_relative_embedding=( + hparams.heads_share_relative_embedding), + add_relative_to_values=hparams.add_relative_to_values, + save_weights_to=save_weights_to, + cache=layer_cache, + make_image_summary=make_image_summary, + dropout_broadcast_dims=attention_dropout_broadcast_dims, + max_length=hparams.get("max_length"), + vars_3d=hparams.get("attention_variables_3d"), + activation_dtype=hparams.get("activation_dtype", "float32"), + weight_dtype=hparams.get("weight_dtype", "float32"), + layer_collection=layer_collection, + hard_attention_k=hparams.get("hard_attention_k", 0), + gumbel_noise_weight=hparams.get("gumbel_noise_weight", 0.0), + max_area_width=max_area_width, + max_area_height=max_area_height, + memory_height=memory_height, + area_key_mode=hparams.get("area_key_mode", "none"), + area_value_mode=hparams.get("area_value_mode", "none"), + training=(hparams.get( + "mode", + tf_estimator.ModeKeys.TRAIN) == tf_estimator.ModeKeys.TRAIN)) + x = common_layers.layer_postprocess(x, y, hparams) + return x, layer_cache + + +def transformer_decoder_layer(decoder_input, + decoder_self_attention_bias, + layer_idx, + hparams, + encoder_output=None, + encoder_decoder_attention_bias=None, + cache=None, + decode_loop_step=None, + nonpadding=None, + save_weights_to=None, + make_image_summary=False, + losses=None, + layer_collection=None, + recurrent_memory_by_layer=None, + chunk_number=None): + """A single transformer decoder layer.""" + x, layer_cache = transformer_self_attention_layer( + decoder_input=decoder_input, + decoder_self_attention_bias=decoder_self_attention_bias, + layer_idx=layer_idx, + hparams=hparams, + encoder_output=encoder_output, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + cache=cache, + decode_loop_step=decode_loop_step, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + layer_collection=layer_collection, + recurrent_memory_by_layer=recurrent_memory_by_layer, + chunk_number=chunk_number) + + layer = layer_idx + layer_name = "layer_%d" % layer + with tf.variable_scope(layer_name): + with tf.variable_scope("ffn"): + y = transformer_ffn_layer( + common_layers.layer_preprocess( + x, hparams, layer_collection=layer_collection), + hparams, + conv_padding="LEFT", + nonpadding_mask=nonpadding, + losses=losses, + cache=layer_cache, + decode_loop_step=decode_loop_step, + layer_collection=layer_collection) + x = common_layers.layer_postprocess(x, y, hparams) + return x def transformer_decoder(decoder_input, encoder_output, - residual_fn, decoder_self_attention_bias, encoder_decoder_attention_bias, hparams, - name="decoder"): + cache=None, + decode_loop_step=None, + name="decoder", + nonpadding=None, + save_weights_to=None, + make_image_summary=True, + losses=None, + layer_collection=None, + recurrent_memory_by_layer=None, + chunk_number=None): """A stack of transformer layers. 
Args: decoder_input: a Tensor encoder_output: a Tensor - residual_fn: a function from (layer_input, layer_output) -> combined_output - decoder_self_attention_bias: bias Tensor for self-attention - (see common_attention.attention_bias()) + decoder_self_attention_bias: bias Tensor for self-attention (see + common_attention.attention_bias()) encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention (see common_attention.attention_bias()) hparams: hyperparameters for model + cache: dict, containing tensors which are the results of previous + attentions, used for fast decoding. + decode_loop_step: An integer, step number of the decoding loop. Only used + for inference on TPU. name: a string + nonpadding: optional Tensor with shape [batch_size, encoder_length] + indicating what positions are not padding. This is used to mask out + padding in convolutional layers. We generally only need this mask for + "packed" datasets, because for ordinary datasets, no padding is ever + followed by nonpadding. + save_weights_to: an optional dictionary to capture attention weights for + visualization; the weights tensor will be appended there under a string + key created from the variable scope (including name). + make_image_summary: Whether to make an attention image summary. + losses: optional list onto which to append extra training losses + layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC + optimizer. Default is None. + recurrent_memory_by_layer: Optional dict, mapping layer names to instances + of transformer_memory.RecurrentMemory. Default is None. + chunk_number: an optional integer Tensor with shape [batch] used to operate + the recurrent_memory. Returns: y: a Tensors """ x = decoder_input - # Summaries don't work in multi-problem setting yet. 
- summaries = "problems" not in hparams.values() or len(hparams.problems) == 1 + + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS, + value=hparams.num_decoder_layers or hparams.num_hidden_layers, + hparams=hparams) + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT, + value=hparams.attention_dropout, + hparams=hparams) + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_ATTENTION_DENSE, + value={ + "use_bias": "false", + "num_heads": hparams.num_heads, + "hidden_size": hparams.hidden_size + }, + hparams=hparams) + with tf.variable_scope(name): - for layer in xrange(hparams.num_hidden_layers): - with tf.variable_scope("layer_%d" % layer): - x = residual_fn( - x, - common_attention.multihead_attention( - x, - None, - decoder_self_attention_bias, - hparams.attention_key_channels or hparams.hidden_size, - hparams.attention_value_channels or hparams.hidden_size, - hparams.hidden_size, - hparams.num_heads, - hparams.attention_dropout, - summaries=summaries, - name="decoder_self_attention")) - x = residual_fn( - x, - common_attention.multihead_attention( - x, - encoder_output, - encoder_decoder_attention_bias, - hparams.attention_key_channels or hparams.hidden_size, - hparams.attention_value_channels or hparams.hidden_size, - hparams.hidden_size, - hparams.num_heads, - hparams.attention_dropout, - summaries=summaries, - name="encdec_attention")) - x = residual_fn(x, - common_layers.conv_hidden_relu( - x, - hparams.filter_size, - hparams.hidden_size, - dropout=hparams.relu_dropout)) - return x + for layer_idx in range(hparams.num_decoder_layers or + hparams.num_hidden_layers): + x = transformer_decoder_layer( + x, + decoder_self_attention_bias, + layer_idx, + hparams, + encoder_decoder_attention_bias=encoder_decoder_attention_bias, + encoder_output=encoder_output, + cache=cache, + decode_loop_step=decode_loop_step, + nonpadding=nonpadding, + save_weights_to=save_weights_to, + make_image_summary=make_image_summary, + losses=losses, + layer_collection=layer_collection, + recurrent_memory_by_layer=recurrent_memory_by_layer, + chunk_number=chunk_number + ) + + # if normalization is done in layer_preprocess, then it should also be done + # on the output, since the output can grow very large, being the sum of + # a whole stack of unnormalized layer outputs. + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_NORM, + value={"hidden_size": hparams.hidden_size}) + return common_layers.layer_preprocess( + x, hparams, layer_collection=layer_collection) + + +@registry.register_model +class TransformerMemory(Transformer): + """Transformer language model with memory across chunks.""" + + # TODO(kitaev): consider overriding set_mode to swap out recurrent memory when + # switching between training and evaluation. 
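Editor's note: the decoder stack above wraps every sublayer in `layer_preprocess` / `layer_postprocess` and then normalizes the final output once more, because that output is the sum of a whole stack of unnormalized residual branches (see the comment above). A toy numpy sketch of this pre-norm residual pattern, using the "n" (layer norm) preprocess and "da" (dropout, residual add) postprocess sequences that the base hparams below configure; the `sublayer` stand-in and all sizes are illustrative.

```python
import numpy as np

def layer_norm(x, eps=1e-6):
  mean = x.mean(axis=-1, keepdims=True)
  var = x.var(axis=-1, keepdims=True)
  return (x - mean) / np.sqrt(var + eps)

def sublayer(x):
  # Stand-in for self-attention, encoder-decoder attention, or the ffn block.
  return 0.1 * x

x = np.random.randn(2, 5, 8)        # [batch, length, hidden_size]
for _ in range(6):                  # num_hidden_layers
  y = sublayer(layer_norm(x))       # layer_preprocess sequence "n"
  x = x + y                         # layer_postprocess sequence "da" (dropout omitted)
out = layer_norm(x)                 # final normalization of the stack output
print(out.shape)                    # (2, 5, 8)
```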
+ + def __init__(self, *args, **kwargs): + super(TransformerMemory, self).__init__(*args, **kwargs) + + hparams = self._hparams + self.recurrent_memory_by_layer = {} + for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers): + layer_name = "layer_%d" % layer + if hparams.memory_type == "neural_memory": + memory = transformer_memory.TransformerMemory( + batch_size=int(hparams.batch_size / hparams.max_length), + key_depth=hparams.hidden_size, + val_depth=hparams.hidden_size, + memory_size=hparams.split_targets_chunk_length, + sharpen_factor=1., + name=layer_name + "/recurrent_memory") + elif hparams.memory_type == "transformer_xl": + memory = transformer_memory.RecentTokensMemory( + layer_name + "/recurrent_memory", hparams) + else: + raise ValueError("Unsupported memory type: %s" % hparams.memory_type) + self.recurrent_memory_by_layer[layer_name] = memory + + @property + def has_input(self): + if hasattr(self._hparams, "unconditional") and self._hparams.unconditional: + return False + return super(TransformerMemory, self).has_input + + def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha, + use_tpu=False): + """Overriding beam search because for now only the slow version works with + memory + """ + return self._beam_decode_slow(features, decode_length, beam_size, + top_beams, alpha, use_tpu) @registry.register_hparams -def transformer_base(): +def transformer_base_v1(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() + hparams.norm_type = "layer" hparams.hidden_size = 512 hparams.batch_size = 4096 hparams.max_length = 256 - hparams.dropout = 0.0 hparams.clip_grad_norm = 0. # i.e. no gradient clipping hparams.optimizer_adam_epsilon = 1e-9 + hparams.learning_rate_schedule = "legacy" hparams.learning_rate_decay_scheme = "noam" hparams.learning_rate = 0.1 hparams.learning_rate_warmup_steps = 4000 @@ -262,35 +1790,478 @@ def transformer_base(): hparams.optimizer_adam_beta2 = 0.98 hparams.num_sampled_classes = 0 hparams.label_smoothing = 0.1 - hparams.shared_embedding_and_softmax_weights = int(True) - - hparams.add_hparam("filter_size", 2048) # Add new ones like this. - # attention-related flags + hparams.shared_embedding_and_softmax_weights = True + hparams.symbol_modality_num_shards = 16 + + # Add new ones like this. + hparams.add_hparam("filter_size", 2048) + # Layer-related flags. If zero, these fall back on hparams.num_hidden_layers. + hparams.add_hparam("num_encoder_layers", 0) + hparams.add_hparam("num_decoder_layers", 0) + # Attention-related flags. hparams.add_hparam("num_heads", 8) hparams.add_hparam("attention_key_channels", 0) hparams.add_hparam("attention_value_channels", 0) + hparams.add_hparam("ffn_layer", "dense_relu_dense") + hparams.add_hparam("parameter_attention_key_channels", 0) + hparams.add_hparam("parameter_attention_value_channels", 0) + # All hyperparameters ending in "dropout" are automatically set to 0.0 + # when not in training mode. 
hparams.add_hparam("attention_dropout", 0.0) + hparams.add_hparam("attention_dropout_broadcast_dims", "") hparams.add_hparam("relu_dropout", 0.0) + hparams.add_hparam("relu_dropout_broadcast_dims", "") hparams.add_hparam("pos", "timing") # timing, none - hparams.add_hparam("residual_dropout", 0.1) + hparams.add_hparam("position_features", "") hparams.add_hparam("nbr_decoder_problems", 1) + hparams.add_hparam("proximity_bias", False) + hparams.add_hparam("causal_decoder_self_attention", True) + hparams.add_hparam("use_pad_remover", True) + hparams.add_hparam("self_attention_type", "dot_product") + hparams.add_hparam("conv_first_kernel", 3) + hparams.add_hparam("attention_variables_3d", False) + hparams.add_hparam("use_target_space_embedding", True) + # These parameters are only used when ffn_layer=="local_moe_tpu" + hparams.add_hparam("moe_overhead_train", 1.0) + hparams.add_hparam("moe_overhead_eval", 2.0) + hparams.moe_num_experts = 16 + hparams.moe_loss_coef = 1e-3 + # If specified, use this value instead of problem name in metrics.py. + # This is useful for programs that can automatically compare experiments side + # by side based on the same metric names. + hparams.add_hparam("overload_eval_metric_name", "") + # For making a transformer encoder unidirectional by using masked + # attention. + hparams.add_hparam("unidirectional_encoder", False) + # For hard attention. + hparams.add_hparam("hard_attention_k", 0) + hparams.add_hparam("gumbel_noise_weight", 0.0) + return hparams + + +@registry.register_hparams +def transformer_base_v2(): + """Set of hyperparameters.""" + hparams = transformer_base_v1() + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + hparams.layer_prepostprocess_dropout = 0.1 + hparams.attention_dropout = 0.1 + hparams.relu_dropout = 0.1 + hparams.learning_rate_warmup_steps = 8000 + hparams.learning_rate = 0.2 + return hparams + + +@registry.register_hparams +def transformer_base_vq_ada_32ex_packed(): + """Set of hyperparameters for lm1b packed following tpu params.""" + hparams = transformer_base_v2() + expert_utils.update_hparams_for_vq_gating(hparams) + hparams.moe_num_experts = 32 + hparams.gating_type = "vq" + # this gives us a batch size of 16 because each seq is len 256 + hparams.batch_size = 5072 + hparams.ffn_layer = "local_moe" + hparams.shared_embedding_and_softmax_weights = False + hparams.learning_rate_warmup_steps = 10000 + # one epoch for languagemodel_lm1b32k_packed = 27200 steps w/ bsize 128 + hparams.learning_rate_decay_steps = 27200 + hparams.num_heads = 4 + hparams.num_blocks = 1 + hparams.moe_k = 1 + hparams.num_decoder_layers = 6 + hparams.label_smoothing = 0. 
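Editor's note: every registered hparams set in this hunk follows the same pattern of starting from an existing set, overriding a few fields, and registering the new function under its own name. A hypothetical user-defined set (the function name and overridden values below are invented for illustration) would look like this, and could then be selected through the registry just like the built-in sets:

```python
from tensor2tensor.models import transformer
from tensor2tensor.utils import registry

@registry.register_hparams
def transformer_base_my_experiment():
  """Hypothetical example: transformer_base with a smaller batch, more dropout."""
  hparams = transformer.transformer_base()
  hparams.batch_size = 2048
  hparams.layer_prepostprocess_dropout = 0.2
  return hparams
```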
+ hparams.layer_prepostprocess_dropout = 0.1 + hparams.layer_postprocess_sequence = "dan" + hparams.layer_preprocess_sequence = "none" + hparams.weight_decay = 1e-06 + hparams.attention_dropout = 0.1 + hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay" + hparams.activation_dtype = "float32" + hparams.learning_rate = 0.1 + hparams.learning_rate_constant = 1.0 + return hparams + + +@registry.register_hparams +def transformer_topk_16_packed(): + hparams = transformer_base_vq_ada_32ex_packed() + hparams.gating_type = "topk" + hparams.moe_num_experts = 16 + hparams.moe_k = 2 + return hparams + + +@registry.register_hparams +def transformer_base_vq1_16_nb1_packed_nda_b01_scales(): + """Set of hyperparameters.""" + hparams = transformer_base_vq_ada_32ex_packed() + hparams.use_scales = int(True) + hparams.moe_num_experts = 16 + hparams.moe_k = 1 + hparams.beta = 0.1 + hparams.layer_preprocess_sequence = "n" + hparams.layer_postprocess_sequence = "da" + hparams.ema = False + return hparams + + +@registry.register_hparams +def transformer_base_vq1_16_nb1_packed_dan_b01_scales(): + """Set of hyperparameters.""" + hparams = transformer_base_vq_ada_32ex_packed() + hparams.use_scales = int(True) + hparams.moe_num_experts = 16 + hparams.moe_k = 1 + hparams.beta = 0.1 + hparams.ema = False + return hparams + + +@registry.register_hparams +def transformer_base_vq1_16_nb1_packed_nda_b01_scales_dialog(): + """Set of hyperparameters.""" + hparams = transformer_base_vq1_16_nb1_packed_nda_b01_scales() + hparams.batch_size = 2048 + hparams.max_length = 1024 + hparams.filter_size = 3072 + return hparams + + +@registry.register_hparams +def transformer_ada_lmpackedbase(): + """Set of hyperparameters.""" + hparams = transformer_base_vq_ada_32ex_packed() + hparams.ffn_layer = "dense_relu_dense" + return hparams + + +@registry.register_hparams +def transformer_ada_lmpackedbase_dialog(): + """Set of hyperparameters.""" + hparams = transformer_base_vq_ada_32ex_packed() + hparams.max_length = 1024 + hparams.ffn_layer = "dense_relu_dense" + hparams.batch_size = 4096 + return hparams + + +@registry.register_hparams +def transformer_ada_lmpackedbase_relative(): + """Set of hyperparameters.""" + hparams = transformer_base_vq_ada_32ex_packed() + hparams.ffn_layer = "dense_relu_dense" return hparams @registry.register_hparams -def transformer_single_gpu(): +def transformer_base_v3(): + """Base parameters for Transformer model.""" + # Update parameters here, then occasionally cut a versioned set, e.g. + # transformer_base_v2. + hparams = transformer_base_v2() + hparams.optimizer_adam_beta2 = 0.997 + # New way of specifying learning rate schedule. + # Equivalent to previous version. + hparams.learning_rate_schedule = ( + "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size") + hparams.learning_rate_constant = 2.0 + return hparams + + +@registry.register_hparams +def transformer_base(): + """Base parameters for Transformer model.""" + hparams = transformer_base_v3() + return hparams + + +@registry.register_hparams +def transformer_big(): + """HParams for transformer big model on WMT.""" + hparams = transformer_base() + hparams.hidden_size = 1024 + hparams.filter_size = 4096 + # Reduce batch size to 2048 from 4096 to be able to train the model on a GPU + # with 12 GB memory. For example, NVIDIA TITAN V GPU. 
+ hparams.batch_size = 2048 + hparams.num_heads = 16 + hparams.layer_prepostprocess_dropout = 0.3 + return hparams + + +@registry.register_hparams +def transformer_tall(): + """Hparams for transformer on LM for pretraining/finetuning/mixing.""" + hparams = transformer_base() + hparams.batch_size = 2048 + hparams.hidden_size = 768 + hparams.filter_size = 3072 + hparams.num_hidden_layers = 12 + hparams.num_heads = 12 + hparams.label_smoothing = 0.0 + hparams.max_length = 1024 + hparams.eval_drop_long_sequences = True + hparams.multiproblem_mixing_schedule = "pretrain" + hparams.multiproblem_vocab_size = 65536 + hparams.clip_grad_norm = 1.0 + return hparams + + +@registry.register_hparams +def transformer_tall_finetune_tied(): + """Tied means fine-tune CNN/DM summarization as LM.""" + hparams = transformer_tall() + hparams.multiproblem_max_input_length = 750 + hparams.multiproblem_max_target_length = 100 + hparams.multiproblem_schedule_max_examples = 0 + hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay") + hparams.learning_rate_constant = 5e-5 + hparams.learning_rate_warmup_steps = 100 + # Set train steps to learning_rate_decay_steps or less + hparams.learning_rate_decay_steps = 80000 + hparams.multiproblem_target_eval_only = True + hparams.multiproblem_reweight_label_loss = True + hparams.multiproblem_label_weight = 1.0 + hparams.optimizer = "true_adam" + return hparams + + +@registry.register_hparams +def transformer_tall_train_tied(): + """Tied means train CNN/DM summarization as LM.""" + hparams = transformer_tall() + hparams.multiproblem_max_input_length = 750 + hparams.multiproblem_max_target_length = 100 + hparams.multiproblem_schedule_max_examples = 0 + hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay") + hparams.learning_rate_constant = 2e-4 + hparams.learning_rate_warmup_steps = 8000 + # Set train steps to learning_rate_decay_steps or less + hparams.learning_rate_decay_steps = 150000 + hparams.multiproblem_target_eval_only = True + hparams.multiproblem_reweight_label_loss = True + hparams.multiproblem_label_weight = 1.0 + hparams.optimizer = "true_adam" + return hparams + + +@registry.register_hparams +def transformer_tall_finetune_uniencdec(): + """Fine-tune CNN/DM with a unidirectional encoder and decoder.""" + hparams = transformer_tall() + hparams.max_input_seq_length = 750 + hparams.max_target_seq_length = 100 + hparams.optimizer = "true_adam" + hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay") + hparams.learning_rate_decay_steps = 80000 + hparams.learning_rate_constant = 5e-5 + hparams.learning_rate_warmup_steps = 100 + hparams.unidirectional_encoder = True + return hparams + + +@registry.register_hparams +def transformer_tall_train_uniencdec(): + """Train CNN/DM with a unidirectional encoder and decoder.""" + hparams = transformer_tall() + hparams.max_input_seq_length = 750 + hparams.max_target_seq_length = 100 + hparams.optimizer = "true_adam" + hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay") + hparams.learning_rate_decay_steps = 150000 + hparams.learning_rate_constant = 2e-4 + hparams.unidirectional_encoder = True + return hparams + + +@registry.register_hparams +def transformer_tall_finetune_textclass(): + """Hparams for transformer on LM for finetuning on text class problems.""" + hparams = transformer_tall() + hparams.learning_rate_constant = 6.25e-5 + hparams.learning_rate_schedule = ("linear_warmup*constant*linear_decay") + hparams.multiproblem_schedule_max_examples = 0 + 
hparams.multiproblem_target_eval_only = True + hparams.learning_rate_warmup_steps = 50 + # Set train steps to learning_rate_decay_steps or less + hparams.learning_rate_decay_steps = 25000 + hparams.multiproblem_reweight_label_loss = True + hparams.multiproblem_label_weight = 0.95 + return hparams + + +@registry.register_hparams +def transformer_tall_pretrain_lm(): + """Hparams for transformer on LM pretraining (with 64k vocab).""" + hparams = transformer_tall() + hparams.learning_rate_constant = 2e-4 + hparams.learning_rate_schedule = ("linear_warmup*constant*cosdecay") + hparams.optimizer = "adam_w" + hparams.weight_decay = 0.01 * hparams.learning_rate_constant + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.999 + hparams.optimizer_adam_epsilon = 1e-8 + # Set max examples to something big when pretraining only the LM, definitely + # something an order of magnitude bigger than number of train steps. + hparams.multiproblem_schedule_max_examples = 5e8 + # Set train steps to learning_rate_decay_steps or less + hparams.learning_rate_decay_steps = 5000000 + return hparams + + +@registry.register_hparams +def transformer_tall_pretrain_lm_tpu_adafactor(): + """Hparams for transformer on LM pretraining (with 64k vocab) on TPU.""" + hparams = transformer_tall_pretrain_lm() + update_hparams_for_tpu(hparams) + hparams.max_length = 1024 + # For multi-problem on TPU we need it in absolute examples. + hparams.batch_size = 8 + hparams.multiproblem_vocab_size = 2**16 + return hparams + + +@registry.register_hparams +def transformer_tall_pretrain_lm_tpu_adafactor_large(): + """Hparams for transformer on LM pretraining on TPU, large model.""" + hparams = transformer_tall_pretrain_lm_tpu_adafactor() + hparams.hidden_size = 1024 + hparams.num_heads = 16 + hparams.filter_size = 32768 # max fitting in 16G memory is 49152, batch 2 + hparams.batch_size = 4 + hparams.multiproblem_mixing_schedule = "constant" + # Task order: lm/en-de/en-fr/en-ro/de-en/fr-en/ro-en/cnndm/mnli/squad. + hparams.multiproblem_per_task_threshold = "320,80,160,1,80,160,2,20,10,5" + return hparams + + +@registry.register_hparams +def transformer_tall_pretrain_lm_tpu(): + """Hparams for transformer on LM pretraining on TPU with AdamW.""" + hparams = transformer_tall_pretrain_lm_tpu_adafactor() + # Optimizer gets reset in update_hparams_for_tpu so we set it again here. 
+ hparams.learning_rate_constant = 2e-4 + hparams.learning_rate_schedule = ("linear_warmup * constant * cosdecay") + hparams.optimizer = "adam_w" + hparams.weight_decay = 0.01 * hparams.learning_rate_constant + return hparams + + +@registry.register_hparams +def transformer_tall_big(): + """Hparams for transformer on LM+MNLI.""" + hparams = transformer_tall() + hparams.num_hidden_layers = 18 + return hparams + + +@registry.register_hparams +def transformer_big_single_gpu(): + """HParams for transformer big model for single GPU.""" + hparams = transformer_big() + hparams.layer_prepostprocess_dropout = 0.1 + hparams.learning_rate_warmup_steps = 16000 + return hparams + + +@registry.register_hparams +def transformer_base_single_gpu(): + """HParams for transformer base model for single GPU.""" hparams = transformer_base() - hparams.batch_size = 8192 + hparams.batch_size = 1024 + hparams.learning_rate_schedule = "constant*linear_warmup*rsqrt_decay" + hparams.learning_rate_constant = 0.1 hparams.learning_rate_warmup_steps = 16000 - hparams.batching_mantissa_bits = 2 + return hparams + + +@registry.register_hparams +def transformer_base_multistep8(): + """HParams for simulating 8 GPUs with MultistepAdam optimizer.""" + hparams = transformer_base() + hparams.optimizer = "multistep_adam" + hparams.optimizer_multistep_accumulate_steps = 8 + return hparams + + +@registry.register_hparams +def transformer_cubbitt(): + """Transformer hyperparameters used in CUBBITT experiments.""" + hparams = transformer_big_single_gpu() + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.batch_size = 2900 + hparams.learning_rate_warmup_steps = 8000 + hparams.max_length = 150 + hparams.layer_prepostprocess_dropout = 0 + hparams.optimizer = "Adafactor" + return hparams + + +@registry.register_hparams +def transformer_parsing_base(): + """HParams for parsing on WSJ only.""" + hparams = transformer_base() + hparams.attention_dropout = 0.2 + hparams.layer_prepostprocess_dropout = 0.2 + hparams.max_length = 512 + hparams.learning_rate_warmup_steps = 16000 + hparams.hidden_size = 1024 + hparams.learning_rate = 0.05 + hparams.shared_embedding_and_softmax_weights = False + return hparams + + +@registry.register_hparams +def transformer_parsing_big(): + """HParams for parsing on WSJ semi-supervised.""" + hparams = transformer_big() + hparams.max_length = 512 + hparams.shared_source_target_embedding = False + hparams.learning_rate_warmup_steps = 4000 + hparams.layer_prepostprocess_dropout = 0.1 + hparams.batch_size = 2048 + hparams.learning_rate = 0.05 + return hparams + + +@registry.register_hparams +def transformer_parsing_ice(): + """HParams for parsing and tagging Icelandic text.""" + hparams = transformer_base_single_gpu() + hparams.batch_size = 4096 + hparams.shared_embedding_and_softmax_weights = False return hparams @registry.register_hparams def transformer_tiny(): hparams = transformer_base() - hparams.hidden_size = 64 - hparams.filter_size = 128 + hparams.num_hidden_layers = 2 + hparams.hidden_size = 128 + hparams.filter_size = 512 + hparams.num_heads = 4 + return hparams + + +@registry.register_hparams +def transformer_test(): + hparams = transformer_base() + hparams.num_hidden_layers = 2 + hparams.hidden_size = 16 + hparams.filter_size = 8 + hparams.num_heads = 2 + return hparams + + +@registry.register_hparams +def transformer_small(): + hparams = transformer_base() + hparams.num_hidden_layers = 2 + hparams.hidden_size = 256 + hparams.filter_size = 1024 hparams.num_heads = 4 return hparams @@ -316,6 +2287,13 
@@ def transformer_l8(): return hparams +@registry.register_hparams +def transformer_l10(): + hparams = transformer_base() + hparams.num_hidden_layers = 10 + return hparams + + @registry.register_hparams def transformer_h1(): hparams = transformer_base() @@ -375,14 +2353,14 @@ def transformer_ff4096(): @registry.register_hparams def transformer_dr0(): hparams = transformer_base() - hparams.residual_dropout = 0.0 + hparams.layer_prepostprocess_dropout = 0.0 return hparams @registry.register_hparams def transformer_dr2(): hparams = transformer_base() - hparams.residual_dropout = 0.2 + hparams.layer_prepostprocess_dropout = 0.2 return hparams @@ -420,76 +2398,581 @@ def transformer_big_dr1(): hparams.hidden_size = 1024 hparams.filter_size = 4096 hparams.num_heads = 16 - hparams.residual_dropout = 0.1 - hparams.batching_mantissa_bits = 2 + hparams.layer_prepostprocess_dropout = 0.1 return hparams @registry.register_hparams def transformer_big_enfr(): hparams = transformer_big_dr1() - hparams.shared_embedding_and_softmax_weights = int(False) + hparams.shared_embedding_and_softmax_weights = False hparams.filter_size = 8192 - hparams.residual_dropout = 0.1 + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def transformer_big_enfr_tpu(): + hparams = transformer_big_enfr() + # For performance, use fewer heads so that matrix dimensions are at least 128 + hparams.num_heads = 8 + update_hparams_for_tpu(hparams) return hparams @registry.register_hparams def transformer_big_dr2(): hparams = transformer_big_dr1() - hparams.residual_dropout = 0.2 + hparams.layer_prepostprocess_dropout = 0.2 return hparams @registry.register_hparams -def transformer_big_dr3(): - hparams = transformer_big_dr1() - hparams.residual_dropout = 0.3 +def transformer_parameter_attention_a(): + hparams = transformer_base() + hparams.ffn_layer = "parameter_attention" + hparams.filter_size = 1536 return hparams @registry.register_hparams -def transformer_big_single_gpu(): - hparams = transformer_big_dr1() - hparams.learning_rate_warmup_steps = 16000 - hparams.optimizer_adam_beta2 = 0.998 - hparams.batching_mantissa_bits = 3 +def transformer_parameter_attention_b(): + hparams = transformer_base() + hparams.ffn_layer = "parameter_attention" + hparams.filter_size = 512 + hparams.parameter_attention_key_channels = 1024 + hparams.parameter_attention_value_channels = 1024 + hparams.num_heads = 16 return hparams @registry.register_hparams -def transformer_parsing_base_dr6(): - """hparams for parsing on wsj only.""" +def transformer_prepend_v2(): + hparams = transformer_base_v2() + hparams.prepend_mode = "prepend_inputs_masked_attention" + hparams.max_length = 0 + return hparams + + +@registry.register_hparams +def transformer_prepend_v1(): + hparams = transformer_base_v1() + hparams.prepend_mode = "prepend_inputs_masked_attention" + hparams.max_length = 0 + return hparams + + +@registry.register_hparams +def transformer_prepend(): + return transformer_prepend_v2() + + +@registry.register_ranged_hparams +def transformer_base_range(rhp): + """Small range of hyperparameters.""" + # After starting from base, set intervals for some parameters. 
+ rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE) + rhp.set_discrete("learning_rate_warmup_steps", + [1000, 2000, 4000, 8000, 16000]) + rhp.set_float("initializer_gain", 0.5, 2.0) + rhp.set_float("optimizer_adam_beta1", 0.85, 0.95) + rhp.set_float("optimizer_adam_beta2", 0.97, 0.99) + rhp.set_float("weight_decay", 0.0, 1e-4) + + +@registry.register_hparams +def transformer_relative(): + """Use relative position embeddings instead of absolute position encodings.""" hparams = transformer_base() - hparams.attention_dropout = 0.2 - hparams.residual_dropout = 0.2 - hparams.max_length = 512 - hparams.learning_rate_warmup_steps = 16000 + hparams.pos = None + hparams.self_attention_type = "dot_product_relative" + hparams.max_relative_position = 20 + return hparams + + +@registry.register_hparams +def transformer_relative_tiny(): + hparams = transformer_relative() + hparams.num_hidden_layers = 2 + hparams.hidden_size = 128 + hparams.filter_size = 512 + hparams.num_heads = 4 + return hparams + + +@registry.register_hparams +def transformer_relative_big(): + hparams = transformer_big() + hparams.pos = None + hparams.self_attention_type = "dot_product_relative" + hparams.max_relative_position = 20 + return hparams + + +@registry.register_hparams +def transformer_timeseries(): + hparams = transformer_small() + hparams.batch_size = 256 + hparams.learning_rate_warmup_steps = 2000 + return hparams + + +@registry.register_hparams +def transformer_mlperf_tpu(): + """HParams for Transformer model on TPU for MLPerf on TPU 2x2.""" + hparams = transformer_base_v3() + hparams.mlperf_mode = True + hparams.symbol_modality_num_shards = 1 + hparams.max_length = 256 # ignored when using "_packed" problems + hparams.batch_size = 2048 # per-chip batch size matches the reference model hparams.hidden_size = 1024 - hparams.learning_rate = 0.5 - hparams.shared_embedding_and_softmax_weights = int(False) + hparams.filter_size = 4096 + hparams.num_heads = 16 + hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads + hparams.relu_dropout_broadcast_dims = "1" # length + hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length + return hparams + + +def update_hparams_for_tpu(hparams): + """Change hparams to be compatible with TPU training.""" + + # Adafactor uses less memory than Adam. + # switch to Adafactor with its recommended learning rate scheme. + hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.learning_rate_warmup_steps = 10000 + + # Avoid an expensive concat on TPU. + # >1 shards helps with faster parameter distribution on multi-GPU machines + hparams.symbol_modality_num_shards = 1 + + # Adaptive batch sizes and sequence lengths are not supported on TPU. + # Instead, every batch has the same sequence length and the same batch size. + # Longer sequences are dropped and shorter ones are padded. + # + # It is therefore suggested to use a problem where examples have been combined + # to a longer length, e.g. the "_packed" problems. + # + # For problems with variable sequence lengths, this parameter controls the + # maximum sequence length. Longer sequences are dropped and shorter ones + # are padded. + # + # For problems with fixed sequence lengths - e.g. the "_packed" problems, + # this hyperparameter is ignored. 
+ hparams.max_length = 64 + + # TPUs have less memory than GPUs, so decrease the batch size if it's too high + if hparams.batch_size > 2048: + hparams.batch_size = 2048 + + # Using noise broadcast in the dropout layers saves memory during training. + hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads + hparams.relu_dropout_broadcast_dims = "1" # length + hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length return hparams @registry.register_hparams -def transformer_parsing_big(): - """HParams for parsing on wsj semi-supervised.""" - hparams = transformer_big_dr1() - hparams.max_length = 512 - hparams.shared_source_target_embedding = int(False) - hparams.learning_rate_warmup_steps = 4000 - hparams.batch_size = 2048 - hparams.learning_rate = 0.5 +def transformer_tpu(): + """HParams for Transformer model on TPU.""" + hparams = transformer_base() + update_hparams_for_tpu(hparams) + return hparams + + +@registry.register_hparams +def transformer_timeseries_tpu(): + """HParams for running Transformer model on timeseries on TPU.""" + hparams = transformer_timeseries() + update_hparams_for_tpu(hparams) + hparams.batch_size = 256 # revert to value set in transformer_timeseries + return hparams + + +@registry.register_hparams +def transformer_tpu_bf16_activation(): + """HParams for Transformer model with BF16 activation on TPU.""" + hparams = transformer_tpu() + hparams.activation_dtype = "bfloat16" + return hparams + + +@registry.register_hparams +def transformer_fairseq_fp16_activation_big(): + """Hparams intended to mirror those used in arxiv.org/pdf/1806.00187.pdf.""" + hparams = transformer_big() + hparams.activation_dtype = "float16" + hparams.batch_size = 3584 return hparams -@registry.register_ranged_hparams("transformer_big_single_gpu") -def transformer_range1(rhp): +@registry.register_hparams +def transformer_packed_tpu(): + """Deprecated alias for transformer_tpu().""" + return transformer_tpu() + + +@registry.register_hparams +def transformer_big_tpu(): + hparams = transformer_big() + update_hparams_for_tpu(hparams) + return hparams + + +@registry.register_hparams +def transformer_tiny_tpu(): + hparams = transformer_tiny() + update_hparams_for_tpu(hparams) + return hparams + + +@registry.register_ranged_hparams +def transformer_tiny_tpu_range(rhp): """Small range of hyperparameters.""" - hparams = transformer_big_single_gpu() - common_hparams.fill_ranged_hparams_from_hparams(hparams, rhp) + rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE) + rhp.set_float("weight_decay", 0.0, 2.0) + +@registry.register_ranged_hparams +def transformer_tpu_range(rhp): + """Small range of hyperparameters.""" + # After starting from base, set intervals for some parameters. rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE) + rhp.set_discrete("learning_rate_warmup_steps", + [1000, 2000, 4000, 8000, 16000]) rhp.set_float("initializer_gain", 0.5, 2.0) + rhp.set_float("optimizer_adam_beta1", 0.85, 0.95) rhp.set_float("optimizer_adam_beta2", 0.97, 0.99) rhp.set_float("weight_decay", 0.0, 2.0) + + +@registry.register_hparams +def transformer_small_tpu(): + """TPU-friendly version of transformer_small. + + Returns: + an hparams object. 
+ """ + hparams = transformer_small() + update_hparams_for_tpu(hparams) + return hparams + + +@registry.register_hparams +def transformer_clean(): + """No dropout, label smoothing, max_length.""" + hparams = transformer_base_v2() + hparams.label_smoothing = 0.0 + hparams.layer_prepostprocess_dropout = 0.0 + hparams.attention_dropout = 0.0 + hparams.relu_dropout = 0.0 + hparams.max_length = 0 + return hparams + + +@registry.register_hparams +def transformer_clean_big(): + hparams = transformer_clean() + hparams.hidden_size = 1024 + hparams.filter_size = 4096 + return hparams + + +@registry.register_hparams +def transformer_clean_big_tpu(): + hparams = transformer_clean_big() + update_hparams_for_tpu(hparams) + return hparams + + +@registry.register_hparams +def transformer_tpu_with_conv(): + """Cut down on the number of heads, and use convs instead.""" + hparams = transformer_tpu() + hparams.num_heads = 4 # Heads are expensive on TPUs. + hparams.ffn_layer = "conv_relu_conv" + return hparams + + +@registry.register_hparams +def transformer_lm_tpu_0(): + """HParams for training languagemodel_lm1b8k on tpu. 92M Params.""" + hparams = transformer_clean_big() + update_hparams_for_tpu(hparams) + hparams.num_heads = 4 # Heads are expensive on TPUs. + hparams.batch_size = 4096 + hparams.shared_embedding_and_softmax_weights = False + hparams.layer_prepostprocess_dropout = 0.1 + return hparams + + +@registry.register_hparams +def transformer_lm_tpu_1(): + """HParams for training languagemodel_lm1b8k on tpu. 335M Params.""" + hparams = transformer_lm_tpu_0() + hparams.hidden_size = 2048 + hparams.filter_size = 8192 + return hparams + + +@registry.register_hparams +def transformer_librispeech_v1(): + """HParams for training ASR model on LibriSpeech V1.""" + hparams = transformer_base() + + hparams.num_heads = 4 + hparams.filter_size = 1024 + hparams.hidden_size = 256 + hparams.num_encoder_layers = 5 + hparams.num_decoder_layers = 3 + hparams.learning_rate = 0.15 + hparams.batch_size = 6000000 + + librispeech.set_librispeech_length_hparams(hparams) + return hparams + + +@registry.register_hparams +def transformer_librispeech_v2(): + """HParams for training ASR model on LibriSpeech V2.""" + hparams = transformer_base() + + hparams.max_length = 1240000 + hparams.max_input_seq_length = 1550 + hparams.max_target_seq_length = 350 + hparams.batch_size = 16 + hparams.num_decoder_layers = 4 + hparams.num_encoder_layers = 6 + hparams.hidden_size = 384 + hparams.learning_rate = 0.15 + hparams.daisy_chain_variables = False + hparams.filter_size = 1536 + hparams.num_heads = 2 + hparams.ffn_layer = "conv_relu_conv" + hparams.conv_first_kernel = 9 + hparams.weight_decay = 0 + hparams.layer_prepostprocess_dropout = 0.2 + hparams.relu_dropout = 0.2 + + return hparams + + +@registry.register_hparams +def transformer_librispeech_tpu_v1(): + """HParams for training ASR model on Librispeech on TPU v1.""" + hparams = transformer_librispeech_v1() + update_hparams_for_tpu(hparams) + + hparams.batch_size = 16 + librispeech.set_librispeech_length_hparams(hparams) + return hparams + + +@registry.register_hparams +def transformer_librispeech_tpu_v2(): + """HParams for training ASR model on Librispeech on TPU v2.""" + hparams = transformer_librispeech_v2() + update_hparams_for_tpu(hparams) + + hparams.batch_size = 16 + librispeech.set_librispeech_length_hparams(hparams) + return hparams + + +@registry.register_hparams +def transformer_librispeech_with_area_attention(): + """HParams for training ASR model on Librispeech on TPU 
v2.""" + hparams = transformer_librispeech_tpu_v2() + hparams.num_area_layers = 3 # area attn on first 3 encoder and decoder layers + hparams.max_area_width = 5 + hparams.area_key_mode = "concat" + hparams.area_value_mode = "sum" + return hparams + + +@registry.register_hparams +def transformer_librispeech(): + """HParams for training ASR model on Librispeech.""" + return transformer_librispeech_v2() + + +@registry.register_hparams +def transformer_librispeech_tpu(): + """HParams for training ASR model on Librispeech on TPU.""" + return transformer_librispeech_tpu_v2() + + +@registry.register_hparams +def transformer_common_voice(): + """HParams for training ASR model on Mozilla Common Voice.""" + return transformer_librispeech() + + +@registry.register_hparams +def transformer_common_voice_tpu(): + """HParams for training ASR model on Mozilla Common Voice on TPU.""" + hparams = transformer_librispeech_tpu() + hparams.batch_size = 8 + return hparams + + +@registry.register_hparams +def transformer_supervised_attention(): + """HParams for supervised attention problems.""" + hparams = transformer_base() + # Attention loss type (KL-divergence or MSE). + hparams.add_hparam("expected_attention_loss_type", "kl_divergence") + # Multiplier to the encoder-decoder expected attention loss. + hparams.add_hparam("expected_attention_loss_multiplier", 1.0) + return hparams + + +@registry.register_hparams +def transformer_tpu_1b(): + """Hparams for machine translation with ~1.1B parameters.""" + hparams = transformer_tpu() + hparams.hidden_size = 2048 + hparams.filter_size = 8192 + hparams.num_hidden_layers = 8 + # smaller batch size to avoid OOM + hparams.batch_size = 1024 + hparams.activation_dtype = "bfloat16" + hparams.weight_dtype = "bfloat16" + # maximize number of parameters relative to computation by not sharing. + hparams.shared_embedding_and_softmax_weights = False + return hparams + + +@registry.register_hparams +def transformer_wikitext103_l4k_v0(): + """HParams for training languagemodel_wikitext103_l4k.""" + hparams = transformer_big() + + # Adafactor uses less memory than Adam. + # switch to Adafactor with its recommended learning rate scheme. + hparams.optimizer = "Adafactor" + hparams.learning_rate_schedule = "rsqrt_decay" + hparams.learning_rate_warmup_steps = 10000 + + hparams.num_heads = 4 + hparams.max_length = 4096 + hparams.batch_size = 4096 + hparams.shared_embedding_and_softmax_weights = False + + hparams.num_hidden_layers = 8 + hparams.attention_dropout = 0.1 + hparams.layer_prepostprocess_dropout = 0.2 + hparams.relu_dropout = 0.1 + hparams.label_smoothing = 0.0 + + # Using noise broadcast in the dropout layers saves memory during training. + hparams.attention_dropout_broadcast_dims = "0,1" # batch, heads + hparams.relu_dropout_broadcast_dims = "1" # length + hparams.layer_prepostprocess_dropout_broadcast_dims = "1" # length + + # Avoid an expensive concat on TPU. 
+ # >1 shards helps with faster parameter distribution on multi-GPU machines + hparams.symbol_modality_num_shards = 1 + + return hparams + + +@registry.register_hparams +def transformer_wikitext103_l4k_memory_v0(): + """HParams for training languagemodel_wikitext103_l4k with memory.""" + hparams = transformer_wikitext103_l4k_v0() + + hparams.split_targets_chunk_length = 64 + hparams.split_targets_max_chunks = 64 + hparams.split_targets_strided_training = True + hparams.add_hparam("memory_type", "transformer_xl") + + # The hparams specify batch size *before* chunking, but we want to have a + # consistent 4K batch size *after* chunking to fully utilize the hardware. + target_tokens_per_batch = 4096 + hparams.batch_size = int(target_tokens_per_batch * ( + hparams.max_length / hparams.split_targets_chunk_length)) # 262144 + + hparams.pos = None + hparams.self_attention_type = "dot_product_relative" + hparams.max_relative_position = 2 * hparams.split_targets_chunk_length + + hparams.add_hparam("unconditional", True) + hparams.add_hparam("recurrent_memory_batch_size", 0) # 0 = try to guess + # By default, cache one chunk only (like Transformer-XL) + hparams.add_hparam("num_memory_items", hparams.split_targets_chunk_length) + + return hparams + + +@registry.register_hparams +def transformer_wikitext103_l16k_memory_v0(): + """HParams for training languagemodel_wikitext103_l16k with memory.""" + hparams = transformer_wikitext103_l4k_memory_v0() + + hparams.max_length = 16384 + hparams.split_targets_chunk_length = 64 + hparams.split_targets_max_chunks = int( + hparams.max_length / hparams.split_targets_chunk_length) + + # The hparams specify batch size *before* chunking, but we want to have a + # consistent 4K batch size *after* chunking to fully utilize the hardware. 
+ target_tokens_per_batch = 4096 + hparams.batch_size = int(target_tokens_per_batch * ( + hparams.max_length / hparams.split_targets_chunk_length)) + + hparams.max_relative_position = 2 * hparams.split_targets_chunk_length + + return hparams + + +@registry.register_hparams +def transformer_cifar10_memory_v0(): + """HParams for training image_cifar10_plain_gen_flat_rev with memory.""" + hparams = transformer_wikitext103_l4k_memory_v0() + + hparams.num_hidden_layers = 6 + + hparams.max_length = 32 * 32 * 3 + hparams.split_targets_chunk_length = 64 * 3 + hparams.split_targets_max_chunks = int( + hparams.max_length / hparams.split_targets_chunk_length) + hparams.num_memory_items = 128 * 3 + + # Since this is an image problem, batch size refers to examples (not tokens) + target_images_per_batch = 4 + hparams.batch_size = int(target_images_per_batch * ( + hparams.max_length / hparams.split_targets_chunk_length)) + + # The recurrent memory needs to know the actual batch size (in sequences) + hparams.recurrent_memory_batch_size = hparams.batch_size + + hparams.max_relative_position = ( + hparams.num_memory_items + hparams.split_targets_chunk_length) + + return hparams + + +@registry.register_hparams +def transformer_imagenet64_memory_v0(): + """HParams for training image_imagenet64_gen_flat_rev with memory.""" + hparams = transformer_cifar10_memory_v0() + + hparams.max_length = 64 * 64 * 3 + hparams.split_targets_chunk_length = 64 * 3 + hparams.split_targets_max_chunks = int( + hparams.max_length / hparams.split_targets_chunk_length) + hparams.num_memory_items = 128 * 3 + + # Since this is an image problem, batch size refers to examples (not tokens) + target_images_per_batch = 2 + hparams.batch_size = int(target_images_per_batch * ( + hparams.max_length / hparams.split_targets_chunk_length)) + + # The recurrent memory needs to know the actual batch size (in sequences) + hparams.recurrent_memory_batch_size = hparams.batch_size + + hparams.max_relative_position = 3072 + + return hparams diff --git a/tensor2tensor/models/transformer_test.py b/tensor2tensor/models/transformer_test.py index 1b43ce625..96cdae359 100644 --- a/tensor2tensor/models/transformer_test.py +++ b/tensor2tensor/models/transformer_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -17,46 +18,412 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function - -# Dependency imports - import numpy as np +from tensor2tensor.data_generators import librispeech from tensor2tensor.data_generators import problem_hparams from tensor2tensor.models import transformer -import tensorflow as tf +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator -class TransformerTest(tf.test.TestCase): +BATCH_SIZE = 3 +INPUT_LENGTH = 5 +TARGET_LENGTH = 7 +VOCAB_SIZE = 10 - def _testTransformer(self, net): - batch_size = 3 - input_length = 5 - target_length = 7 - vocab_size = 9 + +def get_model(hparams=None, mode=tf_estimator.ModeKeys.TRAIN, + has_input=True, model_cls=transformer.Transformer): + if hparams is None: hparams = transformer.transformer_tiny() - p_hparams = problem_hparams.test_problem_hparams(hparams, vocab_size, - vocab_size) - inputs = -1 + np.random.random_integers( - vocab_size, size=(batch_size, input_length, 1, 1)) - targets = -1 + np.random.random_integers( - vocab_size, size=(batch_size, target_length, 1, 1)) + hparams.hidden_size = 8 + hparams.filter_size = 32 + hparams.num_heads = 1 + hparams.layer_prepostprocess_dropout = 0.0 + + if hparams.get("problem_hparams", None) is None: + p_hparams = problem_hparams.test_problem_hparams(VOCAB_SIZE, + VOCAB_SIZE, + hparams) + if not has_input: + del p_hparams.modality["inputs"] + hparams.problem_hparams = p_hparams + + inputs = np.random.randint( + VOCAB_SIZE, size=(BATCH_SIZE, INPUT_LENGTH, 1, 1)) + targets = np.random.randint( + VOCAB_SIZE, size=(BATCH_SIZE, TARGET_LENGTH, 1, 1)) + features = { + "targets": tf.constant(targets, dtype=tf.int32, name="targets"), + "target_space_id": tf.constant(1, dtype=tf.int32) + } + if has_input: + features["inputs"] = tf.constant(inputs, dtype=tf.int32, name="inputs") + + return model_cls(hparams, mode, p_hparams), features + + +def small_librispeech_model(param_overrides=None): + hparams = transformer.transformer_small() + hparams.hidden_size = 8 + hparams.filter_size = 32 + hparams.num_heads = 1 + hparams.layer_prepostprocess_dropout = 0.0 + p_hparams = librispeech.Librispeech().get_hparams(hparams) + p_hparams.vocab_size["targets"] = VOCAB_SIZE + hparams.problem_hparams = p_hparams + model = transformer.Transformer(hparams, problem_hparams=p_hparams) + if param_overrides is not None: # Add or Set any provided HParams + assert isinstance(param_overrides, dict) + for param_name in param_overrides: + if hasattr(hparams, param_name): + hparams.set_hparam(param_name, param_overrides[param_name]) + else: + hparams.add_hparam(param_name, param_overrides[param_name]) + inputs = np.random.rand( + BATCH_SIZE, INPUT_LENGTH, 80, 3).astype("float32") # modify for speech + targets = np.random.randint( + VOCAB_SIZE, size=(BATCH_SIZE, TARGET_LENGTH, 1, 1)) + features = { + "inputs": tf.constant(inputs, dtype=tf.float32, name="inputs"), + "targets": tf.constant(targets, dtype=tf.int32, name="targets"), + "target_space_id": tf.constant(1, dtype=tf.int32) + } + return model, features + + +class TransformerTest(tf.test.TestCase): + + def testTransformer(self, get_model_fn=None, p=None): + if get_model_fn: + model, features = get_model_fn(param_overrides=p) + else: + model, features = get_model(transformer.transformer_small()) + logits, _ = model(features) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + res = session.run(logits) + self.assertEqual(res.shape, (BATCH_SIZE, 
TARGET_LENGTH, 1, 1, VOCAB_SIZE)) + + def testTransformerLibrispeech(self, params=None): + self.testTransformer(get_model_fn=small_librispeech_model, p=params) + + def testLibrispeechSlowVsFast(self, params=None): + self.testSlowVsFast(get_model_fn=small_librispeech_model, p=params) + + def testLibrispeechMultihead(self, params=None): + self.testTransformerLibrispeech({"num_heads": 2}) + + def testLibrispeechWithAreaAttention(self): + self.testTransformerLibrispeech({"max_area_width": 2, + "num_area_layers": 1, + "area_key_mode": "mean", + "area_value_mode": "sum"}) + + def testTransformerRelative(self): + model, features = get_model(transformer.transformer_relative_tiny()) + logits, _ = model(features) with self.test_session() as session: - features = { - "inputs": tf.constant(inputs, dtype=tf.int32), - "targets": tf.constant(targets, dtype=tf.int32), - "target_space_id": tf.constant(1, dtype=tf.int32), - } - model = net(hparams, p_hparams) - shadred_logits, _, _ = model.model_fn(features, True) - logits = tf.concat(shadred_logits, 0) session.run(tf.global_variables_initializer()) res = session.run(logits) - self.assertEqual(res.shape, (batch_size, target_length, 1, 1, vocab_size)) + self.assertEqual(res.shape, (BATCH_SIZE, TARGET_LENGTH, 1, 1, VOCAB_SIZE)) + + def testSlowVsFast(self, get_model_fn=None, p=None): + if get_model_fn: + model, features = get_model_fn(param_overrides=p) + else: + model, features = get_model(transformer.transformer_small()) + + decode_length = 3 + + out_logits, _ = model(features) + out_logits = tf.squeeze(out_logits, axis=[2, 3]) + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]), + labels=tf.reshape(features["targets"], [-1])) + loss = tf.reduce_mean(loss) + apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss) + + with self.test_session(): + tf.global_variables_initializer().run() + for _ in range(100): + apply_grad.run() + + model.set_mode(tf_estimator.ModeKeys.PREDICT) + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + greedy_result = model._slow_greedy_infer( + features, decode_length)["outputs"] + greedy_result = tf.squeeze(greedy_result, axis=[2, 3]) + + fast_result = model._greedy_infer(features, decode_length)["outputs"] + + with self.test_session(): + greedy_res = greedy_result.eval() + fast_res = fast_result.eval() + + self.assertEqual(fast_res.shape, (BATCH_SIZE, INPUT_LENGTH + decode_length)) + self.assertAllClose(greedy_res, fast_res) + + def testSlowVsFastNoInput(self): + model, features = get_model( + transformer.transformer_small(), has_input=False) + + decode_length = 3 + + out_logits, _ = model(features) + out_logits = tf.squeeze(out_logits, axis=[2, 3]) + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]), + labels=tf.reshape(features["targets"], [-1])) + loss = tf.reduce_mean(loss) + apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss) + + with self.test_session(): + tf.global_variables_initializer().run() + for _ in range(100): + apply_grad.run() + + model.set_mode(tf_estimator.ModeKeys.PREDICT) + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + slow_result = model._slow_greedy_infer( + features, decode_length)["outputs"] + slow_result = tf.squeeze(slow_result, axis=[2, 3]) + + fast_result = model._greedy_infer(features, decode_length)["outputs"] + + with self.test_session(): + slow_res = slow_result.eval() + fast_res = fast_result.eval() + + self.assertEqual(slow_res.shape, (BATCH_SIZE, 
decode_length)) + self.assertAllClose(slow_res, fast_res) + + def testBeamDecodeWithRelativeAttention(self): + decode_length = 2 + model, features = get_model(transformer.transformer_relative_tiny()) + model.set_mode(tf_estimator.ModeKeys.PREDICT) + + beam_result = model._beam_decode( + features, decode_length, beam_size=4, top_beams=1, + alpha=1.0)["outputs"] + + with self.test_session(): + tf.global_variables_initializer().run() + beam_result.eval() + + # TODO(petershaw): This test is flaky because the decode may hit EOS before + # getting to the expected length. + # self.assertEqual(beam_res.shape, + # (BATCH_SIZE, INPUT_LENGTH + decode_length)) + + def testBeamVsFast(self): + model, features = get_model(transformer.transformer_small()) + + decode_length = 2 + + out_logits, _ = model(features) + out_logits = tf.squeeze(out_logits, axis=[2, 3]) + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]), + labels=tf.reshape(features["targets"], [-1])) + loss = tf.reduce_mean(loss) + apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss) + + with self.test_session(): + tf.global_variables_initializer().run() + for _ in range(100): + apply_grad.run() + + model.set_mode(tf_estimator.ModeKeys.PREDICT) + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + beam_result = model._beam_decode_slow( + features, + decode_length, + beam_size=4, + top_beams=1, + alpha=1.0)["outputs"] + + fast_result = model._beam_decode( + features, + decode_length, + beam_size=4, + top_beams=1, + alpha=1.0)["outputs"] + + with self.test_session(): + beam_res = beam_result.eval() + fast_res = fast_result.eval() + + self.assertAllClose(beam_res, fast_res) + + def testTransformerWithoutProblem(self): + hparams = transformer.transformer_test() + + embedded_inputs = np.random.random_sample( + (BATCH_SIZE, INPUT_LENGTH, 1, hparams.hidden_size)) + embedded_targets = np.random.random_sample( + (BATCH_SIZE, TARGET_LENGTH, 1, hparams.hidden_size)) + + transformed_features = { + "inputs": tf.constant(embedded_inputs, dtype=tf.float32), + "targets": tf.constant(embedded_targets, dtype=tf.float32) + } + + model = transformer.Transformer(hparams) + body_out, _ = model(transformed_features) + + self.assertAllEqual( + body_out.get_shape().as_list(), + [BATCH_SIZE, TARGET_LENGTH, 1, hparams.hidden_size]) + + def testTransformerWithEncoderDecoderAttentionLoss(self): + model, features = get_model( + transformer.transformer_supervised_attention()) + expected_attention_weights = np.random.random_sample( + size=(BATCH_SIZE, TARGET_LENGTH, INPUT_LENGTH)) + features["expected_attentions"] = tf.constant( + expected_attention_weights, dtype=tf.float32) + _, extra_loss = model(features) + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + res = session.run(extra_loss["attention_loss"]) + self.assertEqual(res.shape, ()) + + def _create_greedy_infer_model(self): + """Creates model for greedy inference testing. + + Returns: + model: A t2t model. + features: An map of string to tensor. 
+ """ + model, features = get_model(transformer.transformer_small()) + + out_logits, _ = model(features) + out_logits = tf.squeeze(out_logits, axis=[2, 3]) + loss = tf.nn.sparse_softmax_cross_entropy_with_logits( + logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]), + labels=tf.reshape(features["targets"], [-1])) + loss = tf.reduce_mean(loss) + apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss) + + with self.test_session(): + tf.global_variables_initializer().run() + for _ in range(100): + apply_grad.run() + + model.set_mode(tf_estimator.ModeKeys.PREDICT) + + return model, features + + def testGreedySlowTPUVsNonTPU(self): + decode_length = 3 + + model, features = self._create_greedy_infer_model() + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + slow_result_non_tpu = model._slow_greedy_infer( + features, decode_length)["outputs"] + slow_result_non_tpu = tf.squeeze(slow_result_non_tpu, axis=[2, 3]) + + slow_result_tpu = model._slow_greedy_infer_tpu( + features, decode_length)["outputs"] + slow_result_tpu = tf.squeeze(slow_result_tpu, axis=[2, 3]) + + with self.test_session(): + slow_non_tpu_res = slow_result_non_tpu.eval() + slow_tpu_res = slow_result_tpu.eval() + + self.assertEqual(slow_tpu_res.shape, + (BATCH_SIZE, INPUT_LENGTH + decode_length)) + self.assertAllClose(slow_tpu_res, slow_non_tpu_res) + + def testGreedyFastTPUVsNonTPU(self): + decode_length = 3 + + model, features = self._create_greedy_infer_model() + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + fast_result_non_tpu = model._greedy_infer( + features, decode_length, use_tpu=False)["outputs"] + + fast_result_tpu = model._greedy_infer( + features, decode_length, use_tpu=True)["outputs"] + + with self.test_session(): + fast_non_tpu_res = fast_result_non_tpu.eval() + fast_tpu_res = fast_result_tpu.eval() + + self.assertEqual(fast_tpu_res.shape, + (BATCH_SIZE, INPUT_LENGTH + decode_length)) + self.assertAllClose(fast_tpu_res, fast_non_tpu_res) + + def testGreedyTPUSlowVsFast(self): + decode_length = 3 + + model, features = self._create_greedy_infer_model() + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + slow_result = model._slow_greedy_infer_tpu( + features, decode_length)["outputs"] + slow_result = tf.squeeze(slow_result, axis=[2, 3]) + + fast_result = model._greedy_infer( + features, decode_length, use_tpu=True)["outputs"] + + with self.test_session(): + slow_res = slow_result.eval() + fast_res = fast_result.eval() + + self.assertEqual(fast_res.shape, + (BATCH_SIZE, INPUT_LENGTH + decode_length)) + self.assertAllClose(fast_res, slow_res) + + +class TransformerScorerTest(tf.test.TestCase): + + def testReturnsScores(self): + model, features = get_model( + mode=tf_estimator.ModeKeys.PREDICT, + model_cls=transformer.TransformerScorer) + infer_out = model.infer(features) + self.assertTrue("outputs" in infer_out) + self.assertTrue("scores" in infer_out) + + with self.test_session() as session: + session.run(tf.global_variables_initializer()) + infer_out = session.run(infer_out) + self.assertEqual((BATCH_SIZE,), infer_out["scores"].shape) + self.assertEqual((BATCH_SIZE, TARGET_LENGTH), infer_out["outputs"].shape) + + def testVarNames(self): + with tf.Graph().as_default(): + model, features = get_model( + mode=tf_estimator.ModeKeys.PREDICT, + model_cls=transformer.TransformerScorer) + _ = model.infer(features) + scorer_vars = [v.name for v in tf.global_variables()] + + with tf.Graph().as_default(): + model, features = get_model( + mode=tf_estimator.ModeKeys.EVAL, + 
model_cls=transformer.TransformerScorer) + _ = model(features) + scorer_eval_vars = [v.name for v in tf.global_variables()] + + with tf.Graph().as_default(): + model, features = get_model( + mode=tf_estimator.ModeKeys.EVAL, + model_cls=transformer.Transformer) + _ = model(features) + transformer_vars = [v.name for v in tf.global_variables()] - def testTransformer(self): - self._testTransformer(transformer.Transformer) + self.assertEqual(sorted(scorer_vars), sorted(transformer_vars)) + self.assertEqual(sorted(scorer_eval_vars), sorted(transformer_vars)) if __name__ == "__main__": diff --git a/tensor2tensor/models/vanilla_gan.py b/tensor2tensor/models/vanilla_gan.py new file mode 100644 index 000000000..a79a7575f --- /dev/null +++ b/tensor2tensor/models/vanilla_gan.py @@ -0,0 +1,218 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Simple Generative Adversarial Model with two linear layers. + +Example of how to create a GAN in T2T. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +def lrelu(input_, leak=0.2, name="lrelu"): + return tf.maximum(input_, leak * input_, name=name) + + +def deconv2d( + input_, output_shape, k_h, k_w, d_h, d_w, stddev=0.02, name="deconv2d"): + """Deconvolution layer.""" + with tf.variable_scope(name): + w = tf.get_variable( + "w", [k_h, k_w, output_shape[-1], input_.get_shape()[-1]], + initializer=tf.random_normal_initializer(stddev=stddev)) + deconv = tf.nn.conv2d_transpose( + input_, w, output_shape=output_shape, strides=[1, d_h, d_w, 1]) + biases = tf.get_variable( + "biases", [output_shape[-1]], initializer=tf.constant_initializer(0.0)) + return tf.reshape(tf.nn.bias_add(deconv, biases), deconv.get_shape()) + + +def reverse_gradient(x): + return -x + tf.stop_gradient(2 * x) + + +class AbstractGAN(t2t_model.T2TModel): + """Base class for all GANs.""" + + def discriminator(self, x, is_training, reuse=False): + """Discriminator architecture based on InfoGAN. + + Args: + x: input images, shape [bs, h, w, channels] + is_training: boolean, are we in train or eval model. + reuse: boolean, should params be re-used. + + Returns: + out_logit: the output logits (before sigmoid). 
+ """ + hparams = self.hparams + with tf.variable_scope( + "discriminator", reuse=reuse, + initializer=tf.random_normal_initializer(stddev=0.02)): + batch_size, height, width = common_layers.shape_list(x)[:3] + # Mapping x from [bs, h, w, c] to [bs, 1] + net = tf.layers.conv2d(x, 64, (4, 4), strides=(2, 2), + padding="SAME", name="d_conv1") + # [bs, h/2, w/2, 64] + net = lrelu(net) + net = tf.layers.conv2d(net, 128, (4, 4), strides=(2, 2), + padding="SAME", name="d_conv2") + # [bs, h/4, w/4, 128] + if hparams.discriminator_batchnorm: + net = tf.layers.batch_normalization(net, training=is_training, + momentum=0.999, name="d_bn2") + net = lrelu(net) + size = height * width + net = tf.reshape(net, [batch_size, size * 8]) # [bs, h * w * 8] + net = tf.layers.dense(net, 1024, name="d_fc3") # [bs, 1024] + if hparams.discriminator_batchnorm: + net = tf.layers.batch_normalization(net, training=is_training, + momentum=0.999, name="d_bn3") + net = lrelu(net) + return net + + def generator(self, z, is_training, out_shape): + """Generator outputting image in [0, 1].""" + hparams = self.hparams + height, width, c_dim = out_shape + batch_size = hparams.batch_size + with tf.variable_scope( + "generator", + initializer=tf.random_normal_initializer(stddev=0.02)): + net = tf.layers.dense(z, 1024, name="g_fc1") + net = tf.layers.batch_normalization(net, training=is_training, + momentum=0.999, name="g_bn1") + net = lrelu(net) + net = tf.layers.dense(net, 128 * (height // 4) * (width // 4), + name="g_fc2") + net = tf.layers.batch_normalization(net, training=is_training, + momentum=0.999, name="g_bn2") + net = lrelu(net) + net = tf.reshape(net, [batch_size, height // 4, width // 4, 128]) + net = deconv2d(net, [batch_size, height // 2, width // 2, 64], + 4, 4, 2, 2, name="g_dc3") + net = tf.layers.batch_normalization(net, training=is_training, + momentum=0.999, name="g_bn3") + net = lrelu(net) + net = deconv2d(net, [batch_size, height, width, c_dim], + 4, 4, 2, 2, name="g_dc4") + out = tf.nn.sigmoid(net) + return common_layers.convert_real_to_rgb(out) + + def losses(self, inputs, generated): + """Return the losses dictionary.""" + raise NotImplementedError + + def body(self, features): + """Body of the model. + + Args: + features: a dictionary with the tensors. + + Returns: + A pair (predictions, losses) where predictions is the generated image + and losses is a dictionary of losses (that get added for the final loss). + """ + features["targets"] = features["inputs"] + is_training = self.hparams.mode == tf_estimator.ModeKeys.TRAIN + + # Input images. + inputs = tf.to_float(features["targets_raw"]) + + # Noise vector. + z = tf.random_uniform([self.hparams.batch_size, + self.hparams.bottleneck_bits], + minval=-1, maxval=1, name="z") + + # Generator output: fake images. + out_shape = common_layers.shape_list(inputs)[1:4] + g = self.generator(z, is_training, out_shape) + + losses = self.losses(inputs, g) # pylint: disable=not-callable + + summary_g_image = tf.reshape( + g[0, :], [1] + common_layers.shape_list(inputs)[1:]) + tf.summary.image("generated", summary_g_image, max_outputs=1) + + if is_training: # Returns an dummy output and the losses dictionary. 
+ return tf.zeros_like(inputs), losses + return tf.reshape(g, tf.shape(inputs)), losses + + def top(self, body_output, features): + """Override the top function to not do anything.""" + return body_output + + +@registry.register_model +class SlicedGan(AbstractGAN): + """Sliced GAN for demonstration.""" + + def losses(self, inputs, generated): + """Losses in the sliced case.""" + is_training = self.hparams.mode == tf_estimator.ModeKeys.TRAIN + def discriminate(x): + return self.discriminator(x, is_training=is_training, reuse=False) + generator_loss = common_layers.sliced_gan_loss( + inputs, reverse_gradient(generated), discriminate, + self.hparams.num_sliced_vecs) + return {"training": - generator_loss} + + def infer(self, *args, **kwargs): # pylint: disable=arguments-differ + del args, kwargs + + try: + num_channels = self.hparams.problem.num_channels + except AttributeError: + num_channels = 1 + + with tf.variable_scope("body/vanilla_gan", reuse=tf.AUTO_REUSE): + hparams = self.hparams + z = tf.random_uniform([hparams.batch_size, hparams.bottleneck_bits], + minval=-1, maxval=1, name="z") + out_shape = (hparams.sample_height, hparams.sample_width, num_channels) + g_sample = self.generator(z, False, out_shape) + return g_sample + + +@registry.register_hparams +def sliced_gan(): + """Basic parameters for a vanilla_gan.""" + hparams = common_hparams.basic_params1() + hparams.optimizer = "adam" + hparams.learning_rate_constant = 0.0002 + hparams.learning_rate_warmup_steps = 500 + hparams.learning_rate_schedule = "constant * linear_warmup" + hparams.label_smoothing = 0.0 + hparams.batch_size = 128 + hparams.hidden_size = 128 + hparams.initializer = "uniform_unit_scaling" + hparams.initializer_gain = 1.0 + hparams.weight_decay = 1e-6 + hparams.kernel_height = 4 + hparams.kernel_width = 4 + hparams.bottleneck_bits = 128 + hparams.add_hparam("discriminator_batchnorm", True) + hparams.add_hparam("num_sliced_vecs", 4096) + return hparams diff --git a/tensor2tensor/models/video/__init__.py b/tensor2tensor/models/video/__init__.py new file mode 100644 index 000000000..ff174dd63 --- /dev/null +++ b/tensor2tensor/models/video/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tensor2tensor/models/video/base.py b/tensor2tensor/models/video/base.py new file mode 100644 index 000000000..9fffc50dc --- /dev/null +++ b/tensor2tensor/models/video/base.py @@ -0,0 +1,699 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
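# A minimal sketch (assuming a TF1-style graph session, not part of the model
# code) of the reverse_gradient trick that SlicedGan relies on above:
# -x + stop_gradient(2*x) equals x on the forward pass, but its gradient with
# respect to x is -1, so the variables behind it receive sign-flipped
# gradients and a single optimizer can train generator and discriminator
# adversarially.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant(3.0)
y = -x + tf.stop_gradient(2 * x)   # forward value: 3.0
(dy_dx,) = tf.gradients(y, [x])    # gradient: -1.0
with tf.Session() as sess:
    print(sess.run([y, dy_dx]))    # [3.0, -1.0]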
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Basic models for testing simple tasks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import six + +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.layers import discretization +from tensor2tensor.layers import modalities +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf + + +def flat_lists(list_of_lists): + return [x for l in list_of_lists for x in l] # pylint: disable=g-complex-comprehension + + +def pixels_from_softmax(frame_logits, pure_sampling=False, + temperature=1.0, gumbel_noise_factor=0.2): + """Given frame_logits from a per-pixel softmax, generate colors.""" + # If we're purely sampling, just sample each pixel. + if pure_sampling or temperature == 0.0: + return common_layers.sample_with_temperature(frame_logits, temperature) + + # Gumbel-sample from the pixel sofmax and average by pixel values. + pixel_range = tf.to_float(tf.range(256)) + for _ in range(len(frame_logits.get_shape().as_list()) - 1): + pixel_range = tf.expand_dims(pixel_range, axis=0) + + frame_logits = tf.nn.log_softmax(frame_logits) + gumbel_samples = discretization.gumbel_sample( + common_layers.shape_list(frame_logits)) * gumbel_noise_factor + + frame = tf.nn.softmax((frame_logits + gumbel_samples) / temperature, axis=-1) + result = tf.reduce_sum(frame * pixel_range, axis=-1) + # Round on the forward pass, not on the backward one. + return result + tf.stop_gradient(tf.round(result) - result) + + +@registry.register_model +class NextFrameBase(t2t_model.T2TModel): + """Base class for next_frame models. + + This is the base class for the models that given the previous frames + can predict the next frame. They may also support reward prediction + and action condition prediction which enables them to be run as + a world model in model-based RL pipeline. + + The API supports both recurrent and stacked frames models. Please look + at the documents for next_frame function for the API. + + If you are implementing a next frame prediction model consider + following the API presented in this class. But if the API + is too limiting for your models, feel free to override lower level + functions and/or inheret from T2TModel directly. + + """ + + # ============================================================================ + # BEGIN SUBCLASS INTERFACE + # ============================================================================ + def next_frame(self, + frames, actions, rewards, + target_frame, internal_states, video_features): + """The main prediction function of next frame models. + + This is the main function that should be overridden to implement models. + + Args: + frames: The list of input frames. + Only previous frame in case of recurrent models. + actions: The list of input actions. + Only previous action in case of recurrent models. + rewards: The list of input rewards. + Only previous reward in case of recurrent models. + target_frame: The target frame. + Usually required for approximating the posterior. + internal_states: Internal model states. Only useful for recurrent models + to keep the state from the previous time index. + internal_states is None at the first frame and should be + initialized properly. 
+ video_features: video wide features. None by default. + Please refer to video_features function for description. + + Returns: + pred_frame: predicted frame BSxWxHxC + where C is 3 for L1/L2 modality and 3*256 for Softmax. + pred_reward: the same size as input reward. + None if the model does not detect rewards. + pred_action: predicted action logits + pred_value: predicted value + extra_loss: any extra loss other than predicted frame and reward. + e.g. KL loss in case of VAE models. + internal_states: updated internal models states. + """ + raise NotImplementedError("Base video model.") + + def video_features( + self, all_frames, all_actions, all_rewards, all_raw_frames): + """Optional video wide features. + + If the model requires access to all of the video frames + (e.g. in case of approximating one latent for the whole video) + override this function to add them. They will be accessible + as video_features in next_frame function. + + Args: + all_frames: list of all frames including input and target frames. + all_actions: list of all actions including input and target actions. + all_rewards: list of all rewards including input and target rewards. + all_raw_frames: list of all raw frames (before modalities). + + Returns: + video_features: a dictionary containing video-wide features. + """ + del all_frames, all_actions, all_rewards, all_raw_frames + return None + + def video_extra_loss(self, frames_predicted, frames_target, + internal_states, video_features): + """Optional video wide extra loss. + + If the model needs to calculate some extra loss across all predicted + frames (e.g. in case of video GANS loss) override this function. + + Args: + frames_predicted: list of all predicted frames. + frames_target: list of all target frames. + internal_states: internal states of the video. + video_features: video wide features coming from video_features function. + + Returns: + extra_loss: extra video side loss. + """ + del frames_predicted, frames_target, internal_states, video_features + return 0.0 + + @property + def is_recurrent_model(self): + """Set to true if your model is recurrent. False otherwise. + + This mainly affects how the inputs will be fed into next_frame function. + """ + raise NotImplementedError("Base video model.") + + def init_internal_states(self): + """Allows a model to preserve its internal model across multiple runs. + + This optional function is only useful for any model with internal states + (usually recurrent models) which need to preserve states after any call. + """ + return None + + def reset_internal_states_ops(self): + """Resets internal states to initial values.""" + return [[tf.no_op()]] + + def load_internal_states_ops(self): + """Loade internal states from class variables.""" + return [[tf.no_op()]] + + def save_internal_states_ops(self, internal_states): + """Saves internal states into class variables.""" + return [[tf.no_op()]] + + # ============================================================================ + # END SUBCLASS INTERFACE + # ============================================================================ + + def __init__(self, *args, **kwargs): + super(NextFrameBase, self).__init__(*args, **kwargs) + self.internal_states = self.init_internal_states() + + @property + def _target_modality(self): + return self.problem_hparams.modality["targets"] + + @property + def is_per_pixel_softmax(self): + # TODO(trandustin): This is a hack. 
+ return "targets" not in self.hparams.get("loss") + + def get_iteration_num(self): + step_num = tf.train.get_global_step() + # TODO(lukaszkaiser): what should it be if it's undefined? + if step_num is None: + step_num = 10000000 + return step_num + + def visualize_predictions(self, predics, targets): + predics = tf.concat(predics, axis=1) + targets = tf.concat(targets, axis=1) + side_by_side_video = tf.concat([predics, targets], axis=2) + tf.summary.image("full_video", side_by_side_video) + + def get_scheduled_sample_func(self, batch_size): + """Creates a function for scheduled sampling based on given hparams.""" + with tf.variable_scope("scheduled_sampling_func", reuse=tf.AUTO_REUSE): + iter_num = self.get_iteration_num() + + # Simple function to bypass scheduled sampling in gt or pred only modes. + def scheduled_sampling_simple(ground_truth_x, generated_x, + batch_size, scheduled_sample_var): + del batch_size + if scheduled_sample_var: + return ground_truth_x + return generated_x + + mode = self.hparams.scheduled_sampling_mode + if mode == "ground_truth_only": + scheduled_sampling_func = scheduled_sampling_simple + scheduled_sampling_func_var = True + elif mode == "prediction_only": + scheduled_sampling_func = scheduled_sampling_simple + scheduled_sampling_func_var = False + elif mode == "prob": + decay_steps = self.hparams.scheduled_sampling_decay_steps + probability = tf.train.polynomial_decay( + 1.0, iter_num, decay_steps, 0.0) + scheduled_sampling_func = common_video.scheduled_sample_prob + scheduled_sampling_func_var = probability + elif mode == "prob_inverse_exp": + decay_steps = self.hparams.scheduled_sampling_decay_steps + probability = common_layers.inverse_exp_decay( + decay_steps, step=iter_num) + probability *= self.hparams.scheduled_sampling_max_prob + probability = 1.0 - probability + scheduled_sampling_func = common_video.scheduled_sample_prob + scheduled_sampling_func_var = probability + elif mode == "prob_inverse_lin": + decay_steps = self.hparams.scheduled_sampling_decay_steps + probability = common_layers.inverse_exp_decay( + decay_steps // 4, step=iter_num) # Very low at start. + probability *= common_layers.inverse_lin_decay( + decay_steps, step=iter_num) + probability *= self.hparams.scheduled_sampling_max_prob + probability = 1.0 - probability + scheduled_sampling_func = common_video.scheduled_sample_prob + scheduled_sampling_func_var = probability + elif mode == "count": + # Calculate number of ground-truth frames to pass in. + k = self.hparams.scheduled_sampling_k + num_ground_truth = tf.to_int32( + tf.round( + tf.to_float(batch_size) * + (k / (k + tf.exp(tf.to_float(iter_num) / tf.to_float(k)))))) + scheduled_sampling_func = common_video.scheduled_sample_count + scheduled_sampling_func_var = num_ground_truth + else: + raise ValueError("unknown scheduled sampling method: %s" % mode) + + if isinstance(scheduled_sampling_func_var, tf.Tensor): + tf.summary.scalar("scheduled_sampling_var", scheduled_sampling_func_var) + partial_func = functools.partial( + scheduled_sampling_func, + batch_size=batch_size, + scheduled_sample_var=scheduled_sampling_func_var) + return partial_func + + def get_scheduled_sample_inputs(self, + done_warm_start, + groundtruth_items, + generated_items, + scheduled_sampling_func): + """Scheduled sampling. + + Args: + done_warm_start: whether we are done with warm start or not. + groundtruth_items: list of ground truth items. + generated_items: list of generated items. 
+ scheduled_sampling_func: scheduled sampling function to choose between + groundtruth items and generated items. + + Returns: + A mix list of ground truth and generated items. + """ + def sample(): + """Calculate the scheduled sampling params based on iteration number.""" + with tf.variable_scope("scheduled_sampling", reuse=tf.AUTO_REUSE): + return [ + scheduled_sampling_func(item_gt, item_gen) + for item_gt, item_gen in zip(groundtruth_items, generated_items)] + + cases = [ + (tf.logical_not(done_warm_start), lambda: groundtruth_items), + (tf.logical_not(self.is_training), lambda: generated_items), + ] + output_items = tf.case(cases, default=sample, strict=True) + + return output_items + + def get_extra_internal_loss(self, extra_raw_gts, extra_gts, extra_pds): + """Hacky code the get the loss on predicted frames from input frames. + + Recurrent models consume the frames one-by-one. Therefore + if there is more than one input frame they also get predicted. + T2T only calculates loss on the predicted target frames which + means the loss is not being applied on the predicted input frames. + This code is to fix this issue. Since the model is not aware of the + modality it has to match the pre-porocessing happening in bottom + function and therefore this becomes a very hacky code. This code + should match the bottom and top and loss of modalities otherwise + it will calculate the wrong loss. + + Args: + extra_raw_gts: extra raw ground truth frames. + extra_gts: extra normalized ground truth frames. + extra_pds: extra predicted frames. + + Returns: + Additional reconstruction loss. + + Raises: + ValueError: in case of unknown loss transformation. + """ + # TODO(trandustin): This logic should be moved elsewhere. + if self.hparams.loss.get("targets") == modalities.video_l2_raw_loss: + recon_loss = tf.losses.mean_squared_error(extra_gts, extra_pds) + elif "targets" not in self.hparams.loss: + shape = common_layers.shape_list(extra_pds) + updated_shape = shape[:-1] + [3, 256] + extra_pds = tf.reshape(extra_pds, updated_shape) + # Merge time and batch + logits = tf.reshape(extra_pds, [-1] + updated_shape[2:]) + targets = extra_raw_gts + targets_shape = common_layers.shape_list(targets) + targets = tf.reshape(targets, [-1] + targets_shape[2:]) + targets_weights_fn = self.hparams.weights_fn.get( + "targets", + modalities.get_weights_fn(self._target_modality)) + numerator, denominator = common_layers.padded_cross_entropy( + logits, + targets, + self.hparams.label_smoothing, + cutoff=getattr(self.hparams, "video_modality_loss_cutoff", 0.01), + weights_fn=targets_weights_fn) + recon_loss = numerator / denominator + else: + raise ValueError("internal loss only supports specific hparams.loss.") + tf.summary.scalar("recon_extra", recon_loss) + return recon_loss + + def get_sampled_frame(self, pred_frame): + """Samples the frame based on modality. + + if the modality is L2/L1 then the next predicted frame is the + next frame and there is no sampling but in case of Softmax loss + the next actual frame should be sampled from predicted frame. + + This enables multi-frame target prediction with Softmax loss. + + Args: + pred_frame: predicted frame. + + Returns: + sampled frame. + + """ + # TODO(lukaszkaiser): the logic below heavily depend on the current + # (a bit strange) video modalities - we should change that. 
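# A rough NumPy sketch (illustrative only, ignoring the Gumbel noise and the
# straight-through rounding of the real implementation) of what
# pixels_from_softmax computes in the non-sampling branch: a softmax over the
# 256 intensity bins is turned into an expected pixel value per sub-pixel.
import numpy as np

def expected_pixels(frame_logits, temperature=1.0):
    # frame_logits: [..., 256] per-pixel logits.
    logits = frame_logits / temperature
    probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)
    return (probs * np.arange(256)).sum(axis=-1)

logits = np.random.randn(2, 4, 4, 3, 256)   # batch, height, width, channels
print(expected_pixels(logits).shape)        # (2, 4, 4, 3)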
+ + sampled_frame = pred_frame + if self.is_per_pixel_softmax: + frame_shape = common_layers.shape_list(pred_frame) + target_shape = frame_shape[:-1] + [self.hparams.problem.num_channels] + sampled_frame = tf.reshape(pred_frame, target_shape + [256]) + sampled_frame = pixels_from_softmax( + sampled_frame, temperature=self.hparams.pixel_sampling_temperature) + # TODO(lukaszkaiser): this should be consistent with modality.bottom() + # sampled_frame = common_layers.standardize_images(sampled_frame) + return tf.to_float(sampled_frame) + + def __get_next_inputs(self, index, all_frames, all_actions, all_rewards): + """Get inputs for next prediction iteration. + + If the model is recurrent then the inputs of the models are + the current inputs. For non-recurrent models the input is the + last N stacked frames/actions/rewards. + + Args: + index: current prediction index. from 0 to number of target frames. + all_frames: list of all frames including input and target frames. + all_actions: list of all actions including input and target actions. + all_rewards: list of all rewards including input and target rewards. + + Returns: + frames: input frames for next_frame prediction. + actions: input actions for next_frame prediction. + rewards: input rewards for next_frame prediction. + target_index: index of target frame in all_frames list. + """ + if self.is_recurrent_model: + target_index = index + 1 + nones = [None] + else: + target_index = index + self.hparams.video_num_input_frames + nones = [None] * self.hparams.video_num_input_frames + + frames = all_frames[index:target_index] + actions = all_actions[index:target_index] if self.has_actions else nones + rewards = all_rewards[index:target_index] if self.has_rewards else nones + + return frames, actions, rewards, target_index + + def infer(self, features, *args, **kwargs): # pylint: disable=arguments-differ + """Produce predictions from the model by running it.""" + del args, kwargs + # Inputs and features preparation needed to handle edge cases. + if not features: + features = {} + hparams = self.hparams + inputs_old = None + if "inputs" in features and len(features["inputs"].shape) < 4: + inputs_old = features["inputs"] + features["inputs"] = tf.expand_dims(features["inputs"], 2) + + def logits_to_samples(logits, key): + """Get samples from logits.""" + # If the last dimension is 1 then we're using L1/L2 loss. + if common_layers.shape_list(logits)[-1] == 1: + return tf.to_int32(tf.squeeze(logits, axis=-1)) + if key == "targets": + return pixels_from_softmax( + logits, gumbel_noise_factor=0.0, + temperature=hparams.pixel_sampling_temperature) + # Argmax in TF doesn't handle more than 5 dimensions yet. + logits_shape = common_layers.shape_list(logits) + argmax = tf.argmax(tf.reshape(logits, [-1, logits_shape[-1]]), axis=-1) + return tf.reshape(argmax, logits_shape[:-1]) + + # Get predictions. 
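# Illustrative sketch (plain Python, not the model code) of the input windows
# produced by __get_next_inputs above: recurrent models see one frame per step
# and predict the next one, while stacked-frame models see the last
# video_num_input_frames frames and predict the frame right after the window.
def next_inputs_indices(index, num_input_frames, is_recurrent):
    if is_recurrent:
        target_index = index + 1
    else:
        target_index = index + num_input_frames
    return list(range(index, target_index)), target_index

print(next_inputs_indices(0, 4, is_recurrent=True))    # ([0], 1)
print(next_inputs_indices(2, 4, is_recurrent=False))   # ([2, 3, 4, 5], 6)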
+ try: + num_channels = hparams.problem.num_channels + except AttributeError: + num_channels = 1 + if "inputs" in features: + inputs_shape = common_layers.shape_list(features["inputs"]) + targets_shape = [inputs_shape[0], hparams.video_num_target_frames, + inputs_shape[2], inputs_shape[3], num_channels] + else: + tf.logging.warn("Guessing targets shape as no inputs are given.") + targets_shape = [hparams.batch_size, + hparams.video_num_target_frames, 1, 1, num_channels] + + features["targets"] = tf.zeros(targets_shape, dtype=tf.int32) + reward_in_mod = "target_reward" in self.problem_hparams.modality + action_in_mod = "target_action" in self.problem_hparams.modality + if reward_in_mod: + # TODO(lukaszkaiser): this is a hack. get the actual reward history. + if "input_reward" not in features: + features["input_reward"] = tf.zeros( + [inputs_shape[0], inputs_shape[1], 1], dtype=tf.int32) + features["target_reward"] = tf.zeros( + [targets_shape[0], targets_shape[1], 1], dtype=tf.int32) + if action_in_mod and "target_action" not in features: + features["target_action"] = tf.zeros( + [targets_shape[0], targets_shape[1], 1], dtype=tf.int32) + logits, _ = self(features) # pylint: disable=not-callable + if isinstance(logits, dict): + results = {} + for k, v in six.iteritems(logits): + results[k] = logits_to_samples(v, k) + results["%s_logits" % k] = v + # HACK: bypassing decoding issues. + results["outputs"] = results["targets"] + results["scores"] = results["targets"] + else: + results = logits_to_samples(logits, "targets") + + # Restore inputs to not confuse Estimator in edge cases. + if inputs_old is not None: + features["inputs"] = inputs_old + + # Return results. + return results + + def __process(self, all_frames, all_actions, all_rewards, all_raw_frames): + """Main video processing function.""" + hparams = self.hparams + all_frames_copy = [tf.identity(frame) for frame in all_frames] + orig_frame_shape = common_layers.shape_list(all_frames[0]) + batch_size = orig_frame_shape[0] + ss_func = self.get_scheduled_sample_func(batch_size) + target_frames = [] + extra_loss = 0.0 + + # Any extra info required by the model goes into here. + video_features = self.video_features( + all_frames, all_actions, all_rewards, all_raw_frames) + + num_frames = len(all_frames) + if self.is_recurrent_model: + input_index_range = range(num_frames - 1) + else: + input_index_range = range(hparams.video_num_target_frames) + + # Setup the internal states as well as an auxiliary tf op + # to enforce syncronization between prediction steps. 
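# Sketch (plain Python) of the target-shape guess made in infer() above when
# targets are missing: with inputs of shape [batch, time, height, width, c]
# the targets keep the spatial shape and get video_num_target_frames steps;
# without inputs the shape falls back to [batch_size, num_target, 1, 1, c].
def guess_targets_shape(inputs_shape, num_target_frames, num_channels,
                        batch_size):
    if inputs_shape is not None:
        return [inputs_shape[0], num_target_frames,
                inputs_shape[2], inputs_shape[3], num_channels]
    return [batch_size, num_target_frames, 1, 1, num_channels]

print(guess_targets_shape([8, 4, 64, 64, 3], 1, 3, batch_size=8))
# [8, 1, 64, 64, 3]
print(guess_targets_shape(None, 1, 3, batch_size=8))
# [8, 1, 1, 1, 3]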
+ if self.internal_states is None: + internal_states = None + sync_op = tf.no_op() + else: + internal_states = self.load_internal_states_ops() + with tf.control_dependencies(flat_lists(internal_states)): + sync_op = tf.no_op() + + res_frames, sampled_frames, res_rewards, res_policies, res_values = \ + [], [], [], [], [] + for i in input_index_range: + with tf.control_dependencies([sync_op]): + frames, actions, rewards, target_index = self.__get_next_inputs( + i, all_frames, all_actions, all_rewards) + target_frame = all_frames[target_index] + target_frames.append(tf.identity(target_frame)) + + with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): + float_frames = [tf.to_float(frame) for frame in frames] + func_out = self.next_frame( + float_frames, actions, rewards, tf.to_float(target_frame), + internal_states, video_features) + res_frame, res_reward, res_policy, res_value, res_extra_loss, \ + internal_states = func_out + res_frames.append(res_frame) + res_rewards.append(res_reward) + res_policies.append(res_policy) + res_values.append(res_value) + extra_loss += res_extra_loss / float(len(input_index_range)) + + # Syncronizing the internals states + # Some Tensflow Magic to make sure everything happens as it should. + with tf.control_dependencies([res_frame]): + sync_op = tf.no_op() + if self.is_predicting and self.is_recurrent_model and i == 0: + # The internal state save happens at the end of the 1st iteration + # which essentially allows recurrent models to continue + # running after one prediction. + # Necessary for planning/rl applications. + save_ops = self.save_internal_states_ops(internal_states) + with tf.control_dependencies(flat_lists(save_ops)): + sync_op = tf.no_op() + + # Only for Softmax loss: sample frame so we can keep iterating. + sampled_frame = self.get_sampled_frame(res_frame) + sampled_frames.append(sampled_frame) + + # Check whether we are done with context frames or not + if self.is_recurrent_model: + done_warm_start = (i >= hparams.video_num_input_frames - 1) + else: + done_warm_start = True # Always true for non-reccurent networks. + + if self.is_predicting and done_warm_start: + all_frames[target_index] = sampled_frame + + # Scheduled sampling during training. + if self.is_training: + groundtruth_items = [tf.to_float(target_frame)] + generated_items = [sampled_frame] + ss_frame, = self.get_scheduled_sample_inputs( + done_warm_start, groundtruth_items, generated_items, ss_func) + all_frames[target_index] = ss_frame + + video_extra_loss = self.video_extra_loss( + sampled_frames, target_frames, internal_states, video_features) + tf.summary.scalar("video_extra_loss", video_extra_loss) + extra_loss += video_extra_loss + + if self.is_recurrent_model: + has_input_predictions = hparams.video_num_input_frames > 1 + if self.is_training and hparams.internal_loss and has_input_predictions: + # add the loss for input frames as well. + extra_gts = all_frames_copy[1:hparams.video_num_input_frames] + extra_raw_gts = all_raw_frames[1:hparams.video_num_input_frames] + extra_pds = res_frames[:hparams.video_num_input_frames-1] + recon_loss = self.get_extra_internal_loss( + extra_raw_gts, extra_gts, extra_pds) + extra_loss += recon_loss + # Cut the predicted input frames. 
+ res_frames = res_frames[hparams.video_num_input_frames-1:] + res_rewards = res_rewards[hparams.video_num_input_frames-1:] + res_policies = res_policies[hparams.video_num_input_frames-1:] + res_values = res_values[hparams.video_num_input_frames-1:] + sampled_frames = sampled_frames[hparams.video_num_input_frames-1:] + target_frames = target_frames[hparams.video_num_input_frames-1:] + + self.visualize_predictions( + sampled_frames, [tf.to_float(f) for f in target_frames]) + + output_frames = tf.stack(res_frames, axis=1) + targets = output_frames + + if any((self.has_rewards, self.has_policies, self.has_values)): + targets = {"targets": output_frames} + if self.has_rewards: + targets["target_reward"] = tf.stack(res_rewards, axis=1) + if self.has_policies: + targets["target_policy"] = tf.stack(res_policies, axis=1) + if self.has_values: + targets["target_value"] = tf.stack(res_values, axis=1) + + return targets, extra_loss + + def loss(self, *args, **kwargs): + if "policy_network" in self.hparams.values(): + return 0.0 + else: + return super(NextFrameBase, self).loss(*args, **kwargs) + + def body(self, features): + self.has_actions = "input_action" in features + self.has_rewards = "target_reward" in features + self.has_policies = "target_policy" in features + self.has_values = "target_value" in features + hparams = self.hparams + + def merge(inputs, targets): + """Split inputs and targets into lists.""" + inputs = tf.unstack(inputs, axis=1) + targets = tf.unstack(targets, axis=1) + assert len(inputs) == hparams.video_num_input_frames + assert len(targets) == hparams.video_num_target_frames + return inputs + targets + + frames = merge(features["inputs"], features["targets"]) + frames_raw = merge(features["inputs_raw"], features["targets_raw"]) + actions, rewards = None, None + if self.has_actions: + actions = merge(features["input_action"], features["target_action"]) + if self.has_rewards: + rewards = merge(features["input_reward"], features["target_reward"]) + + # Reset the internal states if the reset_internal_states has been + # passed as a feature and has greater value than 0. + if self.is_recurrent_model and self.internal_states is not None: + def reset_func(): + reset_ops = flat_lists(self.reset_internal_states_ops()) + with tf.control_dependencies(reset_ops): + return tf.no_op() + if self.is_predicting and "reset_internal_states" in features: + reset = features["reset_internal_states"] + reset = tf.greater(tf.reduce_sum(reset), 0.5) + reset_ops = tf.cond(reset, reset_func, tf.no_op) + else: + reset_ops = tf.no_op() + with tf.control_dependencies([reset_ops]): + frames[0] = tf.identity(frames[0]) + + with tf.control_dependencies([frames[0]]): + return self.__process(frames, actions, rewards, frames_raw) + + +def next_frame_base(): + """Common HParams for next_frame models.""" + hparams = common_hparams.basic_params1() + # Loss cutoff. + hparams.add_hparam("video_modality_loss_cutoff", 0.01) + # Additional resizing the frames before feeding them to model. + hparams.add_hparam("preprocess_resize_frames", None) + # How many data points to suffle. Ideally should be part of problem not model! + hparams.add_hparam("shuffle_buffer_size", 128) + # Tiny mode. For faster tests. + hparams.add_hparam("tiny_mode", False) + # In case a model supports smaller/faster version. + hparams.add_hparam("small_mode", False) + # In case a model has stochastic version. + hparams.add_hparam("stochastic_model", False) + # Internal loss for recurrent models. 
+ hparams.add_hparam("internal_loss", True) + # choose from: concat, multiplicative, multi_additive + hparams.add_hparam("action_injection", "multi_additive") + # Scheduled sampling method. Choose between + # ground_truth_only, prediction_only, prob, count, prob_inverse_exp. + hparams.add_hparam("scheduled_sampling_mode", "prediction_only") + hparams.add_hparam("scheduled_sampling_decay_steps", 10000) + hparams.add_hparam("scheduled_sampling_max_prob", 1.0) + hparams.add_hparam("scheduled_sampling_k", 900.0) + return hparams diff --git a/tensor2tensor/models/video/base_vae.py b/tensor2tensor/models/video/base_vae.py new file mode 100644 index 000000000..9f6e11fa2 --- /dev/null +++ b/tensor2tensor/models/video/base_vae.py @@ -0,0 +1,121 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Basic models for testing simple tasks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video + +import tensorflow.compat.v1 as tf + + +class NextFrameBaseVae(object): + """Basic function for stochastic variational video prediction.""" + + def __init__(self, hparams): + self.hparams = hparams + + def get_beta(self, kl_loss=0.0): + """Get the KL multiplier, either dynamically or schedule based. + + if hparams.latent_loss_multiplier_dynamic is set to true, then beta + is being adjusted to keep KL under hparams.latent_loss_multiplier_epsilon. + In order to do so, the beta is being updated at each iteration + by taking steps of size hparams.latent_loss_multiplier_alpha. + The same formulation can be retrieved by solving the Lagrangian + with KL < epsilon as a constraint. + + Args: + kl_loss: KL loss. Only used for dynamic adjustment. + + Returns: + beta: the final value of beta. + + """ + if self.hparams.latent_loss_multiplier_dynamic: + beta = tf.Variable(self.hparams.latent_loss_multiplier, + trainable=False, dtype=tf.float32) + alpha = self.hparams.latent_loss_multiplier_alpha + epsilon = self.hparams.latent_loss_multiplier_epsilon + shadow_beta = beta + alpha * (kl_loss - epsilon) + # Caping the beta between 0 and 1. May need to change this later on. + shadow_beta = tf.maximum(shadow_beta, 0.0) + shadow_beta = tf.minimum(shadow_beta, 1.0) + update_op = tf.assign(beta, shadow_beta) + else: + beta = common_video.beta_schedule( + schedule=self.hparams.latent_loss_multiplier_schedule, + global_step=self.get_iteration_num(), + final_beta=self.hparams.latent_loss_multiplier, + decay_start=(self.hparams.num_iterations_1st_stage + + self.hparams.num_iterations_2nd_stage), + decay_end=self.hparams.anneal_end) + update_op = tf.identity(beta) # fake update for regular beta. 
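# A plain-Python sketch of the dynamic KL-multiplier update implemented above
# when latent_loss_multiplier_dynamic is set: beta is nudged by
# alpha * (KL - epsilon) and clipped to [0, 1], which keeps the KL term close
# to the target epsilon (a Lagrangian-style constraint).
def update_beta(beta, kl_loss, alpha, epsilon):
    beta = beta + alpha * (kl_loss - epsilon)
    return min(max(beta, 0.0), 1.0)

beta = 0.0
for kl in [4.0, 3.0, 2.0, 1.0]:     # hypothetical per-step KL values
    beta = update_beta(beta, kl, alpha=0.01, epsilon=1.0)
print(beta)  # grows while KL exceeds epsilon, stops growing once KL hits it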
+ with tf.control_dependencies([update_op]): + tf.summary.scalar("beta", beta) + return beta + + def get_kl_loss(self, means, log_vars, means_p=None, log_vars_p=None): + """Get KL loss for all the predicted Gaussians.""" + kl_loss = 0.0 + if means_p is None: + means_p = tf.unstack(tf.zeros_like(means)) + if log_vars_p is None: + log_vars_p = tf.unstack(tf.zeros_like(log_vars)) + enumerated_inputs = enumerate(zip(means, log_vars, means_p, log_vars_p)) + if self.is_training and self.hparams.stochastic_model: + for i, (mean, log_var, mean_p, log_var_p) in enumerated_inputs: + kl_loss += common_layers.kl_divergence(mean, log_var, mean_p, log_var_p) + tf.summary.histogram("posterior_mean_%d" % i, mean) + tf.summary.histogram("posterior_log_var_%d" % i, log_var) + tf.summary.histogram("prior_mean_%d" % i, mean_p) + tf.summary.histogram("prior_log_var_%d" % i, log_var_p) + tf.summary.scalar("kl_raw", tf.reduce_mean(kl_loss)) + + beta = self.get_beta(kl_loss) + # information capacity from "Understanding disentangling in beta-VAE" + if self.hparams.information_capacity > 0.0: + kl_loss = tf.abs(kl_loss - self.hparams.information_capacity) + return beta * kl_loss + + def construct_latent_tower(self, images, time_axis): + """Create the latent tower.""" + # No latent in the first phase + first_phase = tf.less( + self.get_iteration_num(), self.hparams.num_iterations_1st_stage) + + # use all frames by default but this allows more + # predicted frames at inference time + latent_num_frames = self.hparams.latent_num_frames + tf.logging.info("Creating latent tower with %d frames." % latent_num_frames) + if latent_num_frames > 0: + images = images[:, :latent_num_frames] + + return common_video.conv_latent_tower( + images=images, + time_axis=time_axis, + latent_channels=self.hparams.latent_channels, + min_logvar=self.hparams.latent_std_min, + is_training=self.is_training, + random_latent=first_phase, + tiny_mode=self.hparams.tiny_mode, + small_mode=self.hparams.small_mode) + + + diff --git a/tensor2tensor/models/video/basic_deterministic.py b/tensor2tensor/models/video/basic_deterministic.py new file mode 100644 index 000000000..624acac29 --- /dev/null +++ b/tensor2tensor/models/video/basic_deterministic.py @@ -0,0 +1,233 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
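# The get_kl_loss method above delegates to common_layers.kl_divergence; the
# quantity it is based on is the standard KL between diagonal Gaussians
# parameterized by (mean, log_var). A NumPy sketch of that closed form (shown
# for illustration, not the library implementation):
import numpy as np

def gaussian_kl(mean, log_var, mean_p=0.0, log_var_p=0.0):
    # KL( N(mean, exp(log_var)) || N(mean_p, exp(log_var_p)) ), elementwise.
    return 0.5 * (log_var_p - log_var
                  + (np.exp(log_var) + (mean - mean_p) ** 2) / np.exp(log_var_p)
                  - 1.0)

# KL of a standard-normal posterior against the standard-normal prior is zero.
print(gaussian_kl(np.zeros(3), np.zeros(3)).sum())  # 0.0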
+ +"""Basic models for testing simple tasks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.layers import discretization +from tensor2tensor.models.video import base +from tensor2tensor.models.video import basic_deterministic_params # pylint: disable=unused-import +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_model +class NextFrameBasicDeterministic(base.NextFrameBase): + """Basic next-frame model, may take actions and predict rewards too.""" + + @property + def is_recurrent_model(self): + return False + + def inject_latent(self, layer, inputs, target, action): + del inputs, target, action + return layer, 0.0 + + def middle_network(self, layer, internal_states): + # Run a stack of convolutions. + activation_fn = common_layers.belu + if self.hparams.activation_fn == "relu": + activation_fn = tf.nn.relu + x = layer + kernel1 = (3, 3) + filters = common_layers.shape_list(x)[-1] + for i in range(self.hparams.num_hidden_layers): + with tf.variable_scope("layer%d" % i): + y = tf.nn.dropout(x, 1.0 - self.hparams.residual_dropout) + y = tf.layers.conv2d(y, filters, kernel1, activation=activation_fn, + strides=(1, 1), padding="SAME") + if i == 0: + x = y + else: + x = common_layers.layer_norm(x + y) + return x, internal_states + + def update_internal_states_early(self, internal_states, frames): + """Update the internal states early in the network if requested.""" + del frames + return internal_states + + def next_frame(self, frames, actions, rewards, target_frame, + internal_states, video_extra): + del rewards, video_extra + + hparams = self.hparams + filters = hparams.hidden_size + kernel2 = (4, 4) + action = actions[-1] + activation_fn = common_layers.belu + if self.hparams.activation_fn == "relu": + activation_fn = tf.nn.relu + + # Normalize frames. + frames = [common_layers.standardize_images(f) for f in frames] + + # Stack the inputs. + if internal_states is not None and hparams.concat_internal_states: + # Use the first part of the first internal state if asked to concatenate. + batch_size = common_layers.shape_list(frames[0])[0] + internal_state = internal_states[0][0][:batch_size, :, :, :] + stacked_frames = tf.concat(frames + [internal_state], axis=-1) + else: + stacked_frames = tf.concat(frames, axis=-1) + inputs_shape = common_layers.shape_list(stacked_frames) + + # Update internal states early if requested. + if hparams.concat_internal_states: + internal_states = self.update_internal_states_early( + internal_states, frames) + + # Using non-zero bias initializer below for edge cases of uniform inputs. + x = tf.layers.dense( + stacked_frames, filters, name="inputs_embed", + bias_initializer=tf.random_normal_initializer(stddev=0.01)) + x = common_attention.add_timing_signal_nd(x) + + # Down-stride. 
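# Shape sketch (NumPy stand-in, not from the model) for the frame stacking
# done just above: with video_num_input_frames = 4 RGB frames of shape
# [batch, height, width, 3], concatenating along the channel axis gives a
# [batch, height, width, 12] tensor that is then embedded to
# hparams.hidden_size by the "inputs_embed" dense layer.
import numpy as np

frames = [np.zeros((8, 64, 64, 3), dtype=np.float32) for _ in range(4)]
stacked = np.concatenate(frames, axis=-1)
print(stacked.shape)  # (8, 64, 64, 12)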
+ layer_inputs = [x] + for i in range(hparams.num_compress_steps): + with tf.variable_scope("downstride%d" % i): + layer_inputs.append(x) + x = tf.nn.dropout(x, 1.0 - self.hparams.dropout) + x = common_layers.make_even_size(x) + if i < hparams.filter_double_steps: + filters *= 2 + x = common_attention.add_timing_signal_nd(x) + x = tf.layers.conv2d(x, filters, kernel2, activation=activation_fn, + strides=(2, 2), padding="SAME") + x = common_layers.layer_norm(x) + + if self.has_actions: + with tf.variable_scope("policy"): + x_flat = tf.layers.flatten(x) + policy_pred = tf.layers.dense(x_flat, self.hparams.problem.num_actions) + value_pred = tf.layers.dense(x_flat, 1) + value_pred = tf.squeeze(value_pred, axis=-1) + else: + policy_pred, value_pred = None, None + + # Add embedded action if present. + if self.has_actions: + x = common_video.inject_additional_input( + x, action, "action_enc", hparams.action_injection) + + # Inject latent if present. Only for stochastic models. + norm_target_frame = common_layers.standardize_images(target_frame) + x, extra_loss = self.inject_latent(x, frames, norm_target_frame, action) + + x_mid = tf.reduce_mean(x, axis=[1, 2], keepdims=True) + x, internal_states = self.middle_network(x, internal_states) + + # Up-convolve. + layer_inputs = list(reversed(layer_inputs)) + for i in range(hparams.num_compress_steps): + with tf.variable_scope("upstride%d" % i): + x = tf.nn.dropout(x, 1.0 - self.hparams.dropout) + if self.has_actions: + x = common_video.inject_additional_input( + x, action, "action_enc", hparams.action_injection) + if i >= hparams.num_compress_steps - hparams.filter_double_steps: + filters //= 2 + x = tf.layers.conv2d_transpose( + x, filters, kernel2, activation=activation_fn, + strides=(2, 2), padding="SAME") + y = layer_inputs[i] + shape = common_layers.shape_list(y) + x = x[:, :shape[1], :shape[2], :] + x = common_layers.layer_norm(x + y) + x = common_attention.add_timing_signal_nd(x) + + # Cut down to original size. + x = x[:, :inputs_shape[1], :inputs_shape[2], :] + x_fin = tf.reduce_mean(x, axis=[1, 2], keepdims=True) + if hparams.do_autoregressive_rnn: + # If enabled, we predict the target frame autoregregressively using rnns. + # To this end, the current prediciton is flattened into one long sequence + # of sub-pixels, and so is the target frame. Each sub-pixel (RGB value, + # from 0 to 255) is predicted with an RNN. To avoid doing as many steps + # as width * height * channels, we only use a number of pixels back, + # as many as hparams.autoregressive_rnn_lookback. + with tf.variable_scope("autoregressive_rnn"): + batch_size = common_layers.shape_list(frames[0])[0] + # Height, width, channels and lookback are the constants we need. + h, w = inputs_shape[1], inputs_shape[2] # 105, 80 on Atari games + c = hparams.problem.num_channels + lookback = hparams.autoregressive_rnn_lookback + assert (h * w) % lookback == 0, "Number of pixels must divide lookback." + m = (h * w) // lookback # Batch size multiplier for the RNN. + # These are logits that will be used as inputs to the RNN. + rnn_inputs = tf.layers.dense(x, c * 64, name="rnn_inputs") + # They are of shape [batch_size, h, w, c, 64], reshaping now. + rnn_inputs = tf.reshape(rnn_inputs, [batch_size * m, lookback * c, 64]) + # Same for the target frame. + rnn_target = tf.reshape(target_frame, [batch_size * m, lookback * c]) + # Construct rnn starting state: flatten rnn_inputs, apply a relu layer. 
+ rnn_start_state = tf.nn.relu(tf.layers.dense(tf.nn.relu(
+ tf.layers.flatten(rnn_inputs)), 256, name="rnn_start_state"))
+ # Our RNN function API is on bits, each subpixel has 8 bits.
+ total_num_bits = lookback * c * 8
+ # We need to provide RNN targets as bits (due to the API).
+ rnn_target_bits = discretization.int_to_bit(rnn_target, 8)
+ rnn_target_bits = tf.reshape(
+ rnn_target_bits, [batch_size * m, total_num_bits])
+ if self.is_training:
+ # Run the RNN in training mode, add its loss to the losses.
+ rnn_predict, rnn_loss = discretization.predict_bits_with_lstm(
+ rnn_start_state, 128, total_num_bits, target_bits=rnn_target_bits,
+ extra_inputs=rnn_inputs)
+ extra_loss += rnn_loss
+ # We still use non-RNN predictions too in order to guide the network.
+ x = tf.layers.dense(x, c * 256, name="logits")
+ x = tf.reshape(x, [batch_size, h, w, c, 256])
+ rnn_predict = tf.reshape(rnn_predict, [batch_size, h, w, c, 256])
+ # Mix non-RNN and RNN predictions so that after warmup the RNN is 90%.
+ x = tf.reshape(tf.nn.log_softmax(x), [batch_size, h, w, c * 256])
+ rnn_predict = tf.nn.log_softmax(rnn_predict)
+ rnn_predict = tf.reshape(rnn_predict, [batch_size, h, w, c * 256])
+ alpha = 0.9 * common_layers.inverse_lin_decay(
+ hparams.autoregressive_rnn_warmup_steps)
+ x = alpha * rnn_predict + (1.0 - alpha) * x
+ else:
+ # In prediction mode, run the RNN without any targets.
+ bits, _ = discretization.predict_bits_with_lstm(
+ rnn_start_state, 128, total_num_bits, extra_inputs=rnn_inputs,
+ temperature=0.0) # No sampling from this RNN, just greedy.
+ # The output is in bits, get back the predicted pixels.
+ bits = tf.reshape(bits, [batch_size * m, lookback * c, 8])
+ ints = discretization.bit_to_int(tf.maximum(bits, 0), 8)
+ ints = tf.reshape(ints, [batch_size, h, w, c])
+ x = tf.reshape(tf.one_hot(ints, 256), [batch_size, h, w, c * 256])
+ elif self.is_per_pixel_softmax:
+ x = tf.layers.dense(x, hparams.problem.num_channels * 256, name="logits")
+ else:
+ x = tf.layers.dense(x, hparams.problem.num_channels, name="logits")
+
+ reward_pred = None
+ if self.has_rewards:
+ # Reward prediction based on middle and final logits.
+ reward_pred = tf.concat([x_mid, x_fin], axis=-1)
+ reward_pred = tf.nn.relu(tf.layers.dense(
+ reward_pred, 128, name="reward_pred"))
+ reward_pred = tf.squeeze(reward_pred, axis=1) # Remove extra dims
+ reward_pred = tf.squeeze(reward_pred, axis=1) # Remove extra dims
+
+ return x, reward_pred, policy_pred, value_pred, extra_loss, internal_states
diff --git a/tensor2tensor/models/video/basic_deterministic_params.py b/tensor2tensor/models/video/basic_deterministic_params.py
new file mode 100644
index 000000000..bb86c866b
--- /dev/null
+++ b/tensor2tensor/models/video/basic_deterministic_params.py
@@ -0,0 +1,201 @@
+# coding=utf-8
+# Copyright 2023 The Tensor2Tensor Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
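+
+# A hypothetical invocation of these hparams sets with the t2t-trainer binary
+# (flag names follow the usual T2T conventions; the problem name is only a
+# placeholder and must be a registered video problem):
+#
+#   t2t-trainer \
+#     --problem=<registered_video_problem> \
+#     --model=next_frame_basic_deterministic \
+#     --hparams_set=next_frame_basic_deterministic \
+#     --data_dir=$DATA_DIR --output_dir=$TRAIN_DIR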
+ +"""Param sets for deterministic basic next frame prediction model.""" + +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import modalities +from tensor2tensor.models.video import base +from tensor2tensor.utils import registry + + +@registry.register_hparams +def next_frame_basic_deterministic(): + """Basic 2-frame conv model.""" + hparams = base.next_frame_base() + hparams.video_num_input_frames = 4 + hparams.video_num_target_frames = 1 + hparams.hidden_size = 64 + hparams.batch_size = 4 + hparams.num_hidden_layers = 2 + hparams.optimizer = "Adafactor" + hparams.learning_rate_constant = 1.5 + hparams.learning_rate_warmup_steps = 8000 + hparams.learning_rate_schedule = "linear_warmup * constant * rsqrt_decay" + hparams.label_smoothing = 0.0 + hparams.initializer = "uniform_unit_scaling" + hparams.initializer_gain = 1.3 + hparams.weight_decay = 0.0 + hparams.clip_grad_norm = 1.0 + hparams.dropout = 0.1 + hparams.add_hparam("residual_dropout", 0.5) + hparams.add_hparam("num_compress_steps", 6) + hparams.add_hparam("filter_double_steps", 2) + hparams.add_hparam("pixel_sampling_temperature", 0.0) + hparams.add_hparam("concat_internal_states", False) + hparams.add_hparam("do_autoregressive_rnn", False) + hparams.add_hparam("autoregressive_rnn_lookback", 8) + hparams.add_hparam("autoregressive_rnn_warmup_steps", 8000) + hparams.add_hparam("activation_fn", "relu") + hparams.bottom["inputs"] = modalities.video_identity_bottom + hparams.bottom["targets"] = modalities.video_identity_bottom + return hparams + + +@registry.register_hparams +def next_frame_pixel_noise(): + """Basic 2-frame conv model with pixel noise.""" + hparams = next_frame_basic_deterministic() + hparams.add_hparam("video_modality_input_noise", 0.05) + hparams.bottom["inputs"] = modalities.video_pixel_noise_bottom + hparams.top["inputs"] = modalities.video_top + return hparams + + +@registry.register_hparams +def next_frame_pixel_noise_long(): + """Long scheduled sampling setting.""" + hparams = next_frame_pixel_noise() + hparams.batch_size = 2 + hparams.video_num_target_frames = 16 + return hparams + + +@registry.register_hparams +def next_frame_sampling(): + """Basic conv model with scheduled sampling.""" + hparams = next_frame_basic_deterministic() + hparams.scheduled_sampling_mode = "prob_inverse_exp" + hparams.scheduled_sampling_max_prob = 1.0 + hparams.scheduled_sampling_decay_steps = 10000 + return hparams + + +@registry.register_hparams +def next_frame_tpu(): + hparams = next_frame_basic_deterministic() + hparams.batch_size = 1 + return hparams + + +@registry.register_hparams +def next_frame_ae(): + """Conv autoencoder.""" + hparams = next_frame_basic_deterministic() + hparams.bottom["inputs"] = modalities.video_bitwise_bottom + hparams.top["inputs"] = modalities.video_top + hparams.hidden_size = 256 + hparams.batch_size = 8 + hparams.num_hidden_layers = 4 + hparams.num_compress_steps = 4 + hparams.dropout = 0.4 + return hparams + + +@registry.register_hparams +def next_frame_ae_tiny(): + """Conv autoencoder, tiny set for testing.""" + hparams = next_frame_tiny() + hparams.bottom["inputs"] = modalities.video_bitwise_bottom + hparams.top["inputs"] = modalities.video_top + hparams.batch_size = 8 + hparams.dropout = 0.4 + return hparams + + +@registry.register_hparams +def next_frame_small(): + """Small conv model.""" + hparams = next_frame_basic_deterministic() + hparams.hidden_size = 32 + return hparams + + +@registry.register_hparams +def next_frame_tiny(): + """Tiny for 
testing.""" + hparams = next_frame_basic_deterministic() + hparams.hidden_size = 32 + hparams.num_hidden_layers = 1 + hparams.num_compress_steps = 2 + hparams.filter_double_steps = 1 + return hparams + + +@registry.register_hparams +def next_frame_l1(): + """Basic conv model with L1 modality.""" + hparams = next_frame_basic_deterministic() + hparams.loss["targets"] = modalities.video_l1_loss + hparams.top["targets"] = modalities.video_l1_top + hparams.video_modality_loss_cutoff = 2.4 + return hparams + + +@registry.register_hparams +def next_frame_l2(): + """Basic conv model with L2 modality.""" + hparams = next_frame_basic_deterministic() + hparams.loss["targets"] = modalities.video_l2_loss + hparams.top["targets"] = modalities.video_l1_top + hparams.video_modality_loss_cutoff = 2.4 + return hparams + + +@registry.register_ranged_hparams +def next_frame_base_range(rhp): + """Basic tuning grid.""" + rhp.set_float("dropout", 0.2, 0.6) + rhp.set_discrete("hidden_size", [64, 128, 256]) + rhp.set_int("num_compress_steps", 5, 8) + rhp.set_discrete("batch_size", [4, 8, 16, 32]) + rhp.set_int("num_hidden_layers", 1, 3) + rhp.set_int("filter_double_steps", 1, 6) + rhp.set_float("learning_rate_constant", 1., 4.) + rhp.set_int("learning_rate_warmup_steps", 500, 3000) + rhp.set_float("initializer_gain", 0.8, 1.8) + + +@registry.register_ranged_hparams +def next_frame_doubling_range(rhp): + """Filter doubling and dropout tuning grid.""" + rhp.set_float("dropout", 0.2, 0.6) + rhp.set_int("filter_double_steps", 2, 5) + + +@registry.register_ranged_hparams +def next_frame_clipgrad_range(rhp): + """Filter doubling and dropout tuning grid.""" + rhp.set_float("dropout", 0.3, 0.4) + rhp.set_float("clip_grad_norm", 0.5, 10.0) + + +@registry.register_ranged_hparams +def next_frame_xent_cutoff_range(rhp): + """Cross-entropy tuning grid.""" + rhp.set_float("video_modality_loss_cutoff", 0.005, 0.05) + + +@registry.register_ranged_hparams +def next_frame_ae_range(rhp): + """Autoencoder world model tuning grid.""" + rhp.set_float("dropout", 0.3, 0.5) + rhp.set_int("num_compress_steps", 1, 3) + rhp.set_int("num_hidden_layers", 2, 6) + rhp.set_float("learning_rate_constant", 1., 2.) + rhp.set_float("initializer_gain", 0.8, 1.5) + rhp.set_int("filter_double_steps", 2, 3) diff --git a/tensor2tensor/models/video/basic_deterministic_test.py b/tensor2tensor/models/video/basic_deterministic_test.py new file mode 100644 index 000000000..b89c54a49 --- /dev/null +++ b/tensor2tensor/models/video/basic_deterministic_test.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Basic tests for basic deterministic model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.models.video import basic_deterministic +from tensor2tensor.models.video import basic_deterministic_params +from tensor2tensor.models.video import tests_utils + +import tensorflow.compat.v1 as tf + + +class NextFrameTest(tests_utils.BaseNextFrameTest): + + def testBasicDeterministic(self): + self.TestOnVariousInputOutputSizes( + basic_deterministic_params.next_frame_basic_deterministic(), + basic_deterministic.NextFrameBasicDeterministic, + 256, + False) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/video/basic_recurrent.py b/tensor2tensor/models/video/basic_recurrent.py new file mode 100644 index 000000000..e82cc6f15 --- /dev/null +++ b/tensor2tensor/models/video/basic_recurrent.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Basic recurrent models for testing simple tasks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_video +from tensor2tensor.models.video import basic_stochastic +from tensor2tensor.utils import registry + + +@registry.register_model +class NextFrameBasicRecurrent( + basic_stochastic.NextFrameBasicStochasticDiscrete): + """Basic next-frame recurrent model.""" + + @property + def is_recurrent_model(self): + return True + + def middle_network(self, layer, internal_states): + lstm_func = common_video.conv_lstm_2d + hp = self.hparams + + lstm_states = internal_states + if lstm_states is None: + lstm_states = [None] * hp.num_lstm_layers + + # LSTM layers + x = layer + for j in range(hp.num_lstm_layers): + x, lstm_states[j] = lstm_func(x, lstm_states[j], hp.num_lstm_filters) + return x, lstm_states + + +@registry.register_hparams +def next_frame_basic_recurrent(): + """Basic 2-frame recurrent model with stochastic tower.""" + hparams = basic_stochastic.next_frame_basic_stochastic_discrete() + hparams.filter_double_steps = 2 + hparams.hidden_size = 64 + hparams.video_num_input_frames = 4 + hparams.video_num_target_frames = 4 + hparams.concat_internal_states = False + hparams.add_hparam("num_lstm_layers", 2) + hparams.add_hparam("num_lstm_filters", 256) + return hparams diff --git a/tensor2tensor/models/video/basic_recurrent_test.py b/tensor2tensor/models/video/basic_recurrent_test.py new file mode 100644 index 000000000..0ec66a753 --- /dev/null +++ b/tensor2tensor/models/video/basic_recurrent_test.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Basic tests for basic deterministic model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.models.video import basic_recurrent +from tensor2tensor.models.video import tests_utils + +import tensorflow.compat.v1 as tf + + +class NextFrameTest(tests_utils.BaseNextFrameTest): + + def testBasicDeterministic(self): + self.TestOnVariousInputOutputSizes( + basic_recurrent.next_frame_basic_recurrent(), + basic_recurrent.NextFrameBasicRecurrent, + 256, + False) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/video/basic_stochastic.py b/tensor2tensor/models/video/basic_stochastic.py new file mode 100644 index 000000000..68bc73589 --- /dev/null +++ b/tensor2tensor/models/video/basic_stochastic.py @@ -0,0 +1,311 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Basic models for testing simple tasks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_attention +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.layers import discretization + +from tensor2tensor.models.video import base_vae +from tensor2tensor.models.video import basic_deterministic +from tensor2tensor.models.video import basic_deterministic_params + +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +tfl = tf.layers +_MAX_BATCH = 128 + + +@registry.register_model +class NextFrameBasicStochastic( + basic_deterministic.NextFrameBasicDeterministic, + base_vae.NextFrameBaseVae): + """Stochastic version of basic next-frame model.""" + + def inject_latent(self, layer, inputs, target, action): + """Inject a VAE-style latent.""" + del action + # Latent for stochastic model + filters = 128 + full_video = tf.stack(inputs + [target], axis=1) + latent_mean, latent_std = self.construct_latent_tower( + full_video, time_axis=1) + latent = common_video.get_gaussian_tensor(latent_mean, latent_std) + latent = tfl.flatten(latent) + latent = tf.expand_dims(latent, axis=1) + latent = tf.expand_dims(latent, axis=1) + latent_mask = tfl.dense(latent, filters, name="latent_mask") + zeros_mask = tf.zeros( + common_layers.shape_list(layer)[:-1] + [filters], dtype=tf.float32) + layer = tf.concat([layer, latent_mask + zeros_mask], axis=-1) + extra_loss = self.get_kl_loss([latent_mean], [latent_std]) + return layer, extra_loss + + +@registry.register_model +class NextFrameBasicStochasticDiscrete( + basic_deterministic.NextFrameBasicDeterministic): + """Basic next-frame model with a tiny discrete latent.""" + + @property + def is_recurrent_model(self): + return True + + def init_internal_states(self): + if not self.hparams.concat_internal_states: + return None + # Hardcoded frame shapes. 
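+ # The recurrent state is stored in a non-trainable variable sized for the
+ # largest expected batch (the max of _MAX_BATCH and hparams.batch_size);
+ # smaller batches slice into it, and update_internal_states_early pads its
+ # result back to the full size before it is written by
+ # save_internal_states_ops.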
+ max_batch_size = max(_MAX_BATCH, self.hparams.batch_size) + shape = [max_batch_size] + self.hparams.problem.frame_shape[:-1] + [ + self.hparams.recurrent_state_size] + with tf.variable_scope("clean_scope_for_internal_state"): + v = tf.get_variable("state", shape, trainable=False, + initializer=tf.zeros_initializer()) + return [[v]] + + def reset_internal_states_ops(self): + if not self.hparams.concat_internal_states: + return [[tf.no_op()]] + zeros = [[tf.zeros_like(s)] for s in self.internal_states[0]] + return self.save_internal_states_ops(zeros) + + def load_internal_states_ops(self): + if not self.hparams.concat_internal_states: + return [[tf.no_op()]] + ops = [[s.read_value()] for s in self.internal_states[0]] + return ops + + def save_internal_states_ops(self, internal_states): + if not self.hparams.concat_internal_states: + return [[tf.no_op()]] + ops = [[tf.assign(x, y)] + for x, y in zip(self.internal_states[0], internal_states[0])] + return ops + + def update_internal_states_early(self, internal_states, frames): + """Update the internal states early in the network in GRU-like way.""" + batch_size = common_layers.shape_list(frames[0])[0] + internal_state = internal_states[0][0][:batch_size, :, :, :] + state_activation = tf.concat([internal_state, frames[0]], axis=-1) + state_gate_candidate = tf.layers.conv2d( + state_activation, 2 * self.hparams.recurrent_state_size, + (3, 3), padding="SAME", name="state_conv") + state_gate, state_candidate = tf.split(state_gate_candidate, 2, axis=-1) + state_gate = tf.nn.sigmoid(state_gate) + state_candidate = tf.tanh(state_candidate) + internal_state = internal_state * state_gate + internal_state += state_candidate * (1.0 - state_gate) + max_batch_size = max(_MAX_BATCH, self.hparams.batch_size) + diff_batch_size = max_batch_size - batch_size + internal_state = tf.pad( + internal_state, [[0, diff_batch_size], [0, 0], [0, 0], [0, 0]]) + return [[internal_state]] + + def inject_latent(self, layer, inputs, target, action): + """Inject a deterministic latent based on the target frame.""" + hparams = self.hparams + final_filters = common_layers.shape_list(layer)[-1] + filters = hparams.hidden_size + kernel = (4, 4) + layer_shape = common_layers.shape_list(layer) + activation_fn = common_layers.belu + if hparams.activation_fn == "relu": + activation_fn = tf.nn.relu + + def add_bits(layer, bits): + z_mul = tfl.dense(bits, final_filters, name="unbottleneck_mul") + if not hparams.complex_addn: + return layer + z_mul + layer *= tf.nn.sigmoid(z_mul) + z_add = tfl.dense(bits, final_filters, name="unbottleneck_add") + layer += z_add + return layer + + if not self.is_training: + if hparams.full_latent_tower: + rand = tf.random_uniform(layer_shape[:-1] + [hparams.bottleneck_bits]) + bits = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0 + else: + bits, _ = discretization.predict_bits_with_lstm( + layer, hparams.latent_predictor_state_size, hparams.bottleneck_bits, + temperature=hparams.latent_predictor_temperature) + bits = tf.expand_dims(tf.expand_dims(bits, axis=1), axis=2) + return add_bits(layer, bits), 0.0 + + # Embed. + frames = tf.concat(inputs + [target], axis=-1) + x = tfl.dense( + frames, filters, name="latent_embed", + bias_initializer=tf.random_normal_initializer(stddev=0.01)) + x = common_attention.add_timing_signal_nd(x) + + # Add embedded action if present. 
+ if action is not None: + x = common_video.inject_additional_input( + x, action, "action_enc_latent", hparams.action_injection) + + if hparams.full_latent_tower: + for i in range(hparams.num_compress_steps): + with tf.variable_scope("latent_downstride%d" % i): + x = common_layers.make_even_size(x) + if i < hparams.filter_double_steps: + filters *= 2 + x = common_attention.add_timing_signal_nd(x) + x = tfl.conv2d(x, filters, kernel, + activation=activation_fn, + strides=(2, 2), padding="SAME") + x = common_layers.layer_norm(x) + else: + x = common_layers.double_discriminator(x) + x = tf.expand_dims(tf.expand_dims(x, axis=1), axis=1) + + bits, bits_clean = discretization.tanh_discrete_bottleneck( + x, hparams.bottleneck_bits, hparams.bottleneck_noise, + hparams.discretize_warmup_steps, hparams.mode) + if not hparams.full_latent_tower: + _, pred_loss = discretization.predict_bits_with_lstm( + layer, hparams.latent_predictor_state_size, hparams.bottleneck_bits, + target_bits=bits_clean) + # Mix bits from latent with predicted bits on forward pass as a noise. + if hparams.latent_rnn_max_sampling > 0.0: + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + bits_pred, _ = discretization.predict_bits_with_lstm( + layer, hparams.latent_predictor_state_size, + hparams.bottleneck_bits, + temperature=hparams.latent_predictor_temperature) + bits_pred = tf.expand_dims(tf.expand_dims(bits_pred, axis=1), axis=2) + # Be bits_pred on the forward pass but bits on the backward one. + bits_pred = bits_clean + tf.stop_gradient(bits_pred - bits_clean) + # Select which bits to take from pred sampling with bit_p probability. + which_bit = tf.random_uniform(common_layers.shape_list(bits)) + bit_p = common_layers.inverse_lin_decay(hparams.latent_rnn_warmup_steps) + bit_p *= hparams.latent_rnn_max_sampling + bits = tf.where(which_bit < bit_p, bits_pred, bits) + + res = add_bits(layer, bits) + # During training, sometimes skip the latent to help action-conditioning. + res_p = common_layers.inverse_lin_decay(hparams.latent_rnn_warmup_steps / 2) + res_p *= hparams.latent_use_max_probability + res_rand = tf.random_uniform([layer_shape[0]]) + res = tf.where(res_rand < res_p, res, layer) + return res, pred_loss + + +@registry.register_hparams +def next_frame_basic_stochastic(): + """Basic 2-frame conv model with stochastic tower.""" + hparams = basic_deterministic_params.next_frame_basic_deterministic() + hparams.stochastic_model = True + hparams.add_hparam("latent_channels", 1) + hparams.add_hparam("latent_std_min", -5.0) + hparams.add_hparam("num_iterations_1st_stage", 15000) + hparams.add_hparam("num_iterations_2nd_stage", 15000) + hparams.add_hparam("latent_loss_multiplier", 1e-3) + hparams.add_hparam("latent_loss_multiplier_dynamic", False) + hparams.add_hparam("latent_loss_multiplier_alpha", 1e-5) + hparams.add_hparam("latent_loss_multiplier_epsilon", 1.0) + hparams.add_hparam("latent_loss_multiplier_schedule", "constant") + hparams.add_hparam("latent_num_frames", 0) # 0 means use all frames. 
+ hparams.add_hparam("anneal_end", 50000) + hparams.add_hparam("information_capacity", 0.0) + return hparams + + +@registry.register_hparams +def next_frame_sampling_stochastic(): + """Basic 2-frame conv model with stochastic tower.""" + hparams = basic_deterministic_params.next_frame_sampling() + hparams.stochastic_model = True + hparams.add_hparam("latent_channels", 1) + hparams.add_hparam("latent_std_min", -5.0) + hparams.add_hparam("num_iterations_1st_stage", 15000) + hparams.add_hparam("num_iterations_2nd_stage", 15000) + hparams.add_hparam("latent_loss_multiplier", 1e-3) + hparams.add_hparam("latent_loss_multiplier_dynamic", False) + hparams.add_hparam("latent_loss_multiplier_alpha", 1e-5) + hparams.add_hparam("latent_loss_multiplier_epsilon", 1.0) + hparams.add_hparam("latent_loss_multiplier_schedule", "constant") + hparams.add_hparam("latent_num_frames", 0) # 0 means use all frames. + hparams.add_hparam("anneal_end", 40000) + hparams.add_hparam("information_capacity", 0.0) + return hparams + + +@registry.register_hparams +def next_frame_basic_stochastic_discrete(): + """Basic 2-frame conv model with stochastic discrete latent.""" + hparams = basic_deterministic_params.next_frame_sampling() + hparams.batch_size = 4 + hparams.video_num_target_frames = 6 + hparams.scheduled_sampling_mode = "prob_inverse_lin" + hparams.scheduled_sampling_decay_steps = 40000 + hparams.scheduled_sampling_max_prob = 1.0 + hparams.dropout = 0.15 + hparams.filter_double_steps = 3 + hparams.hidden_size = 96 + hparams.learning_rate_constant = 0.002 + hparams.learning_rate_warmup_steps = 2000 + hparams.learning_rate_schedule = "linear_warmup * constant" + hparams.concat_internal_states = True + hparams.video_modality_loss_cutoff = 0.03 + hparams.add_hparam("bottleneck_bits", 128) + hparams.add_hparam("bottleneck_noise", 0.1) + hparams.add_hparam("discretize_warmup_steps", 40000) + hparams.add_hparam("latent_rnn_warmup_steps", 40000) + hparams.add_hparam("latent_rnn_max_sampling", 0.5) + hparams.add_hparam("latent_use_max_probability", 0.8) + hparams.add_hparam("full_latent_tower", False) + hparams.add_hparam("latent_predictor_state_size", 128) + hparams.add_hparam("latent_predictor_temperature", 1.0) + hparams.add_hparam("complex_addn", True) + hparams.add_hparam("recurrent_state_size", 64) + return hparams + + +@registry.register_hparams +def next_frame_basic_stochastic_discrete_long(): + """Conv model with stochastic discrete latent, long predictions.""" + hparams = next_frame_basic_stochastic_discrete() + hparams.batch_size = 2 + hparams.video_num_target_frames = 16 + return hparams + + +@registry.register_ranged_hparams +def next_frame_stochastic_discrete_range(rhp): + """Next frame stochastic discrete tuning grid.""" + rhp.set_float("learning_rate_constant", 0.001, 0.01) + rhp.set_float("dropout", 0.2, 0.6) + rhp.set_int("filter_double_steps", 3, 5) + rhp.set_discrete("hidden_size", [64, 96, 128]) + rhp.set_discrete("bottleneck_bits", [32, 64, 128, 256]) + rhp.set_discrete("video_num_target_frames", [4]) + rhp.set_float("bottleneck_noise", 0.0, 0.2) + + +@registry.register_ranged_hparams +def next_frame_stochastic_discrete_latent_range(rhp): + rhp.set_float("latent_rnn_max_sampling", 0.1, 0.9) + rhp.set_float("latent_predictor_temperature", 0.1, 1.2) + rhp.set_float("latent_use_max_probability", 0.4, 1.0) + rhp.set_float("dropout", 0.1, 0.4) diff --git a/tensor2tensor/models/video/basic_stochastic_test.py b/tensor2tensor/models/video/basic_stochastic_test.py new file mode 100644 index 000000000..c9c7b865e 
--- /dev/null +++ b/tensor2tensor/models/video/basic_stochastic_test.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Basic tests for basic stochastic model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.models.video import basic_stochastic +from tensor2tensor.models.video import tests_utils + +import tensorflow.compat.v1 as tf + + +class NextFrameTest(tests_utils.BaseNextFrameTest): + + def testBasicStochastic(self): + self.TestOnVariousInputOutputSizes( + basic_stochastic.next_frame_basic_stochastic(), + basic_stochastic.NextFrameBasicStochastic, + 256, + False) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/video/emily.py b/tensor2tensor/models/video/emily.py new file mode 100644 index 000000000..1a8b1e4ef --- /dev/null +++ b/tensor2tensor/models/video/emily.py @@ -0,0 +1,527 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Model architecture for video prediction model. + + based on following paper: + "Stochastic Video Generation with a Learned Prior" + https://arxiv.org/pdf/1802.07687.pdf + by Emily Denton and Rob Fergus. + + This code is a translation of the original code from PyTorch: + https://github.com/edenton/svg +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.models.video import sv2p +from tensor2tensor.models.video import sv2p_params +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +tfl = tf.layers +tfcl = contrib.layers() + + +@registry.register_model +class NextFrameEmily(sv2p.NextFrameSv2pLegacy): + """Stochastic Variational Video Prediction Without Learned Prior.""" + + def encoder(self, inputs, nout, has_batchnorm=True): + """VGG based image encoder. + + Args: + inputs: image tensor with size BSx64x64xC + nout: number of output channels + has_batchnorm: variable to use or not use batch normalization + Returns: + net: encoded image with size BSxNout + skips: skip connection after each layer + """ + vgg_layer = common_video.vgg_layer + net01 = inputs + + skips = [] + + # The original model only supports 64x64. 
We can support higher resolutions + # as long as they are square and the side-length is a power of two + # by inserting more downscaling layers. Corresponding upscaling can be found + # in the decoder, as well. + # (This procedure is ad-hoc, i.e., not from the SVP-FP paper) + _, res_y, res_x, _ = inputs.shape.as_list() + assert res_x == res_y, "Model only supports square inputs" + is_power_of_two = lambda x: ((x & (x - 1)) == 0) and x != 0 + assert is_power_of_two(res_x), "Input resolution must be power of 2" + assert res_x >= 64, "Input resolution must be >= 64" + ds_idx = 0 + while res_x > 64: + h = tfcl.repeat(net01, 2, vgg_layer, 64, scope="downscale%d" % ds_idx, + is_training=self.is_training, activation=tf.nn.relu, + has_batchnorm=has_batchnorm) + net01 = tfl.max_pooling2d(h, [2, 2], strides=(2, 2), + name="downscale%d_pool" % ds_idx) + skips.append(h) + ds_idx += 1 + res_x /= 2 + + # h1 + net11 = tfcl.repeat(net01, 2, vgg_layer, 64, + scope="h1", is_training=self.is_training, + activation=tf.nn.relu, has_batchnorm=has_batchnorm) + net12 = tfl.max_pooling2d(net11, [2, 2], strides=(2, 2), name="h1_pool") + # h2 + net21 = tfcl.repeat(net12, 2, vgg_layer, 128, + scope="h2", is_training=self.is_training, + activation=tf.nn.relu, has_batchnorm=has_batchnorm) + net22 = tfl.max_pooling2d(net21, [2, 2], strides=(2, 2), name="h2_pool") + # h3 + net31 = tfcl.repeat(net22, 3, vgg_layer, 256, + scope="h3", is_training=self.is_training, + activation=tf.nn.relu, has_batchnorm=has_batchnorm) + net32 = tfl.max_pooling2d(net31, [2, 2], strides=(2, 2), name="h3_pool") + # h4 + net41 = tfcl.repeat(net32, 3, vgg_layer, 512, + scope="h4", is_training=self.is_training, + activation=tf.nn.relu, has_batchnorm=has_batchnorm) + net42 = tfl.max_pooling2d(net41, [2, 2], strides=(2, 2), name="h4_pool") + # h5 + net51 = tfcl.repeat(net42, 1, vgg_layer, nout, + kernel_size=4, padding="VALID", activation=tf.nn.relu, + scope="h5", is_training=self.is_training, + has_batchnorm=has_batchnorm) + + skips += [net11, net21, net31, net41] + return net51, skips + + def decoder(self, inputs, nout, skips=None, has_batchnorm=True): + """VGG based image decoder. 
+ + Args: + inputs: image tensor with size BSxX + nout: number of output channels + skips: optional skip connections from encoder + has_batchnorm: variable to use or not use batch normalization + Returns: + net: decoded image with size BSx64x64xNout + skips: skip connection after each layer + """ + vgg_layer = common_video.vgg_layer + net = inputs + # d1 + net = tfl.conv2d_transpose(net, 512, kernel_size=4, padding="VALID", + name="d1_deconv", activation=tf.nn.relu) + if has_batchnorm: + net = tfl.batch_normalization( + net, training=self.is_training, name="d1_bn") + net = tf.nn.relu(net) + net = common_layers.upscale(net, 2) + # d2 + if skips is not None: + net = tf.concat([net, skips[-1]], axis=3) + net = tfcl.repeat(net, 2, vgg_layer, 512, scope="d2a", + is_training=self.is_training, + activation=tf.nn.relu, has_batchnorm=has_batchnorm) + net = tfcl.repeat(net, 1, vgg_layer, 256, scope="d2b", + is_training=self.is_training, + activation=tf.nn.relu, has_batchnorm=has_batchnorm) + net = common_layers.upscale(net, 2) + # d3 + if skips is not None: + net = tf.concat([net, skips[-2]], axis=3) + net = tfcl.repeat(net, 2, vgg_layer, 256, scope="d3a", + is_training=self.is_training, + activation=tf.nn.relu, has_batchnorm=has_batchnorm) + net = tfcl.repeat(net, 1, vgg_layer, 128, scope="d3b", + is_training=self.is_training, + activation=tf.nn.relu, has_batchnorm=has_batchnorm) + net = common_layers.upscale(net, 2) + # d4 + if skips is not None: + net = tf.concat([net, skips[-3]], axis=3) + net = tfcl.repeat(net, 1, vgg_layer, 128, scope="d4a", + is_training=self.is_training, + activation=tf.nn.relu, has_batchnorm=has_batchnorm) + net = tfcl.repeat(net, 1, vgg_layer, 64, scope="d4b", + is_training=self.is_training, + activation=tf.nn.relu, has_batchnorm=has_batchnorm) + net = common_layers.upscale(net, 2) + # d5 + if skips is not None: + net = tf.concat([net, skips[-4]], axis=3) + net = tfcl.repeat(net, 1, vgg_layer, 64, scope="d5", + is_training=self.is_training, + activation=tf.nn.relu, has_batchnorm=has_batchnorm) + + # if there are still skip connections left, we have more upscaling to do + if skips is not None: + for i, s in enumerate(skips[-5::-1]): + net = common_layers.upscale(net, 2) + net = tf.concat([net, s], axis=3) + net = tfcl.repeat(net, 1, vgg_layer, 64, scope="upscale%d" % i, + is_training=self.is_training, + activation=tf.nn.relu, has_batchnorm=has_batchnorm) + + net = tfl.conv2d_transpose(net, nout, kernel_size=3, padding="SAME", + name="d6_deconv", activation=None) + return net + + def stacked_lstm(self, inputs, states, hidden_size, output_size, nlayers): + """Stacked LSTM layers with FC layers as input and output embeddings. + + Args: + inputs: input tensor + states: a list of internal lstm states for each layer + hidden_size: number of lstm units + output_size: size of the output + nlayers: number of lstm layers + Returns: + net: output of the network + skips: a list of updated lstm states for each layer + """ + net = inputs + net = tfl.dense( + net, hidden_size, activation=None, name="af1") + for i in range(nlayers): + net, states[i] = common_video.basic_lstm( + net, states[i], hidden_size, name="alstm%d"%i) + net = tfl.dense( + net, output_size, activation=tf.nn.tanh, name="af2") + return net, states + + def lstm_gaussian(self, inputs, states, hidden_size, output_size, nlayers, + name): + """Stacked LSTM layers with FC layer as input and gaussian as output. 
+
+ Args:
+ inputs: input tensor
+ states: a list of internal lstm states for each layer
+ hidden_size: number of lstm units
+ output_size: size of the output
+ nlayers: number of lstm layers
+ name: the lstm name for scope definition
+ Returns:
+ mu: mean of the predicted gaussian
+ logvar: log(var) of the predicted gaussian
+ states: a list of updated lstm states for each layer
+ """
+ net = inputs
+ net = tfl.dense(net, hidden_size, activation=None, name="%sf1"%name)
+ for i in range(nlayers):
+ net, states[i] = common_video.basic_lstm(
+ net, states[i], hidden_size, name="%slstm%d"%(name, i))
+ mu = tfl.dense(net, output_size, activation=None, name="%sf2mu"%name)
+ logvar = tfl.dense(net, output_size, activation=None, name="%sf2log"%name)
+ return mu, logvar, states
+
+ def construct_model(self, images, actions, rewards):
+ """Builds the stochastic model.
+
+ The model first encodes all the images (x_t) in the sequence
+ using the encoder. Let's call the output e_t. Then it predicts the
+ latent state of the next frame using a recurrent posterior network
+ z ~ q(z|e_{0:t}) = N(mu(e_{0:t}), sigma(e_{0:t})).
+ Another recurrent network predicts the embedding of the next frame
+ using the approximated posterior e_{t+1} = p(e_{t+1}|e_{0:t}, z).
+ Finally, the decoder decodes e_{t+1} into x_{t+1}.
+ Skip connections from encoder to decoder help with reconstruction.
+
+ Args:
+ images: tensor of ground truth image sequences
+ actions: list of action tensors
+ rewards: list of reward tensors (not used by this model)
+
+ Returns:
+ gen_images: generated images
+ fake_reward_prediction: input rewards passed through as the reward prediction
+ pred_mu: predicted means of posterior
+ pred_logvar: predicted log(var) of posterior
+ """
+ # model does not support action conditioned and reward prediction
+ fake_reward_prediction = rewards
+ del rewards
+ action_repeat = self.hparams.action_repeat
+ action_type = self.hparams.action_type
+
+ assert action_type in ["", "image", "vector"], "Invalid action type."
+ if not action_type:
+ a_dim = 0
+ elif action_type == "image":
+ a_dim = self.hparams.g_dim
+ else:
+ assert action_repeat > 0, "Action repeat has to be a positive integer."
+ actions = tf.tile(actions, (1, 1, action_repeat))
+ a_dim = actions.shape[-1]
+
+ z_dim = self.hparams.z_dim
+ g_dim = self.hparams.g_dim
+ rnn_size = self.hparams.rnn_size
+ prior_rnn_layers = self.hparams.prior_rnn_layers
+ posterior_rnn_layers = self.hparams.posterior_rnn_layers
+ predictor_rnn_layers = self.hparams.predictor_rnn_layers
+ context_frames = self.hparams.video_num_input_frames
+ has_batchnorm = self.hparams.has_batchnorm
+
+ seq_len, batch_size, _, _, color_channels = common_layers.shape_list(images)
+
+ # LSTM initial states.
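+ # Data flow per step in the prediction loop below: the prior LSTM sees the
+ # encoding of the previous frame (h_current), the posterior LSTM sees the
+ # encoding of the frame to predict (h_target); z is sampled from the
+ # posterior during training and from the prior otherwise, and the predictor
+ # LSTM maps [h_current, z] to the next embedding, which the decoder turns
+ # back into an image using the encoder skip connections.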
+ prior_states = [None] * prior_rnn_layers + posterior_states = [None] * posterior_rnn_layers + predictor_states = [None] * predictor_rnn_layers + + tf.logging.info(">>>> Encoding") + # Encoding: + enc_images, enc_skips = [], [] + enc_actions = [] + images = tf.unstack(images, axis=0) + actions = tf.unstack(actions, axis=0) + for i, image in enumerate(images): + with tf.variable_scope("encoder", reuse=tf.AUTO_REUSE): + enc, skips = self.encoder(image, g_dim, has_batchnorm=has_batchnorm) + enc = tfl.flatten(enc) + enc_images.append(enc) + enc_skips.append(skips) + if action_type == "image": + enc_action, _ = self.encoder( + actions[i], g_dim, has_batchnorm=has_batchnorm) + enc_action = tfl.flatten(enc_action) + enc_actions.append(enc_action) + + tf.logging.info(">>>> Prediction") + # Prediction + pred_mu_pos = [] + pred_logvar_pos = [] + pred_mu_prior = [] + pred_logvar_prior = [] + gen_images = [] + for i in range(1, seq_len): + with tf.variable_scope("encoder", reuse=tf.AUTO_REUSE): + # current encoding + if self.is_training or len(gen_images) < context_frames: + h_current = enc_images[i - 1] + else: + h_current, _ = self.encoder(gen_images[-1], g_dim) + h_current = tfl.flatten(h_current) + + # target encoding + h_target = enc_images[i] + + if action_type == "image": + h_current = tf.concat([h_current, enc_actions[i - 1]], axis=1) + h_target = tf.concat([h_target, enc_actions[i]], axis=1) + elif action_type == "vector": + h_current = tf.concat([h_current, actions[i - 1]], axis=1) + h_target = tf.concat([h_target, actions[i]], axis=1) + + with tf.variable_scope("prediction", reuse=tf.AUTO_REUSE): + # Prior parameters + if self.hparams.learned_prior: + mu_prior, logvar_prior, prior_states = self.lstm_gaussian( + h_current, prior_states, rnn_size, z_dim, prior_rnn_layers, + "prior") + else: + mu_prior = tf.zeros((batch_size, z_dim)) + logvar_prior = tf.zeros((batch_size, z_dim)) + + # Only use Posterior if it's training time + if self.hparams.stochastic_model and \ + (self.is_training or len(gen_images) < context_frames): + mu_pos, logvar_pos, posterior_states = self.lstm_gaussian( + h_target, posterior_states, rnn_size, z_dim, posterior_rnn_layers, + "posterior") + # Sample z from posterior distribution + z = common_video.get_gaussian_tensor(mu_pos, logvar_pos) + else: + mu_pos = tf.zeros_like(mu_prior) + logvar_pos = tf.zeros_like(logvar_prior) + z = common_video.get_gaussian_tensor(mu_prior, logvar_prior) + + # Predict output encoding + h_pred, predictor_states = self.stacked_lstm( + tf.concat([h_current, z], axis=1), + predictor_states, rnn_size, g_dim, predictor_rnn_layers) + + pred_mu_pos.append(tf.identity(mu_pos, "mu_pos")) + pred_logvar_pos.append(tf.identity(logvar_pos, "logvar_pos")) + pred_mu_prior.append(tf.identity(mu_prior, "mu_prior")) + pred_logvar_prior.append(tf.identity(logvar_prior, "logvar_prior")) + + with tf.variable_scope("decoding", reuse=tf.AUTO_REUSE): + skip_index = min(context_frames-1, i-1) + if action_type == "vector": + h_pred = tf.concat([h_pred, actions[i - 1]], axis=-1) + elif action_type == "image": + h_pred = tf.concat([h_pred, enc_actions[i - 1]], axis=-1) + h_pred = tf.reshape(h_pred, [batch_size, 1, 1, g_dim + a_dim]) + if self.hparams.has_skips: + x_pred = self.decoder( + h_pred, color_channels, + skips=enc_skips[skip_index], has_batchnorm=has_batchnorm) + else: + x_pred = self.decoder( + h_pred, color_channels, has_batchnorm=has_batchnorm) + gen_images.append(x_pred) + + tf.logging.info(">>>> Done") + gen_images = tf.stack(gen_images, axis=0) + 
return {"gen_images": gen_images,
+ "fake_reward_prediction": fake_reward_prediction,
+ "pred_mu_pos": pred_mu_pos,
+ "pred_logvar_pos": pred_logvar_pos,
+ "pred_mu_prior": pred_mu_prior,
+ "pred_logvar_prior": pred_logvar_prior}
+
+ def get_extra_loss(self,
+ latent_means_pos, latent_logvars_pos,
+ latent_means_prior, latent_logvars_prior):
+ """Losses in addition to the default modality losses."""
+ return self.get_kl_loss(
+ latent_means_pos, latent_logvars_pos,
+ latent_means_prior, latent_logvars_prior)
+
+ def body(self, features):
+ hparams = self.hparams
+ batch_size = common_layers.shape_list(features["inputs"])[0]
+
+ # Swap time and batch axes.
+ input_frames = common_video.swap_time_and_batch_axes(features["inputs"])
+ target_frames = common_video.swap_time_and_batch_axes(features["targets"])
+
+ # Get rewards if they exist, otherwise use zeros.
+ input_rewards = self.get_input_if_exists(
+ features, "input_reward", batch_size, hparams.video_num_input_frames)
+ target_rewards = self.get_input_if_exists(
+ features, "target_reward", batch_size, hparams.video_num_target_frames)
+
+ all_rewards = tf.concat([input_rewards, target_rewards], axis=0)
+ all_frames = tf.concat([input_frames, target_frames], axis=0)
+
+ # Get actions if they exist, otherwise use zeros.
+ visualization_kwargs = {}
+ if hparams.action_type == "image":
+ input_actions = common_video.swap_time_and_batch_axes(
+ features["input_action"])
+ target_actions = common_video.swap_time_and_batch_axes(
+ features["target_action"])
+ all_actions = tf.concat([input_actions, target_actions], axis=0)
+ time, _, h, w, c = all_frames.shape
+ all_actions = tf.reshape(all_actions, (time, -1, h, w, c))
+ if self.hparams.action_normalize:
+ all_actions /= 255.
+ visualization_kwargs["actions"] = all_actions[:-1]
+ else:
+ input_actions = self.get_input_if_exists(features, "input_action",
+ batch_size,
+ hparams.video_num_input_frames)
+ target_actions = self.get_input_if_exists(features, "target_action",
+ batch_size,
+ hparams.video_num_target_frames)
+ all_actions = tf.concat([input_actions, target_actions], axis=0)
+
+ # Each image is being used twice, in latent tower and main tower.
+ # This is to make sure we are using the *same* image for both, ...
+ # ... given how TF queues work.
+ # NOT sure if this is required at all. Doesn't hurt though! :)
+ all_frames = tf.identity(all_frames)
+
+ retvals = self.construct_model(
+ images=all_frames, actions=all_actions, rewards=all_rewards)
+
+ # Retrieve tensors returned by the model constructor.
+ gen_images = retvals["gen_images"]
+ gen_rewards = retvals["fake_reward_prediction"]
+ latent_means_pos = retvals["pred_mu_pos"]
+ latent_logvars_pos = retvals["pred_logvar_pos"]
+ latent_means_prior = retvals["pred_mu_prior"]
+ latent_logvars_prior = retvals["pred_logvar_prior"]
+
+ extra_loss = self.get_extra_loss(
+ latent_means_pos=latent_means_pos,
+ latent_logvars_pos=latent_logvars_pos,
+ latent_means_prior=latent_means_prior,
+ latent_logvars_prior=latent_logvars_prior)
+
+ # Visualize predictions in TensorBoard.
+ if self.is_training:
+ self.visualize_predictions(all_frames[1:], gen_images,
+ **visualization_kwargs)
+
+ # Ignore the predictions from the input frames.
+ # This is NOT the same as the original paper/implementation.
+ predictions = gen_images[hparams.video_num_input_frames-1:]
+ reward_pred = gen_rewards[hparams.video_num_input_frames-1:]
+ reward_pred = tf.squeeze(reward_pred, axis=2) # Remove extra dimension.
+
+ # Swap back time and batch axes.
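+ # (construct_model operates time-major, i.e. on tensors shaped
+ # [num_frames, batch, height, width, channels]; here the outputs are
+ # converted back to the batch-major layout used by the rest of the model.)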
+ predictions = common_video.swap_time_and_batch_axes(predictions) + reward_pred = common_video.swap_time_and_batch_axes(reward_pred) + + if self.is_training and hparams.internal_loss: + # add the loss for input frames as well. + extra_gts = all_frames[1:hparams.video_num_input_frames] + extra_gts = common_video.swap_time_and_batch_axes(extra_gts) + extra_pds = gen_images[:hparams.video_num_input_frames-1] + extra_pds = common_video.swap_time_and_batch_axes(extra_pds) + extra_raw_gts = features["inputs_raw"][:, 1:] + recon_loss = self.get_extra_internal_loss( + extra_raw_gts, extra_gts, extra_pds) + extra_loss += recon_loss + + return_targets = predictions + if hparams.reward_prediction: + return_targets = {"targets": predictions, "target_reward": reward_pred} + + return return_targets, extra_loss + + +@registry.register_hparams +def next_frame_emily(): + """Emily's model hparams.""" + hparams = sv2p_params.next_frame_sv2p() + hparams.video_num_input_frames = 2 + hparams.video_num_target_frames = 10 + hparams.learning_rate_constant = 1e-4 + seq_length = hparams.video_num_input_frames + hparams.video_num_target_frames + # The latent_loss_multiplier is divided by the number of frames because + # the image sequence loss in t2t is averaged instead of added through + # time as they do in the SVG-LP paper + hparams.latent_loss_multiplier = 1e-4 / seq_length + hparams.reward_prediction = False + hparams.num_iterations_1st_stage = -1 + hparams.num_iterations_2nd_stage = -1 + hparams.optimizer_adam_beta1 = 0.9 + hparams.optimizer_adam_beta2 = 0.999 + hparams.optimizer_adam_epsilon = 1e-08 + hparams.anneal_end = -1 + hparams.clip_grad_norm = 5.0 + hparams.add_hparam("learned_prior", True) + hparams.add_hparam("z_dim", 64) + hparams.add_hparam("g_dim", 128) + hparams.add_hparam("rnn_size", 256) + hparams.add_hparam("prior_rnn_layers", 1) + hparams.add_hparam("posterior_rnn_layers", 1) + hparams.add_hparam("predictor_rnn_layers", 2) + hparams.add_hparam("has_skips", True) + hparams.add_hparam("has_batchnorm", True) + # Repeat actions to signify gradients. + # Action type can be '', 'image' or 'vector'. + hparams.add_hparam("action_repeat", 40) + hparams.add_hparam("action_type", "") + return hparams diff --git a/tensor2tensor/models/models.py b/tensor2tensor/models/video/emily_test.py similarity index 55% rename from tensor2tensor/models/models.py rename to tensor2tensor/models/video/emily_test.py index bf19a307b..4d95500bf 100644 --- a/tensor2tensor/models/models.py +++ b/tensor2tensor/models/video/emily_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,21 +13,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Models defined in T2T. 
Imports here force registration.""" +"""Basic tests for emily's model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function -# Dependency imports - -# pylint: disable=unused-import -from tensor2tensor.models import attention_lm -from tensor2tensor.models import baseline -from tensor2tensor.models import bytenet -from tensor2tensor.models import multimodel -from tensor2tensor.models import neural_gpu -from tensor2tensor.models import slicenet -from tensor2tensor.models import transformer -from tensor2tensor.models import xception -# pylint: enable=unused-import +from tensor2tensor.models.video import emily +from tensor2tensor.models.video import tests_utils + + +import tensorflow.compat.v1 as tf + + +class NextFrameTest(tests_utils.BaseNextFrameTest): + + def testEmily(self): + self.TestOnVariousInputOutputSizes( + emily.next_frame_emily(), + emily.NextFrameEmily, + 1) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/video/epva.py b/tensor2tensor/models/video/epva.py new file mode 100644 index 000000000..2a3b895bc --- /dev/null +++ b/tensor2tensor/models/video/epva.py @@ -0,0 +1,739 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Model architecture for video prediction model. + +based on following paper: +"Hierarchical Long-term Video Prediction without Supervision" +http://web.eecs.umich.edu/~honglak/icml2018-unsupHierarchicalVideoPred.pdf +by Nevan Wichers, Ruben Villegas, Dumitru Erhan and Honglak Lee. + +This code is based on the original code: +https://github.com/brain-research/long-term-video-prediction-without-supervision +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import reduce + +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.models.video import epva_params # pylint: disable=unused-import +from tensor2tensor.models.video import sv2p +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +from tensorflow.contrib.framework.python.ops import arg_scope +from tensorflow.contrib.slim.python.slim.nets import vgg + +tfl = tf.layers +tfcl = contrib.layers() + +IMG_WIDTH = 64 +IMG_HEIGHT = 64 +VGG_IMAGE_SIZE = 224 +COLOR_NORMALIZATION_VECTOR = [123.68, 116.78, 103.94] + + +def van_image_enc_2d(x, first_depth, reuse=False, hparams=None): + """The image encoder for the VAN. + + Similar architecture as Ruben's paper + (http://proceedings.mlr.press/v70/villegas17a/villegas17a.pdf). + + Args: + x: The image to encode. + first_depth: The depth of the first layer. Depth is increased in subsequent + layers. + reuse: To reuse in variable scope or not. + hparams: The python hparams. + + Returns: + The encoded image. 
+ """ + with tf.variable_scope('van_image_enc', reuse=reuse): + enc_history = [x] + + enc = tf.layers.conv2d( + x, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1) + enc = contrib.layers().layer_norm(enc) + enc = tf.layers.conv2d( + enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1) + enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME') + enc = tf.nn.dropout(enc, hparams.van_keep_prob) + enc = contrib.layers().layer_norm(enc) + enc_history.append(enc) + + enc = tf.layers.conv2d( + enc, + first_depth * 2, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + enc = tf.layers.conv2d( + enc, + first_depth * 2, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME') + enc = tf.nn.dropout(enc, hparams.van_keep_prob) + enc = contrib.layers().layer_norm(enc) + enc_history.append(enc) + + enc = tf.layers.conv2d( + enc, + first_depth * 4, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + enc = tf.layers.conv2d( + enc, + first_depth * 4, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + enc = tf.layers.conv2d( + enc, + first_depth * 4, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], 'SAME') + + return enc, enc_history + + +def van_enc_2d(x, first_depth, reuse=False): + """The higher level structure encoder for the VAN. + + The high level structure is a vector instead of an image. + + Args: + x: The higher level structure to encode. + first_depth: The depth of the first layer. Depth is increased in subsequent + layers. + reuse: To reuse in variable scope or not. + + Returns: + The encoded image. + """ + with tf.variable_scope('van_enc', reuse=reuse): + a = 4 # depends on the inputs size + b = 4 + # a, b = 4,4 + enc = tf.nn.relu(x) + enc = tf.layers.dense(enc, first_depth * a * b, tf.nn.relu) + enc = contrib.layers().layer_norm(enc) + + enc = tf.reshape(enc, [-1, a, b, first_depth]) + + enc = tf.layers.conv2d_transpose( + enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1) + enc = contrib.layers().layer_norm(enc) + enc = tf.layers.conv2d_transpose( + enc, + first_depth * 2, + 3, + padding='same', + activation=tf.nn.relu, + strides=2) + van_higher_level_2 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 2]) + + enc = tf.layers.conv2d_transpose( + enc, + first_depth * 2, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + enc = contrib.layers().layer_norm(enc) + enc = tf.layers.conv2d_transpose( + enc, + first_depth * 4, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + van_higher_level_4 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 4]) + + van_higher_level = tf.concat([x, van_higher_level_2, van_higher_level_4], 1) + + return enc, van_higher_level + + +def van_dec_2d(x, skip_connections, output_shape, first_depth, hparams=None): + """The VAN decoder. + + Args: + x: The analogy information to decode. + skip_connections: The encoder layers which can be used as skip connections. + output_shape: The shape of the desired output image. + first_depth: The depth of the first layer of the van image encoder. + hparams: The python hparams. + + Returns: + The decoded image prediction. 
+ """ + with tf.variable_scope('van_dec'): + dec = tf.layers.conv2d_transpose( + x, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=2) + dec = tf.nn.dropout(dec, hparams.van_keep_prob) + dec = contrib.layers().layer_norm(dec) + dec = tf.layers.conv2d_transpose( + dec, + first_depth * 4, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + dec = tf.nn.dropout(dec, hparams.van_keep_prob) + dec = tf.layers.conv2d_transpose( + dec, + first_depth * 2, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + dec = tf.nn.dropout(dec, hparams.van_keep_prob) + dec = contrib.layers().layer_norm(dec) + + dec = tf.layers.conv2d_transpose( + dec, + first_depth * 2, + 3, + padding='same', + activation=tf.nn.relu, + strides=2) + dec = tf.nn.dropout(dec, hparams.van_keep_prob) + dec = tf.layers.conv2d_transpose( + dec, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1) + dec = tf.nn.dropout(dec, hparams.van_keep_prob) + dec = contrib.layers().layer_norm(dec) + + dec = tf.layers.conv2d_transpose( + dec, + output_shape[3] + 1, + 3, + padding='same', + activation=tf.nn.relu, + strides=2) + dec = tf.nn.dropout(dec, hparams.van_keep_prob) + + out_mask = tf.layers.conv2d_transpose( + dec, output_shape[3] + 1, 3, strides=1, padding='same', activation=None) + + mask = tf.nn.sigmoid(out_mask[:, :, :, 3:4]) + out = out_mask[:, :, :, :3] + + return out * mask + skip_connections[0] * (1 - mask) + + +def analogy_computation_2d(f_first_enc, + f_first_frame, + f_current_enc, + first_depth): + """Implements the deep analogy computation.""" + with tf.variable_scope('analogy_computation'): + + frame_enc_diff = f_first_frame - f_first_enc + + frame_enc_diff_enc = tf.layers.conv2d( + frame_enc_diff, + first_depth * 4, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + f_current_enc_enc = tf.layers.conv2d( + f_current_enc, + first_depth * 4, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + + analogy = tf.concat([frame_enc_diff_enc, f_current_enc_enc], 3) + analogy = tf.layers.conv2d( + analogy, + first_depth * 4, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + analogy = contrib.layers().layer_norm(analogy) + analogy = tf.layers.conv2d( + analogy, + first_depth * 4, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + return tf.layers.conv2d( + analogy, + first_depth * 4, + 3, + padding='same', + activation=tf.nn.relu, + strides=1) + + +def van(first_enc, + first_frame, + current_enc, + gt_image, + reuse=False, + scope_prefix='', + hparams=None): + """Implements a VAN. + + Args: + first_enc: The first encoding. + first_frame: The first ground truth frame. + current_enc: The encoding of the frame to generate. + gt_image: The ground truth image, only used for regularization. + reuse: To reuse in variable scope or not. + scope_prefix: The prefix before the scope name. + hparams: The python hparams. + + Returns: + The generated image. 
+ """ + with tf.variable_scope(scope_prefix + 'van', reuse=reuse): + output_shape = first_frame.get_shape().as_list() + output_shape[0] = -1 + + first_depth = 64 + + f_first_enc, _ = van_enc_2d(first_enc, first_depth) + f_first_frame, image_enc_history = van_image_enc_2d( + first_frame, first_depth, hparams=hparams) + f_current_enc, van_higher_level = van_enc_2d( + current_enc, first_depth, reuse=True) + f_gt_image, _ = van_image_enc_2d(gt_image, first_depth, True, + hparams=hparams) + + analogy_t = analogy_computation_2d( + f_first_enc, f_first_frame, f_current_enc, first_depth) + enc_img = f_current_enc + analogy_t + + img = van_dec_2d( + enc_img, image_enc_history, output_shape, first_depth, hparams=hparams) + + batch_size = tf.to_float(tf.shape(first_enc)[0]) + r_loss = tf.nn.l2_loss(f_gt_image - f_current_enc - analogy_t) / batch_size + + return img, r_loss, van_higher_level + + +def encoder_vgg(x, enc_final_size, reuse=False, scope_prefix='', hparams=None, + is_training=True): + """VGG network to use as encoder without the top few layers. + + Can be pretrained. + + Args: + x: The image to encode. In the range 0 to 1. + enc_final_size: The desired size of the encoding. + reuse: To reuse in variable scope or not. + scope_prefix: The prefix before the scope name. + hparams: The python hparams. + is_training: boolean value indicating if training is happening. + + Returns: + The generated image. + """ + with tf.variable_scope(scope_prefix + 'encoder', reuse=reuse): + + # Preprocess input + x *= 256 + x = x - COLOR_NORMALIZATION_VECTOR + + with arg_scope(vgg.vgg_arg_scope()): + # Padding because vgg_16 accepts images of size at least VGG_IMAGE_SIZE. + x = tf.pad(x, [[0, 0], [0, VGG_IMAGE_SIZE - IMG_WIDTH], + [0, VGG_IMAGE_SIZE - IMG_HEIGHT], [0, 0]]) + _, end_points = vgg.vgg_16( + x, + num_classes=enc_final_size, + is_training=is_training) + pool5_key = [key for key in end_points.keys() if 'pool5' in key] + assert len(pool5_key) == 1 + enc = end_points[pool5_key[0]] + # Undoing padding. + enc = tf.slice(enc, [0, 0, 0, 0], [-1, 2, 2, -1]) + + enc_shape = enc.get_shape().as_list() + enc_shape[0] = -1 + enc_size = enc_shape[1] * enc_shape[2] * enc_shape[3] + + enc_flat = tf.reshape(enc, (-1, enc_size)) + enc_flat = tf.nn.dropout(enc_flat, hparams.enc_keep_prob) + + enc_flat = tf.layers.dense( + enc_flat, + enc_final_size, + kernel_initializer=tf.truncated_normal_initializer(stddev=1e-4,)) + + if hparams.enc_pred_use_l2norm: + enc_flat = tf.nn.l2_normalize(enc_flat, 1) + + return enc_flat + + +def predictor(enc_flat, + action, + lstm_states, + pred_depth, + reuse=False, + scope_prefix='', + hparams=None): + """LSTM predictor network.""" + with tf.variable_scope(scope_prefix + 'predict', reuse=reuse): + + enc_final_size = enc_flat.get_shape().as_list()[1] + action_size = action.get_shape().as_list()[1] + initial_size = (enc_final_size + action_size) + + batch_size = tf.shape(enc_flat)[0] + + init_stddev = 1e-2 + + pre_pred = tf.concat([enc_flat, action], 1) + pre_pred = tf.layers.dense( + pre_pred, + initial_size, + kernel_initializer=tf.truncated_normal_initializer(stddev=init_stddev)) + + # This is only needed or the GAN version. + if hparams.pred_noise_std > 0: + # Add the noise like this so a pretrained model can be used. 
+ pred_noise = tf.random_normal( + shape=[batch_size, 100], stddev=hparams.pred_noise_std) + pre_pred += tf.layers.dense( + pred_noise, + initial_size, + kernel_initializer=tf.truncated_normal_initializer( + stddev=init_stddev), + name='noise_dense') + + pre_pred = tf.nn.relu(pre_pred) + + if lstm_states[pred_depth - 2] is None: + back_connect = tf.tile( + tf.get_variable( + 'back_connect_init', + shape=[1, initial_size * 2], + initializer=tf.truncated_normal_initializer(stddev=init_stddev)) + , (batch_size, 1)) + else: + back_connect = lstm_states[pred_depth - 2] + + lstm_init_stddev = 1e-4 + + part_pred, lstm_states[0] = common_video.lstm_cell( + tf.concat([pre_pred, back_connect], 1), + lstm_states[0], + initial_size, + use_peepholes=True, + initializer=tf.truncated_normal_initializer(stddev=lstm_init_stddev), + num_proj=initial_size) + part_pred = contrib.layers().layer_norm(part_pred) + pred = part_pred + + for pred_layer_num in range(1, pred_depth, 2): + part_pred, lstm_states[pred_layer_num] = common_video.lstm_cell( + pred, + lstm_states[pred_layer_num], + initial_size, + use_peepholes=True, + initializer=tf.truncated_normal_initializer(stddev=lstm_init_stddev), + num_proj=initial_size) + pred += part_pred + + part_pred, lstm_states[pred_layer_num + 1] = common_video.lstm_cell( + tf.concat([pred, pre_pred], 1), + lstm_states[pred_layer_num + 1], + initial_size, + use_peepholes=True, + initializer=tf.truncated_normal_initializer(stddev=lstm_init_stddev), + num_proj=initial_size) + part_pred = contrib.layers().layer_norm(part_pred) + pred += part_pred + + pred = tf.layers.dense( + pred, + enc_final_size, + kernel_initializer=tf.truncated_normal_initializer(stddev=init_stddev)) + + if hparams.enc_pred_use_l2norm: + pred = tf.nn.l2_normalize(pred, 1) + + return pred + + +def construct_model(images, + actions=None, + context_frames=2, + hparams=None, + is_training=True): + """Constructs the tensorflow graph of the hierarchical model.""" + + pred_depth = 20 + + enc_out_all, pred_out_all, van_out_all, van_on_enc_all = [], [], [], [] + + lstm_states = [None] * (pred_depth + 2) + + enc_out = encoder_vgg( + images[0], hparams.enc_size, False, scope_prefix='timestep/', + hparams=hparams, is_training=is_training) + enc_out = tf.identity(enc_out, 'enc_out') + enc_out_all.append(enc_out) + + num_timesteps = len(actions) - 1 + sum_freq = int(num_timesteps / 4 + 1) + + reuse = False + for timestep, action in zip(range(len(actions) - 1), actions[:-1]): + done_warm_start = timestep > context_frames - 1 + + with tf.variable_scope('timestep', reuse=reuse): + if done_warm_start: + pred_input = pred_out_all[-1] + else: + pred_input = enc_out_all[-1] + pred_out = predictor( + pred_input, action, lstm_states, pred_depth, False, hparams=hparams) + pred_out = tf.identity(pred_out, 'pred_out') + if timestep % sum_freq == 0: # and not hparams.use_tpu: + tf.summary.histogram('pred_out', pred_out) + pred_out_all.append(pred_out) + + if timestep % sum_freq == 0: # and not hparams.use_tpu: + tf.summary.histogram('lstm_state', lstm_states[0]) + van_out, _, _ = van( + enc_out_all[0], + images[0], + pred_out, + images[timestep + 1], + tf.AUTO_REUSE, + hparams=hparams) + van_out = tf.identity(van_out, 'van_out') + van_out_all.append(van_out) + + enc_out = encoder_vgg( + images[timestep + 1], hparams.enc_size, True, hparams=hparams, + is_training=is_training) + enc_out = tf.identity(enc_out, 'enc_out') + if timestep % sum_freq == 0: # and not hparams.use_tpu: + tf.summary.histogram('enc_out', enc_out) + 
enc_out_all.append(enc_out) + + van_input = images[0] + enc_noise = tf.zeros_like(enc_out) + if timestep % sum_freq == 0: # and not hparams.use_tpu: + tf.summary.histogram('enc_noise', enc_noise) + van_on_enc, _, _ = van( + enc_out_all[0], + van_input, + enc_out + enc_noise, + images[timestep + 1], + tf.AUTO_REUSE, + hparams=hparams) + van_on_enc = tf.identity(van_on_enc, 'van_on_enc') + van_on_enc_all.append(van_on_enc) + + reuse = True + + return enc_out_all, pred_out_all, van_out_all, van_on_enc_all + + +def peak_signal_to_noise_ratio(true, pred): + """Image quality metric based on maximal signal power vs. power of the noise. + + Args: + true: the ground truth image. + pred: the predicted image. + Returns: + peak signal to noise ratio (PSNR) + """ + return 10.0 * tf.log(1.0 / mean_squared_error(true, pred)) / tf.log(10.0) + + +def mean_squared_error(true, pred): + """L2 distance between tensors true and pred. + + Args: + true: the ground truth image. + pred: the predicted image. + Returns: + mean squared error between ground truth and predicted image. + """ + result = tf.reduce_sum( + tf.squared_difference(true, pred)) / tf.to_float(tf.size(pred)) + return result + + +def l1_error(true, pred): + """L1 distance between tensors true and pred.""" + return tf.reduce_sum(tf.abs(true - pred)) / tf.to_float(tf.size(pred)) + + +def calc_loss_psnr(gen_images, images, name, hparams=None, use_l1_loss=False): + """Calculates loss and psnr for predictions over multiple timesteps.""" + del hparams + with tf.name_scope(name): + loss, error, psnr_all = 0.0, 0.0, 0.0 + for _, x, gx in zip(range(len(gen_images)), images, gen_images): + recon_cost = mean_squared_error(x, gx) + if use_l1_loss: + recon_cost = l1_error(x, gx) + + error_i = l1_error(x, gx) + psnr_i = peak_signal_to_noise_ratio(x, gx) + psnr_all += psnr_i + error += error_i + loss += recon_cost + + psnr_all /= tf.to_float(len(gen_images)) + loss /= tf.to_float(len(gen_images)) + error /= tf.to_float(len(gen_images)) + + # if not hparams.use_tpu: + tf.summary.scalar('psnr_all', psnr_all) + tf.summary.scalar('loss', loss) + + return loss, psnr_all + + +@registry.register_model +class NextFrameEpva(sv2p.NextFrameSv2pLegacy): + """Hierarchical Long-term Video Prediction without Supervision""" + + def body(self, features): + hparams = self.hparams + input_shape = common_layers.shape_list(features['inputs']) + batch_size, _, frame_width, frame_height, frame_channels = input_shape # pylint: disable=unused-variable + + # Swap time and batch axes. + input_frames = common_video.swap_time_and_batch_axes( + tf.to_float(features['inputs'])) + target_frames = common_video.swap_time_and_batch_axes(features['targets']) + + # Get actions if exist otherwise use zeros + input_actions = self.get_input_if_exists( + features, 'input_action', batch_size, hparams.video_num_input_frames) + target_actions = self.get_input_if_exists( + features, 'target_action', batch_size, hparams.video_num_target_frames) + + # Get rewards if exist otherwise use zeros + # TODO(blazej) enable rewards. + # input_rewards = self.get_input_if_exists( + # features, 'input_reward', batch_size, hparams.video_num_input_frames) + # target_rewards = self.get_input_if_exists( + # features, 'target_reward', batch_size,hparams.video_num_target_frames) + # all_rewards = tf.concat([input_rewards, target_rewards], axis=0) + + all_actions = tf.concat([input_actions, target_actions], axis=0) + # flatten actions tensor to have the shape: framesXbatch_sizeXaction_dims. 
+ actions_shape = common_layers.shape_list(all_actions) + all_actions = tf.reshape( + all_actions, + [actions_shape[0], -1, + reduce(lambda x, y: x * y, actions_shape[2:])]) + all_frames = tf.concat([input_frames, target_frames], axis=0) + + all_frames = tf.unstack(all_frames, axis=0) + all_actions = tf.unstack(all_actions, axis=0) + + # TODO(blazej) - most likely this downsize is too strong. + all_frames = [ + tf.image.resize_images( + image, (IMG_HEIGHT, IMG_WIDTH), + method=tf.image.ResizeMethod.BICUBIC) + for image in all_frames + ] + + enc_out_all, pred_out_all, _, van_on_enc_all = construct_model( + all_frames, + all_actions, + context_frames=hparams.context_frames, + hparams=hparams, + is_training=self.is_training) + + enc_pred_loss, _ = calc_loss_psnr( + enc_out_all[1:], + pred_out_all, + 'enc_pred_loss', + hparams=hparams, + use_l1_loss=hparams.enc_pred_use_l1_loss) + + van_on_enc_loss, _ = calc_loss_psnr( + van_on_enc_all, + all_frames[1:], + 'van_on_enc_loss', + hparams=hparams) + + enc_pred_loss_scale_delay = max(hparams.enc_pred_loss_scale_delay, 1) + enc_pred_loss_scale = tf.nn.sigmoid( + (tf.to_float(tf.train.get_or_create_global_step() + ) - enc_pred_loss_scale_delay) / + (enc_pred_loss_scale_delay * .1)) * hparams.enc_pred_loss_scale + tf.summary.scalar('enc_pred_loss_scale', enc_pred_loss_scale) + epva_loss = enc_pred_loss * enc_pred_loss_scale + van_on_enc_loss + tf.summary.scalar('epva_loss', epva_loss) + + predictions = tf.stack(van_on_enc_all) + + if hparams.clip_pixel_values: + predictions = tf.clip_by_value(predictions, 0.0, 1.0) + + # TODO(mbz): clean this up! + def fix_video_dims_and_concat_on_x_axis(x): + x = tf.transpose(x, [1, 3, 4, 0, 2]) + x = tf.reshape(x, [batch_size, frame_height, frame_channels, -1]) + x = tf.transpose(x, [0, 3, 1, 2]) + return x + + frames_gd = fix_video_dims_and_concat_on_x_axis(target_frames) + frames_pd = fix_video_dims_and_concat_on_x_axis(predictions) + side_by_side_video = tf.concat([frames_gd, frames_pd], axis=1) + tf.summary.image('full_video', side_by_side_video) + + predictions = tf.unstack(predictions) + predictions = [ + tf.image.resize_images( + image, (frame_width, frame_height), + method=tf.image.ResizeMethod.BICUBIC) + for image in predictions + ] + predictions = tf.stack(predictions) + + predictions = common_video.swap_time_and_batch_axes(predictions) + predictions = tf.slice(predictions, + [0, hparams.video_num_input_frames-1, 0, 0, 0], + [-1]*5) + + return predictions, {'extra': epva_loss} diff --git a/tensor2tensor/models/video/epva_params.py b/tensor2tensor/models/video/epva_params.py new file mode 100644 index 000000000..23c70fdd9 --- /dev/null +++ b/tensor2tensor/models/video/epva_params.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
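Editorial note before the EPVA hparams below: in `NextFrameEpva.body` above, the encoder-prediction loss is blended into the objective gradually through a sigmoid ramp on the global step. The following standalone NumPy sketch is illustrative only (not part of the diff); it mirrors that expression using the default `enc_pred_loss_scale_delay=6e5` and `enc_pred_loss_scale=0.1` defined in `next_frame_epva()` below.

```python
import numpy as np

def epva_loss_scale(global_step, delay=6e5, scale=0.1):
  """Mirrors: sigmoid((step - delay) / (0.1 * delay)) * enc_pred_loss_scale."""
  return scale / (1.0 + np.exp(-(global_step - delay) / (delay * 0.1)))

for step in (0, 3e5, 6e5, 9e5, 1.2e6):
  print(int(step), round(epva_loss_scale(step), 5))
# 0        0.0      -> VAN reconstruction loss dominates early training
# 300000   0.00067
# 600000   0.05     -> halfway through the ramp at the delay step
# 900000   0.09933
# 1200000  0.1      -> full enc_pred_loss_scale
```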
+ +"""Param sets for EPVA model.""" + +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import modalities +from tensor2tensor.models.video import basic_deterministic_params +from tensor2tensor.utils import registry + + +@registry.register_hparams +def next_frame_epva(): + """EPVA hparams.""" + hparams = basic_deterministic_params.next_frame_basic_deterministic() + hparams.video_num_input_frames = 4 + hparams.video_num_target_frames = 4 + hparams.bottom = { + "inputs": modalities.video_raw_bottom, + "targets": modalities.video_raw_targets_bottom, + } + hparams.loss = { + "targets": modalities.video_l2_raw_loss, + } + hparams.top = { + "targets": modalities.video_raw_top, + } + hparams.learning_rate_schedule = "constant" + hparams.learning_rate_constant = 1e-05 + hparams.batch_size = 2 + hparams.clip_grad_norm = 0.01 + # TODO(msaffar): disentangle EPVA from SV2P + hparams.add_hparam("reward_prediction", False) + hparams.add_hparam("clip_pixel_values", True) + hparams.add_hparam("context_frames", 5) + hparams.add_hparam("enc_learning_rate", 1e-5) + hparams.add_hparam("enc_pred_loss_scale", 0.1) + hparams.add_hparam("enc_pred_loss_scale_delay", 6e5) + hparams.add_hparam("enc_size", 64) + hparams.add_hparam("enc_keep_prob", .65) + hparams.add_hparam("enc_pred_use_l1_loss", False) + hparams.add_hparam("enc_pred_use_l2norm", False) + hparams.add_hparam("van_learning_rate", 3e-5) + hparams.add_hparam("van_keep_prob", .9) + hparams.add_hparam("sequence_length ", 64) + hparams.add_hparam("skip_num", 2) + hparams.add_hparam("pred_noise_std", 0) + hparams.add_hparam("lstm_state_noise_stddev", 0) + return hparams diff --git a/tensor2tensor/models/video/next_frame_glow.py b/tensor2tensor/models/video/next_frame_glow.py new file mode 100644 index 000000000..7186d180c --- /dev/null +++ b/tensor2tensor/models/video/next_frame_glow.py @@ -0,0 +1,639 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
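A quick usage note on the hparams set registered above: once registered, `next_frame_epva` can be loaded and overridden by name through `trainer_lib.create_hparams`, the same helper used by `nfg_interpolate.py` later in this diff. The snippet below is a sketch; the override string is an arbitrary example, not a recommended setting.

```python
from tensor2tensor.utils import trainer_lib

# Load the registered hparams set and override a couple of fields
# (comma-separated "name=value" pairs, as on the t2t-trainer command line).
hparams = trainer_lib.create_hparams(
    "next_frame_epva", "batch_size=4,enc_pred_use_l1_loss=True")
print(hparams.context_frames, hparams.enc_size)  # 5 64 with the defaults above
```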
+ +"""Experimental testbed for nfg.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from six.moves import range +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.layers import modalities +from tensor2tensor.models.research import glow +from tensor2tensor.models.research import glow_ops +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf +import tensorflow_probability as tfp + +arg_scope = contrib.framework().arg_scope + + +@registry.register_hparams +def next_frame_glow_hparams(): + """Hparams for next_frame_glow.""" + hparams = glow.glow_hparams() + # Possible modes are conditional and unconditional + hparams.add_hparam("gen_mode", "conditional") + hparams.add_hparam("learn_top_scale", False) + hparams.add_hparam("condition_all_levels", True) + # For each video, substitutes "num_input_frames + num_output_frames" with a + # randomly sampled patch of length "num_train_frames" during training. + # -1 indicates that the entire video is used for training. + hparams.add_hparam("num_train_frames", -1) + # The following are hparams that model the latent transitions. + # Encoder that maps the latents to a Gaussian distribution. + # This function is used to model the prior over z_{t}. Can be, + # Pointwise -> point-wise multiplication of z_{t-1}. + # conv_net -> one-layer convolution over z_{t-1} .. z_{t - num_cond_latents} + # conv3d_net or conv_lstm + hparams.add_hparam("latent_dist_encoder", "conv_net") + # Number of latents used in the encoder above. + hparams.add_hparam("num_cond_latents", 1) + hparams.add_hparam("latent_architecture", "glow_resnet") + hparams.add_hparam("latent_apply_dilations", False) + hparams.add_hparam("latent_dilation_rates", [1, 3]) + # Use latent skip connections + hparams.add_hparam("model_input", False) + hparams.add_hparam("cond_first_frame", False) + hparams.add_hparam("latent_skip", True) + hparams.add_hparam("latent_encoder_depth", 2) + hparams.add_hparam("latent_encoder_width", 512) + hparams.add_hparam("latent_dropout", 0.0) + hparams.add_hparam("latent_pre_output_channels", 512) + hparams.add_hparam("latent_activation", "relu") + hparams.add_hparam("latent_noise", 0.0) + # Pretrains the glow encoder for "pretrain_steps" number of steps. 
+ # By default, don't pretrain and learn end-to-end + hparams.add_hparam("pretrain_steps", -1) + hparams.bottom = { + "inputs": modalities.video_raw_bottom, + "targets": modalities.video_raw_targets_bottom, + } + hparams.loss = { + "targets": modalities.video_l1_raw_loss, + } + hparams.top = { + "targets": modalities.video_raw_top, + } + hparams.init_batch_size = 256 + hparams.batch_size = 32 + # Possible options: are prev_frame, single_conv and normal + hparams.top_prior = "single_conv" + return hparams + + +@registry.register_hparams +def next_frame_glow_bair_quant(): + """Hparams to reproduce bits-per-pixel results on BAIR action-free dataset.""" + hparams = next_frame_glow_hparams() + hparams.video_num_input_frames = 3 + hparams.video_num_target_frames = 10 + hparams.num_train_frames = 4 + hparams.num_cond_latents = 3 + hparams.depth = 24 + hparams.latent_dist_encoder = "conv3d_net" + hparams.latent_encoder_width = 256 + hparams.latent_architecture = "glow_resnet" + hparams.latent_encoder_depth = 5 + hparams.latent_apply_dilations = True + hparams.latent_activation = "gatu" + hparams.activation = "gatu" + hparams.learning_rate_constant = 3e-4 + hparams.learning_rate_schedule = "constant*linear_warmup" + hparams.learning_rate_warmup_steps = 10000 + hparams.init_batch_size = 128 + hparams.batch_size = 5 + return hparams + + +@registry.register_hparams +def next_frame_glow_bair_qual(): + """Hparams for qualitative video generation results.""" + hparams = next_frame_glow_bair_quant() + hparams.coupling = "additive" + hparams.temperature = 0.5 + hparams.coupling_width = 392 + return hparams + + +@registry.register_hparams +def next_frame_glow_shapes(): + """Hparams for qualitative and quantitative results on shapes dataset.""" + hparams = next_frame_glow_bair_quant() + hparams.video_num_input_frames = 1 + hparams.video_num_target_frames = 2 + hparams.num_train_frames = 2 + hparams.num_cond_latents = 1 + hparams.coupling = "additive" + hparams.coupling_width = 512 + hparams.latent_encoder_depth = 10 + hparams.latent_skip = False + hparams.learning_rate_constant = 1e-4 + hparams.batch_size = 10 + return hparams + + +@registry.register_hparams +def frame_glow_hparams(): + """Unconditional generation on video-frames.""" + hparams = next_frame_glow_hparams() + hparams.gen_mode = "unconditional" + hparams.num_train_frames = 1 + return hparams + + +def get_cond_latents(all_latents=None, hparams=None): + """Get z^{cond}_{t} given z^{1..t-1}. + + Args: + all_latents: list of list of tensors, + outer-size equals no.of time_steps-1 + inner-size equals hparams.n_levels. + hparams: See next_frame_glow_hparams. + Returns: + cond_latents: conditional latents at time-step t. 
+ """ + cond_latents = None + if hparams.gen_mode == "conditional": + if hparams.latent_dist_encoder in ["conv_net", "conv3d_net"]: + num_cond_latents = (hparams.num_cond_latents + + int(hparams.cond_first_frame)) + if len(all_latents) >= num_cond_latents: + cond_latents = all_latents[-hparams.num_cond_latents:] + if hparams.cond_first_frame: + cond_latents = [all_latents[0]] + cond_latents + elif hparams.latent_dist_encoder in ["pointwise", "conv_lstm"]: + if all_latents: + cond_latents = all_latents[-1] + + if hparams.gen_mode == "conditional": + global_step = tf.train.get_or_create_global_step() + condition = tf.greater(global_step, hparams.pretrain_steps) + else: + condition = tf.constant(False, dtype=tf.bool) + return condition, cond_latents + + +@registry.register_model +class NextFrameGlow(glow.Glow): + """Extend Glow for video.""" + + def init_preprocess_single(self, features): + for label in ["inputs", "targets"]: + features[label] = common_layers.convert_rgb_to_real(features[label]) + return features + + def init_preprocess(self, features): + """Preprocessing as per the input modality. + + Equivalent to calling self.bottom(features). + + Args: + features: dict of strings to tensors. + Returns: + features: dict of strings to tensors. + """ + return features.map(self.init_preprocess_single) + + def preprocess(self, x): + """Converts x from [0, 1] to [-0.5, 0.5]. + + All inputs are already normalized to be in the range [0, 1] through the + VideoModalityL1Raw modality. + + Args: + x: 4-D Tensor. + + Returns: + x: Scaled such that x lies in-between -0.5 and 0.5 + """ + return x - 0.5 + + def infer(self, features, *args, **kwargs): # pylint: disable=arguments-differ + del args, kwargs + + # Make a copy of features that can be used in the call to self + # that builds the graph. + new_features = {} + new_features["inputs"] = features["inputs"] + new_features["targets"] = features["infer_targets"] + _, _ = self(new_features) # pylint: disable=not-callable + + if self.hparams.gen_mode == "unconditional": + num_target_frames = 1 + else: + num_target_frames = self.hparams.video_num_target_frames + + ops = [glow_ops.get_variable_ddi, glow_ops.actnorm, glow_ops.get_dropout] + var_scope = tf.variable_scope("next_frame_glow/body", reuse=True) + all_frames = [] + + # If eps=None, images are sampled from the prior. + with arg_scope(ops, init=False), var_scope: + for target_frame in range(1, num_target_frames + 1): + + # subscript -> timestep, superscript -> level. + # self.z_sample equals z^0_{t} (top-level latent) + # (X_{t}, z^{1..l}_{t}) = Glow(z^0_{t}, z^{1..l}_{t-1}) + # Get current set of cond_latents. 
+ cond_level, cond_level_latents = get_cond_latents( + self.all_level_latents, self.hparams) + + glow_vals = glow_ops.encoder_decoder( + "codec", self.z_sample, self.hparams, eps=None, reverse=True, + cond_latents=cond_level_latents, states=self.level_states, + condition=cond_level, temperature=self.temperature) + predicted_frame, _, curr_latents, self.level_states = glow_vals + all_frames.append(predicted_frame) + self.all_level_latents.append(curr_latents) + + # Compute z^0_{t+1} = f(z^0_{t}) + if target_frame < num_target_frames: + cond_top, cond_top_latents = get_cond_latents( + self.all_top_latents, self.hparams) + prior_dist = self.top_prior( + condition=cond_top, cond_latents=cond_top_latents) + self.z_sample = prior_dist.sample() + self.all_top_latents.append(self.z_sample) + + all_frames = tf.stack(all_frames) + predicted_video = common_video.swap_time_and_batch_axes(all_frames) + + # The video-decode API requires the predicted video to be the same shape + # as the target-video. Hence, for unconditional generation, + # tile across time to ensure same shape. + if self.hparams.gen_mode == "unconditional": + predicted_video = tf.tile( + predicted_video, [1, self.hparams.video_num_target_frames, 1, 1, 1]) + predicted_video = glow_ops.postprocess(predicted_video) + + # Output of a single decode / sample. + output_features = {} + output_features["targets"] = tf.zeros_like(predicted_video) + output_features["outputs"] = predicted_video + output_features["scores"] = tf.zeros_like(predicted_video) + return output_features + + def get_squeeze_prior(self): + """Model the prior over z_{t} as a function of X_{t-1}. + + Returns: + objective: float, log-likelihood. + dist: instance of tfp.distributions.Normal. + + Raises: + ValueError: If input_height is not equal to input_width, not even + or if the image width is smaller than the latent width. + """ + _, prior_height, _, prior_channels = self.z_top_shape + _, input_height, input_width, _ = common_layers.shape_list(self.input_frame) + + if input_height != input_width: + raise ValueError("input height should be equal to input width") + if input_height % 2 != 0: + raise ValueError("input height should be even") + if input_height < prior_height: + raise ValueError("input should be larger than the prior.") + + # mean, log_std = NN(X_0) + # Reduce the spatial dimension by a factor of "squeeze_factor". + # and convolve with a stride of 2 + squeeze_factor = input_height // (2 * prior_height) + x = glow_ops.squeeze( + "prior_squeeze", self.input_frame, factor=squeeze_factor, reverse=False) + mean_and_log_std = glow_ops.conv( + "prior_conv", x, 2*prior_channels, stride=[2, 2], apply_actnorm=False, + conv_init="zeros") + mean, log_scale = tf.split(mean_and_log_std, num_or_size_splits=2, axis=-1) + return tfp.distributions.Normal(mean, tf.exp(log_scale)) + + def top_cond_prior(self, name, cond_top_latents): + """Maps the conditional top latents to a distribution. + + Args: + name: variable scope. + cond_top_latents: Tensor or a list of tensors. + Latent variables at the previous time-step. + If "pointwise", this is a single tensor. + If "conv_net", this is a list of tensors with length + equal to hparams.num_cond_latents. + Returns: + cond_dist: tfp.distributions.Normal + Raises: + ValueError: If cond_top_latents are not of the expected length. 
+ """ + with tf.variable_scope("top", reuse=tf.AUTO_REUSE): + if self.hparams.latent_dist_encoder == "pointwise": + last_latent = cond_top_latents + top = glow_ops.scale_gaussian_prior( + name, cond_top_latents, trainable=self.hparams.learn_top_scale) + elif self.hparams.latent_dist_encoder == "conv_net": + num_cond_latents = (self.hparams.num_cond_latents + + int(self.hparams.cond_first_frame)) + if len(cond_top_latents) != num_cond_latents: + raise ValueError( + "Expected length of cond_top_latents %d, got %d" + % (num_cond_latents, len(cond_top_latents))) + last_latent = cond_top_latents[-1] + output_channels = common_layers.shape_list(last_latent)[-1] + cond_top_latents = tf.concat(cond_top_latents, axis=-1) + + # Maps the latent-stack to a distribution. + cond_top_latents = glow_ops.noise_op(cond_top_latents, self.hparams) + top = glow_ops.latent_to_dist( + name, cond_top_latents, hparams=self.hparams, + output_channels=output_channels) + elif self.hparams.latent_dist_encoder == "conv_lstm": + last_latent = cond_top_latents + output_channels = common_layers.shape_list(cond_top_latents)[-1] + # (h_t, c_t) = LSTM(z_{t-1}; (h_{t-1}, c_{t-1})) + # (mu_t, sigma_t) = conv(h_t) + cond_top_latents = glow_ops.noise_op(cond_top_latents, self.hparams) + _, self.top_state = common_video.conv_lstm_2d( + cond_top_latents, self.top_state, self.hparams.latent_encoder_width, + kernel_size=3, name="conv_lstm") + top = glow_ops.single_conv_dist( + name, self.top_state.h, output_channels=output_channels) + elif self.hparams.latent_dist_encoder == "conv3d_net": + last_latent = cond_top_latents[-1] + cond_top_latents = tf.stack(cond_top_latents, axis=1) + cond_top_latents = glow_ops.noise_op(cond_top_latents, self.hparams) + top = glow_ops.temporal_latent_to_dist( + "conv3d", cond_top_latents, self.hparams) + + # mu(z_{t}) = z_{t-1} + latent_encoder(z_{cond}) + if self.hparams.latent_skip: + top = tfp.distributions.Normal(last_latent + top.loc, top.scale) + return top + + def uncond_top_dist(self): + """Get an unconditional prior distribution on the top latent.""" + prior_dist = glow_ops.top_prior( + "unconditional", self.z_top_shape, learn_prior="single_conv") + return prior_dist.loc, prior_dist.scale + + def cond_top_dist(self, cond_latents): + """Get a conditional prior distribution on the top latent.""" + prior_dist = self.top_cond_prior("conditional", cond_latents) + return prior_dist.loc, prior_dist.scale + + def top_prior(self, condition=False, cond_latents=None): + """Objective based on the prior over latent z. + + Args: + condition: Whether or not to condition on cond_latents. + cond_latents: tensor or list of tensors depending on + hparams.latent_dist_encoder + Returns: + objective: float, log-likelihood of z under the prior. + dist: instance of tfp.distributions.Normal, prior distribution. + Raises: + ValueError: If input is smaller than the prior, uneven height + or rectangular. + """ + if isinstance(condition, bool): + condition = tf.constant(condition, dtype=tf.bool) + self._all_conds.append(condition) + + if self.hparams.gen_mode == "conditional": + # cond_top_latents is None when + # latent_dist_encoder is a lstm and frame_ind == 0. + # latent_dist_encoder is conv_net and frame_ind < num_cond_frames. 
+ marginal_mean, marginal_scale = self.uncond_top_dist() + if cond_latents is None: + mean, scale = marginal_mean, marginal_scale + else: + cond_mean, cond_scale = self.cond_top_dist(cond_latents) + mean, scale = tf.cond( + condition, lambda: (cond_mean, cond_scale), + lambda: (marginal_mean, marginal_scale)) + return glow_ops.TemperedNormal(mean, scale, self.temperature) + if self.hparams.top_prior == "prev_frame": + return self.get_squeeze_prior() + else: + return super(NextFrameGlow, self).top_prior() + + def get_z_top_shape(self, init=False): + """Get latent shape at level.""" + if init: + batch_size = self.hparams.init_batch_size + else: + batch_size = self.hparams.batch_size + height, _, channels = self.hparams.problem.frame_shape + n_levels = self.hparams.n_levels + z_width = height // 2**n_levels + z_channels = channels * 2**n_levels * 2 + return [batch_size, z_width, z_width, z_channels] + + def squeeze_video(self, video, init=False): + """Squeeze a 5-D Tensor video with one timestep to a 4-D frame.""" + if init: + batch_size = self.hparams.init_batch_size + else: + batch_size = self.hparams.batch_size + frame_shape = [batch_size] + self.hparams.problem.frame_shape + return tf.reshape(video, frame_shape) + + def glow_encoder(self, frame, condition=False, cond_latents=None, init=False): + """Glow network that encodes frame to a hierarchy of latents. + + Args: + frame: 5-D Tensor of shape (batch_size, 1, height, width, channels). + condition: Whether or not to condition on cond_latents. + cond_latents: optional, list of tensors with length equal to + hparams.n_levels - 1. If provided, the latent at level l is + conditioned on the cond_latent at level l. + init: Whether the given batch is an "init" batch or a "train" batch. + Returns: + objective: log-likelihood of the frame per the model. + z_top: top-level latent. + z_levels: a list of tensors with latents at all levels. + """ + frame = self.squeeze_video(frame, init=init) + frame = self.preprocess(frame) + frame, objective = glow_ops.uniform_binning_correction(frame) + + glow_vals = glow_ops.encoder_decoder( + "codec", frame, self.hparams, eps=None, reverse=False, + cond_latents=cond_latents, states=self.level_states, + condition=condition) + z_top, encoder_objective, self.eps, z_levels, self.level_states = glow_vals + objective += encoder_objective + return objective, z_top, z_levels + + def get_num_train_frames(self): + """Returns the number of frames as a normalizing factor.""" + num_target = self.hparams.video_num_target_frames + num_input = self.hparams.video_num_input_frames + + # For unconditional generation, this picks a random frame during training + # and evaluates the marginal likelihood over "num_input" + "num_target" + # frames during eval. + if self.hparams.gen_mode == "unconditional": + if self.is_training: + return 1 + return num_input + num_target + + # During eval we measure the true objective. + if not self.is_training or self.hparams.num_train_frames == -1: + total_frames = num_target + # if hparams.num_train_frames=-1, we use an approxination to the true + # objective. + else: + total_frames = self.hparams.num_train_frames - num_input + if self.hparams.model_input: + total_frames += num_input + return total_frames + + def get_all_frames(self, input_frames, target_frames): + """Get the frames used as input to the model. + + Args: + input_frames: 5-D Tensor, (NTHWC) + target_frames: 5-D Tensor, (NTHWC) + Returns: + frames: 5-D Tensor used as input to the model. 
+ """ + if self.is_predicting: + all_frames = input_frames + elif self.is_training: + all_frames = tf.concat((input_frames, target_frames), axis=1) + all_frames = common_video.extract_random_video_patch( + all_frames, self.hparams.num_train_frames) + # Measure the mean bit-per-pixel of the target_frames during eval. + else: + all_frames = tf.concat((input_frames, target_frames), axis=1) + if self.hparams.cond_first_frame: + first_frame = all_frames[:, 0:1, :, :, :] + all_frames = tf.concat((first_frame, all_frames), axis=1) + return all_frames + + def video_objective_tower(self, input_frames, target_frames, init=False): + """Returns the bits-per-pixel of the video. + + Args: + input_frames: 5-D Tensor of shape (N, 1, H, W, C) + target_frames: 5-D Tensor of shape (N, T, H, W, C) + init: Whether or not to run data-dependent initialization. + Returns: + objective: bits-per-pixel. + """ + # The arg_scope call ensures that the actnorm parameters are set such that + # the per-channel output activations have zero mean and unit variance + # ONLY during the first step. After that the parameters are learned + # through optimisation. + num_input_frames = (self.hparams.video_num_input_frames + + int(self.hparams.cond_first_frame)) + + # Set num total frames to average the objective. + total_frames = self.get_num_train_frames() + + # Compute the log-likelihood of target_frames at both train and predict + # time. + all_frames = self.get_all_frames(input_frames, target_frames) + all_frames = tf.unstack(all_frames, axis=1) + + cond_level_latents, cond_top_latents = None, None + total_objective = 0.0 + ops = [glow_ops.get_variable_ddi, glow_ops.actnorm, glow_ops.get_dropout] + + with arg_scope(ops, init=init): + for frame_ind, frame in enumerate(all_frames): + + # Get current set of cond latents of non-top levels. + cond_level, cond_level_latents = get_cond_latents( + self.all_level_latents, self.hparams) + + # Get current set of cond latents of the top-level + cond_top, cond_top_latents = get_cond_latents( + self.all_top_latents, self.hparams) + + # Superscript -> level, Subscript -> Time. + # (z^{0}_t, z^{1..l}_t) = Glow(X_{t}, z^{1..l}_{cond_t}) + frame_obj, curr_top_latent, curr_level_latents = self.glow_encoder( + frame, condition=cond_level, cond_latents=cond_level_latents, + init=init) + + # z^0_t ~ N(f(z^0_{t-1})) + # cond_top_latents is None when + # latent_dist_encoder is conv_net and frame_ind < num_cond_frames. + prior_dist = self.top_prior( + condition=cond_top, cond_latents=cond_top_latents) + prior_objective = tf.reduce_sum( + prior_dist.log_prob(curr_top_latent), axis=[1, 2, 3]) + frame_obj += prior_objective + + # Loss computation. + # Do not model the probabililty of the input frames by default. + # Consistent with other video models. + if (frame_ind > num_input_frames - 1 or self.hparams.model_input or + self.hparams.gen_mode == "unconditional"): + total_objective += frame_obj + self.all_level_latents.append(curr_level_latents) + self.all_top_latents.append(curr_top_latent) + + # During prediction time, store z_sample ~ N(f(z_{num_input_frames})) + # to generate the first target frame. + if self.is_predicting: + # Get current set of cond_top_latents + cond_top, cond_top_latents = get_cond_latents( + self.all_top_latents, self.hparams) + prior_dist = self.top_prior( + condition=cond_top, cond_latents=cond_top_latents) + self.z_sample = prior_dist.sample() + self.all_top_latents.append(self.z_sample) + + # Converts log-probability to bits-per-pixel. 
+ hwc = np.prod(self.hparams.problem.frame_shape) + total_objective = -total_objective / (np.log(2) * hwc * total_frames) + return total_objective + + def objective_tower(self, features, init=False): + input_frames, target_frames = features["inputs"], features["targets"] + self.cond_latents, self.top_state = None, None + self.all_level_latents, self.all_top_latents = [], [] + self._all_conds = [] + self.level_states = [None] * (self.hparams.n_levels - 1) + self.z_top_shape = self.get_z_top_shape(init=init) + num_input_frames = self.hparams.video_num_input_frames + latent_dist_encoder = self.hparams.latent_dist_encoder + num_cond_latents = self.hparams.num_cond_latents + + exp_modes = ["conditional", "unconditional"] + if self.hparams.gen_mode not in exp_modes: + raise ValueError("Expected mode to be in %s, got %s" % + (exp_modes, self.hparams.gen_mode)) + + # Error checks for conditional video generation. + if self.hparams.gen_mode == "conditional": + exp_latent_encoders = ["pointwise", "conv_net", "conv_lstm", "conv3d_net"] + if latent_dist_encoder not in exp_latent_encoders: + raise ValueError("Expected latent_dist_encoder is %s, got %s" % + (exp_latent_encoders, latent_dist_encoder)) + if (latent_dist_encoder == "pointwise" and num_cond_latents != 1): + raise ValueError("Expected num_cond_latents: 1, with 'pointwise' " + "latent_dist_encoder, got %d" % num_cond_latents) + if (latent_dist_encoder == "conv_net" and + num_cond_latents > num_input_frames): + raise ValueError("Expected num_cond_latents <= %d, got %d" % + (num_input_frames, num_cond_latents)) + if (latent_dist_encoder == "pointwise" and + self.hparams.init_batch_size != self.hparams.batch_size): + raise ValueError("init_batch_size different from batch_size not " + "supported for latent_dist_encoder=pointwise") + if self.hparams.gen_mode == "unconditional": + if self.hparams.num_train_frames != 1: + raise ValueError("Expected num_train_frames to be 1 when " + "hparams.gen_mode is unconditional, got %d" % + self.hparams.num_train_frames) + if self.hparams.video_num_input_frames != 1: + raise ValueError("Expected num_input_frames to be 1 when " + "hparams.gen_mode is unconditional, got %d" % + self.hparams.video_num_input_frames) + return self.video_objective_tower(input_frames, target_frames, init=init) diff --git a/tensor2tensor/models/video/nfg_conv3d_test.py b/tensor2tensor/models/video/nfg_conv3d_test.py new file mode 100644 index 000000000..e3434cd04 --- /dev/null +++ b/tensor2tensor/models/video/nfg_conv3d_test.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
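One detail worth spelling out from `video_objective_tower` above: the summed log-likelihood (in nats) is converted to bits per pixel by negating it and dividing by `log(2) * H * W * C * total_frames`. A small self-contained sketch, with made-up numbers for illustration:

```python
import numpy as np

def bits_per_pixel(total_log_prob_nats, frame_shape, num_frames):
  """Negative log-likelihood in bits, averaged over every pixel and frame."""
  hwc = np.prod(frame_shape)
  return -total_log_prob_nats / (np.log(2) * hwc * num_frames)

# A 64x64x3 video with two scored frames and a total log-likelihood of
# -30000 nats works out to roughly 1.76 bits per pixel.
print(bits_per_pixel(-30000.0, (64, 64, 3), 2))
```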
+ +"""Test when the latent-network encoder is a conv3d net.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +from tensor2tensor.models.video import nfg_test_utils +import tensorflow.compat.v1 as tf + +conv3d_net_hparams = ( + ("conv3d_net", 2, 2, "conv3d_net", "conditional", -1, 3), + ("conv3d_net_gatu", 2, 2, "conv3d_net", "conditional", -1, 3, False, False, + "gatu"), + ("conv3d_dil", 2, 2, "conv3d_net", "conditional", -1, -1, False, True),) + + +class NextFrameGlowConv3DTest(nfg_test_utils.NextFrameGlowTest, + parameterized.TestCase): + + @parameterized.named_parameters(*conv3d_net_hparams) + def testGlowTrainAndDecode(self, in_frames=1, out_frames=1, + latent_dist_encoder="pointwise", + gen_mode="conditional", pretrain_steps=-1, + num_train_frames=-1, cond_first_frame=False, + apply_dilations=False, activation="relu"): + self.GlowTrainAndDecode( + in_frames=in_frames, out_frames=out_frames, + latent_dist_encoder=latent_dist_encoder, gen_mode=gen_mode, + pretrain_steps=pretrain_steps, num_train_frames=num_train_frames, + cond_first_frame=cond_first_frame, apply_dilations=apply_dilations, + activation=activation) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/video/nfg_conv_lstm_test.py b/tensor2tensor/models/video/nfg_conv_lstm_test.py new file mode 100644 index 000000000..982245464 --- /dev/null +++ b/tensor2tensor/models/video/nfg_conv_lstm_test.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test when the latent-network encoder is a conv-lstm.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +from tensor2tensor.models.video import nfg_test_utils +import tensorflow.compat.v1 as tf + +conv_lstm_hparams = ( + ("in_3_out_2_lstm", 2, 1, "conv_lstm", "conditional", -1), + ("lstm_pretrain", 2, 1, "conv_lstm", "conditional", 50000)) + + +class NextFrameGlowConv3DTest(nfg_test_utils.NextFrameGlowTest, + parameterized.TestCase): + + @parameterized.named_parameters(*conv_lstm_hparams) + def testGlowTrainAndDecode(self, in_frames=1, out_frames=1, + latent_dist_encoder="pointwise", + gen_mode="conditional", pretrain_steps=-1, + num_train_frames=-1, cond_first_frame=False): + self.GlowTrainAndDecode( + in_frames=in_frames, out_frames=out_frames, + latent_dist_encoder=latent_dist_encoder, gen_mode=gen_mode, + pretrain_steps=pretrain_steps, num_train_frames=num_train_frames, + cond_first_frame=cond_first_frame) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/video/nfg_conv_test.py b/tensor2tensor/models/video/nfg_conv_test.py new file mode 100644 index 000000000..5a07b812c --- /dev/null +++ b/tensor2tensor/models/video/nfg_conv_test.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test when the latent-network encoder is a 2-D conv.""" + +from absl.testing import parameterized +from tensor2tensor.models.video import nfg_test_utils +import tensorflow.compat.v1 as tf + +conv_net_hparams = ( + ("in_3_out_2_conv", 3, 1, "conv_net", "conditional"), + ("conv_net_cond_first", 2, 2, "conv_net", "conditional", -1, 3, True),) + + +class NextFrameGlowConvTest(nfg_test_utils.NextFrameGlowTest, + parameterized.TestCase): + + @parameterized.named_parameters(*conv_net_hparams) + def testGlowTrainAndDecode(self, in_frames=1, out_frames=1, + latent_dist_encoder="pointwise", + gen_mode="conditional", pretrain_steps=-1, + num_train_frames=-1, cond_first_frame=False): + self.GlowTrainAndDecode( + in_frames=in_frames, out_frames=out_frames, gen_mode=gen_mode, + latent_dist_encoder=latent_dist_encoder, + pretrain_steps=pretrain_steps, num_train_frames=num_train_frames, + cond_first_frame=cond_first_frame) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/video/nfg_interpolate.py b/tensor2tensor/models/video/nfg_interpolate.py new file mode 100644 index 000000000..9294785b5 --- /dev/null +++ b/tensor2tensor/models/video/nfg_interpolate.py @@ -0,0 +1,270 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for linear interpolation over the next_frame_glow latent space.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import os +from absl import flags +import numpy as np +from six.moves import zip +from tensor2tensor.bin import t2t_trainer # pylint: disable=unused-import +from tensor2tensor.data_generators import image_utils +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.models.research import glow_ops +from tensor2tensor.utils import contrib +from tensor2tensor.utils import decoding +from tensor2tensor.utils import trainer_lib +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +# Flags placeholders. +flags.DEFINE_string("checkpoint_path", None, + "Path to the model checkpoint. 
Overrides output_dir.") +flags.DEFINE_bool("keep_timestamp", False, + "Set the mtime of the decoded file to the " + "checkpoint_path+'.index' mtime.") +flags.DEFINE_bool("decode_interactive", False, + "Interactive local inference mode.") +flags.DEFINE_integer("decode_shards", 1, "Number of decoding replicas.") +flags.DEFINE_string("score_file", "", "File to score. Each line in the file " + "must be in the format input \t target.") +flags.DEFINE_bool("decode_in_memory", False, "Decode in memory.") + +flags = tf.flags +FLAGS = flags.FLAGS + + +arg_scope = contrib.framework().arg_scope + + +def decode_hparams(overrides=""): + """Hparams for decoding.""" + hparams = decoding.decode_hparams() + # Number of interpolations between [0.0, 1.0]. + hparams.add_hparam("num_interp", 11) + # Which level(s) to interpolate. + hparams.add_hparam("level_interp", [0, 1, 2]) + # "all" or "ranked", interpolate all channels or a "ranked". + hparams.add_hparam("channel_interp", "all") + # interpolate channels ranked according to squared L2 norm. + hparams.add_hparam("rank_interp", 1) + # Whether on not to save frames as summaries + hparams.add_hparam("save_frames", True) + hparams.parse(overrides) + return hparams + + +def preprocess_frame(frame): + """Preprocess frame. + + 1. Converts [0, 255] to [-0.5, 0.5] + 2. Adds uniform noise. + + Args: + frame: 3-D Tensor representing pixels. + Returns: + frame: 3-D Tensor with values in between [-0.5, 0.5] + """ + # Normalize from [0.0, 1.0] -> [-0.5, 0.5] + frame = common_layers.convert_rgb_to_real(frame) + frame = frame - 0.5 + frame, _ = glow_ops.uniform_binning_correction(frame) + return frame + + +def frame_to_latents(frame, hparams): + """Encode frames to latents.""" + # Preprocess + frame = preprocess_frame(frame) + + # Encode [X_t] to [z^1_t, z^2_t .. z^l_t] + glow_vals = glow_ops.encoder_decoder( + "codec", frame, hparams, eps=None, reverse=False) + z_top, _, level_eps, _, _ = glow_vals + return z_top, level_eps + + +def latents_to_frames(z_top_interp, level_eps_interp, hparams): + """Decodes latents to frames.""" + # Decode [z^1_t, z^2_t .. z^l_t] to [X_t] + images, _, _, _ = glow_ops.encoder_decoder( + "codec", z_top_interp, hparams, eps=level_eps_interp, reverse=True) + images = glow_ops.postprocess(images) + return images + + +def interpolate(features, hparams, decode_hp): + """Interpolate between the first input frame and last target frame. + + Args: + features: dict of tensors + hparams: HParams, training hparams. + decode_hp: HParams, decode hparams. + Returns: + images: interpolated images, 4-D Tensor, shape=(num_interp, H, W, C) + first_frame: image, 3-D Tensor, shape=(1, H, W, C) + last_frame: image, 3-D Tensor, shape=(1, H, W, C) + """ + inputs, targets = features["inputs"], features["targets"] + inputs = tf.unstack(inputs, axis=1) + targets = tf.unstack(targets, axis=1) + coeffs = np.linspace(0.0, 1.0, decode_hp.num_interp) + + # (X_1, X_t) -> (z_1, z_t) + first_frame, last_frame = inputs[0], targets[-1] + first_top_z, first_level_eps = frame_to_latents(first_frame, hparams) + last_top_z, last_level_eps = frame_to_latents(last_frame, hparams) + + # Interpolate latents at all levels. 
+ first_lats = first_level_eps + [first_top_z] + last_lats = last_level_eps + [last_top_z] + interp_lats = [] + lat_iterator = enumerate(zip(first_lats, last_lats)) + for level_ind, (first_lat, last_lat) in lat_iterator: + if level_ind in decode_hp.level_interp: + if decode_hp.channel_interp == "all": + interp_lat = glow_ops.linear_interpolate(first_lat, last_lat, coeffs) + else: + interp_lat = glow_ops.linear_interpolate_rank( + first_lat, last_lat, coeffs, decode_hp.rank_interp) + else: + interp_lat = tf.tile(first_lat, [decode_hp.num_interp, 1, 1, 1]) + interp_lats.append(interp_lat) + + level_eps_interp = interp_lats[:hparams.n_levels-1] + z_top_interp = interp_lats[-1] + images = latents_to_frames(z_top_interp, level_eps_interp, hparams) + return images, first_frame, last_frame + + +def get_summaries_log_dir(decode_hp, output_dir, dataset_split): + """Get nested summaries_log_dir based on decode_hp.""" + child_dir = decode_hp.summaries_log_dir + level_dir = "".join([str(level) for level in decode_hp.level_interp]) + if decode_hp.channel_interp == "all": + rank_dir = "all" + else: + rank_dir = "rank_%d" % decode_hp.rank_interp + child_dir = "%s/%s_%s" % (child_dir, level_dir, rank_dir) + if dataset_split is not None: + child_dir += "_{}".format(dataset_split) + return os.path.join(output_dir, child_dir) + + +def interpolations_to_summary(sample_ind, interpolations, first_frame, + last_frame, hparams, decode_hp): + """Converts interpolated frames into tf summaries. + + The summaries consists of: + 1. Image summary corresponding to the first frame. + 2. Image summary corresponding to the last frame. + 3. The interpolated frames as a gif summary. + + Args: + sample_ind: int + interpolations: Numpy array, shape=(num_interp, H, W, 3) + first_frame: Numpy array, shape=(HWC) + last_frame: Numpy array, shape=(HWC) + hparams: HParams, train hparams + decode_hp: HParams, decode hparams + Returns: + summaries: list of tf Summary Values. + """ + parent_tag = "sample_%d" % sample_ind + frame_shape = hparams.problem.frame_shape + interp_shape = [hparams.batch_size, decode_hp.num_interp] + frame_shape + interpolations = np.reshape(interpolations, interp_shape) + interp_tag = "%s/interp/%s" % (parent_tag, decode_hp.channel_interp) + if decode_hp.channel_interp == "ranked": + interp_tag = "%s/rank_%d" % (interp_tag, decode_hp.rank_interp) + summaries, _ = common_video.py_gif_summary( + interp_tag, interpolations, return_summary_value=True, + max_outputs=decode_hp.max_display_outputs, + fps=decode_hp.frames_per_second) + + if decode_hp.save_frames: + first_frame_summ = image_utils.image_to_tf_summary_value( + first_frame, "%s/first" % parent_tag) + last_frame_summ = image_utils.image_to_tf_summary_value( + last_frame, "%s/last" % parent_tag) + summaries.append(first_frame_summ) + summaries.append(last_frame_summ) + return summaries + + +def main(_): + decode_hp = decode_hparams(FLAGS.decode_hparams) + trainer_lib.set_random_seed(FLAGS.random_seed) + if FLAGS.output_dir is None: + raise ValueError("Expected output_dir to be set to a valid path.") + + hparams = trainer_lib.create_hparams( + FLAGS.hparams_set, FLAGS.hparams, data_dir=FLAGS.data_dir, + problem_name=FLAGS.problem) + if hparams.batch_size != 1: + raise ValueError("Set batch-size to be equal to 1") + + # prepare dataset using Predict mode. 
+ dataset_split = "test" if FLAGS.eval_use_test_set else None + dataset = hparams.problem.dataset( + tf_estimator.ModeKeys.PREDICT, shuffle_files=False, hparams=hparams, + data_dir=FLAGS.data_dir, dataset_split=dataset_split) + dataset = dataset.batch(hparams.batch_size) + dataset = dataset.make_one_shot_iterator().get_next() + + # Obtain frame interpolations. + ops = [glow_ops.get_variable_ddi, glow_ops.actnorm, glow_ops.get_dropout] + var_scope = tf.variable_scope("next_frame_glow/body", reuse=tf.AUTO_REUSE) + with arg_scope(ops, init=False), var_scope: + interpolations, first_frame, last_frame = interpolate( + dataset, hparams, decode_hp) + + var_list = tf.global_variables() + saver = tf.train.Saver(var_list) + + # Get latest checkpoints from model_dir. + ckpt_path = tf.train.latest_checkpoint(FLAGS.output_dir) + final_dir = get_summaries_log_dir(decode_hp, FLAGS.output_dir, dataset_split) + summary_writer = tf.summary.FileWriter(final_dir) + global_step = decoding.latest_checkpoint_step(FLAGS.output_dir) + + sample_ind = 0 + num_samples = decode_hp.num_samples + all_summaries = [] + + with tf.train.MonitoredTrainingSession() as sess: + saver.restore(sess, ckpt_path) + + while not sess.should_stop() and sample_ind < num_samples: + interp_np, first_frame_np, last_frame_np = sess.run( + [interpolations, first_frame, last_frame]) + + interp_summ = interpolations_to_summary(sample_ind, interp_np, + first_frame_np[0], + last_frame_np[0], + hparams, decode_hp) + all_summaries.extend(interp_summ) + sample_ind += 1 + all_summaries = tf.Summary(value=list(all_summaries)) + summary_writer.add_summary(all_summaries, global_step) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/models/video/nfg_test_utils.py b/tensor2tensor/models/video/nfg_test_utils.py new file mode 100644 index 000000000..e81269e03 --- /dev/null +++ b/tensor2tensor/models/video/nfg_test_utils.py @@ -0,0 +1,185 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Testing utils for next_frame_glow.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import tempfile +import numpy as np +from tensor2tensor.data_generators import video_generated # pylint: disable=unused-import +from tensor2tensor.models.video import next_frame_glow +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +MODES = tf_estimator.ModeKeys + + +# TODO(mechcoder): Refactor or merge tests with the other next_frame_tests when +# this moves to a public version. 
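The tests below exercise different `num_cond_latents` and `cond_first_frame` settings. The windowing logic they exercise, `get_cond_latents` in `next_frame_glow.py` above, reduces for the `conv_net`/`conv3d_net` encoders to simple list slicing; the plain-Python sketch below is for intuition only and is not part of the library.

```python
def cond_window(all_latents, num_cond_latents, cond_first_frame=False):
  """Conditioning window over past latents, as in get_cond_latents() above."""
  needed = num_cond_latents + int(cond_first_frame)
  if len(all_latents) < needed:
    return None  # not enough history yet; fall back to the unconditional prior
  window = all_latents[-num_cond_latents:]
  if cond_first_frame:
    window = [all_latents[0]] + window
  return window

latents = ["z1", "z2", "z3", "z4"]
print(cond_window(latents, 2))                         # ['z3', 'z4']
print(cond_window(latents, 2, cond_first_frame=True))  # ['z1', 'z3', 'z4']
print(cond_window(["z1"], 3))                          # None
```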
+def fill_hparams(hparams, in_frames, out_frames, gen_mode="conditional", + latent_dist_encoder="pointwise", pretrain_steps=-1, + num_train_frames=-1, cond_first_frame=False, + apply_dilations=False, activation="relu"): + """Set next_frame_glow hparams.""" + hparams.latent_activation = activation + hparams.latent_apply_dilations = apply_dilations + hparams.video_num_input_frames = in_frames + hparams.video_num_target_frames = out_frames + hparams.latent_dist_encoder = latent_dist_encoder + hparams.gen_mode = gen_mode + hparams.pretrain_steps = pretrain_steps + hparams.num_train_frames = num_train_frames + hparams.cond_first_frame = cond_first_frame + if latent_dist_encoder in ["conv_net", "conv3d_net"]: + hparams.num_cond_latents = in_frames + else: + hparams.num_cond_latents = 1 + problem = registry.problem("video_stochastic_shapes10k") + p_hparams = problem.get_hparams(hparams) + hparams.problem = problem + hparams.problem_hparams = p_hparams + hparams.tiny_mode = True + hparams.reward_prediction = False + hparams.latent_architecture = "glow_resnet" + hparams.latent_encoder_depth = 2 + hparams.latent_pre_output_channels = 32 + if (hparams.gen_mode == "conditional" and + hparams.latent_dist_encoder == "pointwise"): + hparams.batch_size = 16 + hparams.init_batch_size = 16 + else: + hparams.batch_size = 16 + hparams.init_batch_size = 32 + hparams.affine_coupling_width = 32 + hparams.depth = 5 + hparams.n_levels = 2 + return hparams + + +def fill_infer_targets(x): + x["infer_targets"] = tf.identity(x["targets"]) + return x + + +def create_basic_features(hparams): + dataset = hparams.problem.dataset(MODES.TRAIN, hparams=hparams) + dataset = dataset.batch(hparams.batch_size) + dataset = dataset.map(fill_infer_targets) + return dataset.make_one_shot_iterator().get_next() + + +class NextFrameGlowTest(tf.test.TestCase): + """Utils for testing next_frame_glow.""" + + def should_run_session(self, hparams): + # dilated conv-3d not available on CPU. + return tf.test.is_gpu_available() or not hparams.latent_apply_dilations + + def checkAllConds(self, conds_array, num_total_frames, hparams): + if hparams.cond_first_frame: + self.assertEqual(conds_array, [True]*(num_total_frames + 1)) + elif hparams.pretrain_steps > -1: + self.assertEqual(conds_array, [False]*num_total_frames) + elif hparams.latent_dist_encoder != "pointwise": + self.assertEqual(conds_array, [True]*num_total_frames) + + def RunModel(self, model, train_op, hparams, features, num_frames, + model_path=None): + exp_num_frames = num_frames + int(hparams.cond_first_frame) + if hparams.gen_mode == "conditional": + self.assertLen(model.all_top_latents, exp_num_frames) + self.assertLen(model.all_level_latents, exp_num_frames) + + with tf.Session() as session: + + if model_path is not None: + saver = tf.train.Saver() + + session.run(tf.global_variables_initializer()) + + # Run initialization. + init_op = tf.get_collection("glow_init_op") + session.run(init_op) + + loss, top_conds = session.run([train_op["training"], model._all_conds]) # pylint: disable=protected-access + self.checkAllConds(top_conds, num_frames, hparams) + + if model_path is not None: + saver.save(session, model_path) + + # Check that one forward-propagation does not NaN, i.e + # initialization etc works as expected. 
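+ # A loss outside this range after a single step usually means the
+ # data-dependent initialization (the "glow_init_op" collection run above)
+ # did not execute or diverged.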
+ self.assertTrue(loss > 0.0 and loss < 10.0) + + def GlowTrainAndDecode(self, in_frames=1, out_frames=1, + latent_dist_encoder="pointwise", + gen_mode="conditional", pretrain_steps=-1, + num_train_frames=-1, cond_first_frame=False, + apply_dilations=False, activation="relu"): + """Test 1 forward pass and sampling gives reasonable results.""" + if num_train_frames == -1: + total_frames = in_frames + out_frames + else: + total_frames = num_train_frames + + curr_dir = tempfile.mkdtemp() + model_path = os.path.join(curr_dir, "model") + + # Training pipeline + with tf.Graph().as_default(): + hparams = next_frame_glow.next_frame_glow_hparams() + hparams = fill_hparams(hparams, in_frames, out_frames, + gen_mode, latent_dist_encoder, pretrain_steps, + num_train_frames, cond_first_frame, + apply_dilations, activation) + features = create_basic_features(hparams) + model = next_frame_glow.NextFrameGlow(hparams, MODES.TRAIN) + _, train_op = model(features) + if self.should_run_session(hparams): + self.RunModel(model, train_op, hparams, features, total_frames, + model_path) + + # Inference pipeline + with tf.Graph().as_default(): + hparams = next_frame_glow.next_frame_glow_hparams() + if hparams.gen_mode == "unconditional": + hparams.video_num_target_frames = 1 + hparams = fill_hparams(hparams, in_frames, out_frames, + gen_mode, latent_dist_encoder, pretrain_steps, + num_train_frames, cond_first_frame, + apply_dilations, activation) + features = create_basic_features(hparams) + model = next_frame_glow.NextFrameGlow( + hparams, tf_estimator.ModeKeys.PREDICT) + predictions = model.infer(features) + outputs = predictions["outputs"] + model_path = os.path.join(curr_dir, "model") + + if self.should_run_session(hparams): + with tf.Session() as session: + saver = tf.train.Saver() + saver.restore(session, model_path) + outputs_np = session.run(outputs) + self.assertEqual(outputs_np.shape, (16, out_frames, 64, 64, 3)) + self.assertTrue(np.all(outputs_np <= 255)) + self.assertTrue(np.all(outputs_np >= 0)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/video/nfg_uncond_test.py b/tensor2tensor/models/video/nfg_uncond_test.py new file mode 100644 index 000000000..041062ab7 --- /dev/null +++ b/tensor2tensor/models/video/nfg_uncond_test.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for unconditional glow.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +from tensor2tensor.models.video import nfg_test_utils +import tensorflow.compat.v1 as tf + +uncond_hparams = ( + ("in_1_out_1", 1, 1, "pointwise", "conditional"), + ("uncond", 1, 3, "pointwise", "unconditional", -1, 1),) + + +class NfgUncondTest(nfg_test_utils.NextFrameGlowTest, parameterized.TestCase): + + @parameterized.named_parameters(*uncond_hparams) + def testGlowTrainAndDecode(self, in_frames=1, out_frames=1, + latent_dist_encoder="pointwise", + gen_mode="conditional", pretrain_steps=-1, + num_train_frames=-1, cond_first_frame=False): + self.GlowTrainAndDecode( + in_frames=in_frames, out_frames=out_frames, + latent_dist_encoder=latent_dist_encoder, gen_mode=gen_mode, + pretrain_steps=pretrain_steps, num_train_frames=num_train_frames, + cond_first_frame=cond_first_frame) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/video/savp.py b/tensor2tensor/models/video/savp.py new file mode 100644 index 000000000..bf70623df --- /dev/null +++ b/tensor2tensor/models/video/savp.py @@ -0,0 +1,564 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Stochastic Adversarial Video Prediction model. + +Reference: https://arxiv.org/abs/1804.01523 +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numbers +import numpy as np + +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.models.video import savp_params # pylint: disable=unused-import +from tensor2tensor.models.video import sv2p +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry +from tensor2tensor.utils import update_ops_hook + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +import tensorflow_gan as tfgan + +gan_losses = tfgan.losses.wargs + + +class NextFrameSavpBase(object): + """Main function for Stochastic Adversarial Video Prediction.""" + + def encoder(self, inputs, n_layers=3): + """Convnet that encodes inputs into mean and std of a gaussian. + + Args: + inputs: 5-D Tensor, shape (batch_size, num_frames, width, height, channels) + n_layers: Number of layers. + + Returns: + z_mu: Mean of the latent gaussians. + z_log_var: log(var) of the latent gaussians. + + Raises: + ValueError: If inputs is not a 5-D tensor or not float32. 
+ """ + latent_dims = self.hparams.z_dim + + shape_as_list = inputs.shape.as_list() + if len(shape_as_list) != 5: + raise ValueError("Expected inputs to be a 5-D, got %d" % + len(shape_as_list)) + if inputs.dtype != tf.float32: + raise ValueError("Expected dtype tf.float32, got %s" % inputs.dtype) + + # Flatten (N,T,W,H,C) into (NT,W,H,C) + batch_size, _ = shape_as_list[:2] + inputs = tf.reshape(inputs, [-1] + list(inputs.shape)[2:]) + n_filters = 64 + rectified = None + + # Applies 3 layer conv-net with padding, instance normalization + # and leaky relu as per the encoder in + # https://github.com/alexlee-gk/video_prediction + padding = [[0, 0], [1, 1], [1, 1], [0, 0]] + for i in range(n_layers): + with tf.variable_scope("layer_%d" % (i + 1)): + n_filters *= 2**i + if i: + padded = tf.pad(rectified, padding) + else: + padded = tf.pad(inputs, padding) + convolved = tf.layers.conv2d(padded, filters=n_filters, kernel_size=4, + strides=2, padding="VALID") + normalized = contrib.layers().instance_norm(convolved) + rectified = tf.nn.leaky_relu(normalized, alpha=0.2) + + # Mean pooling across all spatial dimensions. + pooled = tf.nn.avg_pool( + rectified, [1] + rectified.shape[1:3].as_list() + [1], + strides=[1, 1, 1, 1], padding="VALID") + squeezed = tf.squeeze(pooled, [1, 2]) + + # Down-project and output the mean and log of the standard deviation of + # the latents. + with tf.variable_scope("z_mu"): + z_mu = tf.layers.dense(squeezed, latent_dims) + with tf.variable_scope("z_log_sigma_sq"): + z_log_var = tf.layers.dense(squeezed, latent_dims) + z_log_var = tf.clip_by_value(z_log_var, -10, 10) + + # Reshape to (batch_size X num_frames X latent_dims) + z_mu = tf.reshape(z_mu, (batch_size, -1, latent_dims)) + z_log_var = tf.reshape( + z_log_var, (batch_size, -1, latent_dims)) + return z_mu, z_log_var + + def expected_output_shape(self, input_shape, stride, padding, kernel_size): + return (input_shape + 2*padding - kernel_size) // stride + 1 + + def get_fc_dimensions(self, strides, kernel_sizes): + """Get expected fully connected shape after a series of convolutions.""" + output_height, output_width, _ = self.hparams.problem.frame_shape + output_steps = self.hparams.video_num_target_frames + output_shape = np.array([output_steps, output_height, output_width]) + for curr_stride, kernel_size in zip(strides, kernel_sizes): + output_shape = self.expected_output_shape( + output_shape, np.array(curr_stride), 1, kernel_size) + return np.prod(output_shape) * self.hparams.num_discriminator_filters * 8 + + def discriminator(self, frames): + """3-D SNGAN discriminator. + + Args: + frames: a list of batch-major tensors indexed by time. + + Returns: + logits: 1-D Tensor with shape=batch_size. + Positive logits imply that the discriminator thinks that it + belongs to the true class. + """ + ndf = self.hparams.num_discriminator_filters + frames = tf.stack(frames) + + # Switch from time-major axis to batch-major axis. + frames = common_video.swap_time_and_batch_axes(frames) + + # 3-D Conv-net mapping inputs to activations. 
+ num_outputs = [ndf, ndf*2, ndf*2, ndf*4, ndf*4, ndf*8, ndf*8] + kernel_sizes = [3, 4, 3, 4, 3, 4, 3] + strides = [[1, 1, 1], [1, 2, 2], [1, 1, 1], [1, 2, 2], [1, 1, 1], + [2, 2, 2], [1, 1, 1]] + + names = ["video_sn_conv0_0", "video_sn_conv0_1", "video_sn_conv1_0", + "video_sn_conv1_1", "video_sn_conv2_0", "video_sn_conv2_1", + "video_sn_conv3_0"] + iterable = zip(num_outputs, kernel_sizes, strides, names) + activations = frames + for num_filters, kernel_size, stride, name in iterable: + activations = self.pad_conv3d_lrelu(activations, num_filters, kernel_size, + stride, name) + num_fc_dimensions = self.get_fc_dimensions(strides, kernel_sizes) + activations = tf.reshape(activations, (-1, num_fc_dimensions)) + return tf.squeeze(tf.layers.dense(activations, 1)) + + def d_step(self, true_frames, gen_frames): + """Performs the discriminator step in computing the GAN loss. + + Applies stop-gradient to the generated frames while computing the + discriminator loss to make sure that the gradients are not back-propagated + to the generator. This makes sure that only the discriminator is updated. + + Args: + true_frames: True outputs + gen_frames: Generated frames. + Returns: + d_loss: Loss component due to the discriminator. + """ + hparam_to_disc_loss = { + "least_squares": gan_losses.least_squares_discriminator_loss, + "cross_entropy": gan_losses.modified_discriminator_loss, + "wasserstein": gan_losses.wasserstein_discriminator_loss} + + # Concat across batch-axis. + _, batch_size, _, _, _ = common_layers.shape_list(true_frames) + all_frames = tf.concat( + [true_frames, tf.stop_gradient(gen_frames)], axis=1) + + all_logits = self.discriminator(all_frames) + true_logits, fake_logits_stop = \ + all_logits[:batch_size], all_logits[batch_size:] + mean_true_logits = tf.reduce_mean(true_logits) + tf.summary.scalar("mean_true_logits", mean_true_logits) + + mean_fake_logits_stop = tf.reduce_mean(fake_logits_stop) + tf.summary.scalar("mean_fake_logits_stop", mean_fake_logits_stop) + + discriminator_loss_func = hparam_to_disc_loss[self.hparams.gan_loss] + gan_d_loss = discriminator_loss_func( + discriminator_real_outputs=true_logits, + discriminator_gen_outputs=fake_logits_stop, + add_summaries=True) + return gan_d_loss, true_logits, fake_logits_stop + + def g_step(self, gen_frames, fake_logits_stop): + """Performs the generator step in computing the GAN loss. + + Args: + gen_frames: Generated frames + fake_logits_stop: Logits corresponding to the generated frames as per + the discriminator. Assumed to have a stop-gradient term. + Returns: + gan_g_loss_pos_d: Loss. + gan_g_loss_neg_d: -gan_g_loss_pos_d but with a stop gradient on generator. + """ + hparam_to_gen_loss = { + "least_squares": gan_losses.least_squares_generator_loss, + "cross_entropy": gan_losses.modified_generator_loss, + "wasserstein": gan_losses.wasserstein_generator_loss + } + + fake_logits = self.discriminator(gen_frames) + mean_fake_logits = tf.reduce_mean(fake_logits) + tf.summary.scalar("mean_fake_logits", mean_fake_logits) + + # Generator loss. + # Using gan_g_loss_pos_d updates the discriminator as well. + # To avoid this add gan_g_loss_neg_d = -gan_g_loss_pos_d + # but with stop gradient on the generator. + # This makes sure that the net gradient on the discriminator is zero and + # net-gradient on the generator is just due to the gan_g_loss_pos_d. 
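+ # In other words, for these two terms:
+ #   d(loss)/d(G) comes only from gan_g_loss_pos_d, and
+ #   d(loss)/d(D) = d(gan_g_loss_pos_d)/d(D) - d(gan_g_loss_pos_d)/d(D) = 0,
+ # since fake_logits and fake_logits_stop share values but the latter blocks
+ # gradients into the generator.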
+ generator_loss_func = hparam_to_gen_loss[self.hparams.gan_loss] + gan_g_loss_pos_d = generator_loss_func( + discriminator_gen_outputs=fake_logits, add_summaries=True) + gan_g_loss_neg_d = -generator_loss_func( + discriminator_gen_outputs=fake_logits_stop, add_summaries=True) + return gan_g_loss_pos_d, gan_g_loss_neg_d + + def get_gan_loss(self, true_frames, gen_frames, name): + """Get the discriminator + generator loss at every step. + + This performs an 1:1 update of the discriminator and generator at every + step. + + Args: + true_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C) + Assumed to be ground truth. + gen_frames: 5-D Tensor of shape (num_steps, batch_size, H, W, C) + Assumed to be fake. + name: discriminator scope. + Returns: + loss: 0-D Tensor, with d_loss + g_loss + """ + # D - STEP + with tf.variable_scope("%s_discriminator" % name, reuse=tf.AUTO_REUSE): + gan_d_loss, _, fake_logits_stop = self.d_step( + true_frames, gen_frames) + + # G - STEP + with tf.variable_scope("%s_discriminator" % name, reuse=True): + gan_g_loss_pos_d, gan_g_loss_neg_d = self.g_step( + gen_frames, fake_logits_stop) + gan_g_loss = gan_g_loss_pos_d + gan_g_loss_neg_d + tf.summary.scalar("gan_loss_%s" % name, gan_g_loss_pos_d + gan_d_loss) + + if self.hparams.gan_optimization == "joint": + gan_loss = gan_g_loss + gan_d_loss + else: + curr_step = self.get_iteration_num() + gan_loss = tf.cond( + tf.logical_not(curr_step % 2 == 0), lambda: gan_g_loss, + lambda: gan_d_loss) + return gan_loss + + def get_extra_loss(self, latent_means=None, latent_stds=None, + true_frames=None, gen_frames=None): + """Gets extra loss from VAE and GAN.""" + if not self.is_training: + return 0.0 + + vae_loss, d_vae_loss, d_gan_loss = 0.0, 0.0, 0.0 + # Use sv2p's KL divergence computation. + if self.hparams.use_vae: + vae_loss = super(NextFrameSavpBase, self).get_extra_loss( + latent_means=latent_means, latent_stds=latent_stds) + + if self.hparams.use_gan: + # Strip out the first context_frames for the true_frames + # Strip out the first context_frames - 1 for the gen_frames + context_frames = self.hparams.video_num_input_frames + true_frames = tf.stack( + tf.unstack(true_frames, axis=0)[context_frames:]) + + # discriminator for VAE. + if self.hparams.use_vae: + gen_enc_frames = tf.stack( + tf.unstack(gen_frames, axis=0)[context_frames-1:]) + d_vae_loss = self.get_gan_loss(true_frames, gen_enc_frames, name="vae") + + # discriminator for GAN. 
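+ # gen_prior_video holds frames decoded from prior latents; they are scored
+ # by a separate discriminator scope (name="gan") from the VAE branch above.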
+ gen_prior_frames = tf.stack( + tf.unstack(self.gen_prior_video, axis=0)[context_frames-1:]) + d_gan_loss = self.get_gan_loss(true_frames, gen_prior_frames, name="gan") + + return ( + vae_loss + self.hparams.gan_loss_multiplier * d_gan_loss + + self.hparams.gan_vae_loss_multiplier * d_vae_loss) + + def pad_conv3d_lrelu(self, activations, n_filters, kernel_size, strides, + scope): + """Pad, apply 3-D convolution and leaky relu.""" + padding = [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]] + + # tf.nn.conv3d accepts a list of 5 values for strides + # with first and last value equal to 1 + if isinstance(strides, numbers.Integral): + strides = [strides] * 3 + strides = [1] + strides + [1] + + # Filter_shape = [K, K, K, num_input, num_output] + filter_shape = ( + [kernel_size]*3 + activations.shape[-1:].as_list() + [n_filters]) + + with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): + conv_filter = tf.get_variable( + "conv_filter", shape=filter_shape, + initializer=tf.truncated_normal_initializer(stddev=0.02)) + + if self.hparams.use_spectral_norm: + conv_filter, assign_op = common_layers.apply_spectral_norm(conv_filter) + if self.is_training: + tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, assign_op) + + padded = tf.pad(activations, padding) + convolved = tf.nn.conv3d( + padded, conv_filter, strides=strides, padding="VALID") + rectified = tf.nn.leaky_relu(convolved, alpha=0.2) + return rectified + + @staticmethod + def train_hooks(hook_context): + del hook_context + return [update_ops_hook.UpdateOpsHook()] + + +@registry.register_model +class NextFrameSAVP(NextFrameSavpBase, sv2p.NextFrameSv2pLegacy): + """Stochastic Adversarial Video Prediction.""" + + def construct_model(self, images, actions, rewards): + """Model that takes in images and returns predictions. + + Args: + images: list of 4-D Tensors indexed by time. + (batch_size, width, height, channels) + actions: list of action tensors + each action should be in the shape ?x1xZ + rewards: list of reward tensors + each reward should be in the shape ?x1xZ + + Returns: + video: list of 4-D predicted frames. + all_rewards: predicted rewards. + latent_means: list of gaussian means conditioned on the input at + every frame. + latent_stds: list of gaussian stds conditioned on the input at + every frame. + + Raises: + ValueError: If not exactly one of self.hparams.vae or self.hparams.gan + is set to True. + """ + if not self.hparams.use_vae and not self.hparams.use_gan: + raise ValueError("Set at least one of use_vae or use_gan to be True") + if self.hparams.gan_optimization not in ["joint", "sequential"]: + raise ValueError("self.hparams.gan_optimization should be either joint " + "or sequential got %s" % self.hparams.gan_optimization) + + images = tf.unstack(images, axis=0) + actions = tf.unstack(actions, axis=0) + rewards = tf.unstack(rewards, axis=0) + + latent_dims = self.hparams.z_dim + context_frames = self.hparams.video_num_input_frames + seq_len = len(images) + input_shape = common_layers.shape_list(images[0]) + batch_size = input_shape[0] + + # Model does not support reward-conditioned frame generation. + fake_rewards = rewards[:-1] + + # Concatenate x_{t-1} and x_{t} along depth and encode it to + # produce the mean and standard deviation of z_{t-1} + image_pairs = tf.concat([images[:seq_len - 1], + images[1:seq_len]], axis=-1) + + z_mu, z_log_sigma_sq = self.encoder(image_pairs) + # Unstack z_mu and z_log_sigma_sq along the time dimension. 
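+ # After unstacking there is one (mu, log_sigma_sq) pair per (x_{t-1}, x_t)
+ # transition, consumed in lock-step with images/actions/rewards below.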
+ z_mu = tf.unstack(z_mu, axis=0) + z_log_sigma_sq = tf.unstack(z_log_sigma_sq, axis=0) + iterable = zip(images[:-1], actions[:-1], fake_rewards, + z_mu, z_log_sigma_sq) + + # Initialize LSTM State + lstm_state = [None] * 7 + gen_cond_video, gen_prior_video, all_rewards, latent_means, latent_stds = \ + [], [], [], [], [] + pred_image = tf.zeros_like(images[0]) + prior_latent_state, cond_latent_state = None, None + train_mode = self.hparams.mode == tf_estimator.ModeKeys.TRAIN + + # Create scheduled sampling function + ss_func = self.get_scheduled_sample_func(batch_size) + + with tf.variable_scope("prediction", reuse=tf.AUTO_REUSE): + + for step, (image, action, reward, mu, log_sigma_sq) in enumerate(iterable): # pylint:disable=line-too-long + # Sample latents using a gaussian centered at conditional mu and std. + latent = common_video.get_gaussian_tensor(mu, log_sigma_sq) + + # Sample prior latents from isotropic normal distribution. + prior_latent = tf.random_normal(tf.shape(latent), dtype=tf.float32) + + # LSTM that encodes correlations between conditional latents. + # Pg 22 in https://arxiv.org/pdf/1804.01523.pdf + enc_cond_latent, cond_latent_state = common_video.basic_lstm( + latent, cond_latent_state, latent_dims, name="cond_latent") + + # LSTM that encodes correlations between prior latents. + enc_prior_latent, prior_latent_state = common_video.basic_lstm( + prior_latent, prior_latent_state, latent_dims, name="prior_latent") + + # Scheduled Sampling + done_warm_start = step > context_frames - 1 + groundtruth_items = [image] + generated_items = [pred_image] + input_image, = self.get_scheduled_sample_inputs( + done_warm_start, groundtruth_items, generated_items, ss_func) + + all_latents = tf.concat([enc_cond_latent, enc_prior_latent], axis=0) + all_image = tf.concat([input_image, input_image], axis=0) + all_action = tf.concat([action, action], axis=0) + all_rewards = tf.concat([reward, reward], axis=0) + + all_pred_images, lstm_state, _ = self.construct_predictive_tower( + all_image, all_rewards, all_action, lstm_state, all_latents, + concat_latent=True) + + cond_pred_images, prior_pred_images = \ + all_pred_images[:batch_size], all_pred_images[batch_size:] + + if train_mode and self.hparams.use_vae: + pred_image = cond_pred_images + else: + pred_image = prior_pred_images + + gen_cond_video.append(cond_pred_images) + gen_prior_video.append(prior_pred_images) + latent_means.append(mu) + latent_stds.append(log_sigma_sq) + + gen_cond_video = tf.stack(gen_cond_video, axis=0) + self.gen_prior_video = tf.stack(gen_prior_video, axis=0) + fake_rewards = tf.stack(fake_rewards, axis=0) + + if train_mode and self.hparams.use_vae: + return gen_cond_video, fake_rewards, latent_means, latent_stds + else: + return self.gen_prior_video, fake_rewards, latent_means, latent_stds + + +@registry.register_model +class NextFrameSavpRl(NextFrameSavpBase, sv2p.NextFrameSv2p): + """Stochastic Adversarial Video Prediction for RL pipeline.""" + + def video_features( + self, all_frames, all_actions, all_rewards, all_raw_frames): + """No video wide feature.""" + del all_actions, all_rewards, all_raw_frames + # Concatenate x_{t-1} and x_{t} along depth and encode it to + # produce the mean and standard deviation of z_{t-1} + seq_len = len(all_frames) + image_pairs = tf.concat([all_frames[:seq_len-1], + all_frames[1:seq_len]], axis=-1) + z_mu, z_log_sigma_sq = self.encoder(image_pairs) + # Unstack z_mu and z_log_sigma_sq along the time dimension. 
+ z_mu = tf.unstack(z_mu, axis=0) + z_log_sigma_sq = tf.unstack(z_log_sigma_sq, axis=0) + return [z_mu, z_log_sigma_sq] + + def video_extra_loss(self, frames_predicted, frames_target, + internal_states, video_features): + + if not self.is_training: + return 0.0 + + latent_means, latent_stds = video_features + true_frames, gen_frames = frames_target, frames_predicted + + loss = super(NextFrameSavpRl, self).get_extra_loss( + latent_means=latent_means, latent_stds=latent_stds, + true_frames=true_frames, gen_frames=gen_frames) + return loss + + def next_frame(self, frames, actions, rewards, target_frame, + internal_states, video_features): + del target_frame + + if not self.hparams.use_vae or self.hparams.use_gan: + raise NotImplementedError("Only supporting VAE for now.") + + if self.has_pred_actions or self.has_values: + raise NotImplementedError("Parameter sharing with policy not supported.") + + image, action, reward = frames[0], actions[0], rewards[0] + latent_dims = self.hparams.z_dim + batch_size = common_layers.shape_list(image)[0] + + if internal_states is None: + # Initialize LSTM State + frame_index = 0 + lstm_state = [None] * 7 + cond_latent_state, prior_latent_state = None, None + gen_prior_video = [] + else: + (frame_index, lstm_state, cond_latent_state, + prior_latent_state, gen_prior_video) = internal_states + + z_mu, log_sigma_sq = video_features + z_mu, log_sigma_sq = z_mu[frame_index], log_sigma_sq[frame_index] + + # Sample latents using a gaussian centered at conditional mu and std. + latent = common_video.get_gaussian_tensor(z_mu, log_sigma_sq) + + # Sample prior latents from isotropic normal distribution. + prior_latent = tf.random_normal(tf.shape(latent), dtype=tf.float32) + + # # LSTM that encodes correlations between conditional latents. + # # Pg 22 in https://arxiv.org/pdf/1804.01523.pdf + enc_cond_latent, cond_latent_state = common_video.basic_lstm( + latent, cond_latent_state, latent_dims, name="cond_latent") + + # LSTM that encodes correlations between prior latents. + enc_prior_latent, prior_latent_state = common_video.basic_lstm( + prior_latent, prior_latent_state, latent_dims, name="prior_latent") + + all_latents = tf.concat([enc_cond_latent, enc_prior_latent], axis=0) + all_image = tf.concat([image, image], 0) + all_action = tf.concat([action, action], 0) if self.has_actions else None + + all_pred_images, lstm_state = self.construct_predictive_tower( + all_image, None, all_action, lstm_state, all_latents, + concat_latent=True) + + cond_pred_images, prior_pred_images = \ + all_pred_images[:batch_size], all_pred_images[batch_size:] + + if self.is_training and self.hparams.use_vae: + pred_image = cond_pred_images + else: + pred_image = prior_pred_images + + gen_prior_video.append(prior_pred_images) + internal_states = (frame_index + 1, lstm_state, cond_latent_state, + prior_latent_state, gen_prior_video) + + if not self.has_rewards: + return pred_image, None, 0.0, internal_states + + pred_reward = self.reward_prediction( + pred_image, action, reward, latent) + return pred_image, pred_reward, None, None, 0.0, internal_states diff --git a/tensor2tensor/models/video/savp_params.py b/tensor2tensor/models/video/savp_params.py new file mode 100644 index 000000000..b5705f43e --- /dev/null +++ b/tensor2tensor/models/video/savp_params.py @@ -0,0 +1,92 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Param sets for SAVP model.""" + +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import modalities +from tensor2tensor.models.video import sv2p_params +from tensor2tensor.utils import registry + + +@registry.register_hparams +def next_frame_savp(): + """SAVP model hparams.""" + hparams = sv2p_params.next_frame_sv2p() + hparams.add_hparam("z_dim", 8) + hparams.add_hparam("num_discriminator_filters", 32) + hparams.add_hparam("use_vae", True) + hparams.add_hparam("use_gan", False) + hparams.add_hparam("use_spectral_norm", True) + hparams.add_hparam("gan_loss", "cross_entropy") + hparams.add_hparam("gan_loss_multiplier", 0.01) + hparams.add_hparam("gan_vae_loss_multiplier", 0.01) + hparams.add_hparam("gan_optimization", "joint") + hparams.bottom = { + "inputs": modalities.video_raw_bottom, + "targets": modalities.video_raw_targets_bottom, + } + hparams.loss = { + "targets": modalities.video_l1_raw_loss, + } + hparams.top = { + "targets": modalities.video_raw_top, + } + hparams.latent_loss_multiplier_schedule = "linear" + hparams.upsample_method = "bilinear_upsample_conv" + hparams.internal_loss = False + hparams.reward_prediction = False + hparams.anneal_end = 100000 + hparams.num_iterations_1st_stage = 0 + hparams.num_iterations_2nd_stage = 50000 + return hparams + + +@registry.register_hparams +def next_frame_savp_l2(): + """SAVP with L2 reconstruction loss.""" + hparams = next_frame_savp() + hparams.loss = { + "targets": modalities.video_l2_raw_loss, + } + return hparams + + +@registry.register_hparams +def next_frame_savp_vae(): + """SAVP - VAE only model.""" + hparams = next_frame_savp() + hparams.use_vae = True + hparams.use_gan = False + hparams.latent_loss_multiplier = 1e-3 + hparams.latent_loss_multiplier_schedule = "linear_anneal" + return hparams + + +@registry.register_hparams +def next_frame_savp_gan(): + """SAVP - GAN only model.""" + hparams = next_frame_savp() + hparams.use_gan = True + hparams.use_vae = False + hparams.gan_loss_multiplier = 0.001 + hparams.optimizer_adam_beta1 = 0.5 + hparams.learning_rate_constant = 2e-4 + hparams.gan_loss = "cross_entropy" + hparams.learning_rate_decay_steps = 100000 + hparams.learning_rate_schedule = "constant*linear_decay" + return hparams diff --git a/tensor2tensor/models/video/savp_test.py b/tensor2tensor/models/video/savp_test.py new file mode 100644 index 000000000..94bf0c056 --- /dev/null +++ b/tensor2tensor/models/video/savp_test.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Basic tests for SAVP model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.models.video import savp +from tensor2tensor.models.video import savp_params +from tensor2tensor.models.video import tests_utils + + +import tensorflow.compat.v1 as tf + + +class NextFrameTest(tests_utils.BaseNextFrameTest): + + def testSavpVAE(self): + savp_hparams = savp_params.next_frame_savp() + savp_hparams.use_vae = True + savp_hparams.use_gan = False + self.TestOnVariousInputOutputSizes( + savp_hparams, savp.NextFrameSAVP, 1) + self.TestOnVariousUpSampleLayers( + savp_hparams, savp.NextFrameSAVP, 1) + + def testSavpGAN(self): + hparams = savp_params.next_frame_savp() + hparams.use_gan = True + hparams.use_vae = False + self.TestVideoModel(7, 5, hparams, savp.NextFrameSAVP, 1) + + hparams.gan_optimization = "sequential" + self.TestVideoModel(7, 5, hparams, savp.NextFrameSAVP, 1) + + def testSavpGANVAE(self): + hparams = savp_params.next_frame_savp() + hparams.use_vae = True + hparams.use_gan = True + self.TestVideoModel(7, 5, hparams, savp.NextFrameSAVP, 1) + + def testInvalidVAEGANCombinations(self): + hparams = savp_params.next_frame_savp() + hparams.use_gan = False + hparams.use_vae = False + self.assertRaises(ValueError, self.TestVideoModel, + 7, 5, hparams, savp.NextFrameSAVP, 1) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/video/sv2p.py b/tensor2tensor/models/video/sv2p.py new file mode 100644 index 000000000..09cc3f586 --- /dev/null +++ b/tensor2tensor/models/video/sv2p.py @@ -0,0 +1,823 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""SV2P: Stochastic Variational Video Prediction. + + based on the following paper: + https://arxiv.org/abs/1710.11252 + by Mohammad Babaeizadeh, Chelsea Finn, Dumitru Erhan, + Roy H. Campbell and Sergey Levine +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.layers import discretization + +from tensor2tensor.models.video import base +from tensor2tensor.models.video import base_vae +from tensor2tensor.utils import contrib +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +tfl = tf.layers +tfcl = contrib.layers() + + +@registry.register_model +class NextFrameSv2p(base.NextFrameBase, base_vae.NextFrameBaseVae): + """Stochastic Variational Video Prediction From Basic Model!""" + + @property + def is_recurrent_model(self): + return True + + def tinyify(self, array): + return common_video.tinyify( + array, self.hparams.tiny_mode, self.hparams.small_mode) + + def bottom_part_tower(self, input_image, input_reward, action, latent, + lstm_state, lstm_size, conv_size, concat_latent=False): + """The bottom part of predictive towers. 
+ + With the current (early) design, the main prediction tower and + the reward prediction tower share the same arcitecture. TF Scope can be + adjusted as required to either share or not share the weights between + the two towers. + + Args: + input_image: the current image. + input_reward: the current reward. + action: the action taken by the agent. + latent: the latent vector. + lstm_state: the current internal states of conv lstms. + lstm_size: the size of lstms. + conv_size: the size of convolutions. + concat_latent: whether or not to concatenate the latent at every step. + + Returns: + - the output of the partial network. + - intermidate outputs for skip connections. + """ + lstm_func = common_video.conv_lstm_2d + tile_and_concat = common_video.tile_and_concat + + input_image = common_layers.make_even_size(input_image) + concat_input_image = tile_and_concat( + input_image, latent, concat_latent=concat_latent) + + layer_id = 0 + enc0 = tfl.conv2d( + concat_input_image, + conv_size[0], [5, 5], + strides=(2, 2), + activation=tf.nn.relu, + padding="SAME", + name="scale1_conv1") + enc0 = tfcl.layer_norm(enc0, scope="layer_norm1") + + hidden1, lstm_state[layer_id] = lstm_func( + enc0, lstm_state[layer_id], lstm_size[layer_id], name="state1") + hidden1 = tile_and_concat(hidden1, latent, concat_latent=concat_latent) + hidden1 = tfcl.layer_norm(hidden1, scope="layer_norm2") + layer_id += 1 + + hidden2, lstm_state[layer_id] = lstm_func( + hidden1, lstm_state[layer_id], lstm_size[layer_id], name="state2") + hidden2 = tfcl.layer_norm(hidden2, scope="layer_norm3") + hidden2 = common_layers.make_even_size(hidden2) + enc1 = tfl.conv2d(hidden2, hidden2.get_shape()[3], [3, 3], strides=(2, 2), + padding="SAME", activation=tf.nn.relu, name="conv2") + enc1 = tile_and_concat(enc1, latent, concat_latent=concat_latent) + layer_id += 1 + + if self.hparams.small_mode: + hidden4, enc2 = hidden2, enc1 + else: + hidden3, lstm_state[layer_id] = lstm_func( + enc1, lstm_state[layer_id], lstm_size[layer_id], name="state3") + hidden3 = tile_and_concat(hidden3, latent, concat_latent=concat_latent) + hidden3 = tfcl.layer_norm(hidden3, scope="layer_norm4") + layer_id += 1 + + hidden4, lstm_state[layer_id] = lstm_func( + hidden3, lstm_state[layer_id], lstm_size[layer_id], name="state4") + hidden4 = tile_and_concat(hidden4, latent, concat_latent=concat_latent) + hidden4 = tfcl.layer_norm(hidden4, scope="layer_norm5") + hidden4 = common_layers.make_even_size(hidden4) + enc2 = tfl.conv2d(hidden4, hidden4.get_shape()[3], [3, 3], strides=(2, 2), + padding="SAME", activation=tf.nn.relu, name="conv3") + layer_id += 1 + + if action is not None: + enc2 = common_video.inject_additional_input( + enc2, action, "action_enc", self.hparams.action_injection) + if input_reward is not None: + enc2 = common_video.inject_additional_input( + enc2, input_reward, "reward_enc") + if latent is not None and not concat_latent: + with tf.control_dependencies([latent]): + enc2 = tf.concat([enc2, latent], axis=3) + + enc3 = tfl.conv2d(enc2, hidden4.get_shape()[3], [1, 1], strides=(1, 1), + padding="SAME", activation=tf.nn.relu, name="conv4") + + hidden5, lstm_state[layer_id] = lstm_func( + enc3, lstm_state[layer_id], lstm_size[layer_id], name="state5") + hidden5 = tfcl.layer_norm(hidden5, scope="layer_norm6") + hidden5 = tile_and_concat(hidden5, latent, concat_latent=concat_latent) + layer_id += 1 + return hidden5, (enc0, enc1), layer_id + + def reward_prediction(self, *args, **kwargs): + model = self.hparams.reward_model + if model == "basic": + 
return self.reward_prediction_basic(*args, **kwargs) + elif model == "big": + return self.reward_prediction_big(*args, **kwargs) + elif model == "mid": + return self.reward_prediction_mid(*args, **kwargs) + else: + raise ValueError("Unknown reward model %s" % model) + + def reward_prediction_basic( + self, input_images, input_reward, action, latent, mid_outputs): + del input_reward, action, latent, mid_outputs + x = input_images + x = tf.reduce_mean(x, axis=[1, 2], keepdims=True) + x = tfl.dense(x, 128, activation=tf.nn.relu, name="reward_pred") + x = tf.expand_dims(x, axis=3) + return x + + def reward_prediction_mid( + self, input_images, input_reward, action, latent, mid_outputs): + """Builds a reward prediction network from intermediate layers.""" + encoded = [] + for i, output in enumerate(mid_outputs): + enc = output + enc = tfl.conv2d(enc, 64, [3, 3], strides=(1, 1), activation=tf.nn.relu) + enc = tfl.conv2d(enc, 32, [3, 3], strides=(2, 2), activation=tf.nn.relu) + enc = tfl.conv2d(enc, 16, [3, 3], strides=(2, 2), activation=tf.nn.relu) + enc = tfl.flatten(enc) + enc = tfl.dense(enc, 64, activation=tf.nn.relu, name="rew_enc_%d" % i) + encoded.append(enc) + x = encoded + x = tf.stack(x, axis=1) + x = tfl.flatten(x) + x = tfl.dense(x, 256, activation=tf.nn.relu, name="rew_dense1") + x = tfl.dense(x, 128, activation=tf.nn.relu, name="rew_dense2") + return x + + def reward_prediction_big( + self, input_images, input_reward, action, latent, mid_outputs): + """Builds a reward prediction network.""" + del mid_outputs + conv_size = self.tinyify([32, 32, 16, 8]) + + with tf.variable_scope("reward_pred", reuse=tf.AUTO_REUSE): + x = tf.concat(input_images, axis=3) + x = tfcl.layer_norm(x) + + if not self.hparams.small_mode: + x = tfl.conv2d(x, conv_size[1], [3, 3], strides=(2, 2), + activation=tf.nn.relu, name="reward_conv1") + x = tfcl.layer_norm(x) + + # Inject additional inputs + if action is not None: + x = common_video.inject_additional_input( + x, action, "action_enc", self.hparams.action_injection) + if input_reward is not None: + x = common_video.inject_additional_input(x, input_reward, "reward_enc") + if latent is not None: + latent = tfl.flatten(latent) + latent = tf.expand_dims(latent, axis=1) + latent = tf.expand_dims(latent, axis=1) + x = common_video.inject_additional_input(x, latent, "latent_enc") + + x = tfl.conv2d(x, conv_size[2], [3, 3], strides=(2, 2), + activation=tf.nn.relu, name="reward_conv2") + x = tfcl.layer_norm(x) + x = tfl.conv2d(x, conv_size[3], [3, 3], strides=(2, 2), + activation=tf.nn.relu, name="reward_conv3") + return x + + def get_extra_loss(self, + latent_means=None, latent_stds=None, + true_frames=None, gen_frames=None): + """Losses in addition to the default modality losses.""" + del true_frames, gen_frames + return self.get_kl_loss(latent_means, latent_stds) + + def construct_predictive_tower( + self, input_image, input_reward, action, lstm_state, latent, + concat_latent=False): + # Main tower + lstm_func = common_video.conv_lstm_2d + frame_shape = common_layers.shape_list(input_image) + batch_size, img_height, img_width, color_channels = frame_shape + # the number of different pixel motion predictions + # and the number of masks for each of those predictions + num_masks = self.hparams.num_masks + upsample_method = self.hparams.upsample_method + tile_and_concat = common_video.tile_and_concat + + lstm_size = self.tinyify([32, 32, 64, 64, 128, 64, 32]) + conv_size = self.tinyify([32]) + + with tf.variable_scope("main", reuse=tf.AUTO_REUSE): + hidden5, skips, 
layer_id = self.bottom_part_tower( + input_image, input_reward, action, latent, + lstm_state, lstm_size, conv_size, concat_latent=concat_latent) + enc0, enc1 = skips + + with tf.variable_scope("upsample1", reuse=tf.AUTO_REUSE): + enc4 = common_layers.cyclegan_upsample( + hidden5, num_outputs=hidden5.shape.as_list()[-1], + stride=[2, 2], method=upsample_method) + + enc1_shape = common_layers.shape_list(enc1) + enc4 = enc4[:, :enc1_shape[1], :enc1_shape[2], :] # Cut to shape. + enc4 = tile_and_concat(enc4, latent, concat_latent=concat_latent) + + hidden6, lstm_state[layer_id] = lstm_func( + enc4, lstm_state[layer_id], lstm_size[5], name="state6", + spatial_dims=enc1_shape[1:-1]) # 16x16 + hidden6 = tile_and_concat(hidden6, latent, concat_latent=concat_latent) + hidden6 = tfcl.layer_norm(hidden6, scope="layer_norm7") + # Skip connection. + hidden6 = tf.concat(axis=3, values=[hidden6, enc1]) # both 16x16 + layer_id += 1 + + with tf.variable_scope("upsample2", reuse=tf.AUTO_REUSE): + enc5 = common_layers.cyclegan_upsample( + hidden6, num_outputs=hidden6.shape.as_list()[-1], + stride=[2, 2], method=upsample_method) + + enc0_shape = common_layers.shape_list(enc0) + enc5 = enc5[:, :enc0_shape[1], :enc0_shape[2], :] # Cut to shape. + enc5 = tile_and_concat(enc5, latent, concat_latent=concat_latent) + + hidden7, lstm_state[layer_id] = lstm_func( + enc5, lstm_state[layer_id], lstm_size[6], name="state7", + spatial_dims=enc0_shape[1:-1]) # 32x32 + hidden7 = tfcl.layer_norm(hidden7, scope="layer_norm8") + layer_id += 1 + + # Skip connection. + hidden7 = tf.concat(axis=3, values=[hidden7, enc0]) # both 32x32 + + with tf.variable_scope("upsample3", reuse=tf.AUTO_REUSE): + enc6 = common_layers.cyclegan_upsample( + hidden7, num_outputs=hidden7.shape.as_list()[-1], + stride=[2, 2], method=upsample_method) + enc6 = tfcl.layer_norm(enc6, scope="layer_norm9") + enc6 = tile_and_concat(enc6, latent, concat_latent=concat_latent) + + if self.hparams.model_options == "DNA": + # Using largest hidden state for predicting untied conv kernels. + enc7 = tfl.conv2d_transpose( + enc6, + self.hparams.dna_kernel_size**2, + [1, 1], + strides=(1, 1), + padding="SAME", + name="convt4", + activation=None) + else: + # Using largest hidden state for predicting a new image layer. + enc7 = tfl.conv2d_transpose( + enc6, + color_channels, + [1, 1], + strides=(1, 1), + padding="SAME", + name="convt4", + activation=None) + # This allows the network to also generate one image from scratch, + # which is useful when regions of the image become unoccluded. + transformed = [tf.nn.sigmoid(enc7)] + + if self.hparams.model_options == "CDNA": + # cdna_input = tf.reshape(hidden5, [int(batch_size), -1]) + cdna_input = tfl.flatten(hidden5) + transformed += common_video.cdna_transformation( + input_image, cdna_input, num_masks, int(color_channels), + self.hparams.dna_kernel_size, self.hparams.relu_shift) + elif self.hparams.model_options == "DNA": + # Only one mask is supported (more should be unnecessary). + if num_masks != 1: + raise ValueError("Only one mask is supported for DNA model.") + transformed = [ + common_video.dna_transformation( + input_image, enc7, + self.hparams.dna_kernel_size, self.hparams.relu_shift)] + + masks = tfl.conv2d( + enc6, filters=num_masks + 1, kernel_size=[1, 1], + strides=(1, 1), name="convt7", padding="SAME") + masks = masks[:, :img_height, :img_width, ...] 
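+ # Normalize the num_masks + 1 mask channels with a softmax across the mask
+ # axis so that, at every pixel, the compositing weights below sum to one.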
+ masks = tf.reshape( + tf.nn.softmax(tf.reshape(masks, [-1, num_masks + 1])), + [batch_size, + int(img_height), + int(img_width), num_masks + 1]) + mask_list = tf.split( + axis=3, num_or_size_splits=num_masks + 1, value=masks) + output = mask_list[0] * input_image + for layer, mask in zip(transformed, mask_list[1:]): + # TODO(mbz): take another look at this logic and verify. + output = output[:, :img_height, :img_width, :] + layer = layer[:, :img_height, :img_width, :] + output += layer * mask + + # Map to softmax digits + if self.is_per_pixel_softmax: + output = tf.layers.dense( + output, self.hparams.problem.num_channels * 256, name="logits") + + mid_outputs = [enc0, enc1, enc4, enc5, enc6] + return output, lstm_state, mid_outputs + + def video_features( + self, all_frames, all_actions, all_rewards, all_raw_frames): + """Video wide latent.""" + del all_actions, all_rewards, all_raw_frames + if not self.hparams.stochastic_model: + return None, None, None + frames = tf.stack(all_frames, axis=1) + mean, std = self.construct_latent_tower(frames, time_axis=1) + latent = common_video.get_gaussian_tensor(mean, std) + return [latent, mean, std] + + def next_frame(self, frames, actions, rewards, target_frame, + internal_states, video_features): + del target_frame + + if self.has_policies or self.has_values: + raise NotImplementedError("Parameter sharing with policy not supported.") + + latent, latent_mean, latent_std = video_features + frames, actions, rewards = frames[0], actions[0], rewards[0] + + extra_loss = 0.0 + if internal_states is None: + internal_states = [None] * (5 if self.hparams.small_mode else 7) + if latent_mean is not None: + extra_loss = self.get_extra_loss([latent_mean], [latent_std]) + + pred_image, internal_states, mid_outputs = self.construct_predictive_tower( + frames, None, actions, internal_states, latent) + + if not self.has_rewards: + return pred_image, None, None, None, extra_loss, internal_states + + pred_reward = self.reward_prediction( + pred_image, actions, rewards, latent, mid_outputs) + return pred_image, pred_reward, None, None, extra_loss, internal_states + + +@registry.register_model +class NextFrameSv2pDiscrete(NextFrameSv2p): + """SV2P with discrete latent.""" + + def video_features( + self, all_frames, all_actions, all_rewards, all_raw_frames): + """No video wide latent.""" + del all_frames, all_actions, all_rewards, all_raw_frames + return None + + def basic_conv_net(self, images, conv_size, scope): + """Simple multi conv ln relu.""" + conv_size = self.tinyify(conv_size) + with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): + x = images + for i, c in enumerate(conv_size): + if i > 0: + x = tf.nn.relu(x) + x = common_layers.make_even_size(x) + x = tfl.conv2d(x, c, [3, 3], strides=(2, 2), + activation=None, padding="SAME", name="conv%d" % i) + x = tfcl.layer_norm(x) + return x + + def simple_discrete_latent_tower(self, input_image, target_image): + hparams = self.hparams + + if self.is_predicting: + batch_size = common_layers.shape_list(input_image)[0] + rand = tf.random_uniform([batch_size, hparams.bottleneck_bits]) + bits = 2.0 * tf.to_float(tf.less(0.5, rand)) - 1.0 + return bits + + conv_size = self.tinyify([64, 32, 32, 1]) + pair = tf.concat([input_image, target_image], axis=-1) + posterior_enc = self.basic_conv_net(pair, conv_size, "posterior_enc") + posterior_enc = tfl.flatten(posterior_enc) + bits, _ = discretization.tanh_discrete_bottleneck( + posterior_enc, + hparams.bottleneck_bits, + hparams.bottleneck_noise, + hparams.discretize_warmup_steps, + 
hparams.mode) + return bits + + def next_frame(self, frames, actions, rewards, target_frame, + internal_states, video_features): + del video_features + + if self.has_pred_actions or self.has_values: + raise NotImplementedError("Parameter sharing with policy not supported.") + + frames, actions, rewards = frames[0], actions[0], rewards[0] + + if internal_states is None: + internal_states = [None] * (5 if self.hparams.small_mode else 7) + + extra_loss = 0.0 + latent = self.simple_discrete_latent_tower(frames, target_frame) + + pred_image, internal_states, _ = self.construct_predictive_tower( + frames, None, actions, internal_states, latent, True) + + if not self.has_rewards: + return pred_image, None, extra_loss, internal_states + + pred_reward = self.reward_prediction( + pred_image, actions, rewards, latent) + return pred_image, pred_reward, None, None, extra_loss, internal_states + + +@registry.register_model +class NextFrameSv2pAtari(NextFrameSv2p): + """SV2P with specific changes for atari pipeline.""" + + def init_internal_states(self): + # Hardcoded LSTM-CONV shapes. + # These sizes are calculated based on original atari frames. + # TODO(mbz): find a cleaner way of doing this maybe?! + batch_size = self.hparams.batch_size + shapes = [(batch_size, 53, 40, 8), + (batch_size, 53, 40, 8), + (batch_size, 27, 20, 16), + (batch_size, 27, 20, 16), + (batch_size, 53, 40, 8)] + + with tf.variable_scope("clean_scope"): + # Initialize conv-lstm states with zeros + init = tf.zeros_initializer() + states = [] + for i, shape in enumerate(shapes): + # every lstm-conv state has two variables named c and h. + c = tf.get_variable("c%d" % i, shape, trainable=False, initializer=init) + h = tf.get_variable("h%d" % i, shape, trainable=False, initializer=init) + states.append((c, h)) + return states + + def reset_internal_states_ops(self): + zeros = [(tf.zeros_like(c), tf.zeros_like(h)) + for c, h in self.internal_states] + return self.save_internal_states_ops(zeros) + + def load_internal_states_ops(self): + ops = [(c.read_value(), h.read_value()) for c, h in self.internal_states] + return ops + + def save_internal_states_ops(self, internal_states): + ops = [[tf.assign(x[0], y[0]), tf.assign(x[1], y[1])] + for x, y in zip(self.internal_states, internal_states)] + return ops + + +@registry.register_model +class NextFrameSv2pLegacy(NextFrameSv2p): + """Old SV2P code. 
Only for legacy reasons.""" + + def visualize_predictions(self, real_frames, gen_frames, actions=None): + + def concat_on_y_axis(x): + x = tf.unstack(x, axis=1) + x = tf.concat(x, axis=1) + return x + frames_gd = common_video.swap_time_and_batch_axes(real_frames) + frames_pd = common_video.swap_time_and_batch_axes(gen_frames) + if actions is not None: + actions = common_video.swap_time_and_batch_axes(actions) + + if self.is_per_pixel_softmax: + frames_pd_shape = common_layers.shape_list(frames_pd) + frames_pd = tf.reshape(frames_pd, [-1, 256]) + frames_pd = tf.to_float(tf.argmax(frames_pd, axis=-1)) + frames_pd = tf.reshape(frames_pd, frames_pd_shape[:-1] + [3]) + + frames_gd = concat_on_y_axis(frames_gd) + frames_pd = concat_on_y_axis(frames_pd) + if actions is not None: + actions = tf.clip_by_value(actions, 0, 1) + summary("action_vid", tf.cast(actions * 255, tf.uint8)) + actions = concat_on_y_axis(actions) + side_by_side_video = tf.concat([frames_gd, frames_pd, actions], axis=2) + else: + side_by_side_video = tf.concat([frames_gd, frames_pd], axis=2) + tf.summary.image("full_video", side_by_side_video) + + def get_input_if_exists(self, features, key, batch_size, num_frames): + if key in features: + x = features[key] + else: + x = tf.zeros((batch_size, num_frames, 1, self.hparams.hidden_size)) + return common_video.swap_time_and_batch_axes(x) + + def construct_model(self, + images, + actions, + rewards): + """Build convolutional lstm video predictor using CDNA, or DNA. + + Args: + images: list of tensors of ground truth image sequences + there should be a 4D image ?xWxHxC for each timestep + actions: list of action tensors + each action should be in the shape ?x1xZ + rewards: list of reward tensors + each reward should be in the shape ?x1xZ + Returns: + gen_images: predicted future image frames + gen_rewards: predicted future rewards + latent_mean: mean of approximated posterior + latent_std: std of approximated posterior + + Raises: + ValueError: if more than 1 mask specified for DNA model. + """ + context_frames = self.hparams.video_num_input_frames + buffer_size = self.hparams.reward_prediction_buffer_size + if buffer_size == 0: + buffer_size = context_frames + if buffer_size > context_frames: + raise ValueError("Buffer size is bigger than context frames %d %d." % + (buffer_size, context_frames)) + + batch_size = common_layers.shape_list(images[0])[0] + ss_func = self.get_scheduled_sample_func(batch_size) + + def process_single_frame(prev_outputs, inputs): + """Process a single frame of the video.""" + cur_image, input_reward, action = inputs + time_step, prev_image, prev_reward, frame_buf, lstm_states = prev_outputs + + # sample from softmax (by argmax). this is noop for non-softmax loss. 
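+ # With per-pixel softmax the previous prediction carries 256 logits per
+ # channel, so it must be collapsed back to pixel values before being fed
+ # into the next step.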
+ prev_image = self.get_sampled_frame(prev_image) + + generated_items = [prev_image] + groundtruth_items = [cur_image] + done_warm_start = tf.greater(time_step, context_frames - 1) + input_image, = self.get_scheduled_sample_inputs( + done_warm_start, groundtruth_items, generated_items, ss_func) + + # Prediction + pred_image, lstm_states, _ = self.construct_predictive_tower( + input_image, None, action, lstm_states, latent) + + if self.hparams.reward_prediction: + reward_input_image = self.get_sampled_frame(pred_image) + if self.hparams.reward_prediction_stop_gradient: + reward_input_image = tf.stop_gradient(reward_input_image) + with tf.control_dependencies([time_step]): + frame_buf = [reward_input_image] + frame_buf[:-1] + pred_reward = self.reward_prediction(frame_buf, None, action, latent) + pred_reward = common_video.decode_to_shape( + pred_reward, common_layers.shape_list(input_reward), "reward_dec") + else: + pred_reward = prev_reward + + time_step += 1 + outputs = (time_step, pred_image, pred_reward, frame_buf, lstm_states) + + return outputs + + # Latent tower + latent = None + if self.hparams.stochastic_model: + latent_mean, latent_std = self.construct_latent_tower(images, time_axis=0) + latent = common_video.get_gaussian_tensor(latent_mean, latent_std) + + # HACK: Do first step outside to initialize all the variables + + lstm_states = [None] * (5 if self.hparams.small_mode else 7) + frame_buffer = [tf.zeros_like(images[0])] * buffer_size + inputs = images[0], rewards[0], actions[0] + init_image_shape = common_layers.shape_list(images[0]) + if self.is_per_pixel_softmax: + init_image_shape[-1] *= 256 + init_image = tf.zeros(init_image_shape, dtype=images.dtype) + prev_outputs = (tf.constant(0), + init_image, + tf.zeros_like(rewards[0]), + frame_buffer, + lstm_states) + + initializers = process_single_frame(prev_outputs, inputs) + first_gen_images = tf.expand_dims(initializers[1], axis=0) + first_gen_rewards = tf.expand_dims(initializers[2], axis=0) + + inputs = (images[1:-1], rewards[1:-1], actions[1:-1]) + + outputs = tf.scan(process_single_frame, inputs, initializers) + gen_images, gen_rewards = outputs[1:3] + + gen_images = tf.concat((first_gen_images, gen_images), axis=0) + gen_rewards = tf.concat((first_gen_rewards, gen_rewards), axis=0) + + if self.hparams.stochastic_model: + return gen_images, gen_rewards, [latent_mean], [latent_std] + else: + return gen_images, gen_rewards, None, None + + def infer(self, features, *args, **kwargs): + """Produce predictions from the model by running it.""" + del args, kwargs + if "targets" not in features: + if "infer_targets" in features: + targets_shape = common_layers.shape_list(features["infer_targets"]) + elif "inputs" in features: + targets_shape = common_layers.shape_list(features["inputs"]) + targets_shape[1] = self.hparams.video_num_target_frames + else: + raise ValueError("no inputs are given.") + features["targets"] = tf.zeros(targets_shape, dtype=tf.float32) + + output, _ = self(features) # pylint: disable=not-callable + + if not isinstance(output, dict): + output = {"targets": output} + + x = output["targets"] + if self.is_per_pixel_softmax: + x_shape = common_layers.shape_list(x) + x = tf.reshape(x, [-1, x_shape[-1]]) + x = tf.argmax(x, axis=-1) + x = tf.reshape(x, x_shape[:-1]) + else: + x = tf.squeeze(x, axis=-1) + x = tf.to_int64(tf.round(x)) + output["targets"] = x + if self.hparams.reward_prediction: + output["target_reward"] = tf.argmax(output["target_reward"], axis=-1) + + # only required for decoding. 
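+ # The t2t decoding utilities expect "outputs" and "scores" entries, so the
+ # predicted frames are simply mirrored into both keys.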
+ output["outputs"] = output["targets"] + output["scores"] = output["targets"] + return output + + def body(self, features): + hparams = self.hparams + batch_size = common_layers.shape_list(features["inputs"])[0] + + # Swap time and batch axes. + input_frames = common_video.swap_time_and_batch_axes(features["inputs"]) + target_frames = common_video.swap_time_and_batch_axes(features["targets"]) + + # Get actions if exist otherwise use zeros + input_actions = self.get_input_if_exists( + features, "input_action", batch_size, hparams.video_num_input_frames) + target_actions = self.get_input_if_exists( + features, "target_action", batch_size, hparams.video_num_target_frames) + + # Get rewards if exist otherwise use zeros + input_rewards = self.get_input_if_exists( + features, "input_reward", batch_size, hparams.video_num_input_frames) + target_rewards = self.get_input_if_exists( + features, "target_reward", batch_size, hparams.video_num_target_frames) + + all_actions = tf.concat([input_actions, target_actions], axis=0) + all_rewards = tf.concat([input_rewards, target_rewards], axis=0) + all_frames = tf.concat([input_frames, target_frames], axis=0) + + # Each image is being used twice, in latent tower and main tower. + # This is to make sure we are using the *same* image for both, ... + # ... given how TF queues work. + # NOT sure if this is required at all. Doesn"t hurt though! :) + all_frames = tf.identity(all_frames) + + gen_images, gen_rewards, latent_means, latent_stds = self.construct_model( + images=all_frames, + actions=all_actions, + rewards=all_rewards, + ) + + extra_loss = self.get_extra_loss( + latent_means=latent_means, + latent_stds=latent_stds, + true_frames=all_frames, + gen_frames=gen_images) + + # Visualize predictions in Tensorboard + if self.is_training: + self.visualize_predictions(all_frames[1:], gen_images) + + # Ignore the predictions from the input frames. + # This is NOT the same as original paper/implementation. + predictions = gen_images[hparams.video_num_input_frames-1:] + reward_pred = gen_rewards[hparams.video_num_input_frames-1:] + reward_pred = tf.squeeze(reward_pred, axis=2) # Remove extra dimension. + + # Swap back time and batch axes. + predictions = common_video.swap_time_and_batch_axes(predictions) + reward_pred = common_video.swap_time_and_batch_axes(reward_pred) + + if self.is_training and hparams.internal_loss: + # add the loss for input frames as well. + extra_gts = all_frames[1:hparams.video_num_input_frames] + extra_gts = common_video.swap_time_and_batch_axes(extra_gts) + extra_pds = gen_images[:hparams.video_num_input_frames-1] + extra_pds = common_video.swap_time_and_batch_axes(extra_pds) + extra_raw_gts = features["inputs_raw"][:, 1:] + recon_loss = self.get_extra_internal_loss( + extra_raw_gts, extra_gts, extra_pds) + extra_loss += recon_loss + + return_targets = predictions + if hparams.reward_prediction: + return_targets = {"targets": predictions, "target_reward": reward_pred} + + return return_targets, extra_loss + + +@registry.register_model +class NextFrameSv2pTwoFrames(NextFrameSv2pLegacy): + """Stochastic next-frame model with 2 frames posterior.""" + + def construct_model(self, images, actions, rewards): + images = tf.unstack(images, axis=0) + actions = tf.unstack(actions, axis=0) + rewards = tf.unstack(rewards, axis=0) + + batch_size = common_layers.shape_list(images[0])[0] + context_frames = self.hparams.video_num_input_frames + + # Predicted images and rewards. 
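For readers tracing shapes through body() above: features arrive batch-major as [batch, time, height, width, channels], the model works time-major, and the generated sequence is one frame shorter than the concatenated input because every step predicts the following frame. A small NumPy sketch of that bookkeeping, assuming common_video.swap_time_and_batch_axes is the usual transpose of the first two axes:

# Shape sketch only; the sizes are invented for illustration.
import numpy as np

batch, in_frames, out_frames = 8, 4, 3
inputs = np.zeros((batch, in_frames, 64, 64, 3))    # batch-major features
targets = np.zeros((batch, out_frames, 64, 64, 3))

def swap_time_and_batch_axes(x):
  # Assumed equivalent of the helper: [B, T, ...] <-> [T, B, ...].
  return np.swapaxes(x, 0, 1)

all_frames = np.concatenate(
    [swap_time_and_batch_axes(inputs), swap_time_and_batch_axes(targets)],
    axis=0)                                         # [T_in + T_out, B, H, W, C]
assert all_frames.shape[0] == in_frames + out_frames

# Each step predicts the next frame, so the model emits T_in + T_out - 1
# frames, and the prediction aligned with the first target frame is the one
# produced at step in_frames - 1, which is exactly the slice taken in body().
gen_images = np.zeros((in_frames + out_frames - 1, batch, 64, 64, 3))
predictions = gen_images[in_frames - 1:]
assert predictions.shape[0] == out_frames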
+ gen_rewards, gen_images, latent_means, latent_stds = [], [], [], [] + + # LSTM states. + lstm_state = [None] * 7 + + # Create scheduled sampling function + ss_func = self.get_scheduled_sample_func(batch_size) + + pred_image = tf.zeros_like(images[0]) + pred_reward = tf.zeros_like(rewards[0]) + latent = None + for timestep, image, action, reward in zip( + range(len(images)-1), images[:-1], actions[:-1], rewards[:-1]): + # Scheduled Sampling + done_warm_start = timestep > context_frames - 1 + groundtruth_items = [image, reward] + generated_items = [pred_image, pred_reward] + input_image, input_reward = self.get_scheduled_sample_inputs( + done_warm_start, groundtruth_items, generated_items, ss_func) + + # Latent + # TODO(mbz): should we use input_image iunstead of image? + latent_images = tf.stack([image, images[timestep+1]], axis=0) + latent_mean, latent_std = self.construct_latent_tower( + latent_images, time_axis=0) + latent = common_video.get_gaussian_tensor(latent_mean, latent_std) + latent_means.append(latent_mean) + latent_stds.append(latent_std) + + # Prediction + pred_image, lstm_state, _ = self.construct_predictive_tower( + input_image, input_reward, action, lstm_state, latent) + + if self.hparams.reward_prediction: + pred_reward = self.reward_prediction( + pred_image, input_reward, action, latent) + pred_reward = common_video.decode_to_shape( + pred_reward, common_layers.shape_list(input_reward), "reward_dec") + else: + pred_reward = input_reward + + gen_images.append(pred_image) + gen_rewards.append(pred_reward) + + gen_images = tf.stack(gen_images, axis=0) + gen_rewards = tf.stack(gen_rewards, axis=0) + + return gen_images, gen_rewards, latent_means, latent_stds diff --git a/tensor2tensor/models/video/sv2p_params.py b/tensor2tensor/models/video/sv2p_params.py new file mode 100644 index 000000000..6a151dcab --- /dev/null +++ b/tensor2tensor/models/video/sv2p_params.py @@ -0,0 +1,152 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
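In the two-frames variant just above, every timestep gets its own posterior built from the current and the next ground-truth frame, and the latent is drawn with the usual reparameterization trick. A minimal sketch of that sampling step, assuming common_video.get_gaussian_tensor is the standard mean + std * noise formulation (shapes are illustrative):

# Reparameterized Gaussian sample, as assumed for get_gaussian_tensor.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def gaussian_sample(mean, std):
  # z = mu + sigma * eps with eps ~ N(0, 1); gradients flow through mu, sigma.
  eps = tf.random_normal(tf.shape(mean))
  return mean + std * eps

latent_mean = tf.zeros([8, 16])   # e.g. one 16-dim latent per batch element
latent_std = tf.ones([8, 16])
latent = gaussian_sample(latent_mean, latent_std)

with tf.Session() as sess:
  print(sess.run(latent).shape)   # (8, 16)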
+ +"""Param sets for SV2P model.""" + +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import modalities +from tensor2tensor.models.video import basic_stochastic +from tensor2tensor.utils import registry + + +@registry.register_hparams +def next_frame_sv2p(): + """SV2P model hparams.""" + hparams = basic_stochastic.next_frame_basic_stochastic() + hparams.optimizer = "true_adam" + hparams.learning_rate_schedule = "constant" + hparams.learning_rate_constant = 1e-3 + hparams.video_num_input_frames = 1 + hparams.video_num_target_frames = 3 + hparams.batch_size = 16 + hparams.bottom = { + "inputs": modalities.video_raw_bottom, + "targets": modalities.video_raw_targets_bottom, + } + hparams.loss = { + "targets": modalities.video_l2_raw_loss, + } + hparams.top = { + "targets": modalities.video_raw_top, + } + hparams.video_modality_loss_cutoff = 0.0 + hparams.scheduled_sampling_mode = "count" + hparams.scheduled_sampling_k = 900.0 + hparams.add_hparam("reward_prediction", True) + hparams.add_hparam("reward_prediction_stop_gradient", False) + hparams.add_hparam("reward_prediction_buffer_size", 0) + hparams.add_hparam("model_options", "CDNA") + hparams.add_hparam("num_masks", 10) + hparams.add_hparam("multi_latent", False) + hparams.add_hparam("relu_shift", 1e-12) + hparams.add_hparam("dna_kernel_size", 5) + hparams.add_hparam("upsample_method", "conv2d_transpose") + hparams.add_hparam("reward_model", "basic") + hparams.add_hparam("visualize_logits_histogram", True) + hparams.add_hparam("action_normalize", False) + return hparams + + +@registry.register_hparams +def next_frame_sv2p_discrete(): + """SV2P discrete model hparams.""" + hparams = next_frame_sv2p() + hparams.action_injection = "multiplicative" + hparams.small_mode = True + hparams.add_hparam("bottleneck_bits", 128) + hparams.add_hparam("bottleneck_noise", 0.02) + hparams.add_hparam("discrete_warmup_steps", 40000) + hparams.add_hparam("full_latent_tower", False) + hparams.add_hparam("latent_predictor_state_size", 128) + hparams.add_hparam("latent_predictor_temperature", 0.5) + hparams.add_hparam("discretize_warmup_steps", 40000) + return hparams + + +@registry.register_hparams +def next_frame_sv2p_atari(): + """SV2P model for atari.""" + hparams = next_frame_sv2p() + hparams.video_num_input_frames = 4 + hparams.video_num_target_frames = 4 + hparams.action_injection = "multiplicative" + hparams.num_iterations_1st_stage = 12000 + hparams.num_iterations_2nd_stage = 12000 + hparams.anneal_end = 40000 + hparams.latent_loss_multiplier_schedule = "noisy_linear_cosine_decay" + hparams.latent_loss_multiplier = 1e-3 + hparams.information_capacity = 0.0 + hparams.small_mode = True + return hparams + + +@registry.register_hparams +def next_frame_sv2p_atari_softmax(): + """SV2P model for atari with softmax.""" + hparams = next_frame_sv2p_atari() + hparams.bottom = {} + hparams.loss = {} + hparams.top = {} + hparams.internal_loss = True + return hparams + + +@registry.register_hparams +def next_frame_sv2p_atari_deterministic(): + """Deterministic for atari.""" + hparams = next_frame_sv2p_atari() + hparams.stochastic_model = False + return hparams + + +@registry.register_hparams +def next_frame_sv2p_atari_softmax_deterministic(): + """Deterministic for atari.""" + hparams = next_frame_sv2p_atari_softmax() + hparams.stochastic_model = False + return hparams + + +@registry.register_hparams +def next_frame_sv2p_tiny(): + """Tiny SV2P model.""" + hparams = next_frame_sv2p_atari_softmax() + 
hparams.batch_size = 2 + hparams.tiny_mode = True + hparams.num_masks = 1 + hparams.video_modality_loss_cutoff = 0.4 + hparams.video_num_input_frames = 4 + hparams.video_num_target_frames = 4 + return hparams + + +@registry.register_hparams +def next_frame_sv2p_tiny_external(): + """Tiny SV2P model with external loss.""" + hparams = next_frame_sv2p_tiny() + hparams.internal_loss = False + return hparams + + +@registry.register_hparams +def next_frame_sv2p_cutoff(): + """SV2P model with additional cutoff in L2 loss for environments like pong.""" + hparams = next_frame_sv2p() + hparams.video_modality_loss_cutoff = 0.4 + hparams.video_num_input_frames = 4 + hparams.video_num_target_frames = 1 + return hparams diff --git a/tensor2tensor/models/video/sv2p_test.py b/tensor2tensor/models/video/sv2p_test.py new file mode 100644 index 000000000..c0e40e96c --- /dev/null +++ b/tensor2tensor/models/video/sv2p_test.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Basic tests for SV2P model.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.models.video import sv2p +from tensor2tensor.models.video import sv2p_params +from tensor2tensor.models.video import tests_utils + +import tensorflow.compat.v1 as tf + + +class NextFrameTest(tests_utils.BaseNextFrameTest): + + def testSv2p(self): + self.TestOnVariousInputOutputSizes( + sv2p_params.next_frame_sv2p(), + sv2p.NextFrameSv2p, + 1, + False) + + def testSv2pWithActions(self): + self.TestWithActions( + sv2p_params.next_frame_sv2p(), + sv2p.NextFrameSv2p, + 1, + False) + + def testSv2pWithActionsAndRewards(self): + hp = sv2p_params.next_frame_sv2p() + hp.internal_loss = True + self.TestWithActionAndRewards( + hp, + sv2p.NextFrameSv2p, + 1, + False) + + def testSv2pWithActionsAndRewardsExternalLoss(self): + hp = sv2p_params.next_frame_sv2p() + hp.internal_loss = False + self.TestWithActionAndRewards( + hp, + sv2p.NextFrameSv2p, + 1, + False) + + def testSv2pTwoFrames(self): + self.TestOnVariousInputOutputSizes( + sv2p_params.next_frame_sv2p(), + sv2p.NextFrameSv2pTwoFrames, + 1, + False) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/models/video/tests_utils.py b/tensor2tensor/models/video/tests_utils.py new file mode 100644 index 000000000..2a38b1cf3 --- /dev/null +++ b/tensor2tensor/models/video/tests_utils.py @@ -0,0 +1,294 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
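Because the functions above are registered hparams sets, a configuration can be pulled in directly and tweaked before training, which is how the tests use them; the same names are addressable from the command line via --hparams_set. A usage sketch (the overrides are arbitrary examples, not recommended settings):

# Fetch a registered SV2P config and override a few fields.
from tensor2tensor.models.video import sv2p_params

hparams = sv2p_params.next_frame_sv2p()
hparams.video_num_input_frames = 2       # condition on 2 frames
hparams.video_num_target_frames = 8      # predict 8 frames
hparams.reward_prediction = False        # video-only prediction
print(hparams.scheduled_sampling_k)      # 900.0

# Equivalent selection by name, e.g.:
#   t2t-trainer --model=next_frame_sv2p --hparams_set=next_frame_sv2p_atari \
#     --problem=... --data_dir=... --output_dir=...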
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilties for testing video models.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np + +from tensor2tensor.data_generators import video_generated # pylint: disable=unused-import + +from tensor2tensor.layers import modalities +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +def fill_hparams(hparams, in_frames, out_frames): + hparams.video_num_input_frames = in_frames + hparams.video_num_target_frames = out_frames + problem = registry.problem("video_stochastic_shapes10k") + p_hparams = problem.get_hparams(hparams) + hparams.problem = problem + hparams.problem_hparams = p_hparams + hparams.tiny_mode = True + hparams.reward_prediction = False + return hparams + + +def action_modalities(hparams): + """Modalities with actions.""" + hparams.problem_hparams.modality = { + "inputs": modalities.ModalityType.VIDEO_L2_RAW, + "input_action": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.VIDEO_L2_RAW, + "target_action": modalities.ModalityType.SYMBOL, + } + hparams.problem_hparams.vocab_size = { + "inputs": 256, + "input_action": 5, + "targets": 256, + "target_action": 5, + } + return hparams + + +def full_modalities(hparams): + """Full modalities with actions and rewards.""" + hparams.problem_hparams.modality = { + "inputs": modalities.ModalityType.VIDEO_L2_RAW, + "input_action": modalities.ModalityType.SYMBOL, + "input_reward": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.VIDEO_L2_RAW, + "target_action": modalities.ModalityType.SYMBOL, + "target_reward": modalities.ModalityType.SYMBOL, + } + hparams.problem_hparams.vocab_size = { + "inputs": 256, + "input_action": 5, + "input_reward": 3, + "targets": 256, + "target_action": 5, + "target_reward": 3, + } + hparams.force_full_predict = True + return hparams + + +def create_basic_features(in_frames, out_frames): + x = np.random.randint(0, 256, size=(8, in_frames, 64, 64, 3)) + y = np.random.randint(0, 256, size=(8, out_frames, 64, 64, 3)) + features = { + "inputs": tf.constant(x, dtype=tf.int32), + "targets": tf.constant(y, dtype=tf.int32), + } + return features + + +def create_action_features(in_frames, out_frames): + features = create_basic_features(in_frames, out_frames) + x = np.random.randint(0, 5, size=(8, in_frames, 1)) + y = np.random.randint(0, 5, size=(8, out_frames, 1)) + features["input_action"] = tf.constant(x, dtype=tf.int32) + features["target_action"] = tf.constant(y, dtype=tf.int32) + return features + + +def create_full_features(in_frames, out_frames): + features = create_basic_features(in_frames, out_frames) + x = np.random.randint(0, 5, size=(8, in_frames, 1)) + y = np.random.randint(0, 5, size=(8, out_frames, 1)) + features["input_reward"] = tf.constant(x, dtype=tf.int32) + features["target_reward"] = tf.constant(y, dtype=tf.int32) + return features + + +def get_tensor_shape(tensor): + return tuple([d.value for d in tensor.shape]) + + +class BaseNextFrameTest(tf.test.TestCase): + """Base helper class for next frame tests.""" + + def RunModel(self, model, hparams, features): + with tf.Session() as session: + model = model(hparams, tf_estimator.ModeKeys.TRAIN) + logits, _ = model(features) + session.run(tf.global_variables_initializer()) + res = session.run(logits) + return res + + def InferModel(self, 
model, hparams, features): + with tf.Session() as session: + model = model(hparams, tf_estimator.ModeKeys.PREDICT) + output = model.infer(features) + session.run(tf.global_variables_initializer()) + res = session.run(output) + return res + + def TestVideoModel(self, + in_frames, + out_frames, + hparams, + model, + expected_last_dim, + upsample_method="conv2d_transpose"): + hparams = fill_hparams(hparams, in_frames, out_frames) + hparams.upsample_method = upsample_method + + features = create_basic_features(in_frames, out_frames) + output = self.RunModel(model, hparams, features) + + targets = features["targets"] + expected_shape = get_tensor_shape(targets) + (expected_last_dim,) + self.assertEqual(output.shape, expected_shape) + + def TestVideoModelInfer(self, + in_frames, + out_frames, + hparams, + model, + expected_last_dim, + upsample_method="conv2d_transpose"): + del expected_last_dim + hparams = fill_hparams(hparams, in_frames, out_frames) + hparams.upsample_method = upsample_method + + features = create_basic_features(in_frames, out_frames) + output = self.InferModel(model, hparams, features) + + self.assertTrue(isinstance(output, dict)) + self.assertTrue("outputs" in output.keys()) + self.assertTrue("scores" in output.keys()) + self.assertTrue("targets" in output.keys()) + expected_shape = get_tensor_shape(features["targets"]) + self.assertEqual(output["targets"].shape, expected_shape) + + def TestVideoModelWithActions(self, + in_frames, + out_frames, + hparams, + model, + expected_last_dim): + hparams = fill_hparams(hparams, in_frames, out_frames) + hparams = action_modalities(hparams) + hparams.reward_prediction = False + + features = create_action_features(in_frames, out_frames) + output = self.RunModel(model, hparams, features) + + targets = features["targets"] + expected_shape = get_tensor_shape(targets) + (expected_last_dim,) + self.assertEqual(output.shape, expected_shape) + + def TestVideoModelWithActionsInfer(self, + in_frames, + out_frames, + hparams, + model, + expected_last_dim): + del expected_last_dim + hparams = fill_hparams(hparams, in_frames, out_frames) + hparams = action_modalities(hparams) + hparams.reward_prediction = False + + features = create_action_features(in_frames, out_frames) + output = self.InferModel(model, hparams, features) + + self.assertTrue(isinstance(output, dict)) + self.assertTrue("outputs" in output.keys()) + self.assertTrue("scores" in output.keys()) + self.assertTrue("targets" in output.keys()) + expected_shape = get_tensor_shape(features["targets"]) + self.assertEqual(output["targets"].shape, expected_shape) + + def TestVideoModelWithActionAndRewards(self, + in_frames, + out_frames, + hparams, + model, + expected_last_dim): + hparams = fill_hparams(hparams, in_frames, out_frames) + hparams = full_modalities(hparams) + hparams.reward_prediction = True + + features = create_full_features(in_frames, out_frames) + + res = self.RunModel(model, hparams, features) + + output, targets = res["targets"], features["targets"] + expected_shape = get_tensor_shape(targets) + (expected_last_dim,) + self.assertEqual(output.shape, expected_shape) + + output, targets = res["target_reward"], features["target_reward"] + # Assuming Symbol Modality + expected_shape = get_tensor_shape(targets)[:2] + (1, 1, 1, 1, 3,) + self.assertEqual(output.shape, expected_shape) + + def TestVideoModelWithActionAndRewardsInfer(self, + in_frames, + out_frames, + hparams, + model, + expected_last_dim): + del expected_last_dim + hparams = fill_hparams(hparams, in_frames, out_frames) 
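Putting the helpers above together: an action- and reward-conditioned model sees 5-D video tensors plus one integer action and one integer reward per frame, and the modality / vocab_size overrides in full_modalities tell the model how to embed them. A compact sketch of the resulting feature dictionary (sizes mirror the helpers, values are random):

# Feature dictionary for a video model with actions and rewards.
import numpy as np
import tensorflow.compat.v1 as tf

batch, in_frames, out_frames = 8, 4, 1

def int_feature(low, high, shape):
  return tf.constant(np.random.randint(low, high, size=shape), dtype=tf.int32)

features = {
    "inputs": int_feature(0, 256, (batch, in_frames, 64, 64, 3)),
    "targets": int_feature(0, 256, (batch, out_frames, 64, 64, 3)),
    # One discrete action (vocab 5) and reward (vocab 3) per frame.
    "input_action": int_feature(0, 5, (batch, in_frames, 1)),
    "target_action": int_feature(0, 5, (batch, out_frames, 1)),
    "input_reward": int_feature(0, 3, (batch, in_frames, 1)),
    "target_reward": int_feature(0, 3, (batch, out_frames, 1)),
}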
+ hparams = full_modalities(hparams) + hparams.reward_prediction = True + + features = create_full_features(in_frames, out_frames) + + output = self.InferModel(model, hparams, features) + + self.assertTrue(isinstance(output, dict)) + self.assertTrue("outputs" in output.keys()) + self.assertTrue("scores" in output.keys()) + self.assertTrue("targets" in output.keys()) + self.assertTrue("target_reward" in output.keys()) + expected_shape = get_tensor_shape(features["targets"]) + self.assertEqual(output["targets"].shape, expected_shape) + expected_shape = get_tensor_shape(features["target_reward"])[:2] + self.assertEqual(output["target_reward"].shape, expected_shape) + + def TestOnVariousInputOutputSizes( + self, hparams, model, expected_last_dim, test_infer=True): + test_funcs = [self.TestVideoModel] + if test_infer: + test_funcs += [self.TestVideoModelInfer] + for test_func in test_funcs: + test_func(1, 1, hparams, model, expected_last_dim) + test_func(1, 6, hparams, model, expected_last_dim) + test_func(4, 1, hparams, model, expected_last_dim) + test_func(7, 5, hparams, model, expected_last_dim) + + def TestWithActions(self, hparams, model, expected_last_dim, test_infer=True): + test_funcs = [self.TestVideoModelWithActions] + if test_infer: + test_funcs += [self.TestVideoModelWithActionsInfer] + for test_func in test_funcs: + test_func(1, 1, hparams, model, expected_last_dim) + test_func(1, 6, hparams, model, expected_last_dim) + test_func(4, 1, hparams, model, expected_last_dim) + test_func(7, 5, hparams, model, expected_last_dim) + + def TestWithActionAndRewards( + self, hparams, model, expected_last_dim, test_infer=True): + test_funcs = [self.TestVideoModelWithActionAndRewards] + if test_infer: + test_funcs += [self.TestVideoModelWithActionAndRewardsInfer] + for test_func in test_funcs: + test_func(1, 1, hparams, model, expected_last_dim) + test_func(1, 6, hparams, model, expected_last_dim) + test_func(4, 1, hparams, model, expected_last_dim) + test_func(7, 5, hparams, model, expected_last_dim) + + def TestOnVariousUpSampleLayers(self, hparams, model, expected_last_dim): + self.TestVideoModel(4, 1, hparams, model, expected_last_dim, + upsample_method="bilinear_upsample_conv") + self.TestVideoModel(4, 1, hparams, model, expected_last_dim, + upsample_method="nn_upsample_conv") diff --git a/tensor2tensor/models/xception.py b/tensor2tensor/models/xception.py index b6e271c36..83b6697c6 100644 --- a/tensor2tensor/models/xception.py +++ b/tensor2tensor/models/xception.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
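TestOnVariousUpSampleLayers above only flips a single hparam: the decoder's upsampling is chosen by hparams.upsample_method, with "conv2d_transpose" as the default in the SV2P configs and "bilinear_upsample_conv" and "nn_upsample_conv" as the alternatives the test exercises. A minimal override sketch:

from tensor2tensor.models.video import sv2p_params

hparams = sv2p_params.next_frame_sv2p()
print(hparams.upsample_method)                       # "conv2d_transpose"
hparams.upsample_method = "bilinear_upsample_conv"   # one of the tested variants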
@@ -18,22 +19,21 @@ from __future__ import division from __future__ import print_function -# Dependency imports +import math +from six.moves import range # pylint: disable=redefined-builtin -from six.moves import xrange # pylint: disable=redefined-builtin - -from tensor2tensor.models import common_hparams -from tensor2tensor.models import common_layers +from tensor2tensor.layers import common_hparams +from tensor2tensor.layers import common_layers from tensor2tensor.utils import registry from tensor2tensor.utils import t2t_model -import tensorflow as tf +import tensorflow.compat.v1 as tf -def residual_block(x, hparams, train): +def residual_block(x, hparams): """A stack of convolution blocks with residual connection.""" k = (hparams.kernel_height, hparams.kernel_width) - dilations_and_kernels = [((1, 1), k) for _ in xrange(3)] + dilations_and_kernels = [((1, 1), k) for _ in range(3)] y = common_layers.subseparable_conv_block( x, hparams.hidden_size, @@ -42,31 +42,109 @@ def residual_block(x, hparams, train): separability=0, name="residual_block") x = common_layers.layer_norm(x + y, hparams.hidden_size, name="lnorm") - return tf.nn.dropout(x, 1.0 - hparams.dropout * tf.to_float(train)) + return tf.nn.dropout(x, 1.0 - hparams.dropout) -def xception_internal(inputs, hparams, train): +def xception_internal(inputs, hparams): """Xception body.""" with tf.variable_scope("xception"): cur = inputs - for i in xrange(hparams.num_hidden_layers): + + if cur.get_shape().as_list()[1] > 200: + # Large image, Xception entry flow + cur = xception_entry(cur, hparams.hidden_size) + else: + # Small image, conv + cur = common_layers.conv_block( + cur, + hparams.hidden_size, [((1, 1), (3, 3))], + first_relu=False, + padding="SAME", + force2d=True, + name="small_image_conv") + + for i in range(hparams.num_hidden_layers): with tf.variable_scope("layer_%d" % i): - cur = residual_block(cur, hparams, train) - return cur + cur = residual_block(cur, hparams) + + return xception_exit(cur) + + +def xception_entry(inputs, hidden_dim): + """Xception entry flow.""" + with tf.variable_scope("xception_entry"): + + def xnet_resblock(x, filters, res_relu, name): + """Resblock.""" + with tf.variable_scope(name): + y = common_layers.separable_conv_block( + x, + filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], + first_relu=True, + padding="SAME", + force2d=True, + name="sep_conv_block") + y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2)) + return y + common_layers.conv_block( + x, + filters, [((1, 1), (1, 1))], + padding="SAME", + strides=(2, 2), + first_relu=res_relu, + force2d=True, + name="res_conv0") + + tf.summary.image("inputs", inputs, max_outputs=2) + x = common_layers.conv_block( + inputs, + 32, [((1, 1), (3, 3))], + first_relu=False, + padding="SAME", + strides=(2, 2), + force2d=True, + name="conv0") + x = common_layers.conv_block( + x, 64, [((1, 1), (3, 3))], padding="SAME", force2d=True, name="conv1") + x = xnet_resblock(x, min(128, hidden_dim), True, "block0") + x = xnet_resblock(x, min(256, hidden_dim), False, "block1") + return xnet_resblock(x, hidden_dim, False, "block2") + + +def xception_exit(inputs): + """Xception exit flow.""" + with tf.variable_scope("xception_exit"): + x = inputs + x_shape = x.get_shape().as_list() + if x_shape[1] is None or x_shape[2] is None: + length_float = tf.to_float(tf.shape(x)[1]) + length_float *= tf.to_float(tf.shape(x)[2]) + spatial_dim_float = tf.sqrt(length_float) + spatial_dim = tf.to_int32(spatial_dim_float) + x_depth = x_shape[3] + x = tf.reshape(x, [-1, 
spatial_dim, spatial_dim, x_depth]) + elif x_shape[1] != x_shape[2]: + spatial_dim = int(math.sqrt(float(x_shape[1] * x_shape[2]))) + if spatial_dim * spatial_dim != x_shape[1] * x_shape[2]: + raise ValueError("Assumed inputs were square-able but they were " + "not. Shape: %s" % x_shape) + x = tf.reshape(x, [-1, spatial_dim, spatial_dim, x_depth]) + + x = common_layers.conv_block_downsample(x, (3, 3), (2, 2), "SAME") + return tf.nn.relu(x) @registry.register_model class Xception(t2t_model.T2TModel): - def model_fn_body(self, features, train): - return xception_internal(features["inputs"], self._hparams, train) + def body(self, features): + return xception_internal(features["inputs"], self._hparams) @registry.register_hparams def xception_base(): """Set of hyperparameters.""" hparams = common_hparams.basic_params1() - hparams.batch_size = 4096 + hparams.batch_size = 128 hparams.hidden_size = 768 hparams.dropout = 0.2 hparams.symbol_dropout = 0.2 @@ -75,7 +153,7 @@ def xception_base(): hparams.num_hidden_layers = 8 hparams.kernel_height = 3 hparams.kernel_width = 3 - hparams.learning_rate_decay_scheme = "exp50k" + hparams.learning_rate_decay_scheme = "exp" hparams.learning_rate = 0.05 hparams.learning_rate_warmup_steps = 3000 hparams.initializer_gain = 1.0 @@ -85,5 +163,24 @@ def xception_base(): hparams.optimizer_adam_epsilon = 1e-6 hparams.optimizer_adam_beta1 = 0.85 hparams.optimizer_adam_beta2 = 0.997 - hparams.add_hparam("imagenet_use_2d", True) + return hparams + + +@registry.register_hparams +def xception_tiny(): + hparams = xception_base() + hparams.batch_size = 2 + hparams.hidden_size = 64 + hparams.num_hidden_layers = 2 + hparams.learning_rate_decay_scheme = "none" + return hparams + + +@registry.register_hparams +def xception_tiny_tpu(): + hparams = xception_base() + hparams.batch_size = 2 + hparams.num_hidden_layers = 2 + hparams.hidden_size = 128 + hparams.optimizer = "true_adam" return hparams diff --git a/tensor2tensor/models/xception_test.py b/tensor2tensor/models/xception_test.py index 106604659..36ca2d1be 100644 --- a/tensor2tensor/models/xception_test.py +++ b/tensor2tensor/models/xception_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
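The reshape branch in xception_exit above only succeeds when the static spatial dimensions multiply to a perfect square; otherwise it raises. A small worked example of that check, in NumPy with illustrative shapes:

# Fold a non-square feature map back into a square spatial grid, mirroring
# the static-shape branch of xception_exit.
import math
import numpy as np

def fold_to_square(x):
  batch, height, width, depth = x.shape
  spatial_dim = int(math.sqrt(float(height * width)))
  if spatial_dim * spatial_dim != height * width:
    raise ValueError("Assumed inputs were square-able but they were not. "
                     "Shape: %s" % str(x.shape))
  return x.reshape(batch, spatial_dim, spatial_dim, depth)

ok = np.zeros((2, 16, 4, 8))       # 16 * 4 = 64 folds into 8 x 8
print(fold_to_square(ok).shape)    # (2, 8, 8, 8)

bad = np.zeros((2, 5, 4, 8))       # 5 * 4 = 20 is not a perfect square
try:
  fold_to_square(bad)
except ValueError as err:
  print(err)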
@@ -18,36 +19,47 @@ from __future__ import division from __future__ import print_function -# Dependency imports - import numpy as np from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.layers import modalities from tensor2tensor.models import xception -import tensorflow as tf +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator class XceptionTest(tf.test.TestCase): - def testXception(self): + def _test_xception(self, img_size): vocab_size = 9 - x = np.random.random_integers(1, high=vocab_size - 1, size=(3, 5, 1, 1)) - y = np.random.random_integers(1, high=vocab_size - 1, size=(3, 1, 1, 1)) - hparams = xception.xception_base() - p_hparams = problem_hparams.test_problem_hparams(hparams, vocab_size, - vocab_size) + batch_size = 3 + x = np.random.randint( + 256, size=(batch_size, img_size, img_size, 3)) + y = np.random.randint( + 1, high=vocab_size, size=(batch_size, 1, 1, 1)) + hparams = xception.xception_tiny() + p_hparams = problem_hparams.test_problem_hparams(vocab_size, + vocab_size, + hparams) + p_hparams.modality["inputs"] = modalities.ModalityType.IMAGE + p_hparams.modality["targets"] = modalities.ModalityType.CLASS_LABEL with self.test_session() as session: features = { "inputs": tf.constant(x, dtype=tf.int32), "targets": tf.constant(y, dtype=tf.int32), } - model = xception.Xception(hparams, p_hparams) - sharded_logits, _, _ = model.model_fn(features, True) - logits = tf.concat(sharded_logits, 0) + model = xception.Xception(hparams, tf_estimator.ModeKeys.TRAIN, p_hparams) + logits, _ = model(features) session.run(tf.global_variables_initializer()) res = session.run(logits) - self.assertEqual(res.shape, (3, 5, 1, 1, vocab_size)) + self.assertEqual(res.shape, (batch_size, 1, 1, 1, vocab_size)) + + def testXceptionSmallImage(self): + self._test_xception(img_size=9) + + def testXceptionLargeImage(self): + self._test_xception(img_size=256) if __name__ == "__main__": diff --git a/tensor2tensor/notebooks/Transformer_translate.ipynb b/tensor2tensor/notebooks/Transformer_translate.ipynb new file mode 100644 index 000000000..4e9925498 --- /dev/null +++ b/tensor2tensor/notebooks/Transformer_translate.ipynb @@ -0,0 +1,1105 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "Transformer_translate.ipynb", + "version": "0.3.2", + "provenance": [], + "collapsed_sections": [], + "toc_visible": true, + "include_colab_link": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "view-in-github", + "colab_type": "text" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "e7PMze9tKHX9", + "colab_type": "text" + }, + "source": [ + "# Welcome to the [Tensor2Tensor](https://github.com/tensorflow/tensor2tensor) Colab\n", + "\n", + "Tensor2Tensor, or T2T for short, is a library of deep learning models and datasets designed to make deep learning more accessible and [accelerate ML research](https://research.googleblog.com/2017/06/accelerating-deep-learning-research.html). In this notebook we will see how to use this library for a translation task by exploring the necessary steps. We will see how to define a problem, generate the data, train the model and test the quality of it, and we will translate our sequences and we visualize the attention. We will also see how to download a pre-trained model." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "KC8jNpnyKJdm", + "colab_type": "code", + "cellView": "form", + "colab": {} + }, + "source": [ + "#@title\n", + "# Copyright 2018 Google LLC.\n", + "\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "AYUy570fKRcw", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Install deps\n", + "!pip install -q -U tensor2tensor" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "hEhFfyVNbB_D", + "colab_type": "text" + }, + "source": [ + "#1. Initialization\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "i23pCAVwegx3", + "colab_type": "text" + }, + "source": [ + "##1.1. Make some directories" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "oUf4e18_8E31", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import sys\n", + "if 'google.colab' in sys.modules: # Colab-only TensorFlow version selector\n", + " %tensorflow_version 1.x\n", + "import tensorflow as tf\n", + "import os\n", + "\n", + "DATA_DIR = os.path.expanduser(\"/t2t/data\") # This folder contain the data\n", + "TMP_DIR = os.path.expanduser(\"/t2t/tmp\")\n", + "TRAIN_DIR = os.path.expanduser(\"/t2t/train\") # This folder contain the model\n", + "EXPORT_DIR = os.path.expanduser(\"/t2t/export\") # This folder contain the exported model for production\n", + "TRANSLATIONS_DIR = os.path.expanduser(\"/t2t/translation\") # This folder contain all translated sequence\n", + "EVENT_DIR = os.path.expanduser(\"/t2t/event\") # Test the BLEU score\n", + "USR_DIR = os.path.expanduser(\"/t2t/user\") # This folder contains our data that we want to add\n", + " \n", + "tf.gfile.MakeDirs(DATA_DIR)\n", + "tf.gfile.MakeDirs(TMP_DIR)\n", + "tf.gfile.MakeDirs(TRAIN_DIR)\n", + "tf.gfile.MakeDirs(EXPORT_DIR)\n", + "tf.gfile.MakeDirs(TRANSLATIONS_DIR)\n", + "tf.gfile.MakeDirs(EVENT_DIR)\n", + "tf.gfile.MakeDirs(USR_DIR)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HIuzsMzgbLv9", + "colab_type": "text" + }, + "source": [ + "## 1.2. 
Init parameters\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ZQaURmfKBGus", + "colab_type": "code", + "colab": {} + }, + "source": [ + "PROBLEM = \"translate_enfr_wmt32k\" # We chose a problem translation English to French with 32.768 vocabulary\n", + "MODEL = \"transformer\" # Our model\n", + "HPARAMS = \"transformer_big\" # Hyperparameters for the model by default \n", + " # If you have a one gpu, use transformer_big_single_gpu" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "EikK-hW5m-ax", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#Show all problems and models \n", + "\n", + "from tensor2tensor.utils import registry\n", + "from tensor2tensor import problems\n", + "\n", + "problems.available() #Show all problems\n", + "registry.list_models() #Show all registered models\n", + "\n", + "#or\n", + "\n", + "#Command line\n", + "!t2t-trainer --registry_help #Show all problems\n", + "!t2t-trainer --problems_help #Show all models" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "78kBAIMQbeO6", + "colab_type": "text" + }, + "source": [ + "# 2. Data generation \n", + "\n", + "Generate the data (download the dataset and generate the data).\n", + "\n", + "---\n", + "\n", + " You can choose between command line or code." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CrDy3V7ibpQH", + "colab_type": "text" + }, + "source": [ + "## 2.1. Generate with terminal\n", + "For more information: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/bin/t2t_datagen.py" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "0Dfr8nFXmg1o", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!t2t-datagen \\\n", + " --data_dir=$DATA_DIR \\\n", + " --tmp_dir=$TMP_DIR \\\n", + " --problem=$PROBLEM \\\n", + " --t2t_usr_dir=$USR_DIR" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tMvCiiBtbuzh", + "colab_type": "text" + }, + "source": [ + "## 2.2. Generate with code" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Of5bHYVJmbwH", + "colab_type": "code", + "colab": {} + }, + "source": [ + "t2t_problem = problems.problem(PROBLEM)\n", + "t2t_problem.generate_data(DATA_DIR, TMP_DIR) " + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UkSwoqBzb47T", + "colab_type": "text" + }, + "source": [ + "# 3. Train the model\n", + "\n", + "\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1JVF2PJn7ByQ", + "colab_type": "text" + }, + "source": [ + "##3.1. Init parameters\n", + "\n", + "You can choose between command line or code.\n", + "\n", + "---\n", + "\n", + " batch_size : a great value of preference.\n", + "\n", + "---\n", + "train_steps : research paper mentioned 300k steps with 8 gpu on big transformer. So if you have 1 gpu, you will need to train the model x8 more. 
(https://arxiv.org/abs/1706.03762 for more information).\n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "yw6HgVWA7AQF", + "colab_type": "code", + "colab": {} + }, + "source": [ + "train_steps = 300000 # Total number of train steps for all Epochs\n", + "eval_steps = 100 # Number of steps to perform for each evaluation\n", + "batch_size = 4096\n", + "save_checkpoints_steps = 1000\n", + "ALPHA = 0.1\n", + "schedule = \"continuous_train_and_eval\"" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ze_YvVnIfD8z", + "colab_type": "text" + }, + "source": [ + "You can choose schedule :\n", + " \n", + "\n", + "* train. Bad quality\n", + "* continuous_train_and_eval (default)\n", + "* train_and_eval\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-zAub7Ggb8tj", + "colab_type": "text" + }, + "source": [ + "##3.2. Train with terminal\n", + "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/bin/t2t_trainer.py/n", + "\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "kSYAi4BsnpSD", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!t2t-trainer \\\n", + " --data_dir=$DATA_DIR \\\n", + " --problem=$PROBLEM \\\n", + " --model=$MODEL \\\n", + " --hparams_set=$HPARAMS \\\n", + " --hparams=\"batch_size=$batch_size\" \\\n", + " --schedule=$schedule\\\n", + " --output_dir=$TRAIN_DIR \\\n", + " --train_steps=$train_steps \\\n", + " --worker-gpu=1 \\ \n", + " --eval_steps=$eval_steps " + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bNfNBWtNVMwO", + "colab_type": "text" + }, + "source": [ + " --worker-gpu = 1, for train on 1 gpu (facultative).\n", + "\n", + "---\n", + "\n", + "For distributed training see: https://github.com/tensorflow/tensor2tensor/blob/master/docs/distributed_training.md\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "nnSoC1AUcLG6", + "colab_type": "text" + }, + "source": [ + "##3.3. 
Train with code\n", + "create_hparams : https://github.com/tensorflow/tensor2tensor/blob/28adf2690c551ef0f570d41bef2019d9c502ec7e/tensor2tensor/utils/hparams_lib.py#L42\n", + "\n", + "---\n", + "Change hyper parameters :\n", + "https://github.com/tensorflow/tensor2tensor/blob/28adf2690c551ef0f570d41bef2019d9c502ec7e/tensor2tensor/models/transformer.py#L1627\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "RJ91vQ2hyIPx", + "colab_type": "code", + "colab": {} + }, + "source": [ + "from tensor2tensor.utils.trainer_lib import create_run_config, create_experiment\n", + "from tensor2tensor.utils.trainer_lib import create_hparams\n", + "from tensor2tensor.utils import registry\n", + "from tensor2tensor import models\n", + "from tensor2tensor import problems\n", + "\n", + "# Init Hparams object from T2T Problem\n", + "hparams = create_hparams(HPARAMS)\n", + "\n", + "# Make Changes to Hparams\n", + "hparams.batch_size = batch_size\n", + "hparams.learning_rate = ALPHA\n", + "#hparams.max_length = 256\n", + "\n", + "# Can see all Hparams with code below\n", + "#print(json.loads(hparams.to_json())" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KZX1cwK3TEXs", + "colab_type": "text" + }, + "source": [ + "create_run_config : https://github.com/tensorflow/tensor2tensor/blob/28adf2690c551ef0f570d41bef2019d9c502ec7e/tensor2tensor/utils/trainer_lib.py#L105\n", + "\n", + "---\n", + "\n", + "\n", + "create_experiment : https://github.com/tensorflow/tensor2tensor/blob/28adf2690c551ef0f570d41bef2019d9c502ec7e/tensor2tensor/utils/trainer_lib.py#L611" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "yByKcs7XvAXL", + "colab_type": "code", + "colab": {} + }, + "source": [ + "RUN_CONFIG = create_run_config(\n", + " model_dir=TRAIN_DIR,\n", + " model_name=MODEL,\n", + " save_checkpoints_steps= save_checkpoints_steps\n", + ")\n", + "\n", + "tensorflow_exp_fn = create_experiment(\n", + " run_config=RUN_CONFIG,\n", + " hparams=hparams,\n", + " model_name=MODEL,\n", + " problem_name=PROBLEM,\n", + " data_dir=DATA_DIR, \n", + " train_steps=train_steps, \n", + " eval_steps=eval_steps, \n", + " #use_xla=True # For acceleration\n", + " ) \n", + "\n", + "tensorflow_exp_fn.train_and_evaluate()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "03xuR70jce_2", + "colab_type": "text" + }, + "source": [ + "#4. See the BLEU score" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "MiwyVWPhhGrk", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#INIT FILE FOR TRANSLATE\n", + "\n", + "SOURCE_TEST_TRANSLATE_DIR = TMP_DIR+\"/dev/newstest2014-fren-src.en.sgm\"\n", + "REFERENCE_TEST_TRANSLATE_DIR = TMP_DIR+\"/dev/newstest2014-fren-ref.en.sgm\"\n", + "BEAM_SIZE=1" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "agnSg_89cr63", + "colab_type": "text" + }, + "source": [ + "##4.1. 
Translate all\n", + "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/bin/t2t_translate_all.py" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Jrt5fwqsg3pl", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!t2t-translate-all \\\n", + " --source=$SOURCE_TEST_TRANSLATE_DIR \\\n", + " --model_dir=$TRAIN_DIR \\\n", + " --translations_dir=$TRANSLATIONS_DIR \\\n", + " --data_dir=$DATA_DIR \\\n", + " --problem=$PROBLEM \\\n", + " --hparams_set=$HPARAMS \\\n", + " --output_dir=$TRAIN_DIR \\\n", + " --t2t_usr_dir=$USR_DIR \\\n", + " --beam_size=$BEAM_SIZE \\\n", + " --model=$MODEL" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "O-pKKU2Acv8Q", + "colab_type": "text" + }, + "source": [ + "##4.2. Test the BLEU score\n", + "The BLEU score for all translations: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/bin/t2t_bleu.py#L68\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "EULP9TdPc58d", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!t2t-bleu \\\n", + " --translations_dir=$TRANSLATIONS_DIR \\\n", + " --model_dir=$TRAIN_DIR \\\n", + " --data_dir=$DATA_DIR \\\n", + " --problem=$PROBLEM \\\n", + " --hparams_set=$HPARAMS \\\n", + " --source=$SOURCE_TEST_TRANSLATE_DIR \\\n", + " --reference=$REFERENCE_TEST_TRANSLATE_DIR \\\n", + " --event_dir=$EVENT_DIR" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "13j50bpAc-bM", + "colab_type": "text" + }, + "source": [ + "#5. Prediction of sentence\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8WHPnqxhdQl6", + "colab_type": "text" + }, + "source": [ + "##5.1. Predict with terminal\n", + "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/bin/t2t_decoder.py" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "3SD-XhImnwpo", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!echo \"the business of the house\" > \"inputs.en\"\n", + "!echo -e \"les affaires de la maison\" > \"reference.fr\" # You can add other references\n", + "\n", + "!t2t-decoder \\\n", + " --data_dir=$DATA_DIR \\\n", + " --problem=$PROBLEM \\\n", + " --model=$MODEL \\\n", + " --hparams_set=$HPARAMS \\\n", + " --output_dir=$TRAIN_DIR \\\n", + " --decode_hparams=\"beam_size=1,alpha=$ALPHA\" \\\n", + " --decode_from_file=\"inputs.en\" \\\n", + " --decode_to_file=\"outputs.fr\"\n", + "\n", + "# See the translations\n", + "!cat outputs.fr" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "sGOC25N4dWdM", + "colab_type": "text" + }, + "source": [ + "##5.2. 
Predict with code" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "S6u4QmhPIbDx", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import tensorflow as tf\n", + "\n", + "#After training the model, re-run the environment but run this code in first, then predict.\n", + "\n", + "tfe = tf.contrib.eager\n", + "tfe.enable_eager_execution()\n", + "Modes = tf.estimator.ModeKeys" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "PaCkILfjz9x3", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#Config\n", + "\n", + "from tensor2tensor import models\n", + "from tensor2tensor import problems\n", + "from tensor2tensor.layers import common_layers\n", + "from tensor2tensor.utils import trainer_lib\n", + "from tensor2tensor.utils import t2t_model\n", + "from tensor2tensor.utils import registry\n", + "from tensor2tensor.utils import metrics\n", + "import numpy as np\n", + "\n", + "enfr_problem = problems.problem(PROBLEM)\n", + "\n", + "# Copy the vocab file locally so we can encode inputs and decode model outputs\n", + "vocab_name = \"vocab.translate_enfr_wmt32k.32768.subwords\"\n", + "vocab_file = os.path.join(DATA_DIR, vocab_name)\n", + "\n", + "# Get the encoders from the problem\n", + "encoders = enfr_problem.feature_encoders(DATA_DIR)\n", + "\n", + "ckpt_path = tf.train.latest_checkpoint(os.path.join(TRAIN_DIR))\n", + "print(ckpt_path)\n", + "\n", + "def translate(inputs):\n", + " encoded_inputs = encode(inputs)\n", + " with tfe.restore_variables_on_create(ckpt_path):\n", + " model_output = translate_model.infer(encoded_inputs)[\"outputs\"]\n", + " return decode(model_output)\n", + "\n", + "def encode(input_str, output_str=None):\n", + " \"\"\"Input str to features dict, ready for inference\"\"\"\n", + " inputs = encoders[\"inputs\"].encode(input_str) + [1] # add EOS id\n", + " batch_inputs = tf.reshape(inputs, [1, -1, 1]) # Make it 3D.\n", + " return {\"inputs\": batch_inputs}\n", + "\n", + "def decode(integers):\n", + " \"\"\"List of ints to str\"\"\"\n", + " integers = list(np.squeeze(integers))\n", + " if 1 in integers:\n", + " integers = integers[:integers.index(1)]\n", + " return encoders[\"inputs\"].decode(np.squeeze(integers))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "5zE8yHLUA2He", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#Predict \n", + "\n", + "hparams = trainer_lib.create_hparams(HPARAMS, data_dir=DATA_DIR, problem_name=PROBLEM)\n", + "translate_model = registry.model(MODEL)(hparams, Modes.PREDICT)\n", + "\n", + "inputs = \"the aniamal didn't cross the river because it was too tired\"\n", + "ref = \"l'animal n'a pas traversé la rue parcequ'il etait trop fatigué\" ## this just a reference for evaluate the quality of the traduction\n", + "outputs = translate(inputs)\n", + "\n", + "print(\"Inputs: %s\" % inputs)\n", + "print(\"Outputs: %s\" % outputs)\n", + "\n", + "file_input = open(\"outputs.fr\",\"w+\")\n", + "file_input.write(outputs)\n", + "file_input.close()\n", + "\n", + "file_output = open(\"reference.fr\",\"w+\")\n", + "file_output.write(ref)\n", + "file_output.close()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "y6jbQ6FoRsmG", + "colab_type": "text" + }, + "source": [ + "##5.3. 
Evaluate the BLEU Score\n", + "BLEU score for a sequence translation: https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/bin/t2t_bleu.py#L24" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "il2oevmXRrbf", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!t2t-bleu \\\n", + " --translation=outputs.fr \\\n", + " --reference=reference.fr" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FXegHzD1I67e", + "colab_type": "text" + }, + "source": [ + "#6. Attention visualization\n", + "We need to have a predicted sentence with code." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ISHauPT8I-3S", + "colab_type": "text" + }, + "source": [ + "##6.1. Attention utils\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "2RHCTrc9I55K", + "colab_type": "code", + "colab": {} + }, + "source": [ + "from tensor2tensor.visualization import attention\n", + "from tensor2tensor.data_generators import text_encoder\n", + "\n", + "SIZE = 35\n", + "\n", + "def encode_eval(input_str, output_str):\n", + " inputs = tf.reshape(encoders[\"inputs\"].encode(input_str) + [1], [1, -1, 1, 1]) # Make it 3D.\n", + " outputs = tf.reshape(encoders[\"inputs\"].encode(output_str) + [1], [1, -1, 1, 1]) # Make it 3D.\n", + " return {\"inputs\": inputs, \"targets\": outputs}\n", + "\n", + "def get_att_mats():\n", + " enc_atts = []\n", + " dec_atts = []\n", + " encdec_atts = []\n", + "\n", + " for i in range(hparams.num_hidden_layers):\n", + " enc_att = translate_model.attention_weights[\n", + " \"transformer/body/encoder/layer_%i/self_attention/multihead_attention/dot_product_attention\" % i][0]\n", + " dec_att = translate_model.attention_weights[\n", + " \"transformer/body/decoder/layer_%i/self_attention/multihead_attention/dot_product_attention\" % i][0]\n", + " encdec_att = translate_model.attention_weights[\n", + " \"transformer/body/decoder/layer_%i/encdec_attention/multihead_attention/dot_product_attention\" % i][0]\n", + " enc_atts.append(resize(enc_att))\n", + " dec_atts.append(resize(dec_att))\n", + " encdec_atts.append(resize(encdec_att))\n", + " return enc_atts, dec_atts, encdec_atts\n", + "\n", + "def resize(np_mat):\n", + " # Sum across heads\n", + " np_mat = np_mat[:, :SIZE, :SIZE]\n", + " row_sums = np.sum(np_mat, axis=0)\n", + " # Normalize\n", + " layer_mat = np_mat / row_sums[np.newaxis, :]\n", + " lsh = layer_mat.shape\n", + " # Add extra dim for viz code to work.\n", + " layer_mat = np.reshape(layer_mat, (1, lsh[0], lsh[1], lsh[2]))\n", + " return layer_mat\n", + "\n", + "def to_tokens(ids):\n", + " ids = np.squeeze(ids)\n", + " subtokenizer = hparams.problem_hparams.vocabulary['targets']\n", + " tokens = []\n", + " for _id in ids:\n", + " if _id == 0:\n", + " tokens.append('')\n", + " elif _id == 1:\n", + " tokens.append('')\n", + " elif _id == -1:\n", + " tokens.append('')\n", + " else:\n", + " tokens.append(subtokenizer._subtoken_id_to_subtoken_string(_id))\n", + " return tokens\n", + "\n", + "def call_html():\n", + " import IPython\n", + " display(IPython.core.display.HTML('''\n", + " \n", + " \n", + " '''))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9PGwUbJuJHJS", + "colab_type": "text" + }, + "source": [ + "##6.2 Display Attention" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ijTOlrt8JI4t", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import numpy as np\n", + "\n", + "# Convert inputs and 
outputs to subwords\n", + "\n", + "inp_text = to_tokens(encoders[\"inputs\"].encode(inputs))\n", + "out_text = to_tokens(encoders[\"inputs\"].encode(outputs))\n", + "\n", + "hparams = trainer_lib.create_hparams(HPARAMS, data_dir=DATA_DIR, problem_name=PROBLEM)\n", + "\n", + "# Run eval to collect attention weights\n", + "example = encode_eval(inputs, outputs)\n", + "with tfe.restore_variables_on_create(tf.train.latest_checkpoint(ckpt_path)):\n", + " translate_model.set_mode(Modes.EVAL)\n", + " translate_model(example)\n", + "# Get normalized attention weights for each layer\n", + "enc_atts, dec_atts, encdec_atts = get_att_mats()\n", + "\n", + "call_html()\n", + "attention.show(inp_text, out_text, enc_atts, dec_atts, encdec_atts)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "r8yAQUDZdm1p", + "colab_type": "text" + }, + "source": [ + "#7. Export the model\n", + "For more information: https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/serving" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "c2yulC7J8_I9", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#export Model\n", + "!t2t-exporter \\\n", + " --data_dir=$DATA_DIR \\\n", + " --output_dir=$TRAIN_DIR \\\n", + " --problem=$PROBLEM \\\n", + " --model=$MODEL \\\n", + " --hparams_set=$HPARAMS \\\n", + " --decode_hparams=\"beam_size=1,alpha=$ALPHA\" \\\n", + " --export_dir=$EXPORT_DIR" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2ltjEr3JX5-e", + "colab_type": "text" + }, + "source": [ + "#8.Load pretrained model from Google Storage\n", + "We use the pretrained model En-De translation." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QgY3Fw261bZC", + "colab_type": "text" + }, + "source": [ + "##8.1. See existing content storaged" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "7P7aJClG0t8c", + "colab_type": "code", + "colab": {} + }, + "source": [ + "print(\"checkpoint: \")\n", + "!gsutil ls \"gs://tensor2tensor-checkpoints\"\n", + "\n", + "print(\"data: \")\n", + "!gsutil ls \"gs://tensor2tensor-data\"" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wP8jrR5bbu7e", + "colab_type": "text" + }, + "source": [ + "##8.2. Init model" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "AnYU7lrazkMm", + "colab_type": "code", + "colab": {} + }, + "source": [ + "PROBLEM_PRETRAINED = \"translate_ende_wmt32k\"\n", + "MODEL_PRETRAINED = \"transformer\" \n", + "HPARAMS_PRETRAINED = \"transformer_base\"" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DTgPvq4q1VAr", + "colab_type": "text" + }, + "source": [ + "##8.3. 
Load content from google storage" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "FrxOAVcyinll", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import tensorflow as tf\n", + "import os\n", + "\n", + "\n", + "DATA_DIR_PRETRAINED = os.path.expanduser(\"/t2t/data_pretrained\")\n", + "CHECKPOINT_DIR_PRETRAINED = os.path.expanduser(\"/t2t/checkpoints_pretrained\")\n", + "\n", + "tf.gfile.MakeDirs(DATA_DIR_PRETRAINED)\n", + "tf.gfile.MakeDirs(CHECKPOINT_DIR_PRETRAINED)\n", + "\n", + "\n", + "gs_data_dir = \"gs://tensor2tensor-data/\"\n", + "vocab_name = \"vocab.translate_ende_wmt32k.32768.subwords\"\n", + "vocab_file = os.path.join(gs_data_dir, vocab_name)\n", + "\n", + "gs_ckpt_dir = \"gs://tensor2tensor-checkpoints/\"\n", + "ckpt_name = \"transformer_ende_test\"\n", + "gs_ckpt = os.path.join(gs_ckpt_dir, ckpt_name)\n", + "\n", + "TRAIN_DIR_PRETRAINED = os.path.join(CHECKPOINT_DIR_PRETRAINED, ckpt_name)\n", + "\n", + "!gsutil cp {vocab_file} {DATA_DIR_PRETRAINED}\n", + "!gsutil -q cp -R {gs_ckpt} {CHECKPOINT_DIR_PRETRAINED}\n", + "\n", + "CHECKPOINT_NAME_PRETRAINED = tf.train.latest_checkpoint(TRAIN_DIR_PRETRAINED) # for translate with code\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "LP6cro9Xbygf", + "colab_type": "text" + }, + "source": [ + "##8.4. Translate" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "CBoNpy5HbzoF", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!echo \"the business of the house\" > \"inputs.en\"\n", + "!echo -e \"das Geschäft des Hauses\" > \"reference.de\"\n", + "\n", + "!t2t-decoder \\\n", + " --data_dir=$DATA_DIR_PRETRAINED \\\n", + " --problem=$PROBLEM_PRETRAINED \\\n", + " --model=$MODEL_PRETRAINED \\\n", + " --hparams_set=$HPARAMS_PRETRAINED \\\n", + " --output_dir=$TRAIN_DIR_PRETRAINED \\\n", + " --decode_hparams=\"beam_size=1\" \\\n", + " --decode_from_file=\"inputs.en\" \\\n", + " --decode_to_file=\"outputs.de\"\n", + "\n", + "# See the translations\n", + "!cat outputs.de\n", + "\n", + "!t2t-bleu \\\n", + " --translation=outputs.de \\\n", + " --reference=reference.de" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bKI4WF0DgoFd", + "colab_type": "text" + }, + "source": [ + "#9. Add your dataset/problem\n", + "To add a new dataset/problem, subclass Problem and register it with @registry.register_problem. 
See TranslateEnfrWmt8k for an example: \n", + "https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/translate_enfr.py\n", + "\n", + "---\n", + "Adding your own components: https://github.com/tensorflow/tensor2tensor#adding-your-own-components\n", + "\n", + "---\n", + "\n", + "See this example: https://github.com/tensorflow/tensor2tensor/tree/master/tensor2tensor/test_data/example_usr_dir" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "mB1SIrJNqy1N", + "colab_type": "code", + "colab": {} + }, + "source": [ + "from tensor2tensor.data_generators import translate_enfr\n", + "from tensor2tensor.utils import registry\n", + "\n", + "@registry.register_problem\n", + "class MyTranslateEnFr(translate_enfr.TranslateEnfrWmt8k):\n", + "\n", + " def generator(self, data_dir, tmp_dir, train):\n", + " # Your code here: yield {\"inputs\": ..., \"targets\": ...} examples.\n", + " raise NotImplementedError" + ], + "execution_count": 0, + "outputs": [] + } + ] +} diff --git a/tensor2tensor/notebooks/asr_transformer.ipynb b/tensor2tensor/notebooks/asr_transformer.ipynb new file mode 100644 index 000000000..71a8bf456 --- /dev/null +++ b/tensor2tensor/notebooks/asr_transformer.ipynb @@ -0,0 +1,421 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "form", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "6uNrFWq5BRba" + }, + "outputs": [], + "source": [ + "#@title\n", + "# Copyright 2018 Google LLC.\n", + "\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License."
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "7tB9m_fw9Xkl" + }, + "outputs": [], + "source": [ + "!pip install -qq tensorflow\n", + "!pip install -qq tensor2tensor\n", + "!pip install -qq pydub\n", + "!apt-get -qq update\n", + "!apt-get -qq install -y ffmpeg\n", + "!apt-get -qq install -y sox" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "hF_ZmvGjEyJd" + }, + "outputs": [], + "source": [ + "import sys\n", + "if 'google.colab' in sys.modules: # Colab-only TensorFlow version selector\n", + " %tensorflow_version 1.x\n", + "import tensorflow as tf\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import os\n", + "import collections\n", + "import base64\n", + "import cStringIO\n", + "import pydub\n", + "import shutil\n", + "from scipy.io import wavfile\n", + "\n", + "import IPython\n", + "import google.colab\n", + "\n", + "from tensor2tensor import models\n", + "from tensor2tensor import problems\n", + "from tensor2tensor.layers import common_layers\n", + "from tensor2tensor.utils import trainer_lib\n", + "from tensor2tensor.utils import t2t_model\n", + "from tensor2tensor.utils import registry\n", + "from tensor2tensor.utils import metrics\n", + "\n", + "# Enable TF Eager execution\n", + "tfe = tf.contrib.eager\n", + "tf.enable_eager_execution()\n", + "\n", + "# Other setup\n", + "Modes = tf.estimator.ModeKeys\n", + "\n", + "# Setup some directories\n", + "data_dir = os.path.expanduser(\"~/t2t/data\")\n", + "tmp_dir = os.path.expanduser(\"~/t2t/tmp\")\n", + "train_dir = os.path.expanduser(\"~/t2t/train\")\n", + "checkpoint_dir = os.path.expanduser(\"~/t2t/checkpoints\")\n", + "tf.gfile.MakeDirs(data_dir)\n", + "tf.gfile.MakeDirs(tmp_dir)\n", + "tf.gfile.MakeDirs(train_dir)\n", + "tf.gfile.MakeDirs(checkpoint_dir)\n", + "\n", + "gs_ckpt_dir = \"gs://tensor2tensor-checkpoints/\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "LwPvdJJ4xN6y" + }, + "source": [ + "\n", + "### Define problem, hparams, model, encoder and decoder\n", + "Definition of this model (as well as many more) can be found on tensor2tensor github [page](https://github.com/tensorflow/tensor2tensor)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "hH0FEHhDIGjM" + }, + "outputs": [], + "source": [ + "problem_name = \"librispeech_clean\"\n", + "asr_problem = problems.problem(problem_name)\n", + "encoders = asr_problem.feature_encoders(None)\n", + "\n", + "model_name = \"transformer\"\n", + "hparams_set = \"transformer_librispeech_tpu\"\n", + "\n", + "hparams = trainer_lib.create_hparams(hparams_set,data_dir=data_dir, problem_name=problem_name)\n", + "asr_model = registry.model(model_name)(hparams, Modes.PREDICT)\n", + "\n", + "def encode(x):\n", + " waveforms = encoders[\"waveforms\"].encode(x)\n", + " encoded_dict = asr_problem.preprocess_example({\"waveforms\":waveforms, \"targets\":[]}, Modes.PREDICT, hparams)\n", + " \n", + " return {\"inputs\" : tf.expand_dims(encoded_dict[\"inputs\"], 0), \"targets\" : tf.expand_dims(encoded_dict[\"targets\"], 0)}\n", + "\n", + "def decode(integers):\n", + " integers = list(np.squeeze(integers))\n", + " if 1 in integers:\n", + " integers = integers[:integers.index(1)]\n", + " return encoders[\"targets\"].decode(np.squeeze(integers))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "pGhUGptixYBd" + }, + "source": [ + "### Define path to checkpoint\n", + "In this demo we are using a pretrained model.\n", + "Instructions for training your own model can be found in the [tutorial](https://github.com/tensorflow/tensor2tensor/blob/master/docs/tutorials/asr_with_transformer.md) on the tensor2tensor page." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "p9D8OJdFezsJ" + }, + "outputs": [], + "source": [ + "# Copy the pretrained checkpoint locally\n", + "ckpt_name = \"transformer_asr_180214\"\n", + "gs_ckpt = os.path.join(gs_ckpt_dir, ckpt_name)\n", + "print(gs_ckpt)\n", + "!gsutil cp -R {gs_ckpt} {checkpoint_dir} \n", + "ckpt_path = tf.train.latest_checkpoint(os.path.join(checkpoint_dir, ckpt_name))\n", + "ckpt_path" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "arS1sXFPxvde" + }, + "source": [ + "### Define transcribe function" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "od7ZPT3wfkZs" + }, + "outputs": [], + "source": [ + "# Restore and transcribe!\n", + "def transcribe(inputs):\n", + " encoded_inputs = encode(inputs)\n", + " with tfe.restore_variables_on_create(ckpt_path): \n", + " model_output = asr_model.infer(encoded_inputs, beam_size=2, alpha=0.6, decode_length=1)[\"outputs\"]\n", + " return decode(model_output)\n", + "\n", + "def play_and_transcribe(inputs):\n", + " waveforms = encoders[\"waveforms\"].encode(inputs)\n", + " IPython.display.display(IPython.display.Audio(data=waveforms, rate=16000))\n", + " return transcribe(inputs) " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Qz5u2O5LvShm" + }, + "source": [ + "# Decoding prerecorded examples\n", + "\n", + "You can upload any .wav files. They will be transcribed if their frame rate matches Librispeech's frame rate (16 kHz)."
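If a recording was made at a different sample rate, it can be resampled to 16 kHz before uploading. A minimal sketch using pydub (installed at the top of this notebook); the file names `input.wav` and `input_16k.wav` are placeholders:

```
import pydub

# Resample an arbitrary wav file to the 16 kHz rate expected by the
# Librispeech-trained model; the file names here are placeholders.
sound = pydub.AudioSegment.from_wav("input.wav")
sound.set_frame_rate(16000).export(out_f="input_16k.wav", format="wav")
```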
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "xAstJTeyvXMf" + }, + "outputs": [], + "source": [ + "uploaded = google.colab.files.upload()\n", + "prerecorded_messages = []\n", + "\n", + "for fn in uploaded.keys():\n", + " print('User uploaded file \"{name}\" with length {length} bytes'.format(\n", + " name=fn, length=len(uploaded[fn])))\n", + " mem_file = cStringIO.StringIO(uploaded[fn])\n", + " \n", + " save_filename = os.path.join(tmp_dir, fn)\n", + " with open(save_filename, 'w') as fd:\n", + " mem_file.seek(0)\n", + " shutil.copyfileobj(mem_file, fd)\n", + " prerecorded_messages.append(save_filename)\n", + " \n", + " \n", + "for inputs in prerecorded_messages:\n", + " outputs = play_and_transcribe(inputs)\n", + "\n", + " print(\"Inputs: %s\" % inputs)\n", + " print(\"Outputs: %s\" % outputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "mJvRjlHUrr65" + }, + "source": [ + "# Recording your own examples" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "oirqsdqVoElk" + }, + "outputs": [], + "source": [ + "# Records webm file and converts\n", + "def RecordNewAudioSample(filename=None, webm_filename=None):\n", + " \"\"\"Args:\n", + " filename - string, path for storing wav file\n", + " webm_filename - string, path for storing webm file\n", + " Returns:\n", + " string - path where wav file was saved. (=filename if specified)\n", + " \n", + " \"\"\"\n", + " # Create default filenames in tmp_dir if not specified.\n", + " if not filename:\n", + " filename = os.path.join(tmp_dir, \"recording.wav\")\n", + " if not webm_filename:\n", + " webm_filename = os.path.join(tmp_dir, \"recording.webm\")\n", + " \n", + " # Record webm file form colab.\n", + " \n", + " audio = google.colab._message.blocking_request('user_media', {\"audio\":True, \"video\":False, \"duration\":-1}, timeout_sec=600)\n", + " #audio = frontend.RecordMedia(True, False)\n", + " \n", + " # Convert the recording into in_memory file.\n", + " music_mem_file = cStringIO.StringIO(\n", + " base64.decodestring(audio[audio.index(',')+1:]))\n", + " \n", + " # Store webm recording in webm_filename. 
Storing is necessary for conversion.\n", + " with open(webm_filename, 'w') as fd:\n", + " music_mem_file.seek(0)\n", + " shutil.copyfileobj(music_mem_file, fd)\n", + " \n", + " # Open stored file and save it as wav with sample_rate=16000.\n", + " pydub.AudioSegment.from_file(webm_filename, codec=\"opus\"\n", + " ).set_frame_rate(16000).export(out_f=filename,\n", + " format=\"wav\")\n", + " return filename" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "90BjliFTCQm9" + }, + "outputs": [], + "source": [ + "# Record the sample\n", + "my_sample_filename = RecordNewAudioSample()\n", + "print my_sample_filename" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "PdBfEik0-pMv" + }, + "outputs": [], + "source": [ + "print play_and_transcribe(my_sample_filename)" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "default_view": {}, + "name": "ASR with Transformer example notebook", + "provenance": [ + { + "file_id": "notebooks/SR_with_Transformer_example_notebook.ipynb", + "timestamp": 1525703542020 + }, + { + "file_id": "1hEMwW8LgaQPLngfka0tbobYB-ZTVqy34", + "timestamp": 1525702247248 + }, + { + "file_id": "1Pp4aSAceJRNpxtSrTevUKpHKudMxHyBF", + "timestamp": 1518630927690 + } + ], + "version": "0.3.2", + "views": {} + }, + "kernelspec": { + "display_name": "Python 2", + "name": "python2" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tensor2tensor/notebooks/hello_t2t-rl.ipynb b/tensor2tensor/notebooks/hello_t2t-rl.ipynb new file mode 100644 index 000000000..47ea4bf84 --- /dev/null +++ b/tensor2tensor/notebooks/hello_t2t-rl.ipynb @@ -0,0 +1,1890 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "xCLcAmON-m2i", + "colab_type": "text" + }, + "source": [ + "# Tensor2Tensor Reinforcement Learning\n", + "\n", + "The `rl` package provides the ability to run model-free and model-based reinforcement learning algorithms.\n", + "\n", + "Currently, we support Proximal Policy Optimization ([PPO](https://arxiv.org/abs/1707.06347)) and Simulated Policy Learning ([SimPLe](https://arxiv.org/abs/1903.00374)).\n", + "\n", + "Below you will find examples of PPO training using `trainer_model_free.py` and SimPLe training using `trainer_model_based.py`.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "id": "RW7gEGp3e87G", + "colab_type": "code", + "colab": {}, + "cellView": "form" + }, + "outputs": [], + "source": [ + "#@title\n", + "# Copyright 2018 Google LLC.\n", + "\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License."
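For context, outside of this notebook a SimPLe experiment is typically launched through `trainer_model_based.py`. The sketch below is only an outline under stated assumptions: the hparams-set name is the one that appears in the evaluation logs later in this notebook, the game override uses the same `game=pong` syntax, and the flag names and output path should be verified against `tensor2tensor/rl/trainer_model_based.py` (PPO-only training goes through `trainer_model_free.py`, whose flags differ):

```
# Sketch only: verify flag names against tensor2tensor/rl/trainer_model_based.py.
!python -m tensor2tensor.rl.trainer_model_based \
  --loop_hparams_set=rlmb_long_stochastic_discrete \
  --loop_hparams=game=pong \
  --output_dir=~/t2t/rl_pong
```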
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "id": "pq0BqXm4-3gJ", + "colab_type": "code", + "outputId": "6086719f-6268-4b61-8fa3-d251eda24c97", + "executionInfo": { + "status": "ok", + "timestamp": 1.553273826475E12, + "user_tz": -60.0, + "elapsed": 20650.0, + "user": { + "displayName": "Piotr Miłoś", + "photoUrl": "https://lh3.googleusercontent.com/-050ZBEGpNAA/AAAAAAAAAAI/AAAAAAAAk9g/r6cv_J6J5qA/s64/photo.jpg", + "userId": "12158759908531801397" + } + }, + "colab": { + "base_uri": "https://localhost:8080/", + "height": 163.0 + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[K 100% |████████████████████████████████| 1.3MB 9.4MB/s \n", + "\u001b[K 100% |████████████████████████████████| 215kB 27.3MB/s \n", + "\u001b[K 100% |████████████████████████████████| 143kB 29.6MB/s \n", + "\u001b[K 100% |████████████████████████████████| 21.1MB 1.7MB/s \n", + "\u001b[K 100% |████████████████████████████████| 409kB 24.7MB/s \n", + "\u001b[K 100% |████████████████████████████████| 296kB 25.0MB/s \n", + "\u001b[K 100% |████████████████████████████████| 61kB 21.5MB/s \n", + "\u001b[?25h Building wheel for pypng (setup.py) ... \u001b[?25ldone\n", + "\u001b[?25h Building wheel for opt-einsum (setup.py) ... \u001b[?25ldone\n", + "\u001b[?25h" + ] + } + ], + "source": [ + "!pip install -q tensorflow==1.13.1\n", + "!pip install -q tensorflow_probability==0.6.0\n", + "!pip install -q tensor2tensor==1.13.1\n", + "!pip install -q gym[atari]" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "id": "R7-Ni-39DGZW", + "colab_type": "code", + "colab": {} + }, + "outputs": [], + "source": [ + "# Helper function for playing videos in the colab.\n", + "def play_video(path):\n", + " from IPython.core.magics.display import HTML\n", + " display_path = \"/nbextensions/vid.mp4\"\n", + " display_abs_path = \"/usr/local/share/jupyter\" + display_path\n", + " !rm -f $display_abs_path\n", + " !ffmpeg -loglevel error -i $path $display_abs_path\n", + " return HTML(\"\"\"\n", + " \n", + " \"\"\".format(display_path))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pueuiKUmAOUT", + "colab_type": "text" + }, + "source": [ + "# Play using a pre-trained policy\n", + "\n", + "We provide pretrained policies for the following games from the Atari Learning Environment ( [ALE](https://github.com/mgbellemare/Arcade-Learning-Environment)) : alien,\n", + "amidar,\n", + " assault,\n", + " asterix,\n", + " asteroids,\n", + " atlantis,\n", + " bank_heist,\n", + " battle_zone,\n", + " beam_rider,\n", + " bowling,\n", + " boxing,\n", + " breakout,\n", + " chopper_command,\n", + " crazy_climber,\n", + " demon_attack,\n", + " fishing_derby,\n", + " freeway,\n", + " frostbite,\n", + " gopher,\n", + " gravitar,\n", + " hero,\n", + " ice_hockey,\n", + " jamesbond,\n", + " kangaroo,\n", + " krull,\n", + " kung_fu_master,\n", + " ms_pacman,\n", + " name_this_game,\n", + " pong,\n", + " private_eye,\n", + " qbert,\n", + " riverraid,\n", + " road_runner,\n", + " seaquest,\n", + " up_n_down,\n", + " yars_revenge.\n", + " \n", + " We have 5 checkpoints for each game saved on Google Storage. 
Run the following command get the storage path:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "id": "x9pKfNbDFfVh", + "colab_type": "code", + "outputId": "97e763cc-caaa-49c8-e532-fcbde828d1a2", + "executionInfo": { + "status": "ok", + "timestamp": 1.5532741511E12, + "user_tz": -60.0, + "elapsed": 6162.0, + "user": { + "displayName": "Piotr Miłoś", + "photoUrl": "https://lh3.googleusercontent.com/-050ZBEGpNAA/AAAAAAAAAAI/AAAAAAAAk9g/r6cv_J6J5qA/s64/photo.jpg", + "userId": "12158759908531801397" + } + }, + "colab": { + "base_uri": "https://localhost:8080/", + "height": 147.0 + }, + "collapsed": false + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", + "For more information, please see:\n", + " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", + " * https://github.com/tensorflow/addons\n", + "If you depend on functionality not listed there, please file an issue.\n", + "\n" + ] + }, + { + "data": { + "text/plain": [ + "'gs://tensor2tensor-checkpoints/modelrl_experiments/train_sd/143'" + ] + }, + "execution_count": 4, + "metadata": { + "tags": [] + }, + "output_type": "execute_result" + } + ], + "source": [ + "# experiment_id is an integer from [0, 4].\n", + "def get_run_dir(game, experiment_id):\n", + " from tensor2tensor.data_generators.gym_env import ATARI_GAMES_WITH_HUMAN_SCORE_NICE\n", + " EXPERIMENTS_PER_GAME = 5\n", + " run_id = ATARI_GAMES_WITH_HUMAN_SCORE_NICE.index(game) * EXPERIMENTS_PER_GAME + experiment_id + 1\n", + " return \"gs://tensor2tensor-checkpoints/modelrl_experiments/train_sd/{}\".format(run_id)\n", + "\n", + "get_run_dir('pong', 2)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "77fFdm-cFEOB", + "colab_type": "text" + }, + "source": [ + "To evaluate and generate videos for a pretrained policy on Pong:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "id": "X-nGlbuTAQXj", + "colab_type": "code", + "outputId": "888968f2-f551-4a0f-9fc7-074a949362d6", + "executionInfo": { + "status": "ok", + "timestamp": 1.553271580737E12, + "user_tz": -60.0, + "elapsed": 842128.0, + "user": { + "displayName": "Piotr Kozakowski", + "photoUrl": "", + "userId": "01014928596539690143" + } + }, + "colab": { + "base_uri": "https://localhost:8080/", + "height": 17088.0 + }, + "collapsed": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", + "For more information, please see:\n", + " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", + " * https://github.com/tensorflow/addons\n", + "If you depend on functionality not listed there, please file an issue.\n", + "\n", + "INFO:tensorflow:Overriding hparams in rlmb_long_stochastic_discrete with game=pong,eval_max_num_noops=8,eval_sampling_temps=[0.5]\n", + "INFO:tensorflow:Evaluating metric mean_reward/eval/sampling_temp_0.5_max_noops_8_unclipped\n", + "2019-03-22 16:05:45.007030: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2300000000 Hz\n", + "2019-03-22 16:05:45.007306: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x2697860 executing computations on platform Host. 
Devices:\n", + "2019-03-22 16:05:45.007346: I tensorflow/compiler/xla/service/service.cc:158] StreamExecutor device (0): , \n", + "2019-03-22 16:05:45.105281: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", + "2019-03-22 16:05:45.105857: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x2697440 executing computations on platform CUDA. Devices:\n", + "2019-03-22 16:05:45.105908: I tensorflow/compiler/xla/service/service.cc:158] StreamExecutor device (0): Tesla K80, Compute Capability 3.7\n", + "2019-03-22 16:05:45.106380: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1433] Found device 0 with properties: \n", + "name: Tesla K80 major: 3 minor: 7 memoryClockRate(GHz): 0.8235\n", + "pciBusID: 0000:00:04.0\n", + "totalMemory: 11.17GiB freeMemory: 11.10GiB\n", + "2019-03-22 16:05:45.106420: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0\n", + "2019-03-22 16:05:45.499212: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:\n", + "2019-03-22 16:05:45.499307: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0 \n", + "2019-03-22 16:05:45.499332: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0: N \n", + "2019-03-22 16:05:45.499671: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:42] Overriding allow_growth setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.\n", + "2019-03-22 16:05:45.499741: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 10754 MB memory) -> physical GPU (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7)\n", + "INFO:tensorflow:Using DummyPolicyProblem for the policy.\n", + "INFO:tensorflow:Setting T2TModel mode to 'train'\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Colocations handled automatically by placer.\n", + "INFO:tensorflow:Using variable initializer: orthogonal\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/utils/t2t_model.py:1358: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "INFO:tensorflow:Transforming feature 'input_action' with symbol_modality_6_64.bottom\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/function.py:1007: calling Graph.create_op (from tensorflow.python.framework.ops) with compute_shapes is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Shapes are always computed; don't use the compute_shapes as it has no effect.\n", + "INFO:tensorflow:Transforming feature 'input_reward' with symbol_modality_3_64.bottom\n", + "INFO:tensorflow:Transforming feature 'inputs' with video_modality.bottom\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/layers/common_video.py:495: py_func (from tensorflow.python.ops.script_ops) is deprecated and will be removed in a future version.\n", + "Instructions for 
updating:\n", + "tf.py_func is deprecated in TF V2. Instead, use\n", + " tf.py_function, which takes a python function which manipulates tf eager\n", + " tensors instead of numpy arrays. It's easy to convert a tf eager tensor to\n", + " an ndarray (just call tensor.numpy()) but having access to eager tensors\n", + " means `tf.py_function`s can use accelerators such as GPUs as well as\n", + " being differentiable using a gradient tape.\n", + " \n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/layers/common_layers.py:277: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "INFO:tensorflow:Transforming feature 'target_action' with symbol_modality_6_64.targets_bottom\n", + "INFO:tensorflow:Transforming feature 'target_policy' with identity_modality.targets_bottom\n", + "INFO:tensorflow:Transforming feature 'target_reward' with symbol_modality_3_64.targets_bottom\n", + "INFO:tensorflow:Transforming feature 'target_value' with identity_modality.targets_bottom\n", + "INFO:tensorflow:Transforming feature 'targets' with video_modality.targets_bottom\n", + "INFO:tensorflow:Building model body\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/models/research/rl.py:598: conv2d (from tensorflow.python.layers.convolutional) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use keras.layers.conv2d instead.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/models/research/rl.py:602: flatten (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use keras.layers.flatten instead.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/models/research/rl.py:603: dropout (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use keras.layers.dropout instead.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/models/research/rl.py:604: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use keras.layers.dense instead.\n", + "INFO:tensorflow:Transforming body output with identity_modality.top\n", + "INFO:tensorflow:Transforming body output with identity_modality.top\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/layers/common_layers.py:2887: multinomial (from tensorflow.python.ops.random_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.random.categorical instead.\n", + "2019-03-22 16:06:00.352605: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0\n", + "2019-03-22 16:06:00.352688: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:\n", + "2019-03-22 16:06:00.352724: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0 \n", + "2019-03-22 16:06:00.352744: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0: N \n", + "2019-03-22 16:06:00.353037: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 10754 MB memory) -> physical GPU (device: 0, name: Tesla K80, pci 
bus id: 0000:00:04.0, compute capability: 3.7)\n", + "2019-03-22 16:06:00.588787: W tensorflow/core/platform/cloud/google_auth_provider.cc:178] All attempts to get a Google authentication bearer token failed, returning an empty token. Retrieving token from files failed with \"Not found: Could not locate the credentials file.\". Retrieving token from GCE failed with \"Cancelled: GCE check skipped due to presence of $NO_GCE_CHECK environment variable.\".\n", + "2019-03-22 16:06:00.647797: W tensorflow/core/platform/cloud/google_auth_provider.cc:178] All attempts to get a Google authentication bearer token failed, returning an empty token. Retrieving token from files failed with \"Not found: Could not locate the credentials file.\". Retrieving token from GCE failed with \"Cancelled: GCE check skipped due to presence of $NO_GCE_CHECK environment variable.\".\n", + "INFO:tensorflow:Restoring checkpoint gs://tensor2tensor-checkpoints/modelrl_experiments/train_sd/142/policy/model.ckpt-171992\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use standard file APIs to check for files with this prefix.\n", + "2019-03-22 16:06:00.711910: W tensorflow/core/platform/cloud/google_auth_provider.cc:178] All attempts to get a Google authentication bearer token failed, returning an empty token. Retrieving token from files failed with \"Not found: Could not locate the credentials file.\". Retrieving token from GCE failed with \"Cancelled: GCE check skipped due to presence of $NO_GCE_CHECK environment variable.\".\n", + "INFO:tensorflow:Restoring parameters from gs://tensor2tensor-checkpoints/modelrl_experiments/train_sd/142/policy/model.ckpt-171992\n", + "2019-03-22 16:06:00.793701: W tensorflow/core/platform/cloud/google_auth_provider.cc:178] All attempts to get a Google authentication bearer token failed, returning an empty token. Retrieving token from files failed with \"Not found: Could not locate the credentials file.\". Retrieving token from GCE failed with \"Cancelled: GCE check skipped due to presence of $NO_GCE_CHECK environment variable.\".\n", + "2019-03-22 16:06:00.953239: W tensorflow/core/platform/cloud/google_auth_provider.cc:178] All attempts to get a Google authentication bearer token failed, returning an empty token. Retrieving token from files failed with \"Not found: Could not locate the credentials file.\". Retrieving token from GCE failed with \"Cancelled: GCE check skipped due to presence of $NO_GCE_CHECK environment variable.\".\n", + "2019-03-22 16:06:01.086594: W tensorflow/core/platform/cloud/google_auth_provider.cc:178] All attempts to get a Google authentication bearer token failed, returning an empty token. Retrieving token from files failed with \"Not found: Could not locate the credentials file.\". Retrieving token from GCE failed with \"Cancelled: GCE check skipped due to presence of $NO_GCE_CHECK environment variable.\".\n", + "2019-03-22 16:06:01.259521: W tensorflow/core/platform/cloud/google_auth_provider.cc:178] All attempts to get a Google authentication bearer token failed, returning an empty token. Retrieving token from files failed with \"Not found: Could not locate the credentials file.\". 
Retrieving token from GCE failed with \"Cancelled: GCE check skipped due to presence of $NO_GCE_CHECK environment variable.\".\n", + "2019-03-22 16:06:01.322896: W tensorflow/core/platform/cloud/google_auth_provider.cc:178] All attempts to get a Google authentication bearer token failed, returning an empty token. Retrieving token from files failed with \"Not found: Could not locate the credentials file.\". Retrieving token from GCE failed with \"Cancelled: GCE check skipped due to presence of $NO_GCE_CHECK environment variable.\".\n", + "2019-03-22 16:06:03.034751: I tensorflow/stream_executor/dso_loader.cc:152] successfully opened CUDA library libcublas.so.10.0 locally\n", + "INFO:tensorflow:Step 5, mean_score: 0.000000\n", + "INFO:tensorflow:Step 10, mean_score: 0.000000\n", + "INFO:tensorflow:Step 15, mean_score: 0.000000\n", + "INFO:tensorflow:Step 20, mean_score: 0.000000\n", + "INFO:tensorflow:Step 25, mean_score: 0.000000\n", + "INFO:tensorflow:Step 30, mean_score: 0.000000\n", + "INFO:tensorflow:Step 35, mean_score: 0.000000\n", + "INFO:tensorflow:Step 40, mean_score: 0.000000\n", + "INFO:tensorflow:Step 45, mean_score: 0.000000\n", + "INFO:tensorflow:Step 50, mean_score: 0.000000\n", + "INFO:tensorflow:Step 55, mean_score: 0.000000\n", + "INFO:tensorflow:Step 60, mean_score: -0.015625\n", + "INFO:tensorflow:Step 65, mean_score: -0.078125\n", + "INFO:tensorflow:Step 70, mean_score: -0.078125\n", + "INFO:tensorflow:Step 75, mean_score: -0.078125\n", + "INFO:tensorflow:Step 80, mean_score: -0.078125\n", + "INFO:tensorflow:Step 85, mean_score: -0.078125\n", + "INFO:tensorflow:Step 90, mean_score: 0.484375\n", + "INFO:tensorflow:Step 95, mean_score: 0.843750\n", + "INFO:tensorflow:Step 100, mean_score: 0.828125\n", + "INFO:tensorflow:Step 105, mean_score: 0.828125\n", + "INFO:tensorflow:Step 110, mean_score: 0.828125\n", + "INFO:tensorflow:Step 115, mean_score: 0.828125\n", + "INFO:tensorflow:Step 120, mean_score: 0.828125\n", + "INFO:tensorflow:Step 125, mean_score: 0.828125\n", + "INFO:tensorflow:Step 130, mean_score: 0.812500\n", + "INFO:tensorflow:Step 135, mean_score: 0.812500\n", + "INFO:tensorflow:Step 140, mean_score: 0.812500\n", + "INFO:tensorflow:Step 145, mean_score: 0.812500\n", + "INFO:tensorflow:Step 150, mean_score: 0.812500\n", + "INFO:tensorflow:Step 155, mean_score: 0.812500\n", + "INFO:tensorflow:Step 160, mean_score: 0.812500\n", + "INFO:tensorflow:Step 165, mean_score: 0.812500\n", + "INFO:tensorflow:Step 170, mean_score: 0.828125\n", + "INFO:tensorflow:Step 175, mean_score: 0.843750\n", + "INFO:tensorflow:Step 180, mean_score: 0.843750\n", + "INFO:tensorflow:Step 185, mean_score: 0.843750\n", + "INFO:tensorflow:Step 190, mean_score: 1.140625\n", + "INFO:tensorflow:Step 195, mean_score: 1.765625\n", + "INFO:tensorflow:Step 200, mean_score: 1.765625\n", + "INFO:tensorflow:Step 205, mean_score: 1.765625\n", + "INFO:tensorflow:Step 210, mean_score: 1.781250\n", + "INFO:tensorflow:Step 215, mean_score: 1.781250\n", + "INFO:tensorflow:Step 220, mean_score: 1.765625\n", + "INFO:tensorflow:Step 225, mean_score: 1.765625\n", + "INFO:tensorflow:Step 230, mean_score: 1.765625\n", + "INFO:tensorflow:Step 235, mean_score: 1.765625\n", + "INFO:tensorflow:Step 240, mean_score: 1.765625\n", + "INFO:tensorflow:Step 245, mean_score: 1.765625\n", + "INFO:tensorflow:Step 250, mean_score: 1.765625\n", + "INFO:tensorflow:Step 255, mean_score: 1.750000\n", + "INFO:tensorflow:Step 260, mean_score: 1.750000\n", + "INFO:tensorflow:Step 265, mean_score: 1.750000\n", + "INFO:tensorflow:Step 
270, mean_score: 2.312500\n", + "INFO:tensorflow:Step 275, mean_score: 2.687500\n", + "INFO:tensorflow:Step 280, mean_score: 2.703125\n", + "INFO:tensorflow:Step 285, mean_score: 2.703125\n", + "INFO:tensorflow:Step 290, mean_score: 2.703125\n", + "INFO:tensorflow:Step 295, mean_score: 2.703125\n", + "INFO:tensorflow:Step 300, mean_score: 2.703125\n", + "INFO:tensorflow:Step 305, mean_score: 2.703125\n", + "INFO:tensorflow:Step 310, mean_score: 2.718750\n", + "INFO:tensorflow:Step 315, mean_score: 2.718750\n", + "INFO:tensorflow:Step 320, mean_score: 2.718750\n", + "INFO:tensorflow:Step 325, mean_score: 2.718750\n", + "INFO:tensorflow:Step 330, mean_score: 2.734375\n", + "INFO:tensorflow:Step 335, mean_score: 2.734375\n", + "INFO:tensorflow:Step 340, mean_score: 2.734375\n", + "INFO:tensorflow:Step 345, mean_score: 2.734375\n", + "INFO:tensorflow:Step 350, mean_score: 2.750000\n", + "INFO:tensorflow:Step 355, mean_score: 2.765625\n", + "INFO:tensorflow:Step 360, mean_score: 2.765625\n", + "INFO:tensorflow:Step 365, mean_score: 2.765625\n", + "INFO:tensorflow:Step 370, mean_score: 3.062500\n", + "INFO:tensorflow:Step 375, mean_score: 3.687500\n", + "INFO:tensorflow:Step 380, mean_score: 3.687500\n", + "INFO:tensorflow:Step 385, mean_score: 3.687500\n", + "INFO:tensorflow:Step 390, mean_score: 3.703125\n", + "INFO:tensorflow:Step 395, mean_score: 3.703125\n", + "INFO:tensorflow:Step 400, mean_score: 3.703125\n", + "INFO:tensorflow:Step 405, mean_score: 3.703125\n", + "INFO:tensorflow:Step 410, mean_score: 3.687500\n", + "INFO:tensorflow:Step 415, mean_score: 3.687500\n", + "INFO:tensorflow:Step 420, mean_score: 3.687500\n", + "INFO:tensorflow:Step 425, mean_score: 3.687500\n", + "INFO:tensorflow:Step 430, mean_score: 3.703125\n", + "INFO:tensorflow:Step 435, mean_score: 3.703125\n", + "INFO:tensorflow:Step 440, mean_score: 3.703125\n", + "INFO:tensorflow:Step 445, mean_score: 3.703125\n", + "INFO:tensorflow:Step 450, mean_score: 4.265625\n", + "INFO:tensorflow:Step 455, mean_score: 4.640625\n", + "INFO:tensorflow:Step 460, mean_score: 4.656250\n", + "INFO:tensorflow:Step 465, mean_score: 4.656250\n", + "INFO:tensorflow:Step 470, mean_score: 4.656250\n", + "INFO:tensorflow:Step 475, mean_score: 4.656250\n", + "INFO:tensorflow:Step 480, mean_score: 4.656250\n", + "INFO:tensorflow:Step 485, mean_score: 4.656250\n", + "INFO:tensorflow:Step 490, mean_score: 4.671875\n", + "INFO:tensorflow:Step 495, mean_score: 4.671875\n", + "INFO:tensorflow:Step 500, mean_score: 4.671875\n", + "INFO:tensorflow:Step 505, mean_score: 4.671875\n", + "INFO:tensorflow:Step 510, mean_score: 4.687500\n", + "INFO:tensorflow:Step 515, mean_score: 4.687500\n", + "INFO:tensorflow:Step 520, mean_score: 4.703125\n", + "INFO:tensorflow:Step 525, mean_score: 4.703125\n", + "INFO:tensorflow:Step 530, mean_score: 4.718750\n", + "INFO:tensorflow:Step 535, mean_score: 4.734375\n", + "INFO:tensorflow:Step 540, mean_score: 4.734375\n", + "INFO:tensorflow:Step 545, mean_score: 4.734375\n", + "INFO:tensorflow:Step 550, mean_score: 5.031250\n", + "INFO:tensorflow:Step 555, mean_score: 5.656250\n", + "INFO:tensorflow:Step 560, mean_score: 5.656250\n", + "INFO:tensorflow:Step 565, mean_score: 5.656250\n", + "INFO:tensorflow:Step 570, mean_score: 5.671875\n", + "INFO:tensorflow:Step 575, mean_score: 5.671875\n", + "INFO:tensorflow:Step 580, mean_score: 5.671875\n", + "INFO:tensorflow:Step 585, mean_score: 5.671875\n", + "INFO:tensorflow:Step 590, mean_score: 5.671875\n", + "INFO:tensorflow:Step 595, mean_score: 5.671875\n", + 
"INFO:tensorflow:Step 600, mean_score: 5.671875\n", + "INFO:tensorflow:Step 605, mean_score: 5.671875\n", + "INFO:tensorflow:Step 610, mean_score: 5.687500\n", + "INFO:tensorflow:Step 615, mean_score: 5.687500\n", + "INFO:tensorflow:Step 620, mean_score: 5.703125\n", + "INFO:tensorflow:Step 625, mean_score: 5.703125\n", + "INFO:tensorflow:Step 630, mean_score: 6.265625\n", + "INFO:tensorflow:Step 635, mean_score: 6.640625\n", + "INFO:tensorflow:Step 640, mean_score: 6.656250\n", + "INFO:tensorflow:Step 645, mean_score: 6.656250\n", + "INFO:tensorflow:Step 650, mean_score: 6.656250\n", + "INFO:tensorflow:Step 655, mean_score: 6.656250\n", + "INFO:tensorflow:Step 660, mean_score: 6.656250\n", + "INFO:tensorflow:Step 665, mean_score: 6.656250\n", + "INFO:tensorflow:Step 670, mean_score: 6.671875\n", + "INFO:tensorflow:Step 675, mean_score: 6.671875\n", + "INFO:tensorflow:Step 680, mean_score: 6.671875\n", + "INFO:tensorflow:Step 685, mean_score: 6.671875\n", + "INFO:tensorflow:Step 690, mean_score: 6.687500\n", + "INFO:tensorflow:Step 695, mean_score: 6.687500\n", + "INFO:tensorflow:Step 700, mean_score: 6.703125\n", + "INFO:tensorflow:Step 705, mean_score: 6.703125\n", + "INFO:tensorflow:Step 710, mean_score: 6.718750\n", + "INFO:tensorflow:Step 715, mean_score: 6.734375\n", + "INFO:tensorflow:Step 720, mean_score: 6.734375\n", + "INFO:tensorflow:Step 725, mean_score: 6.734375\n", + "INFO:tensorflow:Step 730, mean_score: 7.031250\n", + "INFO:tensorflow:Step 735, mean_score: 7.656250\n", + "INFO:tensorflow:Step 740, mean_score: 7.656250\n", + "INFO:tensorflow:Step 745, mean_score: 7.656250\n", + "INFO:tensorflow:Step 750, mean_score: 7.671875\n", + "INFO:tensorflow:Step 755, mean_score: 7.671875\n", + "INFO:tensorflow:Step 760, mean_score: 7.671875\n", + "INFO:tensorflow:Step 765, mean_score: 7.671875\n", + "INFO:tensorflow:Step 770, mean_score: 7.671875\n", + "INFO:tensorflow:Step 775, mean_score: 7.671875\n", + "INFO:tensorflow:Step 780, mean_score: 7.671875\n", + "INFO:tensorflow:Step 785, mean_score: 7.671875\n", + "INFO:tensorflow:Step 790, mean_score: 7.687500\n", + "INFO:tensorflow:Step 795, mean_score: 7.687500\n", + "INFO:tensorflow:Step 800, mean_score: 7.703125\n", + "INFO:tensorflow:Step 805, mean_score: 7.703125\n", + "INFO:tensorflow:Step 810, mean_score: 8.265625\n", + "INFO:tensorflow:Step 815, mean_score: 8.640625\n", + "INFO:tensorflow:Step 820, mean_score: 8.656250\n", + "INFO:tensorflow:Step 825, mean_score: 8.656250\n", + "INFO:tensorflow:Step 830, mean_score: 8.656250\n", + "INFO:tensorflow:Step 835, mean_score: 8.656250\n", + "INFO:tensorflow:Step 840, mean_score: 8.656250\n", + "INFO:tensorflow:Step 845, mean_score: 8.656250\n", + "INFO:tensorflow:Step 850, mean_score: 8.671875\n", + "INFO:tensorflow:Step 855, mean_score: 8.671875\n", + "INFO:tensorflow:Step 860, mean_score: 8.671875\n", + "INFO:tensorflow:Step 865, mean_score: 8.671875\n", + "INFO:tensorflow:Step 870, mean_score: 8.687500\n", + "INFO:tensorflow:Step 875, mean_score: 8.687500\n", + "INFO:tensorflow:Step 880, mean_score: 8.703125\n", + "INFO:tensorflow:Step 885, mean_score: 8.703125\n", + "INFO:tensorflow:Step 890, mean_score: 8.718750\n", + "INFO:tensorflow:Step 895, mean_score: 8.734375\n", + "INFO:tensorflow:Step 900, mean_score: 8.734375\n", + "INFO:tensorflow:Step 905, mean_score: 8.734375\n", + "INFO:tensorflow:Step 910, mean_score: 9.031250\n", + "INFO:tensorflow:Step 915, mean_score: 9.656250\n", + "INFO:tensorflow:Step 920, mean_score: 9.656250\n", + "INFO:tensorflow:Step 925, mean_score: 
9.656250\n", + "INFO:tensorflow:Step 930, mean_score: 9.671875\n", + "INFO:tensorflow:Step 935, mean_score: 9.671875\n", + "INFO:tensorflow:Step 940, mean_score: 9.671875\n", + "INFO:tensorflow:Step 945, mean_score: 9.671875\n", + "INFO:tensorflow:Step 950, mean_score: 9.671875\n", + "INFO:tensorflow:Step 955, mean_score: 9.671875\n", + "INFO:tensorflow:Step 960, mean_score: 9.671875\n", + "INFO:tensorflow:Step 965, mean_score: 9.671875\n", + "INFO:tensorflow:Step 970, mean_score: 9.687500\n", + "INFO:tensorflow:Step 975, mean_score: 9.687500\n", + "INFO:tensorflow:Step 980, mean_score: 9.703125\n", + "INFO:tensorflow:Step 985, mean_score: 9.703125\n", + "INFO:tensorflow:Step 990, mean_score: 10.265625\n", + "INFO:tensorflow:Step 995, mean_score: 10.640625\n", + "INFO:tensorflow:Step 1000, mean_score: 10.656250\n", + "INFO:tensorflow:Step 1005, mean_score: 10.656250\n", + "INFO:tensorflow:Step 1010, mean_score: 10.656250\n", + "INFO:tensorflow:Step 1015, mean_score: 10.656250\n", + "INFO:tensorflow:Step 1020, mean_score: 10.656250\n", + "INFO:tensorflow:Step 1025, mean_score: 10.656250\n", + "INFO:tensorflow:Step 1030, mean_score: 10.671875\n", + "INFO:tensorflow:Step 1035, mean_score: 10.671875\n", + "INFO:tensorflow:Step 1040, mean_score: 10.671875\n", + "INFO:tensorflow:Step 1045, mean_score: 10.671875\n", + "INFO:tensorflow:Step 1050, mean_score: 10.687500\n", + "INFO:tensorflow:Step 1055, mean_score: 10.687500\n", + "INFO:tensorflow:Step 1060, mean_score: 10.703125\n", + "INFO:tensorflow:Step 1065, mean_score: 10.703125\n", + "INFO:tensorflow:Step 1070, mean_score: 10.718750\n", + "INFO:tensorflow:Step 1075, mean_score: 10.734375\n", + "INFO:tensorflow:Step 1080, mean_score: 10.734375\n", + "INFO:tensorflow:Step 1085, mean_score: 10.734375\n", + "INFO:tensorflow:Step 1090, mean_score: 11.031250\n", + "INFO:tensorflow:Step 1095, mean_score: 11.656250\n", + "INFO:tensorflow:Step 1100, mean_score: 11.656250\n", + "INFO:tensorflow:Step 1105, mean_score: 11.656250\n", + "INFO:tensorflow:Step 1110, mean_score: 11.671875\n", + "INFO:tensorflow:Step 1115, mean_score: 11.671875\n", + "INFO:tensorflow:Step 1120, mean_score: 11.671875\n", + "INFO:tensorflow:Step 1125, mean_score: 11.671875\n", + "INFO:tensorflow:Step 1130, mean_score: 11.671875\n", + "INFO:tensorflow:Step 1135, mean_score: 11.671875\n", + "INFO:tensorflow:Step 1140, mean_score: 11.671875\n", + "INFO:tensorflow:Step 1145, mean_score: 11.671875\n", + "INFO:tensorflow:Step 1150, mean_score: 11.687500\n", + "INFO:tensorflow:Step 1155, mean_score: 11.687500\n", + "INFO:tensorflow:Step 1160, mean_score: 11.703125\n", + "INFO:tensorflow:Step 1165, mean_score: 11.703125\n", + "INFO:tensorflow:Step 1170, mean_score: 12.265625\n", + "INFO:tensorflow:Step 1175, mean_score: 12.640625\n", + "INFO:tensorflow:Step 1180, mean_score: 12.656250\n", + "INFO:tensorflow:Step 1185, mean_score: 12.656250\n", + "INFO:tensorflow:Step 1190, mean_score: 12.656250\n", + "INFO:tensorflow:Step 1195, mean_score: 12.656250\n", + "INFO:tensorflow:Step 1200, mean_score: 12.656250\n", + "INFO:tensorflow:Step 1205, mean_score: 12.656250\n", + "INFO:tensorflow:Step 1210, mean_score: 12.671875\n", + "INFO:tensorflow:Step 1215, mean_score: 12.671875\n", + "INFO:tensorflow:Step 1220, mean_score: 12.671875\n", + "INFO:tensorflow:Step 1225, mean_score: 12.671875\n", + "INFO:tensorflow:Step 1230, mean_score: 12.687500\n", + "INFO:tensorflow:Step 1235, mean_score: 12.687500\n", + "INFO:tensorflow:Step 1240, mean_score: 12.703125\n", + "INFO:tensorflow:Step 1245, 
mean_score: 12.703125\n", + "INFO:tensorflow:Step 1250, mean_score: 12.718750\n", + "INFO:tensorflow:Step 1255, mean_score: 12.734375\n", + "INFO:tensorflow:Step 1260, mean_score: 12.734375\n", + "INFO:tensorflow:Step 1265, mean_score: 12.734375\n", + "INFO:tensorflow:Step 1270, mean_score: 13.031250\n", + "INFO:tensorflow:Step 1275, mean_score: 13.656250\n", + "INFO:tensorflow:Step 1280, mean_score: 13.656250\n", + "INFO:tensorflow:Step 1285, mean_score: 13.656250\n", + "INFO:tensorflow:Step 1290, mean_score: 13.671875\n", + "INFO:tensorflow:Step 1295, mean_score: 13.671875\n", + "INFO:tensorflow:Step 1300, mean_score: 13.671875\n", + "INFO:tensorflow:Step 1305, mean_score: 13.671875\n", + "INFO:tensorflow:Step 1310, mean_score: 13.671875\n", + "INFO:tensorflow:Step 1315, mean_score: 13.671875\n", + "INFO:tensorflow:Step 1320, mean_score: 13.671875\n", + "INFO:tensorflow:Step 1325, mean_score: 13.671875\n", + "INFO:tensorflow:Step 1330, mean_score: 13.687500\n", + "INFO:tensorflow:Step 1335, mean_score: 13.687500\n", + "INFO:tensorflow:Step 1340, mean_score: 13.703125\n", + "INFO:tensorflow:Step 1345, mean_score: 13.703125\n", + "INFO:tensorflow:Step 1350, mean_score: 14.265625\n", + "INFO:tensorflow:Step 1355, mean_score: 14.640625\n", + "INFO:tensorflow:Step 1360, mean_score: 14.656250\n", + "INFO:tensorflow:Step 1365, mean_score: 14.656250\n", + "INFO:tensorflow:Step 1370, mean_score: 14.656250\n", + "INFO:tensorflow:Step 1375, mean_score: 14.656250\n", + "INFO:tensorflow:Step 1380, mean_score: 14.656250\n", + "INFO:tensorflow:Step 1385, mean_score: 14.656250\n", + "INFO:tensorflow:Step 1390, mean_score: 14.671875\n", + "INFO:tensorflow:Step 1395, mean_score: 14.671875\n", + "INFO:tensorflow:Step 1400, mean_score: 14.671875\n", + "INFO:tensorflow:Step 1405, mean_score: 14.671875\n", + "INFO:tensorflow:Step 1410, mean_score: 14.687500\n", + "INFO:tensorflow:Step 1415, mean_score: 14.687500\n", + "INFO:tensorflow:Step 1420, mean_score: 14.703125\n", + "INFO:tensorflow:Step 1425, mean_score: 14.703125\n", + "INFO:tensorflow:Step 1430, mean_score: 14.718750\n", + "INFO:tensorflow:Step 1435, mean_score: 14.734375\n", + "INFO:tensorflow:Step 1440, mean_score: 14.734375\n", + "INFO:tensorflow:Step 1445, mean_score: 14.734375\n", + "INFO:tensorflow:Step 1450, mean_score: 15.031250\n", + "INFO:tensorflow:Step 1455, mean_score: 15.656250\n", + "INFO:tensorflow:Step 1460, mean_score: 15.656250\n", + "INFO:tensorflow:Step 1465, mean_score: 15.656250\n", + "INFO:tensorflow:Step 1470, mean_score: 15.671875\n", + "INFO:tensorflow:Step 1475, mean_score: 15.671875\n", + "INFO:tensorflow:Step 1480, mean_score: 15.671875\n", + "INFO:tensorflow:Step 1485, mean_score: 15.671875\n", + "INFO:tensorflow:Step 1490, mean_score: 15.671875\n", + "INFO:tensorflow:Step 1495, mean_score: 15.671875\n", + "INFO:tensorflow:Step 1500, mean_score: 15.671875\n", + "INFO:tensorflow:Step 1505, mean_score: 15.671875\n", + "INFO:tensorflow:Step 1510, mean_score: 15.687500\n", + "INFO:tensorflow:Step 1515, mean_score: 15.687500\n", + "INFO:tensorflow:Step 1520, mean_score: 15.703125\n", + "INFO:tensorflow:Step 1525, mean_score: 15.703125\n", + "INFO:tensorflow:Step 1530, mean_score: 16.265625\n", + "INFO:tensorflow:Step 1535, mean_score: 16.640625\n", + "INFO:tensorflow:Step 1540, mean_score: 16.656250\n", + "INFO:tensorflow:Step 1545, mean_score: 16.656250\n", + "INFO:tensorflow:Step 1550, mean_score: 16.656250\n", + "INFO:tensorflow:Step 1555, mean_score: 16.656250\n", + "INFO:tensorflow:Step 1560, mean_score: 16.656250\n", 
+ "INFO:tensorflow:Step 1565, mean_score: 16.656250\n", + "INFO:tensorflow:Step 1570, mean_score: 16.671875\n", + "INFO:tensorflow:Step 1575, mean_score: 16.671875\n", + "INFO:tensorflow:Step 1580, mean_score: 16.671875\n", + "INFO:tensorflow:Step 1585, mean_score: 16.671875\n", + "INFO:tensorflow:Step 1590, mean_score: 16.687500\n", + "INFO:tensorflow:Step 1595, mean_score: 16.687500\n", + "INFO:tensorflow:Step 1600, mean_score: 16.703125\n", + "INFO:tensorflow:Step 1605, mean_score: 16.703125\n", + "INFO:tensorflow:Step 1610, mean_score: 16.718750\n", + "INFO:tensorflow:Step 1615, mean_score: 16.734375\n", + "INFO:tensorflow:Step 1620, mean_score: 16.734375\n", + "INFO:tensorflow:Step 1625, mean_score: 16.734375\n", + "INFO:tensorflow:Step 1630, mean_score: 17.031250\n", + "INFO:tensorflow:Step 1635, mean_score: 17.656250\n", + "INFO:tensorflow:Step 1640, mean_score: 17.656250\n", + "INFO:tensorflow:Step 1645, mean_score: 17.656250\n", + "INFO:tensorflow:Step 1650, mean_score: 17.671875\n", + "INFO:tensorflow:Step 1655, mean_score: 17.671875\n", + "INFO:tensorflow:Step 1660, mean_score: 17.671875\n", + "INFO:tensorflow:Step 1665, mean_score: 17.671875\n", + "INFO:tensorflow:Step 1670, mean_score: 17.671875\n", + "INFO:tensorflow:Step 1675, mean_score: 17.671875\n", + "INFO:tensorflow:Step 1680, mean_score: 17.671875\n", + "INFO:tensorflow:Step 1685, mean_score: 17.671875\n", + "INFO:tensorflow:Step 1690, mean_score: 17.687500\n", + "INFO:tensorflow:Step 1695, mean_score: 17.687500\n", + "INFO:tensorflow:Step 1700, mean_score: 17.703125\n", + "INFO:tensorflow:Step 1705, mean_score: 17.703125\n", + "INFO:tensorflow:Step 1710, mean_score: 18.265625\n", + "INFO:tensorflow:Step 1715, mean_score: 18.640625\n", + "INFO:tensorflow:Step 1720, mean_score: 18.656250\n", + "INFO:tensorflow:Step 1725, mean_score: 18.656250\n", + "INFO:tensorflow:Step 1730, mean_score: 18.656250\n", + "INFO:tensorflow:Step 1735, mean_score: 18.656250\n", + "INFO:tensorflow:Step 1740, mean_score: 18.656250\n", + "INFO:tensorflow:Step 1745, mean_score: 18.656250\n", + "INFO:tensorflow:Step 1750, mean_score: 18.671875\n", + "INFO:tensorflow:Step 1755, mean_score: 18.671875\n", + "INFO:tensorflow:Step 1760, mean_score: 18.671875\n", + "INFO:tensorflow:Step 1765, mean_score: 18.671875\n", + "INFO:tensorflow:Step 1770, mean_score: 18.687500\n", + "INFO:tensorflow:Step 1775, mean_score: 18.687500\n", + "INFO:tensorflow:Step 1780, mean_score: 18.703125\n", + "INFO:tensorflow:Step 1785, mean_score: 18.703125\n", + "INFO:tensorflow:Step 1790, mean_score: 18.718750\n", + "INFO:tensorflow:Step 1795, mean_score: 18.734375\n", + "INFO:tensorflow:Step 1800, mean_score: 18.734375\n", + "INFO:tensorflow:Step 1805, mean_score: 18.734375\n", + "INFO:tensorflow:Step 1810, mean_score: 19.031250\n", + "INFO:tensorflow:Step 1815, mean_score: 19.656250\n", + "INFO:tensorflow:Step 1820, mean_score: 19.656250\n", + "INFO:tensorflow:Step 1825, mean_score: 19.656250\n", + "INFO:tensorflow:Step 1830, mean_score: 19.671875\n", + "INFO:tensorflow:Step 1835, mean_score: 19.671875\n", + "INFO:tensorflow:Step 1840, mean_score: 19.671875\n", + "INFO:tensorflow:Step 1845, mean_score: 19.671875\n", + "INFO:tensorflow:Step 1850, mean_score: 19.671875\n", + "INFO:tensorflow:Step 1855, mean_score: 19.671875\n", + "INFO:tensorflow:Step 1860, mean_score: 19.671875\n", + "INFO:tensorflow:Step 1865, mean_score: 19.671875\n", + "INFO:tensorflow:Step 1870, mean_score: 19.687500\n", + "INFO:tensorflow:Step 1875, mean_score: 19.687500\n", + "INFO:tensorflow:Step 
1880, mean_score: 19.703125\n", + "INFO:tensorflow:Step 1885, mean_score: 19.703125\n", + "INFO:tensorflow:Step 1890, mean_score: 19.703125\n", + "INFO:tensorflow:Step 1895, mean_score: 19.718750\n", + "INFO:tensorflow:Step 1900, mean_score: 19.734375\n", + "INFO:tensorflow:Step 1905, mean_score: 19.734375\n", + "INFO:tensorflow:Step 1910, mean_score: 19.734375\n", + "INFO:tensorflow:Step 1915, mean_score: 19.734375\n", + "INFO:tensorflow:Step 1920, mean_score: 19.734375\n", + "INFO:tensorflow:Step 1925, mean_score: 19.734375\n", + "INFO:tensorflow:Step 1930, mean_score: 19.750000\n", + "INFO:tensorflow:Step 1935, mean_score: 19.750000\n", + "INFO:tensorflow:Step 1940, mean_score: 19.750000\n", + "INFO:tensorflow:Step 1945, mean_score: 19.750000\n", + "INFO:tensorflow:Step 1950, mean_score: 19.765625\n", + "INFO:tensorflow:Step 1955, mean_score: 19.765625\n", + "INFO:tensorflow:Step 1960, mean_score: 19.781250\n", + "INFO:tensorflow:Step 1965, mean_score: 19.781250\n", + "INFO:tensorflow:Step 1970, mean_score: 19.781250\n", + "INFO:tensorflow:Step 1975, mean_score: 19.781250\n", + "INFO:tensorflow:Step 1980, mean_score: 19.781250\n", + "INFO:tensorflow:Step 1985, mean_score: 19.781250\n", + "INFO:tensorflow:Step 1990, mean_score: 19.781250\n", + "INFO:tensorflow:Step 1995, mean_score: 19.781250\n", + "INFO:tensorflow:Step 2000, mean_score: 19.781250\n", + "INFO:tensorflow:Step 2005, mean_score: 19.781250\n", + "INFO:tensorflow:Step 2010, mean_score: 19.781250\n", + "INFO:tensorflow:Step 2015, mean_score: 19.781250\n", + "INFO:tensorflow:Step 2020, mean_score: 19.781250\n", + "INFO:tensorflow:Step 2025, mean_score: 19.781250\n", + "INFO:tensorflow:Step 2030, mean_score: 19.781250\n", + "INFO:tensorflow:Step 2035, mean_score: 19.781250\n", + "INFO:tensorflow:Step 2040, mean_score: 19.781250\n", + "INFO:tensorflow:Step 2045, mean_score: 19.781250\n", + "INFO:tensorflow:Step 2050, mean_score: 19.796875\n", + "INFO:tensorflow:Step 2055, mean_score: 19.796875\n", + "INFO:tensorflow:Step 2060, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2065, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2070, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2075, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2080, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2085, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2090, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2095, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2100, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2105, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2110, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2115, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2120, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2125, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2130, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2135, mean_score: 19.812500\n", + "INFO:tensorflow:Step 2140, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2145, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2150, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2155, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2160, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2165, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2170, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2175, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2180, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2185, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2190, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2195, mean_score: 
19.828125\n", + "INFO:tensorflow:Step 2200, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2205, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2210, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2215, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2220, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2225, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2230, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2235, mean_score: 19.828125\n", + "INFO:tensorflow:Step 2240, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2245, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2250, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2255, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2260, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2265, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2270, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2275, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2280, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2285, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2290, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2295, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2300, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2305, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2310, mean_score: 19.843750\n", + "INFO:tensorflow:Step 2315, mean_score: 19.843750\n", + "INFO:tensorflow:Evaluating metric mean_reward/eval/sampling_temp_0.5_max_noops_0_unclipped\n", + "2019-03-22 16:12:57.935045: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0\n", + "2019-03-22 16:12:57.935160: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:\n", + "2019-03-22 16:12:57.935189: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0 \n", + "2019-03-22 16:12:57.935209: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0: N \n", + "2019-03-22 16:12:57.935553: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 10754 MB memory) -> physical GPU (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7)\n", + "INFO:tensorflow:Using DummyPolicyProblem for the policy.\n", + "INFO:tensorflow:Setting T2TModel mode to 'train'\n", + "INFO:tensorflow:Using variable initializer: orthogonal\n", + "INFO:tensorflow:Transforming feature 'input_action' with symbol_modality_6_64.bottom\n", + "INFO:tensorflow:Transforming feature 'input_reward' with symbol_modality_3_64.bottom\n", + "INFO:tensorflow:Transforming feature 'inputs' with video_modality.bottom\n", + "INFO:tensorflow:Transforming feature 'target_action' with symbol_modality_6_64.targets_bottom\n", + "INFO:tensorflow:Transforming feature 'target_policy' with identity_modality.targets_bottom\n", + "INFO:tensorflow:Transforming feature 'target_reward' with symbol_modality_3_64.targets_bottom\n", + "INFO:tensorflow:Transforming feature 'target_value' with identity_modality.targets_bottom\n", + "INFO:tensorflow:Transforming feature 'targets' with video_modality.targets_bottom\n", + "INFO:tensorflow:Building model body\n", + "INFO:tensorflow:Transforming body output with identity_modality.top\n", + "INFO:tensorflow:Transforming body output with identity_modality.top\n", + "2019-03-22 16:13:12.260846: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0\n", + "2019-03-22 16:13:12.260981: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device 
interconnect StreamExecutor with strength 1 edge matrix:\n", + "2019-03-22 16:13:12.261059: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0 \n", + "2019-03-22 16:13:12.261099: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0: N \n", + "2019-03-22 16:13:12.261613: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 10754 MB memory) -> physical GPU (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7)\n", + "2019-03-22 16:13:12.493082: W tensorflow/core/platform/cloud/google_auth_provider.cc:178] All attempts to get a Google authentication bearer token failed, returning an empty token. Retrieving token from files failed with \"Not found: Could not locate the credentials file.\". Retrieving token from GCE failed with \"Cancelled: GCE check skipped due to presence of $NO_GCE_CHECK environment variable.\".\n", + "INFO:tensorflow:Restoring checkpoint gs://tensor2tensor-checkpoints/modelrl_experiments/train_sd/142/policy/model.ckpt-171992\n", + "2019-03-22 16:13:12.556955: W tensorflow/core/platform/cloud/google_auth_provider.cc:178] All attempts to get a Google authentication bearer token failed, returning an empty token. Retrieving token from files failed with \"Not found: Could not locate the credentials file.\". Retrieving token from GCE failed with \"Cancelled: GCE check skipped due to presence of $NO_GCE_CHECK environment variable.\".\n", + "INFO:tensorflow:Restoring parameters from gs://tensor2tensor-checkpoints/modelrl_experiments/train_sd/142/policy/model.ckpt-171992\n", + "2019-03-22 16:13:12.651009: W tensorflow/core/platform/cloud/google_auth_provider.cc:178] All attempts to get a Google authentication bearer token failed, returning an empty token. Retrieving token from files failed with \"Not found: Could not locate the credentials file.\". Retrieving token from GCE failed with \"Cancelled: GCE check skipped due to presence of $NO_GCE_CHECK environment variable.\".\n", + "2019-03-22 16:13:12.715180: W tensorflow/core/platform/cloud/google_auth_provider.cc:178] All attempts to get a Google authentication bearer token failed, returning an empty token. Retrieving token from files failed with \"Not found: Could not locate the credentials file.\". Retrieving token from GCE failed with \"Cancelled: GCE check skipped due to presence of $NO_GCE_CHECK environment variable.\".\n", + "2019-03-22 16:13:12.816774: W tensorflow/core/platform/cloud/google_auth_provider.cc:178] All attempts to get a Google authentication bearer token failed, returning an empty token. Retrieving token from files failed with \"Not found: Could not locate the credentials file.\". 
Retrieving token from GCE failed with \"Cancelled: GCE check skipped due to presence of $NO_GCE_CHECK environment variable.\".\n", + "INFO:tensorflow:Step 5, mean_score: 0.000000\n", + "INFO:tensorflow:Step 10, mean_score: 0.000000\n", + "INFO:tensorflow:Step 15, mean_score: 0.000000\n", + "INFO:tensorflow:Step 20, mean_score: 0.000000\n", + "INFO:tensorflow:Step 25, mean_score: 0.000000\n", + "INFO:tensorflow:Step 30, mean_score: 0.000000\n", + "INFO:tensorflow:Step 35, mean_score: 0.000000\n", + "INFO:tensorflow:Step 40, mean_score: 0.000000\n", + "INFO:tensorflow:Step 45, mean_score: 0.000000\n", + "INFO:tensorflow:Step 50, mean_score: 0.000000\n", + "INFO:tensorflow:Step 55, mean_score: 0.000000\n", + "INFO:tensorflow:Step 60, mean_score: 0.000000\n", + "INFO:tensorflow:Step 65, mean_score: -0.031250\n", + "INFO:tensorflow:Step 70, mean_score: -0.031250\n", + "INFO:tensorflow:Step 75, mean_score: -0.031250\n", + "INFO:tensorflow:Step 80, mean_score: -0.031250\n", + "INFO:tensorflow:Step 85, mean_score: -0.031250\n", + "INFO:tensorflow:Step 90, mean_score: -0.031250\n", + "INFO:tensorflow:Step 95, mean_score: 0.937500\n", + "INFO:tensorflow:Step 100, mean_score: 0.921875\n", + "INFO:tensorflow:Step 105, mean_score: 0.921875\n", + "INFO:tensorflow:Step 110, mean_score: 0.921875\n", + "INFO:tensorflow:Step 115, mean_score: 0.921875\n", + "INFO:tensorflow:Step 120, mean_score: 0.921875\n", + "INFO:tensorflow:Step 125, mean_score: 0.921875\n", + "INFO:tensorflow:Step 130, mean_score: 0.921875\n", + "INFO:tensorflow:Step 135, mean_score: 0.921875\n", + "INFO:tensorflow:Step 140, mean_score: 0.921875\n", + "INFO:tensorflow:Step 145, mean_score: 0.921875\n", + "INFO:tensorflow:Step 150, mean_score: 0.921875\n", + "INFO:tensorflow:Step 155, mean_score: 0.921875\n", + "INFO:tensorflow:Step 160, mean_score: 0.921875\n", + "INFO:tensorflow:Step 165, mean_score: 0.906250\n", + "INFO:tensorflow:Step 170, mean_score: 0.906250\n", + "INFO:tensorflow:Step 175, mean_score: 0.921875\n", + "INFO:tensorflow:Step 180, mean_score: 0.921875\n", + "INFO:tensorflow:Step 185, mean_score: 0.921875\n", + "INFO:tensorflow:Step 190, mean_score: 0.921875\n", + "INFO:tensorflow:Step 195, mean_score: 0.921875\n", + "INFO:tensorflow:Step 200, mean_score: 1.890625\n", + "INFO:tensorflow:Step 205, mean_score: 1.890625\n", + "INFO:tensorflow:Step 210, mean_score: 1.890625\n", + "INFO:tensorflow:Step 215, mean_score: 1.890625\n", + "INFO:tensorflow:Step 220, mean_score: 1.890625\n", + "INFO:tensorflow:Step 225, mean_score: 1.890625\n", + "INFO:tensorflow:Step 230, mean_score: 1.890625\n", + "INFO:tensorflow:Step 235, mean_score: 1.890625\n", + "INFO:tensorflow:Step 240, mean_score: 1.890625\n", + "INFO:tensorflow:Step 245, mean_score: 1.890625\n", + "INFO:tensorflow:Step 250, mean_score: 1.890625\n", + "INFO:tensorflow:Step 255, mean_score: 1.890625\n", + "INFO:tensorflow:Step 260, mean_score: 1.890625\n", + "INFO:tensorflow:Step 265, mean_score: 1.890625\n", + "INFO:tensorflow:Step 270, mean_score: 1.890625\n", + "INFO:tensorflow:Step 275, mean_score: 2.875000\n", + "INFO:tensorflow:Step 280, mean_score: 2.890625\n", + "INFO:tensorflow:Step 285, mean_score: 2.890625\n", + "INFO:tensorflow:Step 290, mean_score: 2.890625\n", + "INFO:tensorflow:Step 295, mean_score: 2.890625\n", + "INFO:tensorflow:Step 300, mean_score: 2.890625\n", + "INFO:tensorflow:Step 305, mean_score: 2.890625\n", + "INFO:tensorflow:Step 310, mean_score: 2.890625\n", + "INFO:tensorflow:Step 315, mean_score: 2.890625\n", + "INFO:tensorflow:Step 320, 
mean_score: 2.890625\n", + "INFO:tensorflow:Step 325, mean_score: 2.890625\n", + "INFO:tensorflow:Step 330, mean_score: 2.890625\n", + "INFO:tensorflow:Step 335, mean_score: 2.890625\n", + "INFO:tensorflow:Step 340, mean_score: 2.890625\n", + "INFO:tensorflow:Step 345, mean_score: 2.890625\n", + "INFO:tensorflow:Step 350, mean_score: 2.890625\n", + "INFO:tensorflow:Step 355, mean_score: 2.906250\n", + "INFO:tensorflow:Step 360, mean_score: 2.906250\n", + "INFO:tensorflow:Step 365, mean_score: 2.906250\n", + "INFO:tensorflow:Step 370, mean_score: 2.906250\n", + "INFO:tensorflow:Step 375, mean_score: 2.921875\n", + "INFO:tensorflow:Step 380, mean_score: 3.890625\n", + "INFO:tensorflow:Step 385, mean_score: 3.890625\n", + "INFO:tensorflow:Step 390, mean_score: 3.890625\n", + "INFO:tensorflow:Step 395, mean_score: 3.890625\n", + "INFO:tensorflow:Step 400, mean_score: 3.890625\n", + "INFO:tensorflow:Step 405, mean_score: 3.890625\n", + "INFO:tensorflow:Step 410, mean_score: 3.890625\n", + "INFO:tensorflow:Step 415, mean_score: 3.890625\n", + "INFO:tensorflow:Step 420, mean_score: 3.890625\n", + "INFO:tensorflow:Step 425, mean_score: 3.890625\n", + "INFO:tensorflow:Step 430, mean_score: 3.890625\n", + "INFO:tensorflow:Step 435, mean_score: 3.890625\n", + "INFO:tensorflow:Step 440, mean_score: 3.890625\n", + "INFO:tensorflow:Step 445, mean_score: 3.890625\n", + "INFO:tensorflow:Step 450, mean_score: 3.890625\n", + "INFO:tensorflow:Step 455, mean_score: 4.875000\n", + "INFO:tensorflow:Step 460, mean_score: 4.890625\n", + "INFO:tensorflow:Step 465, mean_score: 4.890625\n", + "INFO:tensorflow:Step 470, mean_score: 4.890625\n", + "INFO:tensorflow:Step 475, mean_score: 4.890625\n", + "INFO:tensorflow:Step 480, mean_score: 4.890625\n", + "INFO:tensorflow:Step 485, mean_score: 4.890625\n", + "INFO:tensorflow:Step 490, mean_score: 4.890625\n", + "INFO:tensorflow:Step 495, mean_score: 4.890625\n", + "INFO:tensorflow:Step 500, mean_score: 4.890625\n", + "INFO:tensorflow:Step 505, mean_score: 4.890625\n", + "INFO:tensorflow:Step 510, mean_score: 4.890625\n", + "INFO:tensorflow:Step 515, mean_score: 4.890625\n", + "INFO:tensorflow:Step 520, mean_score: 4.890625\n", + "INFO:tensorflow:Step 525, mean_score: 4.890625\n", + "INFO:tensorflow:Step 530, mean_score: 4.890625\n", + "INFO:tensorflow:Step 535, mean_score: 4.906250\n", + "INFO:tensorflow:Step 540, mean_score: 4.906250\n", + "INFO:tensorflow:Step 545, mean_score: 4.906250\n", + "INFO:tensorflow:Step 550, mean_score: 4.906250\n", + "INFO:tensorflow:Step 555, mean_score: 4.921875\n", + "INFO:tensorflow:Step 560, mean_score: 5.890625\n", + "INFO:tensorflow:Step 565, mean_score: 5.890625\n", + "INFO:tensorflow:Step 570, mean_score: 5.890625\n", + "INFO:tensorflow:Step 575, mean_score: 5.890625\n", + "INFO:tensorflow:Step 580, mean_score: 5.890625\n", + "INFO:tensorflow:Step 585, mean_score: 5.890625\n", + "INFO:tensorflow:Step 590, mean_score: 5.890625\n", + "INFO:tensorflow:Step 595, mean_score: 5.890625\n", + "INFO:tensorflow:Step 600, mean_score: 5.890625\n", + "INFO:tensorflow:Step 605, mean_score: 5.890625\n", + "INFO:tensorflow:Step 610, mean_score: 5.890625\n", + "INFO:tensorflow:Step 615, mean_score: 5.890625\n", + "INFO:tensorflow:Step 620, mean_score: 5.890625\n", + "INFO:tensorflow:Step 625, mean_score: 5.890625\n", + "INFO:tensorflow:Step 630, mean_score: 5.890625\n", + "INFO:tensorflow:Step 635, mean_score: 6.875000\n", + "INFO:tensorflow:Step 640, mean_score: 6.890625\n", + "INFO:tensorflow:Step 645, mean_score: 6.890625\n", + 
"INFO:tensorflow:Step 650, mean_score: 6.890625\n", + "INFO:tensorflow:Step 655, mean_score: 6.890625\n", + "INFO:tensorflow:Step 660, mean_score: 6.890625\n", + "INFO:tensorflow:Step 665, mean_score: 6.890625\n", + "INFO:tensorflow:Step 670, mean_score: 6.890625\n", + "INFO:tensorflow:Step 675, mean_score: 6.890625\n", + "INFO:tensorflow:Step 680, mean_score: 6.890625\n", + "INFO:tensorflow:Step 685, mean_score: 6.890625\n", + "INFO:tensorflow:Step 690, mean_score: 6.890625\n", + "INFO:tensorflow:Step 695, mean_score: 6.890625\n", + "INFO:tensorflow:Step 700, mean_score: 6.890625\n", + "INFO:tensorflow:Step 705, mean_score: 6.890625\n", + "INFO:tensorflow:Step 710, mean_score: 6.890625\n", + "INFO:tensorflow:Step 715, mean_score: 6.906250\n", + "INFO:tensorflow:Step 720, mean_score: 6.906250\n", + "INFO:tensorflow:Step 725, mean_score: 6.906250\n", + "INFO:tensorflow:Step 730, mean_score: 6.906250\n", + "INFO:tensorflow:Step 735, mean_score: 6.921875\n", + "INFO:tensorflow:Step 740, mean_score: 7.890625\n", + "INFO:tensorflow:Step 745, mean_score: 7.890625\n", + "INFO:tensorflow:Step 750, mean_score: 7.890625\n", + "INFO:tensorflow:Step 755, mean_score: 7.890625\n", + "INFO:tensorflow:Step 760, mean_score: 7.890625\n", + "INFO:tensorflow:Step 765, mean_score: 7.890625\n", + "INFO:tensorflow:Step 770, mean_score: 7.890625\n", + "INFO:tensorflow:Step 775, mean_score: 7.890625\n", + "INFO:tensorflow:Step 780, mean_score: 7.890625\n", + "INFO:tensorflow:Step 785, mean_score: 7.890625\n", + "INFO:tensorflow:Step 790, mean_score: 7.890625\n", + "INFO:tensorflow:Step 795, mean_score: 7.890625\n", + "INFO:tensorflow:Step 800, mean_score: 7.890625\n", + "INFO:tensorflow:Step 805, mean_score: 7.890625\n", + "INFO:tensorflow:Step 810, mean_score: 7.890625\n", + "INFO:tensorflow:Step 815, mean_score: 8.875000\n", + "INFO:tensorflow:Step 820, mean_score: 8.890625\n", + "INFO:tensorflow:Step 825, mean_score: 8.890625\n", + "INFO:tensorflow:Step 830, mean_score: 8.890625\n", + "INFO:tensorflow:Step 835, mean_score: 8.890625\n", + "INFO:tensorflow:Step 840, mean_score: 8.890625\n", + "INFO:tensorflow:Step 845, mean_score: 8.890625\n", + "INFO:tensorflow:Step 850, mean_score: 8.890625\n", + "INFO:tensorflow:Step 855, mean_score: 8.890625\n", + "INFO:tensorflow:Step 860, mean_score: 8.890625\n", + "INFO:tensorflow:Step 865, mean_score: 8.890625\n", + "INFO:tensorflow:Step 870, mean_score: 8.890625\n", + "INFO:tensorflow:Step 875, mean_score: 8.890625\n", + "INFO:tensorflow:Step 880, mean_score: 8.890625\n", + "INFO:tensorflow:Step 885, mean_score: 8.890625\n", + "INFO:tensorflow:Step 890, mean_score: 8.890625\n", + "INFO:tensorflow:Step 895, mean_score: 8.906250\n", + "INFO:tensorflow:Step 900, mean_score: 8.906250\n", + "INFO:tensorflow:Step 905, mean_score: 8.906250\n", + "INFO:tensorflow:Step 910, mean_score: 8.906250\n", + "INFO:tensorflow:Step 915, mean_score: 8.921875\n", + "INFO:tensorflow:Step 920, mean_score: 9.890625\n", + "INFO:tensorflow:Step 925, mean_score: 9.890625\n", + "INFO:tensorflow:Step 930, mean_score: 9.890625\n", + "INFO:tensorflow:Step 935, mean_score: 9.890625\n", + "INFO:tensorflow:Step 940, mean_score: 9.890625\n", + "INFO:tensorflow:Step 945, mean_score: 9.890625\n", + "INFO:tensorflow:Step 950, mean_score: 9.890625\n", + "INFO:tensorflow:Step 955, mean_score: 9.890625\n", + "INFO:tensorflow:Step 960, mean_score: 9.890625\n", + "INFO:tensorflow:Step 965, mean_score: 9.890625\n", + "INFO:tensorflow:Step 970, mean_score: 9.890625\n", + "INFO:tensorflow:Step 975, mean_score: 
9.890625\n", + "INFO:tensorflow:Step 980, mean_score: 9.890625\n", + "INFO:tensorflow:Step 985, mean_score: 9.890625\n", + "INFO:tensorflow:Step 990, mean_score: 9.890625\n", + "INFO:tensorflow:Step 995, mean_score: 10.875000\n", + "INFO:tensorflow:Step 1000, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1005, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1010, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1015, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1020, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1025, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1030, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1035, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1040, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1045, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1050, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1055, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1060, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1065, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1070, mean_score: 10.890625\n", + "INFO:tensorflow:Step 1075, mean_score: 10.906250\n", + "INFO:tensorflow:Step 1080, mean_score: 10.906250\n", + "INFO:tensorflow:Step 1085, mean_score: 10.906250\n", + "INFO:tensorflow:Step 1090, mean_score: 10.906250\n", + "INFO:tensorflow:Step 1095, mean_score: 10.921875\n", + "INFO:tensorflow:Step 1100, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1105, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1110, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1115, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1120, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1125, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1130, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1135, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1140, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1145, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1150, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1155, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1160, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1165, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1170, mean_score: 11.890625\n", + "INFO:tensorflow:Step 1175, mean_score: 12.875000\n", + "INFO:tensorflow:Step 1180, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1185, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1190, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1195, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1200, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1205, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1210, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1215, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1220, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1225, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1230, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1235, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1240, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1245, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1250, mean_score: 12.890625\n", + "INFO:tensorflow:Step 1255, mean_score: 12.906250\n", + "INFO:tensorflow:Step 1260, mean_score: 12.906250\n", + "INFO:tensorflow:Step 1265, mean_score: 12.906250\n", + "INFO:tensorflow:Step 1270, mean_score: 12.906250\n", + "INFO:tensorflow:Step 1275, mean_score: 12.921875\n", + "INFO:tensorflow:Step 1280, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1285, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1290, mean_score: 13.890625\n", + 
"INFO:tensorflow:Step 1295, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1300, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1305, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1310, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1315, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1320, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1325, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1330, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1335, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1340, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1345, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1350, mean_score: 13.890625\n", + "INFO:tensorflow:Step 1355, mean_score: 14.875000\n", + "INFO:tensorflow:Step 1360, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1365, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1370, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1375, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1380, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1385, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1390, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1395, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1400, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1405, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1410, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1415, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1420, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1425, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1430, mean_score: 14.890625\n", + "INFO:tensorflow:Step 1435, mean_score: 14.906250\n", + "INFO:tensorflow:Step 1440, mean_score: 14.906250\n", + "INFO:tensorflow:Step 1445, mean_score: 14.906250\n", + "INFO:tensorflow:Step 1450, mean_score: 14.906250\n", + "INFO:tensorflow:Step 1455, mean_score: 14.921875\n", + "INFO:tensorflow:Step 1460, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1465, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1470, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1475, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1480, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1485, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1490, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1495, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1500, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1505, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1510, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1515, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1520, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1525, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1530, mean_score: 15.890625\n", + "INFO:tensorflow:Step 1535, mean_score: 16.875000\n", + "INFO:tensorflow:Step 1540, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1545, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1550, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1555, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1560, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1565, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1570, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1575, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1580, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1585, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1590, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1595, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1600, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1605, mean_score: 16.890625\n", + "INFO:tensorflow:Step 
1610, mean_score: 16.890625\n", + "INFO:tensorflow:Step 1615, mean_score: 16.906250\n", + "INFO:tensorflow:Step 1620, mean_score: 16.906250\n", + "INFO:tensorflow:Step 1625, mean_score: 16.906250\n", + "INFO:tensorflow:Step 1630, mean_score: 16.906250\n", + "INFO:tensorflow:Step 1635, mean_score: 16.921875\n", + "INFO:tensorflow:Step 1640, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1645, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1650, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1655, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1660, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1665, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1670, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1675, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1680, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1685, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1690, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1695, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1700, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1705, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1710, mean_score: 17.890625\n", + "INFO:tensorflow:Step 1715, mean_score: 18.875000\n", + "INFO:tensorflow:Step 1720, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1725, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1730, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1735, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1740, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1745, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1750, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1755, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1760, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1765, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1770, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1775, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1780, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1785, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1790, mean_score: 18.890625\n", + "INFO:tensorflow:Step 1795, mean_score: 18.906250\n", + "INFO:tensorflow:Step 1800, mean_score: 18.906250\n", + "INFO:tensorflow:Step 1805, mean_score: 18.906250\n", + "INFO:tensorflow:Step 1810, mean_score: 18.906250\n", + "INFO:tensorflow:Step 1815, mean_score: 18.921875\n", + "INFO:tensorflow:Step 1820, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1825, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1830, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1835, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1840, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1845, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1850, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1855, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1860, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1865, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1870, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1875, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1880, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1885, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1890, mean_score: 19.890625\n", + "INFO:tensorflow:Step 1895, mean_score: 19.906250\n", + "INFO:tensorflow:Step 1900, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1905, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1910, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1915, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1920, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1925, mean_score: 
19.921875\n", + "INFO:tensorflow:Step 1930, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1935, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1940, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1945, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1950, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1955, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1960, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1965, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1970, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1975, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1980, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1985, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1990, mean_score: 19.921875\n", + "INFO:tensorflow:Step 1995, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2000, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2005, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2010, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2015, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2020, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2025, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2030, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2035, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2040, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2045, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2050, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2055, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2060, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2065, mean_score: 19.937500\n", + "INFO:tensorflow:Step 2070, mean_score: 19.937500\n" + ] + } + ], + "source": [ + "game = 'pong'\n", + "run_dir = get_run_dir(game, 1)\n", + "!python -m tensor2tensor.rl.evaluator \\\n", + " --loop_hparams_set=rlmb_long_stochastic_discrete \\\n", + " --loop_hparams=game=$game,eval_max_num_noops=8,eval_sampling_temps=[0.5] \\\n", + " --policy_dir=$run_dir/policy \\\n", + " --eval_metrics_dir=pong_pretrained \\\n", + " --debug_video_path=pong_pretrained \\\n", + " --num_debug_videos=4" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WKWPdwP8BW_v", + "colab_type": "text" + }, + "source": [ + "The above command will run a single evaluation setting to get the results fast. We usually run a grid of different settings (sampling temperatures and whether to do initial no-ops). To do that, remove `eval_max_num_noops=8,eval_sampling_temps=[0.5]` from the command. 
You can override the evaluation settings:\n", + "\n", + "```\n", + " --loop_hparams=game=pong,eval_max_num_noops=0,eval_sampling_temps=[0.0]\n", + " ```\n", + " \n", + " The evaluator generates videos from the environment:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "id": "At9LC5rxFyv2", + "colab_type": "code", + "outputId": "983b0e7a-2700-4e4a-d776-03c459669770", + "executionInfo": { + "status": "ok", + "timestamp": 1.553253830168E12, + "user_tz": -60.0, + "elapsed": 4036.0, + "user": { + "displayName": "Piotr Kozakowski", + "photoUrl": "", + "userId": "01014928596539690143" + } + }, + "colab": { + "resources": { + "http://localhost:8080/nbextensions/vid.mp4": { + "data": "AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1wNDEAAAAIZnJlZQACqh1tZGF0AAACrgYF//+q3EXpvebZSLeWLNgg2SPu73gyNjQgLSBjb3JlIDE1MiByMjg1NCBlOWE1OTAzIC0gSC4yNjQvTVBFRy00IEFWQyBjb2RlYyAtIENvcHlsZWZ0IDIwMDMtMjAxNyAtIGh0dHA6Ly93d3cudmlkZW9sYW4ub3JnL3gyNjQuaHRtbCAtIG9wdGlvbnM6IGNhYmFjPTEgcmVmPTMgZGVibG9jaz0xOjA6MCBhbmFseXNlPTB4MzoweDExMyBtZT1oZXggc3VibWU9NyBwc3k9MSBwc3lfcmQ9MS4wMDowLjAwIG1peGVkX3JlZj0xIG1lX3JhbmdlPTE2IGNocm9tYV9tZT0xIHRyZWxsaXM9MSA4eDhkY3Q9MSBjcW09MCBkZWFkem9uZT0yMSwxMSBmYXN0X3Bza2lwPTEgY2hyb21hX3FwX29mZnNldD0tMiB0aHJlYWRzPTMgbG9va2FoZWFkX3RocmVhZHM9MSBzbGljZWRfdGhyZWFkcz0wIG5yPTAgZGVjaW1hdGU9MSBpbnRlcmxhY2VkPTAgYmx1cmF5X2NvbXBhdD0wIGNvbnN0cmFpbmVkX2ludHJhPTAgYmZyYW1lcz0zIGJfcHlyYW1pZD0yIGJfYWRhcHQ9MSBiX2JpYXM9MCBkaXJlY3Q9MSB3ZWlnaHRiPTEgb3Blbl9nb3A9MCB3ZWlnaHRwPTIga2V5aW50PTI1MCBrZXlpbnRfbWluPTEwIHNjZW5lY3V0PTQwIGludHJhX3JlZnJlc2g9MCByY19sb29rYWhlYWQ9NDAgcmM9Y3JmIG1idHJlZT0xIGNyZj0yMy4wIHFjb21wPTAuNjAgcXBtaW49MCBxcG1heD02OSBxcHN0ZXA9NCBpcF9yYXRpbz0xLjQwIGFxPTE6MS4wMACAAAADkWWIhABvrNdXNvEPmO7lwVl73sPl0EDBzzvrz1O9Sgfa49FGnVhGNj4PrUzIEjAsiR14q5boH034au6fMfeHzW8BQIdLu5D8GWFcvhnUQvMLIDm/5fDlJWNI1pLZ0KekyKgRZvEg10IZZePvLcj64kGJzCMbJi6QZbX4WMzyM/ZwsXoWWPBmmlKBzFixHWdkptjcAYhpgDpXSILlIffpBFr5Fmv8Xdrl5eZtB/U18q6RE0tX2BrhekKyOZ5lJWnXZWIEICkLYIda8x0l/aAug9zkJAN2UJ5v8AfQgXgS7iPy41I11UQneH59QQ6r2Fy+bXVz7hKXvFUUQUW2NfwyAHSubAtKRV8FgrIBnKXwxAjc8zc/00LsdZVdehIaL1eI9qZtyap5GmVpF7ZJdkQbo7j2k9/o8Ztr6lwZrODqoujHSJK6V9bK0u9Et564zU+wWgftergJVAEl4m/D3N6/lD6Tni/a6bLzIcdcVjnfWLPAUBwAoj19NpxhAbe1VyiybbzF11k65OpExrnTpeyfXnWi2YKXmv6NMcvP6YS8WOK4pM7nWhyKetjJvO69p10oeh7Pv3PuQBq3kARIBKQ+MPYmymnbhgmxG/6w3hJ2A2Urz2k1DctVq7TiUCWnAReHSDqSpYcdQwxCm/lIpIwtl/dffgss5v+hhFs6NSNe3zqLc+wa/P6fKKBzHBPA6mZtXbiaJH0Y+5hMHtf92lFc+6I4pZ1q2XpI5Nr1V7em9lfehnp6KwZCFUTCrCle3ZgVn3/WlL0hiX3HqF/qGx1rSRBE7lqG2nGEQXx7BFJGNLF0vFVi0j2agV+lVqGOVlIxAjK3E9wGWVM0V7xAFGXQtxAYJ6qA6zMuM1AzlTqoEWcy+zkYm6Z2/Vn8RMHtpHaCW7GF05Wujcn0D05dR11MQem50GDiKxlzGighyGKWmfeex/qNBXelV1apol1nwDCUiSbC9fPUu70YI94kit+OnCdHe588u+9o5tqmvG+4ju2D1U0YtGzBJLwNtKIxTj+ycim3c7lWMz9gyNpdcRw85nQOO+UebN2j6KDuTy4XNxidtzFIcvo67EYGfl+q3WaPfQzFQLuOqvybvDRViyMxNwUidf7UCNcjzUMa0RFtd4HPTD4pR9pL0oOHG0XMOwvMlfvI/0tUl0nH8gKjxa10D+0pCAG9Sq76K3xNRb2QQ4PhDp7u3P+U7CY7JpR9qHasfUEAAAAxQZojbEb/+8M7YXuyASh7Kplen5y7UyO14JIrbok4XTbRQe3ORruR41lvOoDou8ClgAAAAB1BnkF4m/+AoGMvgzynj7iP1aLLDUqgN6Jo/suYQQAAACABnmJqRH+I4ID6PupU5REY3sf/aa003+ohQ9ie2tSAgAAAAbhBmmZJqEFomUwI3/3q7CUD4uvUkflAlpSidF3UDZUIJZsuBftR/Ot+q3GhwTn9egJDu/Q0u302gbAsB/IV/AFE9llDfK6lUW9694v3+SwMl6mllWP/WwEuWwr3bvYMZvmSan6TfqkNjfFQ2gIUyqpE9/WshNO04YsB0gSlJqYcVRMCqNuW9LOcW33er/NiW98OL0en+UGHVMigjfMRwtvQwEBx2TCEvqNHiHUZK47Ql9EBEwOC+9RX+RfTR+dz/gd9jggR1VC/W7S6VbwMD6OLZQ6pAbUGRTytAu/D3nUIHJpw2KDyU/GpGdKbj2WQLEoSWl/G1erWEOohpWvfDxHkGcHJyES1MZ1DvjVLlpUz1LfdDNETW9u5oytpQuaaon1mmF0VFggCZuK84NA/jjnYLjbiGa2mQVNuCz0xJcY1SlKU0HHZWh4xyJUcesClYPnBpMFz
n1F68BZn1odfSkUsfnPO8Ozfwiiz7f3xZq0mYnuxahAz2t7bEpiZEBhl9vOvB5IwNQnQ2WMn3D1n/XqqbGvjTwCrUHeBDng8+lhZDYcGCdhZ+2y21qFiMlJaX159s9jZ9Nr1pQAAAFpBnoRFESzfiNYKja/0xDhfTAiTpHLhMdYxhLBGYN2yGAaE7v0DPQ2YrW+QLfMv2VvTlemA0fw0ITn/7H4QKf0Smi2r8j9M+wtBuP4ooX+ZG3O079gxDawGHzkAAAAeAZ6lakR/gQeVglqse6icagSNwuz60J4uGyZCePmbAAAASUGaqkmoQWyZTAjf6DCt6Yuhred9EHrqUOfXsyntdBaWSJrN7aPiRCVp6QCYQRjgAPaX306Ulc6wvo26P1UQNC4biU/od187SikAAAAxQZ7IRRUsn36T35XM0R9+0te5g6nnf972YKyQEsNo7mvZhGpibk4k6GYi5Xw1oII/gAAAACwBnud0RH+KcQxiHI/+tYvdUXZoto0LM0yuhiZoPS8TyCql7aDErRX1MckO7AAAACABnulqRH+Gdbgx/3UyLHgLBkL+GlWi7oO1l81SbcV5BQAAAC1BmutJqEFsmUwI3/QsGvLB67IEGcjaf9jQmmp2xwadCZqIc+QsmxjXnMZQAp4AAACeQZsNSeEKUmUwUVLG//1hfa7H0q8GiMhKKARrmw6Z4xdeyr7ujJme+WFMGuspP/+ISBWO3g4rVjHKMZczL9N457VGygcsdj7HL9XSQg7I9GyDcuezylwimiV/wFo/tnsP85jKo1N9u1U/c32ZRoJ6MLzOYkUW9bgpdQwc/vwurbPO6DqdSp7R4DyF3mA4Y/IOgh69Sz6dfNBC5f60MoAAAAAUAZ8sakR/iOA7CrYE++z83mxaT9kAAACCQZsxSeEOiZTAjf/7wc1PkbwRgr+fjnyNR8pfsXICQZIWSD29LO9OdoFeaCGC7kNQcI6QVUvVoNv//1awF0I8N0tfCIdJ96/qdtaHfFDnSTMhoQ9GjaS9EN6FwfTiM0n2rikHxnG2wBAjL4oKVFB1J9q4Q6oi+C8FkQ6nZ9weS8YziQAAADVBn09FFTyffW4h7bT+Vmnsg7Jwp4y5XEPjbVoz423RdhZiyhHoVMXw4WOwpIvW8VtedZMRgQAAACsBn250RH+HsPlaZzJ171H1c9c+P/yRU8UOm2VwDFLU0JopWl1F2Khtda0wAAAAHgGfcGpEf4TUK0IOqyu7XwKdBz1mDC7mcZOyimAiwAAAAGlBm3JJqEFomUwI3/wDwzHAex2BjGQbp8PmN+QZU96uDVeJsc29OEXjcldhiXFvZCjL1XZadbCRD86yiAgWQRsivW2zgAoC7DVCL9e3fSnRGKpVU/LGLClh3Y48sH47PWqbcpIEZ6IQMXUAAAByQZuUSeEKUmUwURLG//vD9/y1OQEdZZUsKUwNqfrKLfwKdmKP4MYlUfWBfMS6AB2YzPOOewKgSMfLoK01LvNA1j94yZ+2K1utCc4bPCb5fgtJtphyhALG+EJzcxQG8TmO93UCKf8P7LJwQsMlijgw4NSUAAAAMQGfs2pEf4etAFroO/Tre2AdB5O/z7sBdNC+yMviqgLALp+SqJXA7Pg/DPRDeTGlt4AAAABvQZu1SeEOiZTAjf/9dtYVwOWMQEFcGvjV4Syzg11kPr1s9125wYmt9rvuf3mx7RYJ6jF6ErfUzdz0/3v9ae/O7utr4LCBaSp+/oto7O3F1qWD/jeXpJo3T76eNJFHYJw8VuPijBUU3Vp8h5phb4yxAAAAO0Gb1knhDyZTAjf//XzJzOIH3VrgDqkUNgn86gOXkEPCMlG+9fgG6imSEzHDCVblECfv6lWOPhYa83SgAAAAbEGb90nhDyZTAjf/++FR2EKSC1to8pc6YSO3kyM61vFVTIqwBRLCgvhTW2uELbv4P1t6j3j6R5JEnH93R+6ccuNOfX+S8DB7HTWlWJcQLgqD+S2bSR1NJYKOXWfT3yjStlWid2o9fNbVb/WnQQAAAIZBmhlJ4Q8mUwURPG/74DjgdzkBXMYP2UA+265sg2RvtJWp7hbeXljZyPg5U1jpyoQAMkswdLvArqw2ReEXhiCOCghoFR9+KlVVnzlHwdrRAGNlgaJUCgaXSP04WBiefPhnJH+UNq4LFoPepHhK//kKOrVcBFaR4anoX/aA5EFLlBZC3z8HwQAAAC4BnjhqRH+HsRNxvly6e2k2NMUXC/vU7UPeiHPyDV2p/YvXZWhgk+q5Cpb6bHPQAAAASkGaOknhDyZTAjf/++A44Ilb6dcZ4EfRuTLTuDvo3J2im59tzroCzqfsO8XO0To/kC1I69bSu6OBA9I+wRfSH4s2YCwMyLJB4SXTAAAAXkGaXEnhDyZTBRE8b/wDoisgQ2L7kEBMp1YsiwFGNSG/OTV6DNluS1vSFNmiEavm3sOuILR8pgK4M6P/qHxZC276LRYUYYCavgfWkcLXP29c5JUTuaP0/h/7ytcJzS8AAAAwAZ57akR/h7Ee1u4Mf7i3wEEg6o2q6d802TgnxIfuhG+eE+Dw81v3A/7HzNqgsMZrAAAAZkGaf0nhDyZTAjf/++s+OA7zprt9FalqM30xdIjlcH2DZkkFtbEKP2KnwPRDHCudTxGL9Qj/p+X8zQUT6DsKw9cio1ehc/Va1ccw4608TJVh/tU58mts0w5trYhfnxrz7ghBbWrPgQAAAB9Bnp1FETzffU2jm3iBOi008YuPeRt0exkohagoWh6AAAAANwGevmpEf4k5No92pOx4d3tVe/jt/92QHqWDJ1gLa+IuwyWHPUwqtoP8fV9Ym+scICO5EURRBtAAAABkQZqhSahBaJlMFPG/++FR2CGqQlUqIWgPGeTAl0jkR8nWtez7chaAXKIsKU2quN90XktERKg37/m9ip4Dz5kOgzQscz6nyNY0f/sd6sj1HySlAcKWdh7zp6SDANGHziaLu7KO4QAAADABnsBqRH+JNh+VxVwUJ+Y3lV+rya5I/8TCxwR0ZLGKwtu7rocrr7JN3QZSGNke1uAAAABQQZrDSeEKUmUwUsb/++A44DvMfbctqjDdsKqNf0xOA39UYyLAzYIZyyzAIzc51s3jIS5ItuDvEtTXgkuVVUxzdDw7I8ZpdZavGnbqYY9+GhMAAAA1AZ7iakR/gQ9rhvIRB0ySHXw58101sjT3zRJJs0q1N75B5kzDs336j3kR4+OINWFbqx8fObwAAABPQZrkSeEOiZTAjf/74VHYIlYiKMdyQZmvfSW5HcZkgM1RFiY3gEH71z/3fVBzOwcraE90El70+ZDjx4HGc66IARVSqHcYq0qBrrRk30FtqQAAAFNBmwVJ4Q8mUwI3//vD7l3aeLA9vGddw8aOZ4u/aB1tWqMJJSgQk2EKksQOal+1aR5lX56hJg2jFlZ0Xy5wPHFC5sT+XuWXvNeiHLLg872OhCGzwQAAAGBBmyZJ4Q8mUwI3//vDobVTkBINCYpxqf92jigZ0Srp218/WFaj2TRlEHOqssGMM0Apc19xhhprfp/o+ny
oo3nU4qIPx0toevbOZ/uMdat8h/rz3l6mAf6cx4JPPMV/58EAAABZQZtHSeEPJlMCN//7xIHZpGQEF4WlOSmv5sDvQ1HkPJ24b8mLANR7I1VevWJebie2fuXBDZeaCivuJcWRnz+sLxLjPYolWMX+OCeAR284M5BlQ+hDK45MhLkAAABfQZtpSeEPJlMFETxv++A44EJXr11WQvEfDTQVOZV8/+Xr3S8f8Sdkd5yhFoZ0Eykj3Xivg1BzZSewsPXa9nQ6gqdDgf7T2DmCjKrk7f2e2vK9ypwXpi7kgz9A0TgFjRgAAAA3AZ+IakR/h7ESVb5jShGX8VmBJn/MdoNQ3Z6fh4j/jmgTG8KfR4gLxPvHCfsFkI5Dojx4hvmkmAAAAEFBm4pJ4Q8mUwI3//vgOOAkGyUenL98r33EsuAShImg94uLBrTk6dhiDVHpVEky+DhSk5kCbedWMCnU+cpV31ziJwAAAHdBm6tJ4Q8mUwI3//vBcoj4CbBCQT2RExz0aNVXfJZgEMRjG13pvbjo3lZ6TLZP87/4osMgfYBX228XW1pfmU8k2l4pXWkF2ONrCPtLUsjWqi9NqYjz9uxWY1lh/hx5fARLNZPbPZztgBCOOKpjSsjqrRTvm2kDmwAAAGlBm8xJ4Q8mUwI3//vgPUQfdNRvnx3FP1pywn3LJO+BgkxSFcCfgS8xAc5jl7DjT9LzYEwD9isIU1yUsdzHqy/6ngxDd8Uf0UrLVqXY4UPbCNliO8n/ZegcRbOArrd0G+CXl9/2CFy7Fp0AAABdQZvtSeEPJlMCN//74D1EIHvgowka8CSMlOV1djkcwdrmN+M3w0ez2EF3Z8QgY9h2qKskrDR5Mk2+uEQHAHNJFzVoZ8OEnB6N5bVjkoQH9GCmMuS0GmqX+iknVHfRAAAAYEGaDknhDyZTAjf/++A44EPraJILnY0LuTUTVqG8Y4R+iod8qU50V48CYGzxUBWrE5/inR2Nxzjn9BK+lYDY6wizKJTKtNBOivIiofjj7puT2VHhc5kdvPo7rDnnm0xNBwAAAGpBmi9J4Q8mUwI3//vBnCJ2LA71GF25ddxrRcaDbZ6JrFKdXAbBcwenVA97XR386r26bMwGidvp2PmbPeLJeVTrLs40YO/0dkENxamY3tNtYwvFfYvB8Y8OeKM6xrpDUHsU4NOoFoGqtzCdAAAAYEGaUEnhDyZTAjf/+8N7/LIyAjCC10eRAkC4payNwDB4TaAJfH4zC+l57EAHdxnDWH/9FYP1pFGTKBZX4dIkEoguP5WochPIdwRBalFTzEmR/DYBfxJGM8ekovk7ApnUkAAAAHVBmnNJ4Q8mUwI3//wDwzHAd5j8RhmR9H0z98nDjYj/Nc7vsZGPARdvUDB2TCWaeccqi54nDBz6EstQtzy6v+wvloWKraz+uz/M18zKVlwhNHeSR4GZPnFXhk/6SFiPs3DdL9fCsVu/U6phJsgQpW9nziiIrsAAAAAlQZ6RRRE8331No5t4gTzZDbeKeBR95P1RisqLON/iq6lMAjgQ8QAAAEEBnrJqRH+HsRJVgDz377tnoNimjhTZvPY9yHf2fmv0wsOMF+wp88Zp9A47UCY9LcsoRhIaJ8uNlzZCrsZsy0xHSwAAAEdBmrRJqEFomUwI3/vgHICtV4/N+3TLBHjt+Fcn7DWjz8soUTNhnfkqkcMHOJu59OaFxhuVf+ti1/1ybHdnNcNjs9ZKiS4KwAAAAGJBmtVJ4QpSZTAjf/vB/wweEBHWjwtlVbql9dgODZ2kZPlG+D4KXaVCP4hDGqGFAR8WEBDCUpQahknTJjX5ZfkvoG4DfOZ2jNfwPVMAdpL7FdEsgo7WPCSMB38k1yCOW8g6+QAAAE1BmvZJ4Q6JlMCN//wRIX9RB98K5rsT6v40vwfGBFHd36t3Lmg6acxZXjPDoIP1nuZtPbJR67n3MUNs/jpJGr2SW9ZnVls5Jb7a9l/19AAAAFdBmxdJ4Q8mUwI3//vgR+AQ8tE14oytZoCQH1vmaiP6zviuiN8WiV39XBnBmeNGUBPj1lZw3uGVd1ASBgSvQit8rSzdGRcZWThyfaYyQbzG3uhl9FJ+f80AAABOQZs4SeEPJlMCN//74DjgOFPapEipwNQ5JLf/iFkT7vTC27zO0O1Hhr7Be9x5uGF5a9SD1TJTW/IfLAT7Nh7DJMwi4QswAkdulgB0QpQxAAAAd0GbWUnhDyZTAjf/+8Gbu08WB7ipWtNjMHVF/iPxLBqJR3dWf3VXwht118WdJ2yr8sybv5tw7Gkcen/6wXcv6hm1FN3LQa1BDmxg9YZtMWlhqjvMsaNo3uWx/3Zsn/9CB07I7ahNSdEvwIcfvP9pOIVFVaVu8QFYAAAAWkGbeknhDyZTAjf/+8Ohm1TlEJHbNU/mWRKFgYzLN2yQ8jXAB9wCzOsfbVP2lU1EY8QOgEzKqXo0R7iSjvfThP3Ejb6DhTio3axVyuxNGvXKyq/GmvvY+WeEeQAAAINBm51J4Q8mUwI3//vD7xujgdzj8Rhi1Ykq7OuqSWTi3Jyq4sFrVH+uGrfgBuxTFBDV13U4LCn/MgN+f4ZzOHXoTdRaVuXBhLEbCtA5Ms+hD9d2LkEvLXaBv0E8NnWBPx9w4ZTvBVStx4l9Na6nPeTrSSDBnaVQkpIMXYq0+x5SehxmeAAAACdBn7tFETzffVS1iQH6ubsRADaTWu5ionu4K0/0ak9zTnZggGqi+fkAAABTAZ/cakR/inEMZAKk66wt/cwHaBMWpfAc500hIeLekQWHoYVeIuwzwo3AD4zjCnVWjbJVYnBRhCSWWZm8rpySFJ3514IPVb1tZpdA21QwKGClE4UAAAB9QZvfSahBaJlMFPG/+8IGeHNgeeQ43E7kwPs1VGXGJw973890r2GIuyVyjsOqfdfCvVaSkH16R0M1/zHTQJQoPuOg2+H7r68wE56bXlYqz1zmB//ygylNGdR6NOHJuRI4gUP87HXbU2cwVemECH+lErFeByiJwCmM07PdY7YAAAArAZ/+akR/iTYflbJv66XME0GHkCF6mtJpaggCYpsevkqPWehQsYwUCZ42gAAAAHxBm+NJ4QpSZTAjf/vBm7tPFgQmOx8vn1I4MEdYOyJbxW9+T8JyO3vjlruuDeJ94kHVp6mE4InbW3mhydReOAPRHSvfloMOy1X4f1k61/T5AyQdQwSfNs/ceWh+FXJYPI5vBAAfACpz0P00Y0ZQiTyXgYR5dHA4282fpEnhAAAATEGeAUU0TJ99biHttP5WbMcugQMX/6p30Ja38txT+7rJCjWU5BS5Hu/Ped4RgDO/8NmgX1/xtKU+guus6I+GB0iyn+7zzesJ2+Dd1FQAAAA7AZ4gdER/h7D5W775k5qly41vVWL+9TyyQd/+taVv6vysDLrv4GuSfz03ASJUej48yG8+gYk5sPihZ4kAAAAvAZ4iakR/hNQrQyxiUa04Hw7yezOPlcbNtcK39Adv5vlNi03I0KSWCx97lgf8KiAAAABOQZokSahBaJlMCN/7w/fqde8Ef0LFVa6z8o
bTV+Z3fHanJstZCK+C9VlS5uYoIGfyFRCQyvYs8eXMoinX6wkePCQbIrdvJZtx24a1z2OBAAAASEGaRUnhClJlMCN/+8HPtQiIJq4KJFcEZqp5eZHjbfXvRNExlEoNtkHV/O1j3XsPkfmAogRZ6H3xb4fGyDO/psnXuk/zjrKbgQAAAGFBmmZJ4Q6JlMCN//vEgWa8cB7g7HnWVr5afQMBhPt/eF0XZINzgf1DTqt4LgNJ9n7jJeXS6jzqED9EpR64Rz+FTHrYHSWny0wQxBJBhnSb/iEP/JdZ86h/Wi9v9QW09VhrAAAAQEGah0nhDyZTAjf/++s+OAznGqGizRSqo3BVcpAWaqCB8ovSoNU/Q+cLNp60lGoY1XxqNFsDbFsRe3APbjIzVIEAAAA7QZqoSeEPJlMCN//74DjgJBsj6l+X0srb4l6b6JH2Ft3abZgPDnb7Plzz2pLEPDmuaBtebuDUX6hFZoAAAABcQZrJSeEPJlMCN//7x1Dlo+AmwPQ/8NXi9GKqmuY1UPDYZG3NPKYYZuf5RLSPKtiYcXru35hiQqjNXqPD4oWVOHCsaOXNK65RabsS8NeFVBf/1FIziqztdYeHaIAAAABfQZrrSeEPJlMFETxv++A44DvMfnbEhBgUx9vnA4Ho8ktSkKylpuzQvYY0CtUp4uNU/8qrDXOOYbe12K7QloChfe0qoEDZAp+Ls75A+yk9tDX6iPXAaWpTzsfeqD+dbeEAAAA4AZ8KakR/h7ETcb5ctlmh7dxoqP/yRU8UOqHP2aaGG/KPrV+Afm67tENSRXPgCr2j+g5e2szXEdIAAAA2QZsMSeEPJlMCN//74ByArVvp1VBzo6FR5mbIURGBYtSZMz2ctINnHT3rH2VjPX/zzdapOx0wAAAAW0GbLUnhDyZTAjf/+8PuXdp4sCBqPO04eytS9D46/Avaazen9hU5pU11Hmv3PBlT0PTYG549QlWOOVwWlJAXcvc1syzyvf1qFX3EhgXM1IOIcGnTwUuYkLr42jEAAABaQZtOSeEPJlMCN//7w3J3dNWQEg0JK5pzBQVfpWoOKLSI1/VmJjcsCrBle+ncG6tiC7wuDEZ5USHcZC55aKT2OIPW9YHg1aVeYKYR0wQWQDP1DG/oXsGKUwexAAAAV0Gbb0nhDyZTAjf/+8HPtP3gfFlpFrQPirkvcmukg1+owUzumItdD2e8mcIUJrYgE5Sa+gOebQM6C3UJSm7qj2KyQP6H+uiQQf6dQ3nUsv4RzQXEB9IZsQAAAHpBm5BJ4Q8mUwI3//vEgWa8cB37/p9ZF86iN25nKIigWeKhgv7YJ/UNEZWwthTtyyAJEfsOe3Z46qB8Y9u4aNguQI82wq+DpdZlQKDejU+B0T7yh/vWZYzvEjdR8mEIAxxdRZpU68SFTtrnTu7KpqA68eg72PoiJCihwAAAAEtBm7FJ4Q8mUwI3//wC0LvBNTmF1Z9IfU+yXng9Xlh5jOvxWurF0OzyKrrbAG/LhzXREODrp9/66YlkypRbzROY6MEMzZYnzbptXwMAAAAzQZvSSeEPJlMCN//74ByBNNkfODOhV2l7PuVP6jv9DFZM/xRiaZ7NjolC2zFnBs1tnrqTAAAAUUGb80nhDyZTAjf/+8dnvoPCAfKn6R60RAOrjtGLSYzJf83HxmDNtKNcZR3s7Q17gEYux4ptcFO6KrQhBRjlRY6a9irUPBzxRSHCk6HKHHiDGAAAAGlBmhRJ4Q8mUwI3//v+jz7KI4HFxHpTS0oxWxOKguK69Iqb7Pv80RE8zRIcDeLBSrbSsht1y6Ct5KnKYedMgtXkRlKks+2+B7FYsVvxecCZHOfXyHzislTlSVnRDtTX3Xm3aOyRaBXu5sYAAABAQZo1SeEPJlMCN//7wZmMlyhHAV+DQxbHCYKLdsYP8alRhrAXEGFKASYmbvEaFUC8jJbbP3+VwoeATImNqxZxmQAAADBBmlZJ4Q8mUwI3//vgHIE1raJILnXvo8sZurnfn67164Fu2/h+6+ydNTr/+OxuokgAAABKQZp3SeEPJlMCN//7w+7Y4o+oh8mWdvl4UGi7/t84ZAPcQ9+MBMDQq//WhRWSosr28nZ9S+NamaT8AaHjCriTA80H12cPmqsMBr0AAABZQZqYSeEPJlMCN//76z9RD7r4RWowH8hXR1qM2/0RPeiKZ9pQxjXugxDv5Y/OlrDSWjfi+WFQhmWEG3gYNg2K8SmfzQY7iAamgvUBbTcVy2n5t1ecNgemmcEAAABoQZq7SeEPJlMCN//77lcOB3OQuw6HD+AjOfELwUGp9SJicXV1sFnhvaKjnvCxiZwl2jex2l9TUZcaQ8RCwsTwL7Cb3QWKztEEttb0Jo9sjMWDnWrh5iANpRQyrCjmn+qlJDlANP179jgAAAAeQZ7ZRRE8331No5t4g3EwRcCB6QcQmRnRJJhqS7+1AAAAQwGe+mpEf4exElW+Y0rm0PJ/wbTD/lW+CtIwcWOFlp4l4dVOEXYZReMT7JXZAnpNCK1hAhXjScp5NYYnhBjzjGD/rSAAAABpQZr9SahBaJlMFPG/++FR2ESusZKMxurmUPsaoB3yvXwtgjhKcOw7RRICF1EOtbOnMwE+WuGfg86L92P86V4YhcXg6X7+adBswWrcPNmvGF/+VM2MJnXpcNhI8G84ECcACh3CpkqclfZfAAAAJAGfHGpEf4jgOwqrh+lSqMFWNRECeaFV48q85DBbzjXglEAaLwAAAR5BmwFJ4QpSZTAjf9P1UW2P31QTlnuH9bjbYJ7XWt/ai9e9KF19PysQ7DabFT0nEsV0yceUNAJX0BBUVv6dG9+bf4cKP6gx1WQaTSkLrl6RjZl8G8g+mu0jnqW9tdKrF6RdsgVAMdnm94bRz+PzA54Zyuy3ATHLl2cUlXEyCW9KigPQ0J5U3y/k52ZP7lyfuZ5la/1Q9Pj6hGjOXZEUFpJACy7fj69cHAm5TQWn8MCi9bYtYpHNqiSnSLn4A+l5LrMldm0cXmrlCIKI+1dB0+ItJmzaYll6630NuKKUYK9we5JspVhzVBRfoTKIYbqUfFWokuJzg/aLs0nAnaPalKUUmOI8qX3c1g70zXzdIptzRWo0lBpoXPimAkfYfsLAAAAAUkGfP0U0TJ/Tuf49tp/KzFPCIEDEvSN1rnYJbiqzzxVfJ/8OCLsf7ljUGroVfs9mei4RJMtv7Zcn6zTeStOI46ruvNTH1vZzMeIyae9K1E96hHUAAAA5AZ9edER/h7D5Wmcyc1S5cFCuXC/vU/dYydSXmT4ray0A6VWt90PLvJGDUf11AKA6FxyAGoDLyBHBAAAAMwGfQGpEf9fFlYaEHVZXMWGGCx/WUqHrS0vhwUrligXTGXJS4DE/QUk+ncgeHLqWgy+vzgAAAKVBm0NJqEFomUwU8b/oMJ46ACuOTXqhV0ljwLZShz4Tc58bkcnq9w3uogc6YHf0kLpTfoclGUCaCZ4AxrrKOqJsYXwsspdKo6DAKGat7GSKdIulJbXbhuc/IvIa1CF/DNWzV6r+cO1H8weRmlyh8
KFBzm3Sb9biHpOZEdIVGO3TyFoUJ3SD0ZzyH7EROwGbMEzO1FkpRP4jj2zcTUYldf8k10XzYXEAAAA1AZ9iakR/mI64NNsipLH3uhfv8LrdLfjRclwIPczo2kTYfFSqf6TNBKKbBeuQKHqSSWNQZ4QAAAA4QZtlSeEKUmUwUsb/++A44EJXr12b4QrXF3nTB+IiC36W7K9k0PInXSBIMLwHooA0xe2aJ27xzqEAAAArAZ+EakR/h7ESVb5jShGX8VlCE/7Y/nQZhvcjseRoWw3awq8RtUipbXM1BQAAALhBm4dJ4Q6JlMFExv/9Y3sZGUqwJ/ZJQ3AZgBXt9Ufjt4ZzNxGyAfjakJd+hT55Tw9cNhg+w6ug3vz/oXT+HSXuNuSH3FG5/LjmCDKvQYF1YEcEbLkXk9fh9BECrWP0tPknccp1bt+CPAa7hxxvktQiTyB05TND7a4HjRFbvq5TqgaLwDS28Cxz/2MBBMq7yg+pQVD0bX2Sl/mRKrv1ISvuJ2v1VJxyBQ+KKcEXAsgkK1RpCQS4jATBAAAAJgGfpmpEf5BbOR6mhN42Utt0sKpEJIAR7fw4EeQJlSVwz1K/GoHlAAAAeEGbqknhDyZTAjf/++A44Hc4+26pb+I0qiyyaTY86qeqgj4kwIFKO32Zd3pJfJsEyCt3X/2gIau4QMjcRp2plA5yiGVX5wEKuxlMS7/gGuv6Qw92srH94+7Mg9EV6V3w0yihnRIaFYSL1USPSa2cDMI6VI+grv22GAAAADtBn8hFETzffU2pysalekPthsm2ZMTNyS424hytUB9PxsDoKgpOtQiLMZ0iPUVzASv533olMcsSGUtWzgAAACQBn+lqRH+BB5WG0Yk9tcQHTHiL9VohRhZ/SCKeLiSmHZaZBYEAAAC8QZvuSahBaJlMCN/HqeNQwz8YC6lddTUaq/wlNA30DyObwUgGgN9P+XYVKGiTwxMURC6nsd69EfWbXorW5UKp/iLDB0tXqGqCdJCKds+YuzihvwR1B4oMcWI3hPmbK4zhnIvnkOawDk1lzJMAFAd5Xs6In7X2yeUfKr5WJHmhlN0mNbPW0T7jDw7LEkRVdNs79QlUlWSBMCb7P0EyD2FPtivHzwZbF/QBKslG6u1N02WmHh1SzyYHC6SYhYAAAABSQZ4MRREsn812AW0R7+Pctap0S/62v/Y0HOJuqNfDpqTpeIBGPqgtfdqY1GtZkmBpYYfpCvY9UDU+DGLJFacH14P9elmz+ffaFFAfxY7tFIA+wgAAAD0Bnit0RH+ezJaXm0/z6MpMxrufrdPmfUHceEr1rbvBdYpSb3s5MV2ylnH+g7e3nIEQSIuWIYQBSs9KfJ9JAAAAKQGeLWpEf4Z1uDH/duS/8VkmZodKFFLza+EKeodh2cw31hLmuImTU+zBAAAANkGaMEmoQWyZTBRMb/QsGt4s8uUfHR6wH3Eruy7jdPNSHdGx4H2+eY45bHHjrvu+npGPfuKs4QAAACABnk9qRH+BB5WDztJXfGFrhk6Xb6COb2LMvQK/NgrJwAAAAI5BmlFJ4QpSZTAjf/0fwO8d+rKuAd3nWKoNCPGmENKEldonVkGjg98w4Me5Z48NellC0FxCEd/kdNLe1cM8GE783LvVTMHmMX1wOrayRMiu60nJZ7AhcL3MR8AOnKU7oGDehMw21vsadVsuyATMJtrcH76ypGC8FIQF3X4VlmnhpX1WTqIkRvuvbCddYWNgAAAAakGacknhDomUwI3/+/6N/UQ+1Ea1+CN+lmcvTOPgijtDvzZhN0D2G8CBJq0xATOAf5U01N9dCRuCxIu7R2vxMfg848kgrRbFR6x23LTcr2silj6PlhF7bXGnsSksL0sP7ItpzFz6DxDAfIEAAABQQZqTSeEPJlMCN//74D1EObFx0Ccz500Sa/EklQzLOUat9ZefscXQjlR3Mx55ToX4Xuw4yk5jOe10fAnmAMrV30VKTa2W5vE12YQot/5YC3AAAABHQZq0SeEPJlMCN//74DjgiVvp1xjN3F4nu5NzvpHqsfivvp/bbY0P4LsFxB1hB6kH8V7RfuzmdrHI2vT7+H8cVEjDPvhujc8AAABdQZrYSeEPJlMCN//7wcOsd4IGgTeySKCPIuwxg0YYo+96hJ2UPP5xSEcCoL3qXVqKFs/+7whBdM8r0BuQQeLuwVm53f5UyL9EOx7Lh69/Ofouy30Ld83TsPD9W9AxAAAAREGe9kURPJ+A6iW+6tW/uR9lhCZLeuQ4oZcBFeKAu5UN4mG1qmIlUSZUB+VEhmQryk+9/puz2UHFvGA+dUoS1cGBKKWIAAAAOgGfFXREf4egEy+iMAaiy7zEWbVUnq4AyqydOp/rPmZg3eGRT1NmWJ3z0oQRnaNxgE548Z1QPX7xmOUAAAA1AZ8XakR/hnW4Mf928rmxp4HnnBMUqhe0zvX0oeNpgN+7Uu+KKS3fCuekbQu5jS3lzKvhqNkAAABAQZsZSahBaJlMCN/77mtCBIDC4q6hbKW6i+2ykwSQy3Fgc2ZOrZmrvUZMVzT54+uJ2QGpl/6fQueAl+NdAyBz9AAAADRBmzpJ4QpSZTAjf/vgHIE02R84M7FU/ejsVHqM66DEeEz+EEL17DltFUqXJViZAeKAf2cfAAAAZUGbW0nhDomUwI3/++FR2EKSC1to+4xCmEjt5LncA0mqNBUks+Ym0IghLbBCqyHI+/o4VwUizGvOHaK62P8WHO1D41LvGG5uRQbBRxa2T+sO0fWvHLEeFz60KXZuVfnunSGseJ/OAAAAfEGbf0nhDyZTAjf/+8Gbu08WB7eMlZakaObAXDazwj3bsg0VKVyXEZdPl4Ml5p/16cJP/U78Iz8EJFZ2maeMrllGWqaR/jYpXDX36keDWXpidqMEWfOXkQ8fZImQl1OdRErD09YOHHaf5NGC+6km9YfOUDqGX9Pq3XQRj3EAAABXQZ+dRRE8n31uIfah+Vvv5ElY/3/K6wW7DB17bbOdxcR0rSz4be7jHQy0EgS/lLK0iaQUrBIQrvTwd5mZPmZ4ApMYBTCkxmGGY4ysfzByNNQiAJD3piF9AAAAKQGfvHREf4Bw4zB2SpmzpJNdP//8f6dskHoit65Hrw6c76E2VmXkD2UoAAAAKAGfvmpEf4TUK0MsYlS18DDnFz7Y2UwUKL7z8XXLeoLGQWrrq0RBevoAAABiQZuhSahBaJlMFPG/++5XVEH0KHETUXGSGsXNMJtH/jNeRzgaZvNQtjBuinxU/liYq+MxIz8WoClfRA/KTLqF4/Fa0SAqiv0o9OwWGtPhEpGIZo1GD+Cou/3P4e/6cRklt4EAAABAAZ/AakR/h7ESVYBQhM1ypHXwCXiUkjPjSDADDv8Q18gyx9OBwNx7koSQDcnOOQ671+kC98Jm6w05OE/cunBeYAAAAHNBm8VJ4QpSZTAjf/vhUdgh9eh1QZyogZ7+hmtlbPNslgd7qIrlLoeRIb/et4+8Vq1ePF+NNLB4Nr5y2LIdg8c54s2iT1WYUe5vxH2qfxAHd6oe3EPUU6bAiHT77lV4
nOsi77nk7apWUyTxHGqurceFxTOBAAAALkGf40U0TJ9/cH7YcKBioTwOqZoa6GOmGKX7krY2+ZZJFD1Q548KXkilnIlw490AAAA1AZ4CdER/h7D4ZJjNasvMvYzkHey/9PvUWs7fgtuPBRXLAX7CfyYyOMWD0kVUTTq8sgZLsG0AAAAkAZ4EakR/hNQrQmu507TYOsLT3zhWb32toH/fgbuu1qCFxEeBAAAAVkGaBkmoQWiZTAjf/XbliF2xAJMQm7dNMvTYZ4BH/3IqFg9bdiX1lecwbkkdjr1LW6pM2UN1kfniZvPkJBXmPFV59ScvpJEqbeREB1KHz3B7jm6BYferAAAAWEGaJ0nhClJlMCN//APDMcB3mPztiP3uapuxK9w1XpJGf1ey0bVBH2OVz7uNe1T/11fYO9Iv7Fc7kkj5DXP4lwVGQc+vNvTeM4IHYwYcrs9kxs6CASSVaQ0AAABKQZpISeEOiZTAjf/74DjgJDW0SKRZBRnHc1aiJ3lEevKh7RRg1AT8zcCut9u3ECa53MXXjAVR/5TQqy9iMtme5BHk09PtAdSIfrAAAABbQZppSeEPJlMCN//7wZu7TxYEJkiWNfakae+YsHrw4VAfICk7Nb7oLk+0i+Mpuy4D8f/TgFMT1z8D+3aoAm/Am00CBytVnpVYqPkD+waLnY1NOQR2k51+vgGAegAAAExBmopJ4Q8mUwI3//vDoZtU5QkPFvbm4haJgZvT6vbhkh4T4F3xSdoAUekhgoFrQuyi5WLE6GUiQEppgDoxQGgzx7AhRZZEztPG3fTBAAAAQ0Gaq0nhDyZTAjf/++5XDgO8x+Iwxa46cT58hPz/3xXGJ1VivV3KaFJ/3WQxqtPg3LKAamhPU0NAdWGoS0oO7WTR/EAAAABfQZrNSeEPJlMFETxv++A44EJXr4MQrxHw02XeFmca+qmRNOoUcRRGPhSgHkAcuC9vwQHiT3krg6bT/BkQSItsA8CwlnnzN24kW98J7AEeP0NiNW7ZKbpdvkMpFyUz8l4AAAAyAZ7sakR/h7ESVb5ji8/rDDaLJg/7XqiPJ1B0qKo+hBZfe7U4Rdhl3xjVAHcpz+vB/FEAAAAvQZruSeEPJlMCN//74ByArMYDm/bz8wxn23X7eeY8ueYRJcmDuVxZyy1hXLUX1ucAAABgQZsPSeEPJlMCN//7x0/RSwgInmw7ggx6hfLg8Mo63KinbZ7POUyq9lLfsMaS3xKvOrwHbYeQNzz2RoRgXOv/exp6/Mde6Dums6ZRED4RzeaCX4PJ9AgJuwbmO9tXNIbBAAAAZUGbMEnhDyZTAjf/+/6N+OB6zo5Fy5eHpx0/tejGu520bLkOKYZmUYwoEKtu0gsWwF6ztlWdsn2cBXYI99l6dfY0me7vv4CJ7ndd4PxDQYUsSz0tIM6CYnR8oOrc/Gfexffa12GAAAAATkGbUUnhDyZTAjf//CnvmQIc2OrRA8MwDA9HMkp0e+MI7Pm6epVIxMw29SeVpA1ToAxIhYkHTNN5ShviDJ3BuOFRgp1C9Ah/PPMQYFk/kwAAADJBm3JJ4Q8mUwI3//vgRKhKrrGIpjJo14wE5fxn+jkJ7L0hg0Y/aAfD07llO5gOpjeyTQAAAEtBm5ZJ4Q8mUwI3//vDootTkBHWutfyL3CvbJ+vrkeH295y995Ase9ZpuUtjRd/dTZ08jGdVP/9jByukXhfEKbwAjssjjm1tYa2KIQAAABAQZ+0RRE8n4DqJb7q1BrisxfO905/2QQCdm2BOraG6khWyZ2qOCWjta0B7hKGZCu7bgEpU4a1B1yA6YtIK+nvcgAAADEBn9N0RH+HsQIbL4x/P9Mv+bVwIYmppI+PEO0zwOR0FYU9+OSfjcQXZbl+lOJ/yuuBAAAALwGf1WpEf4Z1uDH/duR8Z1p/stCcUr053SmodhihH153Yzn0yYfGEtEcJcDx5XQwAAAAR0Gb2EmoQWiZTBTxv/vuVw4DvMj6PtjWXbeCDWe8HbgEv/NcdLhFqVKB8exZhz3jCX6jioMxFUtp3r/RC8ltZJfrteOt8L2BAAAAIQGf92pEf4EHlYPO0DFLXPMyZmip/4spisSOYUIUhCPWMQAAAIxBm/lJ4QpSZTAjf/vBGT5YQIc2HKxMsdoJ483hDJQtEt1oSVY1OtNsacpcCHC1hLiyide7+igKkUYmgW63zjDEC973kPfv/kGvM9IQJRrdPvgu8NP7Gg4FonN9g1w21bXVxN2U+b+Kba5ydK3Fwptozj887wZ5U43AxRVa5ir6jhSItTpA4qmjsWn/cAAAAJVBmh1J4Q6JlMCN//vBzU+RvBF1xHc1oj+U/TZywQssct1DlyEQP3bm66D0erMGNf+QJB5DFmPKyxwSETeXY47tb9OQ+xlPePfb355uJNYeS2Og2DJDc6qxeP+J/6dc/Sv/p/LXg678yyOLMGYC22x0VzI6myko3Y2jbnwPZYHoVN3W9bTc2x7RxOC/79m7UesteT5niwAAAEpBnjtFETyffW4h7bT+QggNBgC1Tx90dvwgqWi/2bEAaoy9k8G5OWvd6LsgFQzqIuBbNMM8rzbsW0pRv4t/6wnIDQ/mCBNdzbJDDAAAAEoBnlp0RH+AcYN6mjmSXWqx+E1SBd6filHrC4NN/gK7ATVZjyRHQcmyIQAZumDsG240peDLOLW3gVbq4LmG3Njs3hJcfGYGdq1drwAAADUBnlxqRH+E1CtCDqsru19uEmwmchClU8Q9+ckZEbxoQUVCtJ39bW/wYXdNpPtmAsJQybL9wQAAAHlBml9JqEFomUwU8b/8A8MxwQo/6EiaK37P7zGFrPazCtkNNv4vIp0kq5kSnVxolGnW0VIejbpP5ZiiC+I/F0ma3+IF7CpxnIMPf4egyyXNCkjhnJHSXHusxYcAUqAOlV5uTf2eoJk7NXqf/lcR5JWs+Sf/4CuRrynQAAAASgGefmpEf4ERwYTMPCynKnF/ULnVV0i5kdCM7P0vSFSGSpVSXT+9+3FrB1//zAGZ86aGphUfLcz/y3mFRVu6iiyShLlf1IBxT4rwAAAAk0GaY0nhClJlMCN/+8HrVh2DajAEnVrgIluIofrb5gjHi7I0o+eSqEr/KXkOiwTxu2NN5bbjgK7wNu3j1ZZsWnH+OrtoEaJsgcV6iH5Pd7Uko8sgwhTol2e1ZdJ/icre4wKyTmNr5bcC5di+bdY2nLvrzZtdpa9DX7QJestuRmj2U10Cl1As2ZueLxIRqj2wGBHESQAAADpBnoFFNEyff3B+2HCdri0/UUVYHd8epT2d+YMYObZm8nW/ez46SIN8EbMlhsLlCSFaW4sFX2nF2P34AAAANAGeoHREf4ew+GS+7UvFV/w2hI9vC/vTUrzGx1QcGgUQ8rwi7DMOqBjdSkbuDSjnaADQL3kAAAA0AZ6iakR/h417ZNpNh7L591MHWZo7LvvoIcpQmohn+Xd70kfnW1Ud5bf8T7beBDmR8Xcd8AAAAGhBmqZJqEFomUwI38oIQ86YuCDOQ5tOdExAsQJusAUoBl6PXbngL0EHOm/
XgoQK4cIAx16UWM0dQcvANQqKp2EBOaWAoRwWN1bqOg1bQrKKmV/ijRbo+vvcxfowunt49/emVGo9MQAXvQAAAE1BnsRFESzfg/tuGYHKjTB5ZID1kuLiL2dqNk66JpJk8w1d6Kcc2wUWTZM13JvgxIJn74EkYNkgC4PjHLlyOy0Hwh6m6YSunCOq8UCBEQAAACcBnuVqRH+BB5XFIVzyClcf37dfxipiNXufl10iycx4d2lwwnYOlmEAAACBQZrqSahBbJlMCN/7xIFnX2UJY3B5G9rwJVPFBhNUhdhx/fLZ3fI40N7ZzzVF/KfTN++yvpUue1sMAod2GLm7S5Mnr+HDpXolz9xb/2QF7HgW6iFPZ0TSu72RZcJPzQzsgubn35XTFLL3mozUEl03Plt0iKzniOced0OUW4K+kw+BAAAAXUGfCEUVLJ99PbzhStPV0DPWCp739xrVtKTUsEdCecaEM+e0OjfkkUkuNJFophvGmqzcNDPP+G6mO1enkonn7obx3Z30VXOOnNy2O0Eiwd48Dx3VZnk6OQouxGUAgAAAADUBnyd0RH+HoC9/9Y3zHp1BIl2MfTFJGRIlfr2DnVE0Sw1ikOxYlKnFwH2Ru+ncOjZ2P3qIlgAAADEBnylqRH+H5CPxj/uvJW0xJiOenPhWEISI76CVOVnP0kO3twxfh5J19iCz++FpQtdBAAAAZ0GbLEmoQWyZTBRMb/wDwzHA7nHfcLHdl3CBcWDNUIHw5qJ/uI/Rxc+UScIeicTquE85DXU/XssWAvri4jZAdY1hh7+q6h1EQeWGPd8v9kVk45hJzDq4QFLRn0armvMglMXFFZrGuXAAAABFAZ9LakR/gQeVg87ywhqQ0UZ0H9SsmVmcAjAxEPJNkzxuBCGvKgW3OfU6j6sPxhv4LrsmE68KdHBdFoxyuLi785Fg1b6+AAAAhEGbTUnhClJlMCN/+8di5MOwazCfUdt4QXA7i2vzFU3g/WQsndT77JVDSX3dyiHIJ+dcHuYgNieBhDWi40XytcT7MzqGHQ8BKd7emcKr7t9ZMy2246DgCMU6O8nfWZamMp0hGAd1hoDnPEpKtXES8lvdf8QvUpJi7H+RY6YmEvOOXCg14QAAAHRBm3FJ4Q6JlMCN//lIWPInJdToRM/Di8/wOIa7fAwqyFmQUIqzAB8UxzfJluX0+Td7Rj1n+911ubH3x5Ng8MZiV90GkBT1677/EI4xf49/XAJzyB4a0vGsKyC/rL2cdk9MBpe8qr+6ehvxBOlZU5I+9k8HgQAAAExBn49FETyffW4h7bT+VmzHLoEDF+QSPtyOibdtnOwtVf938/IoeR8/CfdhBqH8efnQNzSTcptOtFhLNhHsC9QRs6JUpZ3g6L+0o2hvAAAAPAGfrnREf4ew+VpnSNtXMyHIouF/ep2oe9EQ/UgNEJmOXn3+zo/llDpYdbcSubbJNj3j3tm/n+QkREY4VgAAADQBn7BqRH+E1CtQOK56tKoCiM8K0R3dr/Xh4xtb5DeCzfJ3ISlqMnAuma7Jff3Osd2p1kQ4AAAAeUGbs0moQWiZTBTxv/wDwzHBCj/nXShLUbtqalbXUXB4c+Jr8UsTF9vlJYy5JAsd6N3YqAwRTAVVRhIyCYy8OFra9SCQJIdvyrOlBtx6e4Nm3jz/vVxUbjh4mewKCUl3melRvDxX4XCovgeHkgRgAO0Q4EkZqLJtHfEAAABFAZ/SakR/h7DkTgDV56NbnZl/ml39qIOiEFfzBef+RWvHUibHEjD4zje6WjYo61QyA138/8GbcEZEmbUXjgjCWt9nPALYAAAApUGb10nhClJlMCN/+8HrVh2DgEzkPQSYjrWyzR/cxxfpkyJIH7uTdsyWu2DHpet18vbjP31VvB4OvU2zGwNJhH03pIxozyt5CjT0DxM8Eu6iU+hynjaNStmVahBNpSsriT7NNCrjVDXNYImUZaYR5CNKrD2nPtelij1qj2HUukGEN0p8HTXl4uhVEfUSYArW7VPcWydU5vlLZYaJeCj6iL7TK9654AAAAGFBn/VFNEyff3COo0/prpbjGuq+Yz9qQ/0UtxBbR2wSbc63al5dG7nbMBmTmMJALX+gAAHd4U3sZUwDf/j8MBK/Z+midrtvZ/dhX7BGy5kbGSKhcRoIJqsRYrNy2G/mf35bAAAARwGeFHREf4ew+GSYyDT9FKwHNGCE3zTvKrR2YTzumc5w1YU5tpz8yyBsGigtzFTb4gkcmmGk8BosAJh8sW1hPLsi/oP9XLQpAAAAKgGeFmpEf4eNe2TaahDYDlAE00B5TiGe5P3cqwgz7Jz7dAwop6UVlxL/wQAAAMZBmhtJqEFomUwI3/vBm7tPFg7wyzAD0Mnjaxg9o1oowtbJ4Iq6fMSBsDEVXAUv3Ek0V6bUA9AyXfdXplJl5OklAZrAT2DLE0CTU2Gb1ph7uMvZ6e730j1uTDGRcia83XP9lLWy0N2lgd3miSshfYKbuLgLsYsiwlykYvR7QV0d0CqGOqo55SN1Jjj3z4lpX6c0RGgzwd3ziVIoWQeFFwVeqqCbllBnRSSRaTmdGVG2XFLb5K4/MLixbmd/kI/o/AQS9ak96IEAAABVQZ45RREsn31uIe20/lZsxy6BAxfkEj7c3vTu2znWnO0o+LL2TewvhwqHeJD01QF78BcCbCuwQBKbqpl90L6uNZIPK2wJ2IYROSzDobv1Y58U1sN/QAAAAEABnlh0RH+HsPlaZzJL659u40VH/5IqeKHVDn7np90TGQtQWPbiEvzRpaGCnUR9bwO4lkoO9UNv4cqr2uoaHtjRAAAAOQGeWmpEf4TUK0IOqyRzDxju6fds63t8C0diXyWCw0nZtf4XS2WBIN2TqV3HMYRmD9ycxi04LTVA2AAAAIBBml1JqEFsmUwUTG/8A8MxwlzTJ87d34cixfMcHbv+VHu06EPCplmBHBDuntSkT2yRgALAJie3XxuB3xfOH/D7ydP8ErkN7FeqXmM3xkd7kLShI3yZeVnO9ZFHHuwsNecvxwV8jY12J7GCNBCKjcvRWa0RiLGeyRhv7w7uCpPSTQAAADwBnnxqRH+J9+qocEqY3mgUADG/qVAbloOL+4LbHt1Vueid0r6SqxaL3S0xMXyghgv+Ji1+lINFTyTiXpEAAAB/QZphSeEKUmUwI3/74VHYelCJVnhRTg1BLYiZcXT5dxyk3eX+rJNoNiJEdunbKmh+C5I7HMdbrb16UYYiG+jet6KLxaN1w2WtwbVe3TezkTPnAeJ+tInJzjZlV1FqpcNW2arifXJoFuPI/Iz2s42jmNCDbRbNxQpODftQ9RldQAAAADxBnp9FNEyfeyCnmveYuSODs+6cbniKe/vM4S56nhz+m9i8Zex7ONz5LSvfzcVpUjqdCJWt7Tx6czK0loAAAAA/AZ6+dER/h7D4ZJjHyl5mjfaYPRhVd6h/FiZQAFr6JH2QqvbB/RwwwlywmDQA/o5AxedWALtCD568UWghNBmBAAAAMQGeoGpEf4k2H5XFXAtLnyu/P7BuNQSHZxysH6gKDLB09G
XBu1f/byom4T1PU9j8d7AAAABwQZqlSahBaJlMCN/7wZu7TxYPY30W6JoFSNQpwVvW1QrXI4toGmRdXr6ug5AcIzYxb3sVhpDt2R6YmUdAxjB6i/FCQrVb4p6cx70BUXqsfihn6ICLLcl0nvY1vzENxIRzj0j24J7cCRqf3p3r+wKgQQAAAE5BnsNFESyffW4h9qH5WbMXYFj/ael8uqKOT4Kdx343HScFjvKFNiDbjO2olivLVLdWGTyFi/Nn+rmW8yKVjA5v2wbZq/jG80u3A30934AAAAA4AZ7idER/h7D5WmcyS39QLC0VH/5IqeKHVEQEr8EriQ0rMLKqwWs2RvSTdcH/r+SXrvRjzQL2kGkAAAAvAZ7kakR/hNQrVcoxKlr42ol5Yisw4FbJimCD3s7LQHkiALNax3evqAJ2Z8nqxtEAAAEEQZrnSahBbJlMFExv1WRxLgVrlSU5kvRPQefQNL+hnY+5sxyM4OGP3W6gipxl+VeQcVtIs+PdwqJefHjTv2e+aZM41f823N7uJYxEIRmdHjyj4j8dVr5J5jvbLxXLbyf1aePF+es1ewhAgs3sr0LDYMZnAVaSTpMiiCjkwD85nRHwA0GmWfVDsiw1fCQ08yGxuwdcMd61SPpfRADYJpeVmWilLIkk9w7lCbACvWCJVPVxpYkJW8dIszr9QbdlVVkW+1uatXpBfwsbLty6mMFmnr5Fm2i6dwp+IR9w1Urt++4L+nMYz1LxgXCx3//Zxzgt2ANHgQF4NuaWK8l1q7TAxuxaM0EAAAA4AZ8GakR/3DTbiwrhhwSpjeaA68X7+pUBukHe3+XGyF7HhYlJIdhyOmcdef7GeSfs6o7iJKfivUUAAADJQZsJSeEKUmUwUsb/9BIkgoUyga5F+ZT3IdOhBODSowpHgJE6OOIA0hhuJSSkDCReDJDR0qEUS4FpJoQHvLAlVaAWTk//H9e/17IrpZwyJ6wCC9sEeRYyik1ullhgvUDdNUQzGaLfMevgU8An/KAX0X1/cOEMqpcK9aaJxowlmuUS21UtcIb9euUUnTFnUYwlhmrjdbVG+lNOTBID04MLbIMZIZzw6PX/9JpiKMQPPmV99BkjSWM89NuJhl5mlC5ED/6kt13QRfBgAAAAPwGfKGpEf5iZsaTq/dqTsanc/0IT3kf5Hr5m3oLGjyM1iBcL6c2qPe1QQjiri0diK6w7972kh1OttrOxLezWwAAAAJRBmytJ4Q6JlMFExv/0LBqptRAkFpHKdI1ZlaFSI2+VAhWAnIutcz+4N6enTxH30oYaYrWXeNE7ifjDCoHMMyW2gFUY+wbi/JE8adAKifO+RhxPHcvsbcbFsnmwVPaFpbwyxJYTd5QCBNnA5eacY9cU6ZwSL/KPzlL1bcR5waT+lbJg8F9k0Ygkey54qcWn8L2WvH0xAAAAIAGfSmpEf5BbOR6mhN42UttyvF0PAGvTXN8LUtVDAEJ9AAAAqkGbTknhDyZTAjf/x6oDJtkwPh8BHMKIJQeKakhBaoY1uTU1Dv+mLEdoR9c+RexgX+HTtr1kNcl3RElgObLDY/PaRJqf4jDnBLlQj1v/j+gBpH7ME2CB67cRZ0xo1T0OMscgDL2ja/1P3y/3R5yr255srgQtnSzKhY+hp0JxNKYyYJi1IFRA564tmvre91OlcSUCU/OIFZJE4oY32tHuv8tWU01eFm8j3ElGAAAAPkGfbEURPN/QgoY6wn8yMqFM8ScEy9Q41T0IvLskW7ZAQd6JL5LdsFFkc8iVrAOSANPjytFHsQEpJgFmIQehAAAAMQGfjWpEf4EHlYbRiT21xAdL3k+Vm4+8bP5+kpCxYbPyDxTyetngNkKsEo/OcSIoI20AAACGQZuSSahBaJlMCN/0LBreLNnVwoJ7HokTqcp6Q9iGVOTjfWRdX3HNLzYq0LH11Wq8Jq+uiZjebL3X4L63Z+y9aEj8POW4Tzlvo4K7tfR97PaNJboUExwnpuB7qJVSNwKDoTb48nTn0tJrvw/O3EZNUilImcvTCGE126axIMdq8DiFXn5Rl4EAAABdQZ+wRREsn309vOFLdicCm2x3Pufzb+QqT3EAn8TFTgB+03dALeaGOw1/6I6cxZ1UST7oYT8jPMi37QdASPs6q4fKsSNxFICCpLX/nIOtEt/OgZpH2dK2wf0xARgbAAAAMwGfz3REf4exAhsvjH6163w40hUbVYqr0IJX9KQl/IjfEvL74S4AfJknBl2wEwSJxGK44AAAAC4Bn9FqRH+Gdbgx/3bkh9Wq+haFwumUOl4Nq407OonaoLPLjUaPLWQnkuQZ8bOFAAAAPkGb00moQWyZTAjf9BLL/c2UM1KQX+BxGGL7bb/E+C2iJSq8ZT/fki7Xj3xHv79/3OCR4eUyro+hPAO6kwpQAAAAbkGb9UnhClJlMFFSxv/0LBqns2dXlsN5umPuNMnDIBekdIzGY3PnWipO8EQ4VO29m+HWcI89dez+iwGudwY8934b5mTOutNkupDrTYAefwpBLHDS4f6nH0oDndONGfIYoajXM6v5ZsonVMn+J7bcAAAAHgGeFGpEf4jgOwq2BPvOKJdhwabiU0ees+grZ1YMIQAAAJtBmhdJ4Q6JlMFExv/BdFX04QlYNpjTO0cfnbEhBFxg9ac2tx4t2ngcyym5CpUChqKx99i1D5EkvHHFPX9/7KMXq7jI/q3wkBGpGR/RHgt9ep8OxX3IASAi+yKKBVIw9GzZv1fxmZ6HBMZD2MbTpq0Mi3EG2QC+iK1rC7QF8ooxwG2N1JEtqo2tAd03sU98LQ9M68dpUpC/xpxxgAAAADQBnjZqRH+BD2jlniCi2imSX6//59WeyNYZ7EThdu+jrAY1bVw+gxFikWceh3l4wiMCVEWBAAAAY0GaOEnhDyZTAjf//WKRs+/QRQ3PXPg7GyNgHRyY2qAL7E/g8+2wAxx57bb9N5LU5Lwf1jU218is1FOXmWQFA+ILNlI2cuW4VuFEAUs55tZrFJbPFNXTxKQFb6zgrOGYxN6gYwAAAF5BmlpJ4Q8mUwURPG/9YpGyYu6xTlLEz1V6fELYXJYCl0J6HV2jidgU1kHU1TaIy98emaghsv30jfu//giS4u7YXeCrhV7hICMr6GVBT7WEUf+L/NNHBp/YziKg+rPOAAAAOQGeeWpEf4exHtbuDH+8TXGnxL+KjE1xi3rYQfEyAC01BI7R1nn8TQtbK9qWu/fdySehP03udb3CCQAAAFNBmn5J4Q8mUwI3//1ikbQD/zwawOGki1yA8PO6m26UL2L/9mKgReK7+ebK/al/M7IPAICEACwHoICsV2NTBXt4+cDul/OajszW51KtkLVl+aMGkAAAAERBnpxFETyffT2/FQo96LwXOn2o93ozcOD5tpYSJ7cwIJok8j0UU8aRtemPmdp7YYIUiCycIRpOWrK31Ohdggd67CoH2QAAACkBnrt0RH+HoDzW2qN3QBJ/E8gMm82gikxdFp7fAWLcEhRiEwpvi8PEkQAAACMBnr1qRH+BB5WDztAwU5oZn2uFb+hZJTgCS
+3zQHuCTSl/U7GZJy/gS5f9Cy2h3VQBHdy17hrU6xaDgmNvuPdO+3nvolJXmlwIAkxED8Lx4XeaN4VE46PRHNsXwgF9trLiwzDBPhYZ8V2tPERnsdMtjrFEmrEm+drqLgNlW/gCWzTU6jf35hzyjGb6uEJOQlcGPQpDxqrtzr8Nc8vp4B1BYjdvjrlCrq/gAAAENBnrRFETzf0DZ7oRVytJSzitxXMmJRFhcbeha5ual5F1I2q2YB+zAT4PLPb+JmJ0ARNc/AIxeBO0WVPQWFlcQBs+2QAAAALgGe1WpEf4EHlcUhXPIwsFKRCeT4xUX4F3Q/slkJ9DbNgs6Z0J5ivSJcp3D4p0EAAABWQZrZSahBaJlMCI8bzhd8dK3gZ46kOJN672WFTGEpwGtbDZDet5dEQMHNg5ySdNRqv151JHg5eF5RqsxWW7PejJfmZTCQ9Q1IN/bi5e+xWsYriMFYv8AAAAA5QZ73RREsR4etQctl8Y8CYRFjDEwJCzxPJgJwzTf+tolyI5ByCNErI5NnAqLN6VSiQ3zkMiK+t/rdAAAAKwGfGGpEf4EHi7cJ0aWsBD/fwiVyP3OFrw7+rNjWzuM2AuJ/SSLVjrYN/7EAAAT1ZYiCAAn/vgy3mIDfV2I+c4OQeUCbjkIqauQ7q1hI51t1KK0Ty/hpqe1v3iAKauIQOhjuzFxyMgx7FbnM/2jBWLSkwzPM9uWS0H4jWSakogzfe9txEs4c+/QsLeTPvLzlrrhoRvk76vX6F462KFV4lQ8BLFX1JpZOU12xKsmCPjGaaOJgyXQiOhYn2hWzgcqRNjGX+X2BzGn8mEJ7aGMcvtMYUAkI8JdRks13nKu9bo7rrtiKbOLdcmLQMypUndSoHj4HIx0L94EanyxNktjsUH6UEl/L/6OZSnHAINvBlAMWXlIWyt38JXD0JUnOv+PLJ9lXJviKQHF0pLbebPoF7UwMYacH5I9nuT2nhJTj4jMvriexQBuncAXjupdykvxkUgQsRr6KKam+klVKsKAJwzYn02pUzn8ZX99Kgp0Zv6A/4AZIw+tqzSXaBRvQyPlagCtwxNENMBXBGJCaTT/M3d1NYKMBuEQdSzG/ZYjE1JEavYncvI+HARC2j+WXM1aZgz9ZL8CgLjpcv4/YrSBFGs6H2gmgzdANfJkk5Ly8dYyGKrEOA+lfembLO2OFLdnp0Kca5ZBG4NPOmAF4BBvDzXD767tWmEsGNf2xMnrIGj+8i72JWEhZfuKA2OO58X8ZDSx+O1JqcX7S0VhG6hgbqU5ktHE9a9+Ve9c4kGo1CPjPl8CTm4FxZbQjzsmwP6HqcIFYBDPVBZ9WoQ+2D7QvXIfv5HX1K2NuPVZNm9uwFtcmGsf6PuOhqK1pWSzqCgCmwApkdzlejUme6mZrR23REJ3PkQdaAyFo3gfa5LdW7PmNx7jcVqBgkY/SNldaGv+tW+xdJQrKAgqxhGSxiG5fPPeb6tkPlvE3G0CAEuGghWptSDagus6PiEnU/3yKJHGviaPLkzl10/T0zWMpIJouNnIfO3N5LeJK2C4xONQHkEUvqEC0YiEq/qK/U66LBsyfPc3Q2VPtQVRJkl/If0X6ijW2cTcVVwN2xTAqMA2dSbXoMPK3bH/VU7nJqjh5LpA1vIdkAYZ7+xljSLiYHT9oE2LHNhfDNZ2s8GSgeEYKD1VirZxu1ZbwqSl6Xuire6yZL+zwkbGlmIlsAcwVhhOfhAfiaIh4/+ogejQgeDyfWoDmEbXzSAXUx7RmQg8IZIWOZzcuX2GexReoXjhzAe2YabjMhqvs+FSJ4b2KP30cggQmhKlwsFqD1ev4SKHH0Ge/jrjEKbN1HzQ3oLtBg4GpkYoRNfImFVoXHLFg7l2LgETKlwJhboxTHUPGneYTP7AQ+K5Eb0/uCSO5Ymq3DzI1gmuGU4MVmzQSuYm/yP8e0MvRQ04RZiZtS50qyxWMrIOnk+IJ7NV3E0pPM3+ZSPpWftqVuEKawLfKtVnt/jTfRjniO7HM6pAY//32DoinfYKZ1Q8GPwjncAQdzvOS+csasvxPzi3DIy77DGfgyPztLfczbeCwixhep+9Vqr31F49wN+AMxHwVCsOJIRkR/wOscgZTuowD55INTwrue5rjZiT6CVfcKanAArmyYb3q8SvJB+n+cPqmbhydgqPgtkHij+7EGXRHJg+g2fpEgj5bDzmfXOTO2YG2fuHCeugHQkpDQ4gK1xMFUb1525vNMHHp0iO6hZxgePKmk6xBwKiD+jgXwh9euxUHrTJXmXSWmplATtcnNxFTI6sQuXnJEZ2JpBKsD+2BAAAAP0GaIWxG//vEnVTkBBzwgdJ0ZWNF1Rhjd1kKxuETzl8V2+eU76R66MAyhvqkmnkxp0vkl/+a6/BSJxSj4/j6swAAADJBmkI8IZMphG/74DjgQ7ZH1Lrefmefbtk4wS5Y2/x+DLBweoV7XRDx2U0ZTPS42sTXUAAAAF5BmmNJ4Q8mUwI3//vBGT5YQIcSq44tpvwtmFs1bxuNY3/v09IKvTPLNo54VVGqqjoQ4KZPNJThwuSr0wg0X709s5PGIgRr0KCsOg/kGruNjLFSrx/JisqEekF8neXKAAAAm0GahUnhDyZTBRE8b/vgOOB3OQFc9AXcbpj46i15PbP+Q9GQ2tJqySrnQwI7VJls29RfMrHbfsSK8eX/l7+18PjO5gb4bIXLCoP9kNhUaOMVrscc/CQyFS2DmIIbp/qLfqVQA7JSH5O/IhV43txHuIs879XU2UcXwNJG+MNUv/9SvID6qp3sUX+AyNp07MsYGwo+5nUN8BjKM9iPAAAAMgGepGpEf4exE2/rMWYsUC9Nc+P/yRU8UOqE44EY/0tBUFtXD6vVpxSXjArlMIURYAeAAAAAXEGapknhDyZTAjf/++A44Ilb6dinlSkT63y1KiWQ/IWMp8xWpc99kAKnB2h7AefDWJA658UxgGiN2hdpqZnrxVEyqj01yCAccBMH+y2STVmGjawy5zlKkJY8a3FAAAAAckGayEnhDyZTBRE8b/wDoisgQ2MspyG9MTL2twAnx4YO3N7dBgMj53wgsoD7fgMzpRUSBvxUyvU2F3yMVPcr9aQehYe7tuqCXqk4O3XxyB/oozQG6cLwffJ31HMNBW5ShOThCQwSZuY7d4iRHTfMsm/D2QAAADkBnudqRH+HoC9/9Y3zHp1PiKWpA3D3YoIQK4j0M6XxrUNNDfPHjMjM+xDhyvcRekwlGLKw4c3efGIAAAA8QZrsSeEPJlMCN//8AtC7wRdgOIxYG6HICf0tt8EYRWGccTPXL9ID3ANrHXjlUlFMeXfH4RGJz/GNKEO5AAAAOUGfCkURPJ99K3q+WhOPVadJ0Zn+q4gSRe2/zKKvWouYJMdACf73czGhSzAbCZzI2xgFyyWnZjV5cQAAACkBnyl0RH+DDJVK3gapAzXdW8ZNJVvmI9scX8y+ulkPHeyLSaA+eDUERAAAACEBnytqRH+BB5WDztAxKT4xFpAt8qSdgkigXdBpZgOPffcAAAB8QZstSahBaJlMCN/7we
tWHYNorkyweRiHOPPdcXS1IITtq7AezlT6fweBHbowbC7asO2PF2/UBCbMgGRBUVHraspQzmQA6D7PUakKIGCmWZK+kVgi7Knv/moFsXlEd8QJPGaF0SHBN4ELaLTmKwUa7PPSxV2g8M1cOkFDggAAAGBBm05J4QpSZTAjf/vgPUSLvylvSJaoX67yRc1fALtzOq+SiE4b8qGjclrzocoYChnLAMbHgIHZNX73/lsLDOMLY4x13csDW904+9bFMORIwShS9SrF2hIu4YTLBzKObT8AAABsQZtySeEOiZTAjf/8AoC0REXf8qqj003CfEolq5ZHPeIt/nTlRdl6OlOcoVNgvTKHDmyYVBe5RD5gFrfZYd2HkGO874iVFT8MXHBHECxLA88R/Dor69i7i2G9sMY7GGZAq/0gNA9/XbGiCiL1AAAAL0GfkEURPJ99PbKAyIZQ79YXXIGQ9Zn3sXohriIew4GeDzxFPtkfYMRAykrz2oK+AAAAJAGfr3REf4BsKuKQrvnfCNbyOxT9/LxaMU530JspiBe0HCmuIAAAAEYBn7FqRH+HoC9/43g3bWZ3P6qT2ZqH2A9FOFkkCa9OSnFIbR6HJHG3QgSA//4qcrFGX1dvdEywSR/smSgEwGCmbp95ZNSBAAAAXEGbtkmoQWiZTAjf++s+OA7zHfcTr2WCBetLHUxD7/WNcoHXS1dnHb6Gxii8D96x0pb0d6PjC4DrB/WhVpxnxwTGDhj4JrHO2bjjfg9OQ6Gfn1d+m/OSL0v95OrEAAAARUGf1EURLJ99PbS4SuQriYYXJQH/fi36a9f+dXe/nbPtQtUH3xb5/YCrJqm5SDKlJfww5Fp9cPYaad58GXqPyd3Np0omTwAAAC4Bn/N0RH+DDmAYlvA1SBmtJcJQ6ephfzmo9IkH4wmc2d24zXHcPLKh+Lnw0TzAAAAALgGf9WpEf4EHlYMkv9E2/PMkyg5XBF3hcZ4c77PtedhhUSll/MrYkqHYxHIF000AAAB7QZv3SahBbJlMCN/9dtVqlh0uAaN0ofVjxMhZ1h5qorEdatiQ783inAG2LXHB1MiAuY0iagySzluM1H3bIodNuek635EEPQurFICs0REgQRMJVUluT+40C3yNiRn0C82e0RNNjNdsjWBfmakWZtlwAWD/Sn+XVH0p6IfBAAAAZUGaGEnhClJlMCN/+/6N/UQffCtxxis5+gpt/z2N6/IRl2ZC+fkvRLr0komX3ouJlTkB4RiYtyBbQy8957b8kr1rd495KdRJpeJJwYRiht2s6g5ssfWHOxE2mlZuF4E9aBSO+RThAAAAYkGaOUnhDomUwI3//APDMcB3mPtur2L1aw7/475bzHjZPyheciacbIxHbZ0dVmofkE1MGw1npgtJAA+Op2CJi6U6FI+Fd6w/4z91fbaUzx1UjzZgEuMaupFEOYn3mjpLK++pAAAATEGaWknhDyZTAjf/++AcgTWtokUg3fR5ZBhOWluG1KszNLcF72sEzcEsRw+v6kcUufDwbKgmv+8HgVhfNY38jxmDgIOVqtV0QZJ9wkAAAABZQZp7SeEPJlMCN//7wZu7TxYECs364K1K9zOpGtt1ZZOODFKfndyW2uhIDiGAJZLhzB4/6bupiaM+sug/+57cXUnsnlte9DZAA+U0bGdXg2VWdQHryyM7bKQAAABZQZqcSeEPJlMCN//8AoCu8Eg0IAKdPalTJwL5k3brsQ/2kMhr5e/LMbxJi9LMuGNB+zmBqO816m+p0J54fRMFrIfwddbFhUF2M8i5i2yKkZU/u9ZCP8OKKucAAABmQZq9SeEPJlMCN//7xIHZpGQEF4WuTpc9YQoczh9M1Lmum+7ob3DdhJhUVeNksMkmbU/aTy524uFw6YjqhrhnvYI9JX5S+eulWATUNNzLVA87Eb09ZoT9jApBuZ93/euYFurmkkWAAAAAhEGa3knhDyZTAjf/++A44DvTrlMIVpBxjTybeO80fsQOd462lkQixl/i8ixhGZjblZ4BIJj50LvODBaMD4t74KP0xM1A0shJj3XDsIWRvMHUMW2oF+/62kSLqLr/NQ6ysmc/R19+j6tUcDuLJBR9j9Ks4GTX/cn3YO+gyTrCwbIURfvNuAAAAEdBmv9J4Q8mUwI3//vrPjgQlevXQoH45WptzQNYqW5lvUt7k370Zb40x7Lqnj6WT8/G239QKi+rn9bt99F7eH8NCe9EpQ0PgQAAADVBmwBJ4Q8mUwI3//vgHIE02SfYN8H05IGFKI/2FJ4/9xTz/6bnIt1458gQ4K84mOGQcC5N4QAAAGVBmyFJ4Q8mUwI3//vhUdg1hSSB1aabIlvddm+MomJ80yRI4f09f1qYnZFqKwTK1GeZgBhUqgBV1BeNxJBtS3WYBxDo0ZtkTRCh095TeDwxQPJ/8LwJ+/nKDXqEa7WrGfxoouefcQAAAE1Bm0JJ4Q8mUwI3//v+jf1EPtOhzZ10VMZoqZ57G/blO7Ho+Yi3NWGkKEaOWnKTMTQBZb/3jumdLhfCXmgoAlPMUT3exw2xGWNiadKznwAAAEZBm2NJ4Q8mUwI3//wbcNERCUal7f5h0QKVTPWNlgbjmkWJ7S2NKgIHwG0iGg0786GE3lcq/pK9y34kGTeDc/yr07QOL094AAAAPEGbhEnhDyZTAjf/++A44Ilb6h2GJcOd7Cm/Xs77EAaBBuyH+UJ495yfcTaGsKqPsfkXwqaBplYMShrGWQAAAFxBm6VJ4Q8mUwI3//vBm7tPFg9mTMd/orjDHC6R/TN14aq1uH6BfRoMmsnLIs+xQljFq9ImogOyRHLL//9dg0AOfyMm3nG6BsZY4fYp4Q3XOVG/nIPZmEHHeOzYPgAAAFRBm8ZJ4Q8mUwI3//wCgK7xHVn2P8TqIU579zJBs783creb+z3a5PkBHTlb/74v4uMpPCgMQyA5H8erUDcdIt1E6LwdC+Yd5xch2XzNIEenrNxxtoAAAABbQZvqSeEPJlMCN//77mtCCdsTY5Z3Geb7Pm/QsC8ZaS5D5HX+6B/kPwTX54IPJEXG0Z05F//tLrak3nsdfXudbo0PXPSotywcbWZJ7pa7XExYa26grBmfGE1IQQAAAENBnghFETyffT3E9CkDsyrDByDhrW4cvbhQ6Som9ctl2PhFMGZxVTqRHWTPWDxP4Sfm3HbhULO9Fy6NYBKZryCailXwAAAAJAGeJ3REf4egPNbtmUKQ0b3pPCD9eBXfRv59JqxVlyBWqnu08QAAACYBnilqRH+BB5WDztJcUhExA+6Zo3n44kMJuHtPfnXh1zDcqNof5QAAAIRBmi1JqEFomUwI3/vhUdicsgtcoZFZMaO9tuG7apZo1pIvnF/gM3FczW9nxAaKJeV2vj19Cv3exUNOijnfRiR9ASN9ltcz+VQ4/0i7jHGYF+KFZP3uGFYBHkT2QRzPu+40UwUjscvCxMulW7ycJ2gzsxyoAhycqC6uUVDHWkGCd55vkPwAAAAeQZ5LRREs34H4Vo/ACpoAhQy5Ym6KOlAwH1FD/kXhAAAANwGebGpEf
4exE2/rMWYbvt3Gio//JFTxWQEj0jgsGN8zykk9TVdEaSZbn/AJPKDK7pTunVhX0/AAAAB+QZpxSahBbJlMCN/76z44IUf87NrRe74hd6WSQF0BeASvxdUxL4Rm4GnonDSgvlzazo7BX+ROHA3HCwCRipy2tg6EaaSo8weDziWONE8To2CI9Nf6xMIHsssxak/PU/YdCrDJs2119wdbr4Gb4bSqtsAe2S56lY/+8eHBMDPBAAAAUUGej0UVLJ99Pb91NJVBQqP2t/+IqmE8MrKhL6lXP6xi7XAoI/PH3EdbwEwYHQ13+d+UGm6YHZjB0tU+x5EgcHwOjl3Wv0NdZKEk8tYEJAq9JwAAADoBnq50RH+FEsrDaMSpjV8Ar03X7CI6tPTzHGiBpSWUkMJVOVPbCEUeYgsYcFzDQe+eqnhHQbxBqzvTAAAAVwGesGpEf4exElWAWL5mv1zn78AGfLg+vpjLffFPe0N1Gn3GyNbeSNBNOMwp3LVlKeiRguPo5EckDmXdrHphQLAExUYkI1WZ9328MC5f+Y/xxKuGNSXp9AAAAMxBmrVJqEFsmUwI3/1jexkZYO6f0SqUBJutOylzcq7633ejQM+YcsbI01FdCSdVMPBHTcsAfhFmyKZdJ9U2koiIqT1xC6nMTtNhYSNWKTjEkyshkiwoA2kvaff/xZ7MolmxHJAhqhjc60XuqzboSWvYiUYabtLNBXbUEc94ymgQJJgQehwjl+Rj3g8ihDXo5vs8YeeuDIHNpoqyq5Z0gfHVseBv/VNMJy2baCQS8R4kw7jDJW8iWXhZUAEJXBoXny6onzxbxU7N9r71XS0AAABDQZ7TRRUsn4fNy3GU/pr7hiYIKBgdsTtsrSiKt3zM/HqMjo9nD98MgGaBM9BOcraqeIPJTzn3j7MnOe8aMxA90X+twAAAAEIBnvJ0RH+HsPhkvu1J2NTuf6EJ/2wV7RDK/mYg08fzz6vEXYZTtgVzrhrYCI12GtLgxwIpISuyR2pURhW4rUvLdf0AAAA2AZ70akR/kFs5HqaE1ZwEakhKwmD4BSiyCsJ5I7i1b/JX/yAr2QptkrDRNOf2vgGvhvSIFmCfAAAAgUGa+EmoQWyZTAjf/WJuYK1IHlx90OrSnTNXeuEGfLuro9aOPtuWzfxGlUqLMokdEgNX5hs/sSGuavwQOvVjNR8vXHeA7drDwV4vSf8z11/Kgo3tdVMNLWwgwf/pF0Ahg+eMMrSN4mNib+1ERK71wdTSsh7YgqUhZ1sRulfT/DmgQQAAAE1BnxZFFSzfiNYJXadE4qxy7VpUy9Q41T0IwKvWOe6XdY5vDoVxuSNPv5DidAVc2oMbHfP990ARqTeT6SM/eIoqojUVbdfcQ/gdcgL8QQAAADIBnzdqRH+BB5XFIVzyB9wJv/Lfxs38Dbl+6UFytIOZY0QdJSWK3tXhSOzb4vSc0LZtnQAAAIhBmzxJqEFsmUwI3/121j7DkGICHPtqvASEuNUhdr93NZ3drLM1IHCZVXsfuAYpeOu4rIQCjNeDjn/nmhlPTKoORxu4iqjlk8//dcd5KvIs5DQidbJMVc7qc3oVJpbzwdr8dbcRFN+vvSDcc7Fqa18YeCHCZHZwXF3Cz+Dy0oArWOp8iRf6FgOJAAAAR0GfWkUVLJ99mioJbr7gTovvLkf/l/YtKEL2mu5oyOikfBlzmuZ6xLlP+smIZv0LBUks+sQjAEsE5q8iWpcQhDfktWHs348gAAAATwGfeXREf4pxDGIf1f+twr0oL2RF3LEOzI8akUIhkzGFQYj1qALSEqZaaagajuk+Xra1iK8MNJ545dD2pPxL+B29q8NRoc/1g0bDynEulFgAAABCAZ97akR/hnW4Mf915MgtV8Fyl+jyhJqaffOqPIliFZXeE+XQchMQ88eJgdqLi6y+ytVR4nyOKwv8jWcpSMzkR+NpAAAAZ0GbfkmoQWyZTBRMb/wDwzHA7nHfcLHdlhDBdxj1K2iflRpBQ54lGRaeTModRU5a8siLRIR5KHbbhYv4ItU1x58f1xPglHmsDhwytPRmDnxl0gvYO3B8/ds3Yg+ydO+qQwyKDZQ62qAAAAAhAZ+dakR/gQeVg87QMGmP7dx9N0u/+sitdubSF+CATvuAAAAAf0Gbn0nhClJlMCN/+8dP0UsID4MvVXMepTuAd92+Z0g5Hm6laabBXW3H6+DO83pTGwIUl3MsSoPcrWr246Y/Qww7pWgjMh3A6DDXOd05ptlNb8IL/CBYCg/64pd6ZHCzVpOaP/3b5cwCm0Sn5E7yOZ/JXi1D/JAEeBMIy/0ioB0AAADkQZujSeEOiZTAjf/J/Y+WDSC6t+DsFl+KShs2+BRlWjHwkCbRkfk5F1YAt0JG+eWgtV3xfaTFPYBv+mxEEdHUfsAnOpwhTvDIVMNDKRQeXrvoZCgkP6BOglvBRcnVRm/QCrn8gMOw51JbsPpVerALbca6HHK4IkOueWgXj8/rw/Gq7hL81TYpKHnCgsWhhNql0edQUpWAf3cM29hrgBSKwsy3jQs6DrK17B584Fz6wiEv//JAFYwi3YgyIjyLVn3WboOj/oZw3ulrAwtsHz0duObfFaFbLBt6sED+kDCfEscSq2hhAAAASUGfwUURPJ/Tvx2ZXIhlDv4p8IgQMX5AlE+WsCstVw0AQsep3TWwxt8vGXFQoms3Q7j6KFLJHsNyi/X2PtL8B13mVJbuvM9va1sAAABDAZ/gdER/gHDjMGGbkbroMeA/+R+98sKd9OE/osjRzxqU4fU5myqG9RgJ9RKLHL1WRSCFE5PdzrAJ6RnJAR/133tzwAAAAEUBn+JqRH/XxZ2/arlGJHqHMGb4IS9k1LB2PTOkX1omkDNqjv/Hyd0h1US0g+kY2+XgTJ0qtQYnHLVn+AHiS4iyJNOTtEAAAACwQZvlSahBaJlMFPG/9BI9qA10G55ifcR7emNzO/s5HqLdZT5FQFk+OcoHWoo43SEpEIzC+YKxdwwVhbLtnL6xRqv3B4n9IDF9aNGjQC3lBkrevbFcw6jXvPXdk7lFbzKvLaUj6IFJ440Id8uOH8S2xSDBjkuw7fw7kuVtpJQ71cMUX6WsUmpYvzm182OmweSjE2W6mQahX5TDXHtuYFW+s7wqQG7mczW59/E+jHqj+GEAAAA4AZ4EakR/mJmxUG34cEMCTD9KfzSlBy5J63PV9xDXOTeu/CSqfUIWFTARN80zbyy1JouE1/9W5KAAAABpQZoHSeEKUmUwUsb/9Cwdx7ECFH/O6zLxukzGly8jJexJ12NnWZVyEH6v2GpBHI24QsdFluV6MU99hhAQ/ORGnQtP16u598A33ALGAhCiT6Fkia5Y+ko5b6BByfi+fm1pz4EM+HBx3ixAAAAAMQGeJmpEf4exElWANW9/woTjJTRwrYLgW5TCxhNGjZKsQXGThPJJ3FtL1nkHDBC/zEkAAABbQZopSeEOiZTBRMb/++FR2EKSC0QlA46ppypN08vPo2SkMTwZmYivCQot+O022lnoqxQWOoCxOpu+brG/vq1OiqImEEUqH1aS
s5FB5wZJH0p8pceogOzxqbSFCAAAACEBnkhqRH+E1CtCa7nT6sPwHk1BQYUkaWxgrGWqbiIHMPMAAACMQZpNSeEPJlMCN//74Djgd5CY+5YA50g7ZWcl3e6HlM7+829ssvr2kKYX39Qy0iHGV9xtA5s2X+EIG8ksj90L//eHWxQYDvlTbZLHSXrXkPuNSEsLXk5NQmC2u4ggLAcIDZeLlKdlT77ixCPNy4BIJtROhSq40uHLaCpv/QYI7EgtcO9f8Zyycfr2+0EAAAA3QZ5rRRE8n360LEl02ntKg9DJ1qf0Sq2mRcCNvdMAMT2Deu+GU2vzNalSElMySEM6p0O8C0Zo8AAAADkBnop0RH+AbCj0Y6pj2ya9J7lxHir9K6838HLwTqvrUGcUcg/6mmd9tmwJ2olawiUUSVMN0KYF8/cAAAAhAZ6MakR/hNQrVcoxKlr4Jc/+7NWRzf8YKIY6R80MIEDgAAAAkEGaj0moQWiZTBTxv8fOfWo2KDtv87okIotgJjCiYB6nOqRdka6T2FSgrP2m37XMvHZdI8A21zD1pYWm/KGf+qjCETUhQS1RRxjZtApAg2eNLrsSASJT/M/z6WcXboIDcyflWuaJGh/CcbDrACgNgPWBxQ9Cy4GZ1z2sg/n7DG834nDz1pRrY0cd7DCoe+kfoQAAADMBnq5qRH/N1V08EzDwtoIri/39PtNtN5FyTk7/bpdx9BTbj9S5CIuX3khlff7kMGMVX4EAAACFQZqzSeEKUmUwI3/9Y3FKc0qbE9JKkUV9XeNE8I6QHJCzI1xykL6LJyJn8sMTobA325dt1+ho1fe5CgSyqyKmWLEtqPoxN/8FgOH7HVcYanqHPuM2LmQI6mevX4hqU31p6zFddFY3jjC9SJd2W8DUlE/UMqWtTbO/zdKi6N77GCUpGa8akAAAACFBntFFNEyfex1FrjBvpN37qK/wIQZCAFBrCFmyElK/aOAAAAAvAZ7wdER/h7D4ZJjIOAubxKBfeUw6T8sW9NMSWDaQ0+veb/Jd0BOAnj3BtP+dOcEAAAAaAZ7yakR/iOA7CrB7i/Mz4jo97V0uGcnVWvAAAABdQZr0SahBaJlMCN/7/o39RD7fvlTWJwGXUUqvm05+1QVmloNO9nFYywACdKqyxRSD8AZPjb9mbnY8gjIvuePK0VaUznabobdf+xj0P3e7kDpR3NBMu5vVSLNUN/ElAAAAZ0GbFUnhClJlMCN//APDMcDucj7DVPmcuWIK5vVjxlhCepo4trDOkrTmEGu/ZySN/QsIlFHz/yEpWOSVZ8qYXpP5zoMHCuoHGjtJgUJ9YY/EQHTth3rvh5r0lUIjrN6aJq1qPEo0fVYAAABHQZs2SeEOiZTAjf/74ByCXW+nW+BxzvYU369nej5XDAX92W9IvylVTvLOZPNdVa3B4/9OBz/yENcbuBsBlmdr/FOsXai19kEAAABQQZtaSeEPJlMCN//7wcOsd4IOcpHAOYB35obyD9fNfDE0mDNzUbz/oQknAvvZ4dBtywxoOtUrwhbgoCnEAdmdPKWcS/ilLOLAuTdnno4dinEAAABFQZ94RRE8n360LElvuh+CIdT9OlDp/0q7PJ+MJgJbrVboZA4kHTJ3ToffbZ9DMO5KtxdcbtUFSeygBOcNgX5zGmhe9rpJAAAANgGfl3REf4egL3/U68Y/3GAEKHH5y9IApJhkFHkWHqclHfOYIjpgTUXMpdkmgPno80wnwLqyZwAAACoBn5lqRH+Gdbgx/3baE/Aqppa+0jtFnUyKiy+Zwy1ZHyz9fyMNwfsj+4AAAAA2QZubSahBaJlMCN/9dtmbcgHeY/EYukQ7aaclKiAhW7LP0wcwt78GzVH9jEX+ul4MCqF/2B0EAAAAQ0GbvEnhClJlMCN/++A44EO2R9S/OhWXCYYnfMvGSfmF3VhHoRAookQ/B0zybEDHraNElAkFmtkn7maAS3rK5ywR/4EAAABvQZvdSeEOiZTAjf/7x0/RSwgPcTDLgPuFlZF5qsphrzQTuuhcRXT5hraAgVTlKSofuENEmB9xuPyUnh4xra8xjPnk0cC9TqCjw8oFY3hxp8p6Yv1rzk7Q/KA8AKgsEUTklNSbcJitU+GRzJIOetTuAAAAlEGb4UnhDyZTAjf/yghA8yNyXL4I6H5lYLZt7x9sYonEZViic8JINT2N7pAvBTCcrMb9wQzCFD0BtXGxWf5Rsx/8l+VOmJ7mN3Vdkcll0IUwspcRV/reK8gEFqp572XghaC8+bGg1kDUICNBmBkZxBrR0PZDAzSKR/HGRfU3wFVnoidVdywk6QM8o3fSqG0jFI6YMWAAAABdQZ4fRRE8n309hYNYZQ8K1MQRY/3//frBdkHLgzarcM4u6jySXsngsvUyy6EI7lO5ODiPJckJ9lqLmYq9/4Aow4+wuMKOMB1SJrQK2t0CbZ91mEJWOzNjbZQ4RrPhAAAALgGePnREf4ew+VpnMlKRTlxMhp2z/jXp9OUQEWoWTWTddscAANjrykkhGK8n8fEAAAAuAZ4gakR/hNQrVcoxKmNXwBAAZe2CmhnDxHLa4k62bX3bzPVMZ+4eeevEvCRnSQAAAFxBmiNJqEFomUwU8b/8A8MxwISvXq6TvqNm1NMGbNBYU3BnWrbyWWdLxOVYy1WXkGewPWIzm5q7yUvGhk/YnyoDdKJ8CrXR3ARnW/KIQtqUzAbH4jz9hM3MiI/EigAAADYBnkJqRH+HsOQxTh18+g5ZBI+1d8/B/i7BlQB+WddgPTvIA214hf/1CtuC75Mbg76dyaxml8AAAACrQZpHSeEKUmUwI3/7wRk+WEBIIWYIPFZOw56t33hsE66tFa3ZLekWImE4LvDK36TKHAgCfZYuTC9EbJYEL7N1Y6TRBVtzM2ya6R2+ErPXB2jjcjTa8RL4ox+ohOuPW7UZIiqbV/BEGgQbrsylo7OhkBXa3M4cx81PtPa5oTEEJZAzD14sh2F8HgCI7sfH9KNgsLmzFk/eBzzI7emYM5SDkSldP7xc0a0irmWBAAAASUGeZUU0TJ99EZTLsyDpEIRrKJpnoG9EXvvB3xvGEbs61IZFsRW5tRZxKSoUAv2FlyzhGJgURfX95iQnOI5xMTYX4Tm9fdpIYHAAAABCAZ6EdER/h7D4ZJjIOE7mYezedRutYU/Lr+V1IdLd155NZOE9hpCyg0nHA4eDwBg282Ojm5gxupgyr7ONyBfl+pZAAAAAKQGehmpEf4jgOwqq8yvLtAmySKIuJcaTjoN//CBYJDzeFU/Xqm2khRqBAAAAYUGaiEmoQWiZTAjf/XbliF2xAJMQm7WeNk8OMW3rdjdKBNxob1e/+NepMrYVL0nMuodAkrIMadu5mJBy5uvlxvNQp7VxX7xQevbsT781icpake6htsC9t2XleFcTwqueItgAAABQQZqpSeEKUmUwI3/74D1EHSkIO4Z9Wvyhvb+EqEIAwhAaGZld9PgYxqFm6CIw70d3crTbR84Rl1QSjXA0mnDPfQOFBboYhbHq4C0vpJXMNo0AAABQQZrKSeEOiZT
Ajf/74ByBB09qkSKnA0a7ky+N/3pDcHVcDQBwXvawTNvvn2svBAJZAi7D6VbLM9Eih/kRceBxSGO9APDLO5zFex2ykGUOA4EAAAByQZrrSeEPJlMCN//7wZu7TxYHfvdr3YChXTkbqSdy1lvNC4XiN9KR1PqesySw+ABEfzbkpLtzgLfVv+d1ee4PGmvseKH5sGEQg1/DG7yZPZbZVEiMJvrtqcWWCmcntEA1XNbsrhVN9bi050eM8WOF6MnOAAAAYUGbDEnhDyZTAjf/+8OhtVTlCNvyUsskUPEVWUnL9c90uNQCPoB4rg9+sYpzZa6v16m/de0qMOA/BNngL8GoEKLLIlgO5+X/dNSJs7hCfm/5KPsbwl8m3mNvXuL195ed9WEAAABaQZstSeEPJlMCN//7wc+0/eDsJNVZQtMJRUijWyOQP+twyrFR67F8Pj5Yf/Gi52beGXbajrcE2ZHcat654rgyJ1pKhO5Uy/gC8B+G9/OVL6pzknCH54OLfIXDAAAAUkGbT0nhDyZTBRE8b/vgOOBCV69dVilfxFNE78xMP/CEHWNxHiUnX6DyHq+qeebHfMqief3NvEbCPaYjoYHVRYSzyLFgSrimfr+f4tOe9AjjPHEAAAA4AZ9uakR/h7ESVYA1XP+1Kfs2fuAWfT1RiJtfaEy2T44UcRtUezFEZUPv4YZsGo4J8wK41PBEKIEAAAA0QZtwSeEPJlMCN//74DjgQ7aLyl6IAVFCZ93OMgKO7RPkPiBlzxXr1IPdsldBX9MhV6aIfAAAAFRBm5FJ4Q8mUwI3//vB61Ydg2jrrF0XGCsxft8+s5TpCXAiRHLtzCBWGDzOIi2fFiTNEfqmbdWsGe9ejcTntioJKJzrv8cb/s/5QolStYE3YQp5+/8AAAB+QZuySeEPJlMCN//7/o344HrOM5ff/3Mc7c1rlUvDf+1BYAPMg/BpElFxK2uuwTvrLu3L9d3CeuKWIiq2NiwAKzpkZXVwYoWgYQjBJkeY/r7x0HhuZX04/RYn2nHmTL/bcfYO6cjHT3FMoIt8A/Veo+J36ssJSsTbgb64suXdAAAATkGb00nhDyZTAjf/++A9RDRLfNfS154ZVlkmiUy6/g+2oGU7FxgxqSr5jI8p2G+RtB4SgTfOQhgR/tAQN0eXRO5enuY2bDLAZlbho5L7gAAAADdBm/RJ4Q8mUwI3//vgOOCJXWMSryw4h9E3CB6A0Ke4+kyCv0XYLF03+9oB9Vjy6J3L026VqcJXAAAAU0GaGEnhDyZTAjf/+/2woADtubdqT10cQKSKXeNNuaLX2/j7JafRM2QBPoIzUNui7IROPY6gQ2i2OrH/7RUn94RFukRHfq8+EKh3HJwvniK149JCAAAAR0GeNkURPJ99XohLbldl8+WVFj+Uva/8XeQ7k+3TjC2+NP1oPDpKnfim/+OozWJAwVNoYbSwY768ReLtDwbAMvTprf9JfzUVAAAALwGeVXREf4egL3/U68Y8CYofdFnLKmENVl4qCMGjiGYU3fOYIjY08srI70+2E3XBAAAAMAGeV2pEf4Z1uDH/duSE5sdhg1tyJKvPRV0pqHivmE9r1icmTrC7mlJ7RSXv2UrMMQAAAEdBmlpJqEFomUwU8b/77lcOB3OPxERFqzYjd6PE6HsucwQ5Vqvwef/OwSmT2LMOe8YS/MYKVzL0ysCkf9ELyWxaN9aVNLSQfwAAACMBnnlqRH+BB5WDztB5pfz3BcMFhRhOn/CrTtDVV5wj6Le4OwAAAJNBmntJ4QpSZTAjf/vBPT5YQOjX9DqM5HaGpFMwQ3svWbjKwnG+fOlGDVqt1ndhhMJgv4GvGjt71cvzG5oQlpabB9hxzOPik6dNzXgscBR4+cbPmz/fP5xfYfX1NBE6N+5E94WuKtbCmyYygQguamHJ0rcWbq8PZ31zvB4cN2ZTaqhzCYM9gDyDIxGEYnFFk8tLu78AAACPQZqfSeEOiZTAjf/7wZu7TxYPZR9WnPBp8pmX4bfrqeMVK4ZK+wWWuCUmWaP3lATqHHJZqYOGZWX5B1ExYwR/eLb7x7S8jkASRZ4JYh0GwZIP9CaseXY+FdZZiTo1Af9+ZTj3dfC0dKh212pB65IP6OYNrtjjWbaU5N4lw/dIVo6wfWQ1Ox2R/Ah/76cmOMEAAABSQZ69RRE8n3sgptQ/K338OzsnCnldYLd7RX/bbPagtm9AT2uZlYrwak/pO2OvTeE+MLYy+EAUJYdRjCTJ5ws02INMTcZhmyBQbhyMvzM27KZkFQAAAEkBntx0RH+AbCjmbUS83YiObdjoslRvNC+1RH/LHg47MFmS5IoLuTYHGm+zPkdYAIx1ysH28isEgGxKBbm/3QA1sfXNc14DBq7XAAAANgGe3mpEf4TUK0MsYlS18DEAHiL+PcNPmI7mQ70eRh7AhGHFQq8+OOpKoNFJ1E0xbu/KTN8mAwAAAIRBmsFJqEFomUwU8b/77lcOCFH/Oza0YIke18+jZcsifjoIbn8hLB2G1sybcgi8yUenoEyRzOvlQ9G6LQ3LjzLO/wVkdTKQGKdViQNnNx/ukcfeqyW26GuP1irc+a/5I0StQFo5b3fttkhWZLCcmvXpDHKib2Z3J8v4Mxl+oH8tYhQMmnUAAABHAZ7gakR/gQeKxIei4mGll/v6lUvt6J01kEEpB2nbmZodQK3mc4O0v1zs/0nPzjxxUZ3GOYRGobV/cAuzXdUnGujW6ZuRq5cAAACZQZrlSeEKUmUwI3/74VHYcs5s9CQn7HDcl8JfyAYtwAQoJsnV5HQm+wXQ23ptJOsGIH0+K+T4tJmGWxKq2QkvbVGAt0rBbo9MwfKNZ4T+zu+cRWcAzne7vkeYo34ehQqXPe0QPB8YoxnrjvXyG9IVsIPvTqRMCT5da0BnF7ybZ+dwHNKFrocGgyWwGl61sMCNyw7UdJNZmxywAAAAOUGfA0U0TJ97IKea4wcfTVP+rq1dhzTiOczB8P98SD4FQyfV4nTwQWnbpuSHsjXkGDVY+VFqlbqpfAAAAEABnyJ0RH+HsPhkmM1qy8bxI+teINNs77QclW9Syn7VgOysKvEXYZh1N9bkMSWtoQGKeb4wR7zVJmCMskcNH/eBAAAAMQGfJGpEf4eNe2TaTWCeCgaUGvNb9YsQ6osP4XH63aOXxG1jZCXFAnW0W+T3+SXJsEwAAABgQZsoSahBaJlMCN/74Djgeww9t1S4BqrecgJLTxVeOuPfGThsraJOF2+pS0DHf1L7/3y7WCHgywvW5UfaSeI840uFRKAdPikqUbRyEQbimIxPxFcVU7IF9oKgqilKzVfQAAAAS0GfRkURLN99UveC5swUPUef8Rxxd/DnGwoGPLEv0jI5bF/aVNigJFob+cQVyS7+9J0DBc/WkPNi4m+wyxdk5RauSIYK+bf04StobQAAACgBn2dqRH+BB5WG0Yk9tZXU7nhXQUxniPOff2if/2gJwmvSs+oXpZoQAAAAgkGbbEmoQWyZTAjf+8SBZ14QI606liNL7F0TnvmzD9NgO6qUoiqkr2dMXGBxVH
EYasaZ1lgL1s3uJPXelryzxkrFiQQAJkYTJ6/ddfxquNOViqUsaM7edj3CU24qxjGC94+WxcABRaj4MjKQx/0LCKQROQaDtp8J+TfTj/pDj9bBDoEAAABiQZ+KRRUsn32aKglZlvQTovHIeNH7gDIJfCEh/db+sTywUiMMdw5UkCuutzw8i0Uw3jTVZuGhnn/DdTHavTyUTz90N47s76Kr+H9cZcMdUHeBR1waOeg55kL0Lnz5MaAKqHUAAABHAZ+pdER/h6Avf/WMAaj+02h/nyrbUOfaALfnmG2jGc9vXFwOALhj3/sE457lRtcFy2I4QbRoLfMtRwbncoZiPe5ckLTaUSAAAAAoAZ+rakR/h+Qj8Y/7qZFjwDICR967ohOCf916Jz9qNGDcGK/ZVQo0JwAAAGpBm65JqEFsmUwUTG/8CBPDIEGcd9xK7sr7PeVRUblA+A4wrZEIYDPPG4pGP8GneuG/+WblEBE/YISVAlzm8YRF1iarh9/0YM16M73ZZ8APAFET4NhFgwWEytfdD1qhPlSrycwSgeZBOZfgAAAAMAGfzWpEf4EHlYPO0DUfukzjrb6sMwf9TWr92SL5dYTUf9xex5gvecavMELDpnbvgQAAAFlBm89J4QpSZTAjf/vhUdhErrGuSF/CSLUgNGgjSozDQkuWrpzMDWZP18VLPXElhj565QB5ATyxhTtCBjXMORwH+flU+06xO0G6o+HBCuMLMHZQWc86Sgn8YQAAAHNBm/NJ4Q6JlMCN//vBm7tPFg9k/OLpHQkps0Dx8gSIgZVvNmWxh2JbcEgdj1ANWONNo0C46lGcg6+GMwWquPjmXeWwPjjFXLS5WiDMCJHZI1S8p+dhwYEHn2JemwPkCTGJT1YfRwqRnd4zFrrK7w/IrZzwAAAAUkGeEUURPJ97IKW2n8rgeWx6x/uvwym+iKn21XOuUJQvFl7Jvz/4BnZZvs/HplT9c0k3KbuOgl09GJ9Lr+6rhKi+Rn9GiuEu1IK84X22BQAPloAAAABMAZ4wdER/h7D5WmUT0hGm55/V9t/EfeHSNG4H3z8SITMcvPv9nR/LKHSw6207w5vl5dvHvbN/P8hIiRRBAbI6dHaJoqpBvvYa6Mw0GQAAADgBnjJqRH+E1CtQOK583hyuzCeT2ES3e7EAJSPa2lf4ndS5OFQQrgTaHVvvcR/0mjt9zA+CF6l0IAAAAFZBmjVJqEFomUwU8b/77lcOCFH/QjF2o2bU0D1gZNTTA5MxeI3pXbKFI+z+I3Ehsro47fLOUN7RwIz1I1j5IIQ79ZTKZXOQw7psvfz8GmJZtuqJ31wKhQAAAEYBnlRqRH+HsOQxTh16NjvxJP5pSGoyZ2MG9YV7VnkhZNujwds1WAb+DnvAXVsQBWexlXy/XhL2mMKRVlpuMTQ2rNsOIosHAAAAp0GaWUnhClJlMCN//WN7G1kj63WwyR24JHnTxkh+gYl+KzInu1p18pjfQ9WbiBY0GWq7ysDGaRRp+JW+XOLVoUSeFufVt5tWKNC3usOBK2/C6PhT+8zUu5EZNdqT60Qq4VgNXwzslbfFjnJRBDpdAh5wLLWFV14beae+G0fFOrSHDkUNpkd4sy70UWw9D1jdUuQ5rhLdlkIPJ0x98OQL5TO5gSkKid+BAAAAVkGed0U0TJ+HzcuFAuIfw9nuIuW5/0fmGPCk2buV6y+c17CysRtliEWgeuDAVuS2NEOS+YOmLSnbstF7rBcIT0xiTFqSL00ySVVFsbWwkDRuJB35PF4ZAAAARQGelnREf4pxDGQCv7OsLfYo7T8YkPot3UwsfuaooJN278sKvEbac7AqI6vugAMF0dBPcCL6ff5MKRFnV3TxohZvBW8Q2QAAACkBnphqRH+QTAw7Sa7nUoHLO0XWnIk0uc9V1EtugHZKDYDxjTdjxpe/wQAAAK1Bmp1JqEFomUwI3/vBm7tPFg7wfAZdXzxs0p4sQVbaLB0awSzslu6gp+TNyByCumJLwW5CyVRGbnfEuH5vtaS4wvsym7ljMIKBynK3Kg1MUwKdsVhXZ1B9ez8GwiegBu3bUflTwt/cPd13A1iwLV6wHvPtPLPVBufoRxLdi7fXErtqXZFkhyUPiuvfApRuTZLb6JlWmyXzVCqHWPaoL5G0ARafVrc4NRAD45EOQAAAAFFBnrtFESyffW4h7bT+VmzF2BY/3/IEonznzSaM52EK3yed02INvn4T8C5fRiiAYPyU20UWm0Zioarkwh5eCaLWUgTp5urYldHQq3kp7USwoZAAAAA/AZ7adER/h7D5Wmcyc1Gak7aI9t/EfeHMOpqmvMV7oyvL4VZVPyQ7xKvwftFj0zEdxLJQd6obfw6JTO/DrvdBAAAANgGe3GpEf4TUK1A4rnqdTB7ynQtEgcvcbzbIwk1IFg5AL8SoEuxeGvos/djDz9/TNXsT339wiAAAAGJBmt9JqEFsmUwUTG/8A8MxwQo/6EY7tRuuvUQ+IkRW5CuuQ16cI6K9UjLCHt/DFRPI0/cesLTyTUMPgo74dmiSCFWGW0NNaVwImkdPP8OS321tUl0fM2FRF/ydM1FfUQ53LAAAADsBnv5qRH+HsOQxRCCMCTKsMv80pUcMEXeFoGccbVvJvQNyqsPl3dGC9TDj68G4sLpt/4joEDUDO0HQPQAAAKZBmuNJ4QpSZTAjf/1iba5425bwbsmO+LqBQWsUydEd6pAOGPrrpQZuwRoMIuUNmIqaa/ZfsYj18F3+2O2VzASzLDk7M5bTVsxDhHKKWgIuIMjnu3J7use13jH+mTne9OXAolpGEM+YOkgGn0roRh08mvTLZT60kAdnRLZNC1MbYLQv/UtAcmYn97x3XP7Gik2RIKYbYXaybwMSmgqFRWYYz4aJwVLLAAAAPEGfAUU0TJ97IKea4yheePM3UV4BYP87QSjQ68uFMzfjZmCa2q2jTWdubpQmvIRE5Szocob6t0FwExDfgQAAAD8BnyB0RH+KcR92pSK4d0iK5pn3//DwN9n1Sf1/FycV11cqIRtpQSj1cG8kqGHiP/LJ2Q3+b3t9voEB0KrTJ0AAAAAuAZ8iakR/iOA7CrYE++z83jlf99FGL+nfLDBphBaI+WarzWjX/yqdZNNuIw/QVQAAAHhBmydJqEFomUwI3/1jd3Ei9XsYRCa6uUFLqSIqHEXps61oaZW//CrIivsMhGGZt/+wTW5oOHXAD9VovymjVgcdopGLcyH3+31aTy3MJifAOoc8WLgubo4ISngsCMnt7ldH1X98l1NIvHlX3mcEIj84dFxX5oK66OkAAABHQZ9FRREsn4SFSqCugoeFae6NWP9//36wXZWgjOWqAHC9KPG2ihYHENvn+jgjcT/OLdhLP58EwMXKROhlixI0N1Rfgfyt4FoAAAA6AZ9kdER/gGwmmmOq/Wm/D3dnucn5igsLTeqyV3HaZ/bb0TUerDFX4r8MRrARcezd2SF5uBaMc2S+4AAAACsBn2ZqRH+E1CtDLGJUtfBL4cPgesw0gfX/r7jzYBrCPcjlvYuTujdx5N+RAAABSUGba
UmoQWyZTBRMb8EdgCeRWNj1vNMstxJbRx7SX4GlzlNFlrR55BZRfwuUxlJowdEahz2muYI8I5vx8hRYpCbfVNcn/buf7/gZ9+q/Xex6CfNdIK5dm6hYw4V04WRD2xx5XfgFgFusJZ2Lu3x5KCiAJR/MKrRmuD0LpBj2HTd8vSTNdD0mqExm1jVxz11VS0JkCz1kf3+GAZ8aWDLfmsfKwTV6EgQsGOAvWjEoPcSCVw4czv5SM2yxrz2q4RWCXKP2cQ32KjlN9WUwxh1yU9H8IlwQpYkE9YLpUDTnf5VkAgRqHHtbB1bKzLqdQEBRnPDUxU3lCJ8z1ffgnZnrwQ/1jrkIVCHtxSHz+jlGJYYDk6e7WXKb6WyPh0gIrK8i81b+HquyEnpTv7rHDiPZTeoAPC5ivY3TNLqzME6cL8+xoSGHv3D8YziNAAAAOwGfiGpEf9e9wzWJD0aXSLU4v4AFyP5OmsbBEcN/WP0ljTwYR9rY1eB5C5dJcii79292VcOw01nChUeBAAAAwkGbi0nhClJlMFLG//QSJIKCLA21sITm22CQNIzL75x/coPBB2IH+CkNL45/JcPWK61azax0RKRKTm5d+BssnzMtwKy+njVCxG7G4L/0fmAAHFHQoH93jRGl/HjJy46vx0G6NyLZe8h2jDkca48Z8CHa2hCT6RIrRCpQcVFTXZInLx/gmR/ABv72Y6Rg934/cwAwDTTvn/FxI59I7ms+9ozaPN3OuXj/ZPs9jO8lHXscoqhncvzhU0B/ZiiYXSNR5B1ZAAAAQAGfqmpEf5icgpJHKscCg4dcTFi4T//E/XAXJpG32nkk08fkKUrnPGoSQKoKEiE2wpfQy29GE8eVH7/yrD8SLOoAAABWQZutSeEOiZTBRMb/++FR2EKSC4tylzphI7uNxilsvtRiMSiFLXZzkidfIAjCjC8+Yq1U+7FQos18k2wGsrHuaY/yfMMHdO+Ytp9nyOtNwN8ZSWlSsgcAAAAZAZ/MakR/iOA7CrDroBeWntKNDlGBefdNgAAAALZBm9BJ4Q8mUwI3/8b8mat5xbgYxKkPL6JuE0JrSSkGLS34qllRYb//n6qI6h+DVp72C2Bni+fBUJkeQ0aRWTwoPYVKKNVd/i/tVkLXXk1eYhf5mDfymriDx+s9bFWvnxtSq7+R30/mIRxeiq80eB3FjjMV0v9A9Ip/t0I8jdkPBxJNLY6dHX57kOj8uBOxzCEcewQyFjVwZ4U9eeiGDNW5cmamVQO+G1zTO18X6eiBVrQ1sAaKeQAAADJBn+5FETzf0FK+iADehK1fJc1pMmFCTGtLvZ658Bpk3MRU0V0znp7jGG86RAJ+kBsHtwAAACMBng9qRH+BB5WG0Yk9tcQHS94V2gbW9d0P7JZCiBx0bHdSHAAAAHRBmhRJqEFomUwI3/vEgWa8cBuzHU5S0ix1bYRHinDETekQfB+cgzhJLg7T1kDPCseQtg55Z8jXr7sf2ocT2sSw5hDEdVg0IPoU/FGMpuD08g8QhOFlplsTFkZYkjR7winNg/k/jN/HF5GJogmrWYBvwozzlAAAAFpBnjJFESyffZoqCW9/YL35uOpRg/l/iy41hTe/tE0ssOv4hnLHsHPfmKcSLTegTjkNLZVWXjO2NeDSEq6BzQ01VDjT7II0nXMOQjdC1CNOmzNCCPqLBayW02EAAAA2AZ5RdER/h6Avf9Trxjzf30y6kAOs6WLBLxUEPR8k/75zI+NLVHsG3auYqXMQfTCCvtQyg3VgAAAAKwGeU2pEf4etKGWAMuE3sYEV/anP9U6xl/ko75T+a5ob50RK54RK1XOQ4sEAAABcQZpVSahBbJlMCN/9dtmbcgHeY77hZBIuaacrM+PCy3y2hv4fgKY3CAJLaPuYPjQ9hPdbPybTfLA9ZhcESX6IM5yNtlJbVY47bfUjagQR0EA5I+ypI3tSGPRusuAAAABKQZp3SeEKUmUwUVLG//vhUdgh9eiEMPfVB8tcq9JDka35FeHzBuuUIwuu3ZTYJerdc8OfLMQc0Kb0i+lVDCTmweLq3VzwQLqGbWkAAAAeAZ6WakR/iOA7CrDrXqtfOU0OnEFoMihrTVlbhcOxAAAAckGamUnhDomUwUTG//vgOOA7zH23LaoiapuTmxATD2bJaDhWz+mrw1lKFIcbAS6u3oi38M5gOc+H7/dMAlr5FiwNQQ+MnP95HqTXfhGOyOvhpUnJ8xbBRClFaRLviLbneAYkMEKA0mVg3tgaK2CBRyPigwAAADABnrhqRH+HsRNv6zFmG77dxoqP/yRU8VkBI9JIa9XsBjVtXD6MA4j5uBIgllV5DYEAAABnQZq6SeEPJlMCN//74VHYIicdzoZjPCSvgahArEevFvdThrsLLMVaJDVNLqpZTPWNC9vVX/wO1tcwEn76IJESXUEvUSeHuARe65ZpLp38T60KexwII0tfQkOqrPOP2CqJfFhslG4y3wAAAGJBmtxJ4Q8mUwURPG/7w+3BOtghVFB1Iv2nqcWzEymAHxzkidM6m2fE6ogCs/6Xy6jtl/XzhhsHnCSDlAQ38GrqPDxXxyRtvk4SCfcKOBRFyRAOB8n6sM4vESFElpVQD/vjoAAAADABnvtqRH+HoC+APa8YzC92E2GO/nuYBco//L1SmEwKP9mqIR44ppDFXNReibXTYz0AAABWQZrgSeEPJlMCN//8AtC7wJQ477MPq5rGdC7Dcs2GbwHJuPCJyM5OIOtlfIO3cE34Ssa8cZkPDWcsRjRYjceQX64sn64LgFO/LLPWgI/X+x8DlTPlkHgAAABAQZ8eRRE8n30rer4SgxWSdC/KZ7+otOEzo+UFD9ZmniYypUuFzJJ18Je3Kzvv6f8AHcMmi6YRwakaHS3jzaoZQAAAACsBnz10RH+DDmAYlt28A+7MCxlUQzSjDlyF0aYn6fKlx9oD55kOvT73INX1AAAAIAGfP2pEf4EHlYPO0Dlcu33mg7UP+2G/OS+vSWgfJ6uzAAAAdUGbIUmoQWiZTAjf+8FGR0k7B7JF8YA+wQjft+K1l7Pl2lFva4U4iloi7jK05WAXlUvU0NMbneg0W8bipRMj+e5PR6G298PZD8Lx4/pmueJhR5QgmVsDRHocNbVfEJGisYpDRTRCNELAzBSPwHo5TkjGz1jvpQAAAFVBm0JJ4QpSZTAjf/vEhSlnIiISPCRtw/Fa1SUZHmNR9eS02TNX9cdJHHP4scrbtJbcvjnamrR2131icLwbB8mAm/JS1f8/MJ+s1WuEEA/spOfP7NBAAAAAXUGbZknhDomUwI3//AKArvBG5U5RSifFlD/Ne8Dro+FmjNGrFQ79lqWS95HaV+HspU897WfSmm3q8POHAYXGxvGACHm9MrOpAgNLObTHeON5xAh/8S0wNVUbBIo4TAAAADFBn4RFETyffT2ygu6Evr15zpphVL9ezEh177sG3lNh72dSy6+LMKsjFpOfLT5s3125AAAALAGfo3REf4BsKuKQrn4+ytpBYNz429+gOd9CbMcAZjWPrdtHWJw6lVT6DYuAAAAASwGf
pWpEf4exHtbuDFFtO9aECqMpTVAgxUDNC6R6Ov/Bv45Ck/DDI++GHNS//D9vpyc2nRYpkvKI+H2S9SCYvOaW3tGooBG5951jQAAAAGBBm6lJqEFomUwI3/vrPjgITCZv0Cm/UN86vq9j2om8TSXh+Amp9UymJ2Lmx+GhPvnAhOiY8nnAk80c8/zR4tT3chvitOsi24OHlSIEosC0pK5DFjcJ9H6E07p5eo+PK4EAAABAQZ/HRREs331No5t4RtqmNt6u0B+Dhno+LpcPQ4byTbJJSghklqX8XC5+HsKV6xKkIFe7OUTxgQ0olu+pKqtaRQAAADoBn+hqRH+HsRJVgDTILm9SUFNJlrvLvY/SNT3pkk83hKOIuwzsAgzN2cA2ruLxHhJGC0nlMqRs5YvLAAAAPkGb6kmoQWyZTAjfyghC9HoFZjDr4Z3T9vVMSrvyBTETIkGEDt4rHdxFlDop46jR72snDXzSvTxci6fpWQvZAAAAbkGaC0nhClJlMCN/+8EZPlhAJgR+ivH2tTAf7wMtf7Q0lmwSp6DMeubPofusLc1pk88yiSuDMj9NPivga9JoF3g+xq0Q86goTEG6ZSyNzWHsnx25FbgqP+krw4d7Ztwy2JXZPgQfwac+bc+pWEYQAAAAa0GaLEnhDomUwI3/+/6N/UQQ5pDzZ2qOGQygvLtIfyWYFUfIE/BzO/DrGw19HGK3hwfXoRT80JapmkcAzbWSDpBJ+ucKJJwYRfQ0v3dYjGPpzjoBe3cpWbig6cBTj8apIEOyji3iKmvu0ocjAAAAaUGaTUnhDyZTAjf/yghC9YOgdCGGxMBNvyCtjziLKdC2QZ7FDESe5J9Q9AtgUrneVdY5XUnITqU7yU6iBGOU1CpzwDon0qnZ58WoQb5Yntsn3pLkDSXmlHf4GUB+Hia+8e9WqG1nEx0tyAAAADRBmm5J4Q8mUwI3//vgHICs0H6uMZza6s7teVDeMd6CYdhr9xUUK38FJYrIQPqnYUcIjdPXAAAAYkGaj0nhDyZTAjf/+8Gbu08WBCblR+AghU5I4PjruVw1nmnblE52norPEgNszfsQwVFghKI1Diu0eqa7hbfw1+Ep5QkXdJelsyIpnCozqoRLLfdal5qiCN/T5eg+PO3oDHLfAAAAaEGasEnhDyZTAjf/+8Ohm1TlBNW/tdPMTK/c3ybOs/lY+8ZeligkNk05fNM9wL12M3EuLDYgcCCXSoBpriTN/A1g6V/gLzeZ6XLH44A5B/SCX5SLpQw9q99kGKI5Kg0SBuVmiSFKDmwPAAAAXEGa0UnhDyZTAjf/++5XDgIS808950w5VkT/t3hHWXzd4j+0CV+IqCE8SuscXSYBiVDelxjqtSC3ZHtkbYDFeKciUbuZ1jlpEM/uS2KCxQtBuTVImzCSPE8oOq3AAAAAW0Ga8knhDyZTAjf/+8SBlbxwEJo+j5XM44k4C8vgBUE958Col3eoL9NbbmKsxW8m7j4xRgVojr6fB+NvuumtaBaIlIqjCRjWB+8H8xBIXkvlaBSjiwCBA42PXb8AAABJQZsTSeEPJlMCN//764R2B5oaIM9nfTCNty23j45IR1Uylna2WhBb8CUTivs1xgc8YLMXXuABpWaMZx/XUnylyMe7+O3whopv/AAAADhBmzRJ4Q8mUwI3//vgOOAkGyPnFZDh08wNcfuRYVMfPh/g57ian/0yuRdww/1n7ykQqNJMvWQ3gQAAAE1Bm1VJ4Q8mUwI3//vB61YdgbRGgzS0lo6m3KNl1T26A52Lu3lQ0zu0gITxVmY3a52vCyX+xlNajR4CQjogG549vg9JY3n9NViV9WrQgAAAAFVBm3ZJ4Q8mUwI3//v2UuUJDTVPO7uHUL9d32bbRDn5GpAyvyun5eOVCscEeVYfMq+tDarXNr6Iw0bo8A2x7veHCIxWFjrr1e/Z5ydf+rNut9UgsY3hAAAAVkGbl0nhDyZTAjf/yghBljfBFTH1dyGLzpj4+bIErKAmSdIbdehwtvdaiEU1PfP7BLv/m0QUWAKIqCCCv6cnt9U55oA+nM4cUPpNGIImms98PbEjWuRhAAAAPEGbuEnhDyZTAjf/++AcgTWtokgs1Kg+3puZmyB2zn+Ft34Z7zrHnHnpGkVel3jgu/cRJkseHDVwekGctwAAAFRBm9lJ4Q8mUwI3//vgPUQh0QPQquZqwQfnSdoeSCQCzHAsvi6Sa/8leZL4+gZ5agcDt/8ZjU6OKKewYE9oGmb98U/7Q9w6RYYgUyseVRyuZy+FRnEAAABYQZv6SeEPJlMCN//8AoC0RERAjFSuAaDWIHfxup8AQJpTK0TZjCJkqKKVH0rnE+j2G0jr/6PUOpHvIfb9HtdnRu84uQ7BUI47R1ptvzy8Ih329lBpeqV6wAAAAFNBmh1J4Q8mUwI3//vuVw4Hc4/EYZkfR9NOYQkN6IR+HLjwNnXmfJ3tFRz3h6PoPlGR9Li8C26dewveJqFV+gHMH01AqXp6nEKs3zf/A6nxbhFlYAAAACtBnjtFETzffU2jm3sH5xlgay9R1QXJfemvQPA5ZQM6hlOnPGFXo84lVDBTAAAAMQGeXGpEf4ESDdqTsacWmS87R3boOwBLmPjr9HbKZR3BTbJMNFk7UaY4D+7PnY7F7BwAAAB5QZpfSahBaJlMFPG/+8dPR8sIHRr+KlWnACcQrjiBl9PrT9yPzdGTDPem1rqoZeo+O+V63Y/a74OMloCXYtj7s5ZuipBPqAm6qHry/NiVr+IWLYfvYiQd8br9YnzFyHPfR+RTXTMKvVV/KjPD/ctM2HNXr4HAOamPMAAAACsBnn5qRH+JNiiXuXcPpCfMZDOo7tZbnRWsBJ0tsa5Lgg0pFjK/6ozWiljBAAAAY0GaYUnhClJlMFLG//vgOOEQ8B1SOgWNXwdoVV1ru5CEMBY1K6LZ98XM0dIKZIqwe5RQeu1WVBdf8q18RVrmysbabE4YEFPFSDmDTf9crfxMvgknEd2vk9sWgLG4Ej0lyc7cTQAAAEcBnoBqRH+BB400x1GiEKvi8jqlJCwv1QQndqVvnTA05w24ChMFL28XtsmkzSsUuIWLziqkkynS7jrcWF+o8Wzss8ftVv3C4QAAAG1BmoVJ4Q6JlMCN//wDwzHBCj/9cyI1iJwiC2YDq77/ovgCtBWTqAK7Qd4tMo/sMxMoLVehiq1uxY67X5FKvvfdZbk571Wc3XQkxwSdzgyBXFfYMHtvUybBlgS3PwuBgOeOBptIcLchkHRh5RWAAAAAUEGeo0UVPJ99PatKaSqB2VOb+Jj+9bQe58CI9t/mO2D7iXRZ6LVPwa0DZ6RZvvddI6Fbxs5CUyQ6tXX32iUFx46XgEEzck4WJrjn27/rI7DAAAAANwGewnREf4USysNoxKlr4GH/7yewiW8E18hvueS8dKXaY0ZRvRxC4LmG9VnnRggXFRoWNTsMGYEAAABRAZ7EakR/h7DkMUOCGBJh+lP55SdE5jTC7V7n2PYNjFca8HhrbO0wW5ZpVWgnD67LghASkduDl4tN3mv9lgWKKDVlzuD
ELPcvXMd+GCzMWgyAAAAAskGayUmoQWiZTAjf++FR2IObN4lvte8kZ2zczUBUIke9zcAiDpc0tIGFXyhAP1rxFfYV9NigpvIUKbs+TB+6WyVSWBBcm9MFQqwPuVSWkoeLyC9MIaVAuYZceC89lbP77DKjo077qt64djt94DKl7X3TkhZMKCZRkli9PMehVycRIJ11KVnyazgfI6QFyZBWKpZ//e1ExmOA65u9Rb7EpDZypnuNTKWfi082KgLh1cxMCfQAAABGQZ7nRREsn3sgp5ri/wKRgQ7Gu3vI9crS3yfW0cNACqpw9OIocYE6q5EFc0ZcGv9rKrcElxkw0g99wSSyz9azLUdIyByPgQAAAEQBnwZ0RH+HsPhkmMfKXjerJPWkzSzLvYro3D5CmzUrpe93QDwpOs1Z8ue1cdrhkVLaJ6fnHRj/QNrVfPJjXQ1luW3r+AAAADQBnwhqRH+E1CtCQU7NGQR/SzEljM35RLPhIf7CuGp2nH7PGqbYx4k/MVtURJNrZRoSWG+5AAAAe0GbDEmoQWyZTAjf++FR2JNg6dJRFEWpLDaHGWYdvgn2Ihhkb4tam6cKVyugm5faVmXTaDsJAu+kZAz2r1gaf/a+k5hP0Q9ZmUfdwQ9RO6sckew+0/TuDcwET5LS6b0bjlYPX3/3sW/C+vHsLkXWQdgV9S/yoRC2BzqxIQAAAFNBnypFFSzffVS161ytJSzivxBMnWEBBJbIqQ36FF83chfte2KE9wjt18/IKs6+mCtLOAckfgeDBt2owHOLwPzOn+zvI5qJcNySwzh6eafp0V1+IAAAADABn0tqRH+BB5WyGKse6lBSKcVv42bFQay/dKDQ6P/xo+hOzobC10h2D4F5zC/jvsMAAACTQZtQSahBbJlMCN/7xIFmvqIfV0ksEmI6ZqkME/+uYEFZp9EgRyfMfI9TwvOdvYfNMg96XA75e+Ug1d7MZsjl+wIS6P4ARavP29AQ7GgEU3ga/qHpTK+j4PavPhJNuvI3vAVPp+hqdWdcPhLbXeG5Bavj/tfM22MWc8DWUPgPRup92tOMWZq7zTg4fRCn0RmDMdRAAAAAZUGfbkUVLJ99nGI9vhkeUtjd0FBbV/Y0chN5PW4RPFtqyuHM6CMMdaYL9nKS2MPgWTHLmFoV57n1YIpZP6NS6yNWHqbcybKI0qEdvxQUm/iL7kE1gKC+fig+SwYYBKRaTsPgiDmBAAAARQGfjXREf4egL3/U68Y/RuCrxLwXiYQlWXYt5tY5DXkNo5EpiJppoQPvg9AOtTRsWp+ZH8D4KriCBrIbQXvLyYYUk6guCQAAADoBn49qRH+Gdbgx/3UyLHgmWsQQA67kWXbzUxYaBkUPtfk5+96VwwOUX427Zy3tdPCl2FQMk6Iaz/Z4AAAAdUGbkkmoQWyZTBRMb/vuVw4QawH5IL3wgIlYl4N9vN2/EPiB18zVQ/xYvRnR37JySPr8J67t3TEEeawOHDK09GYOfGXSC9g8VJk5uBSTrdp8SDss/hdObxpyozwaea3Hbi8nMDNDeWI3uORE/3DMAFFILF1w9wAAACMBn7FqRH+BB5WDztAwaY/r7Z4kRBTDYmo+gXVOFAHpeYiX3wAAAHRBm7ZJ4QpSZTAjf/vhUdhErlYqhggZ3H5bTa1/vToTRb7froaegfKbJwqLbBW+kSnruoRnFWo3+XuGIHxrqAtWXyVLj2RxmDnonpBATSupm/Er3ieXpcjPxjEdhAERQo1d3evkEK8xubffKr67kXpOm1rXRAAAAExBn9RFNEyff9JABS2FcQ8AcUeHDbcg6r2VMcDhBCTlaLaDqBcTdsiD8BmqZH0SKwn6VGYbJ1Q5LmOm6cs6MiJ7HuWG4vjXM+7PUM+nAAAALgGf83REf4fkGk3LuITCI39/Buk2GiLZ5Qm2lilCulAD0MHZ7IiFhpwQ4cN3dmwAAAAzAZ/1akR/gQeVhtGJPhygpFSbr+MRmIRmrZNzW2fSHOqVlo2OShwB4K5dCDy6EojzpurJAAAAhkGb+UmoQWiZTAiPjDpjcgQswobwLtrOt8Kbn50NO1b8FyU1efKfDFwXZG3fQEF2nXHsOOeAZm6hV6ynSwPWwas2HEejxuO2U7eYWeG4BCTxQsqO5lECivpfGEWizf/HBrRDxbzeeL8AMx7yZyACTy3DlgHl6NdrvffsWb+btUZdfydSDQkpAAAAekGeF0URLEfOK2yXld2veMoVsharS5jlxy1WYfGZ6w0Y2W4SyvoZW0vw2TI0HcN+K1KTShZbSy9NVAcHfLkabTxGzbYQj8WsrHVNjL1UdoGjWTy9FdZl/uN/rXdkOQFudoHdTY917al0GV88Ot6qxJTUo5a0bNZ1+ujBAAAANAGeOGpEf5iZsaTpiYK8Ckzq34eYNkr5wTjqRNgZK+2VKyFYuAaNfhszOaZxYrXPiVM5moEAAAViZYiEACf/vgy3mIDfV2I+c4OQeUCbjkIqauQ7q1hI51t1KK0Ty/hpqe1v3iAKauIQOhjuzFxyMgx7FbnM/2jBWLSkwzPM9uWS0H4jWSakogzfe9txEs4c+/QsLeTPvLzlrrhoRvk76vX6F462KFV4lQ8BLFX1JpZOU12xKsmCPjGaaOJgyXQiOhYn2hWzgcqRNjGX+V1k8oj99bE+1vc15C8Q37MxkeCbaMkDN/t8mxkCWv844zRz7jjHMSn7OeiLDTN9e7Cv32YENkK+X2fgPjTA2f9fID7IG6+92XMlKB4gG0/ewdEoHPbTbV5cbTze8c/T9wMuzhP10AuKfYQG1eXnZ1ykp0P6DRPy9Ld6m+2H6X5Ni7ssRrEN4TuoAEb5bQp2TW/e41VdMbUA37GNZ5x7DsYhq5D8keOph2B2Aoqxc+qPJPC1eN8uzF4kT9NEbttz3OMknmnGRbLqo9BolsBfy9A61ilUr1hK+lfVlTjk05qU3y3wHYEc66DTCaL4HkcZGfGwlWCtebzJGH1tSY/TGvlzwoiR6D2VHDw4Qd/QL3sXXc+s5L0t9Sw7eMZ3S47uKfNx/FSmsxh8Vu2q6ia3DDbKLB0cs1CwbSXU5GqIQSYlNKNsVMAv1OgbV1e/q0to651ASYAj8tdg/GWuSkyEBfqOW0VahPSNiWW6MzjEdYDBORzkPU+fSsEZX6EUEJWgIR6Tk5qkRkpHpcm/zO12eo3M4AhR9Cx6B/Bjk6bjXMMI1JbvT4s3OmvDMWhm4GnlKV7U+z5WfoAn+r+XEGmS5H0VzKkjL9DjNzfvJzjcKRQzSjV9OMxXeO3VBTd+WFAIv2qLLi61YRg7R2i0zYBkZiZjznSxv0Oh3XHIYCRtkOS6xY+goD6NZWh7U8+5INbjxbNW9vW0F854H74WEbjrsIE7vUh38zBz8lmukAzLqGi6lSYQVXRAstPq/sjujZpTHuizcX6q5h208Hya5GB6cWOla3tMwJs10rHPZhITBZAHx9Dfn9xOK6H7+wi//UuozeCF596Bj/46ApJUXxnQw+iPZClbVtv32bAEWwjAq5WxRo58IBlPczqDbMQpJ2v8hJZ5eL
2AFeiCS8d28CkwIevJBJczJ4bl3YAfkhz6EvtCfS59R8MtyqJAHmYMxFa2BzAk4Ceko+cmThvwJsTo9TwWz6/y6nqWgaEvkLNZuyixNMc/cXcRwsDQOLI4ZyMttZLESIVUpeJZ/6cbN2LT812AnxvEtv81Oo9jf67SuSMRsG6/uLlpm7ay66WEKT2P2eyv5sPPbiv8phYtyC/hpNWM2L6InvRMw1S6W6QQw7ANE+NOljaO2+xYiaex9bOmfqoAKKcGloWbR978SVkvTCVMZ+HYiMb1wAqx/PF/B7YT3YdqVhfszlXZYvWaSPMzU5okM33PRVoLBIGHcW9rG42KMvtm5tZJTaxxpsdsz8X16wDYWJ0ZVgl2FlFrFiwpttQ+K/SBAfBbCBjc4w+9++Vy9ubBe/R1rfhdPo7EO8Upn4ojmO4i0QAQPk/+Njyc3FUAfeGBrW8UR33ylHcXQhrcCZUhYC8/6+rQTbAW1pErnFsaRgQxa84DSprqtocWc1KyDwYQLDXOQyRgEVTWwp0C4n4FCtkK+u/u1SbZZ+WlsAmGCMhbvZttIOR1dvnyda1VEmJ0q4MLNPcwxMdDoqgdP9/cDbj6E8rNAG5pJQbjF9KX6GfGfetGbQA0fy+0g/NVu53wvOeMDlX5aZKNl9hASlsYvBxoiPV+IOBqNBkpAft4IlL1zcz0cOUncvHJKuevXoD5KrbBNqehopgoUlzYTvetOhot3VJAEcPkLAAAAERBmiJsRv/7xJ1U5AfCBaOk8iMcVZeCFVxqmruwhZ1VxVh86clNufhi8lqRRs6U6DSeCULyvbJ8s2QvtuTrcAMn+2m1oAAAABwBnkF5Ef+BB5WDztAx0jJ7uwLOc/joWyLAt56hAAAA8UGaRjwhkymEb/1jexjsxsfi6hr6As1+ItIPaaxdtL/Psp4lmstRcCE43uyBptRSOlaI0NUP9+Rr2RU9EU2/XAcFdqfD0kTm2yOmA4hz5KtaLVYCIyYQrzAIpsbD5TTBRHqAEIhPQuPAFKMNcGmm6KkfE8h/rUhCTb57Tr0SCkvByHkqBQxIkeSnZE3RETT0gzu8clkUB5C2/f9gqjzOEdr0tMQy028X9g6I+jHHVIcOQqoQJqlYLTVMEPnxTm8swUhbukQQqB/V1CMcORCC/q5HxRtMLYD5qtCkgPY4iYs9BYTsiCpbpzh3Z3BHriiVt8AAAABdQZ5kalPJ/4RtMreUjvAg87ctPZkwB7tGp3pNULncn75y63gaBD9ooZ+2Z5BGJuEzAt8WFMMMtfOq7F1nffEJMK0f/ywEmY+FnZVV4/fl1h2Pxh96wGJFYB7eE78IAAAAMwGeg3REf4uRpKqqyH5pDz4TvCO+HJMaGqrdtS376viJGvqRNtOi7CrvGS9ThjJaQu0ZQQAAACABnoVqRH93OX2EwrJauMv/aYKWiM4dLgfPA6VlKtP+gQAAAOdBmopJqEFomUwI38epjsLFC/F3bWYVsVjbCSDz3/NFFz7EJcPUsjeAhu5g0HabZ/pTdPiudhVDGiYxVKcEifZtHJ0mwAP02f3KxpLwILBoE5KpKZ3ak6k96u9QX0BYIZ2zDURThz3E5Ecs7TFwmbLYgIzCpqmx3WZ2+nHSOQT2luDm0mS7tC2WUBIB1L038q70IP7mmgmcfxvHDAXuwIgQQvTDkNbDDnb7qEeNphuMepAZpH8UGL526eLVgrKBJghOlt/VEGAzajEUl3BhFpPgSsX/QRkJ/xFpACLLDGkb2Jne1e6/1z0AAABLQZ6oRREsn812DBeerx++BXNh97FhCAHBFp2YmevU1UrQGvZZfGeZt4lRrbLNgTpILEiupxGlUxfWlIrjdvxwBNEtx8ud+WR8eT/AAAAANQGex3REf9fXXAqIFgtajW1njE1bz+UortEyKI1hn9ddwwTBBszfncLM5umhE6wLGy54PXqAAAAAKQGeyWpEf4TW9Rzxc6AcnNg8AfIm2Rmp/I+vcel00x36c2AMOw8muHw9AAAAsUGazUmoQWyZTAjfbibf1bDnDmV6F6O/WnufALeelmNBoakQ+RD8No/50k5DsmJ/O8f5+6zmOB2dS7zL9WBPkesZ8MQ3/0MksLiL2syu0Os4+DX6zzVah7a75x5i3NZB9mNahlXbFzLVLgWbDFo0MPp2ztyCmOo/mDC8YFYHXaGQYAnLA1akkBw+w7m97niKcIN0hcHT0r/8VhoRhstD4zpvXRFwqgp9DQSmsJ2LaYfRqQAAACVBnutFFSzfgcsWP16iAbHfMo6XUJpab95jekM9opekt48P33/AAAAAGwGfDGpEf4RTMK9Ia6xDXExmf11mxxz6JBZgFQAAAIhBmw5JqEFsmUwI3/QSy/3Mn9LlHVQzmvoMZRAfpfzmYydz58i/v/K7UpAsceKOBkM39soM37DBOWqDpDJYUzpx6MgjQXdFpMMlD1sF3jXfAp63NUJB2HxYwKAS3+X42lams0gG7HPbT9qoElWzMJ58INMDP0k6kDCkWtzZGiljRWKipkGdSBWAAAAAb0GbMEnhClJlMFFSxv/0LB3HWhFl4F9qAxu0d+LmsKyTRhMet5l8ycg7FLJ6bMwnHDvdth6Ny3YdAsTsiP7Q356Lcu08PjhT6If5FExZc2ZvHjw+Z0tvTWfIt2djM4q2QM2UtcHgN2bP19edptftPQAAACEBn09qRH93OX3cYVktXGX/s03NlZ9nnR8Um9cfaQPg9mEAAABtQZtUSeEOiZTAjf/74D1Ed3Fo+hammI+tzPVBVB9GAMtkRSmX9W55hcG3CFWWzlEIUNNLjUOeelsGZvhADsD+Ofy3nAN5GVG/eZg36oP7ixXBkBIm0aSeFatlwVCbRiYm+S35yo4wsvnxxokQwwAAAD9Bn3JFFTyfgOmmneDD0Jh97FhDeN8cWWgHHL9ZXuuK5vJRYDuSt7EY3WZi5fyM6swhMLsBDMNgeDfcSyJYY4EAAAA7AZ+RdER/im4U4K1F63xN3y7TWMTYDhtYotw9N+3yCyCwG7hbmm/x6VomjGrPQRMq2kgD57PUgsO5+FYAAAAsAZ+TakR/hNfANQiYzYypltEIwmskWD5UNtzWBIJZzKZTZZF3JtrR7AcrpL0AAABCQZuVSahBaJlMCN/77lcOEudjsGvy5OKoLXdnzB8pMXFbfBomeiXX0yT9Bi3pzVu8acc1sJ8hB4UGgAg1X7FrVSaBAAAAPEGbtknhClJlMCN/++AchVJ/hJ7fwbdEstXIOKFMPOfow0W1J9MjZSXaU2JvcLoxTQ3MkZ+p/N6EF1h/4AAAAFlBm9dJ4Q6JlMCN//vrP1EbRInimhmXtw9PH/HLVA1X13BZGuO5tuR281DwTZtzSQ+X8/0bJQ2SirHIQ09BynMc2hGsy4/B5wCyAIX1VOyVwH2zU/wrNxXXcAAAAIxBm/pJ4Q8mUwI3//13BGajj12djqpakBtTio3DJ9DBFwgACSqtkYv5h5ytASPgOdOqkOdgnQKmgWojr1xZ/Deftcan6H9Tw3ZA+MEWlDNo1ZYBWRYty0ND9Kfho
cFL32qclLBL9NniRJkfFCDsnUhqrM64RBnhBgh3Fvlja3uOhq+MRbDNIEtu24O7QQAAADdBnhhFETzffU2vo9kHhP0kUckkmlcOZ1qvSaHsv7FYPzfK4q0p2kKE7Mm1B1C8Hwvzr66y7F2UAAAAMgGeOWpEf3c5fdxhWS1cZf+zUAjrwmtqgIPXmRHv2k2L/2uJj4e2MaADqH/NY1U5BRBBAAAAeEGaPkmoQWiZTAjf/WKRlJMqrSGu8YMSMo4NdVTVxbO2c0DfzHpWj3NO//rb0lcIQgLmD/JdeB2LmNOXjmIWgYrNK2njO9hbOQv+tHayjtl/SNf2g2g4kXhlY9QRMuPkMSkCrA4mXqQYXjex6M4K3kGlpFUGNYB2CAAAAFFBnlxFESyffGVVRtiU2M5XSTBlLlLpuEazvf6T0QUYwco555QlcaEH0Dj+hFfQ6SVisFfojn73EUbm8SrLs/HDJEPq2aQ8uqOTVCis92ohdCEAAABFAZ57dER/inDB4mEZFozMu4+KKCyG++hK3D037ngCnbT0Imo7MlsV5lr5kGDygb2dj6rCAP+8m8TBp/+LfwVdRRtehi0bAAAAOwGefWpEf4TUKkzU19rfRxca9Jc/qwQnAu0qRq2xIkvrtGv5GbqxB1VczyvDgln4tgvLQ64l1ZbfxKHVAAAAekGaYUmoQWyZTAjf++s+OCFIz0x01M5hRDAgQ1SNkLroyGxDyE3PDxh0h/mEHwlASDMpnpSSDpuFfd6ZN1GJRdp7/1NKZ0BSML9Ri+WpFVWYy+oWu7FUhBjVLBsl4nohvDKmz8vEbpKUMsdmHeD8TfRVnZtvTlvIPNsxAAAANUGen0UVLN+BHt1YNOwIBo8J7x78Wox8hzQych4IV8JIX4JAHVjfP6+E363LAITjUT0/2RT3AAAAJgGeoGpEf4RTMK9Ia7m5tNYSHf4/ZtvhHWvqCx8lGWv53wQ3Vc9qAAAAZEGaokmoQWyZTAjf++A44S52Owi7Ki/xHuS/BZp70AE+JQqCbzs6/5q8Bi6TLqq2chJ4O0HsA3vaJtaNjwJK08NQv3nUM1lYoOFoLcHLdvLTHCfibUiXw/+uN1EJnDUQ731KL/EAAABQQZrDSeEKUmUwI3/74ESoS0QZzPsXZCCOIIaZzh4p+rex8ji1xPYtKKYeWQssfQMp3vKo0mHw0vJQBLiKE8ApAr2l/ILTZvWEdw0SQbXmWBwAAABSQZrkSeEOiZTAjf/74ByCUGSE5NLsdewC1Yf8gDKRgA9A3XGNkTmRAIk0zuqTVUB97w21ebY1ZD4A5JuX4GJerYYG1yn3M9Fjq85VjxY3yOfXgAAAAGdBmwVJ4Q8mUwI3//vgPUREOuU+W50ENB4ZTfPgxJ4R/QAgLWfpJ4afx0kcK6OroqS7DFWyZDTIp4V4in/vBYNjSauh3e2aTeWXDJZfmmNfyGj/jeE/EeTuP4327cpUG6wUjxVSODOdAAAATEGbJknhDyZTAjf/++s+OEsdvlfnggh/lGSNWpaGXz9PDwU7DfMzDnOAUqnRhTRcsjlee5HHdMPWnOhXPk2yc35P2EwrAHuNbsWtVJsAAABJQZtHSeEPJlMCN//74ByE7aEx9iXfU0+Qi7cT6qjr7osl+ltza7QEqjKcHDYgjA8iNym4TNocc8MUDP2ywSpM0nUK6tcXIpAVoQAAAF5Bm2pJ4Q8mUwI3//vgRKic7NNky66/c817e64Ip2yRLzp3SkaICilaCnIprlv8SQe7wRAh5lXA9b8r69v0/THs/oLcawFvELgXfQ9ZMLp1MwPkpBjFsgw2mzZd4z68AAAANUGfiEURPN+EdaQm1vW/TINWOBhDWfqLGez+cStDKozqO3ZaLVmanISgcBX9Quzd3kidv0xIAAAAGgGfqWpEf3+cgskCm2JriciFWFR8KQwp4tWhAAAAXUGbq0moQWiZTAjf+8P9YCJ2LEHgSkHxWPcIhuhg99q36oJg+yoD2YMOzEXYsm24bpU7e5HDTbai/djWZ57WEvP0X74/hSDvW98TN4gQ8FYDbsgAomfxCR6enD3bgAAAAIFBm8xJ4QpSZTAjf/vgOOK5ljY+B/+L9+6Ag5nJ5NgD1rC58rPGzwaZU3TSod4Momby9YNlv2Wut5rt0gK6/Y3YwVJRjdwKaYBRIsbzg3k2OgRes4iDLyq2y1We08s8H5cePlc8GGSMvtVpkDZXgQ3ARvXgVsnai3RWNmYHRmfNrWAAAABGQZvtSeEOiZTAjf/74Dji3z+EL3mriZEn7qSTHtaSGpQSz7vu0Qa1DTfh90G8QU81BZ+J+YsG68GTxDc1tNVG2FxQvD3JmQAAADJBmg5J4Q8mUwI3//vgHI3KkJj2o4R7wAdkoxoAPl6DDEXLEUPxOrh3d5SlgY3fxMeUwAAAAHFBmjJJ4Q8mUwI3//vgOOEudjpLG75zaU3JffQF+0lnrfbIzuF4nbSX2KhtapcwdzSYt5xk/gC0dG6joJPOtvfYDyHPdosm0/eNEHkREpQU/Lx3HWTyhi/QVBnzgdkBQv8ovz6Jzex7/D3vMDIqrT0wYQAAAEBBnlBFETyfgEm4jKr15Rd2F2D4x2LPtpGUSKL3xrnxXz43NEoj/PN5GE+rLyJeghmbwz2xiT1z1CL9RYYKsz5tAAAANAGeb3REf4puFOCunliFzADLMuR8YF8uYkQJbFv5RTB0u6zvuSXxrl6rDsz+i7vbod3bBSMAAAAvAZ5xakR/im4VKd901cdgCtt0Mf0BD0pitdn5JUOOTT1aiDQ1D71X9AF0lVJsK4EAAACQQZp1SahBaJlMCN/76z9RUBUcvPliLciIvjqR73/pxCfQ8GX7jBwWgVXkAwndy884ukxbTKeablPB5O9VW5A2hak9PYq/Z45Oo6shsBUaTyA/cKmxsiL7o+G5kUuQ8MYCGvoKy2DKLSe6RNtH4L9/+rIRsEmOj8yoDy4VgxHPKb/WQm4tsTdBVn/g3+/tzcq4AAAAMkGek0URLN+BH0uHOpR/2lJ+qsy4cNxEGsg1Os89HfXB7kgKMsUuWa6wa3E9Cvz9Vu2BAAAAJAGetGpEf4RTMK9DxTPCJrwplTdrNLTARpp/H+2xqo210z7XBwAAAHRBmrhJqEFsmUwI3/vgOOK5ljY9oQx//XXAhhGlJBmz1SK5mkHqP2hvO14o18d/5fUUVISry+PxljiI49Ncla4x7BQYXDweGuz/vR/EY/E2El2sS+X64RWh5rFNorrfK7VEZzIHehE7YBLUZYyCOgseeuPE/AAAAEdBntZFFSzffU2vo9kO0Cnibp453nDAGgY00PiyDpgtljOwFy8YbTMWPWaJg1ksU2ghAt/uAQ6k2TZLipHJGD5DZkUzEzwc4AAAADUBnvdqRH+KbhXAZA8EAuKk34jx10uATWjooIQO4t7y/vwPiFVePKe96ekG16OebeqUuepNxQAAAJZBmvxJqEFsmUwI38oIQZY3yoDaqyVEGIIzrxX5up3P+yHPuwxQuIh6RmqexMGn2JW4TA6S8a45F6aXSoHDu2LR
XHvNy93pz1lWEn8VRiuUaT4hawwlUHpq+vaVckWlKSp4IpFdIAZg3tMDq/eCxf/2BMIs4+tEBE2gLOsQ3IOpZN1Nbh8+Q39DhHggVbCQaqg1VnbmMLgAAABfQZ8aRRUsn4Dppp3eFORjRG7B5yFTmmIQ465a/eCYalbPok7MkL2t6szR+A0CkcE/nExx730z4edybGjkTzNz41lOg+CSJyI4X3ooGwbsjFtXwRjxwm+LP6VgAP21jRUAAABHAZ85dER/gwyVRD3jGC4pS6OxRSsTP24dYF84UyHNnutQawSpCF2+m9LZXyJCC+IxSaMtdFhc4BHjgm9gCKjgMNKJ8wtLHlYAAAA2AZ87akR/hNQrDpfZzHTty0SYpUlNuCqQ6/BRPgxApygEU5Fkje1Z2NJ1LTucnfFcqhZof1iBAAAAvUGbIEmoQWyZTAjf+8UQb2oRIDdyJBJG3p2eSRku//vGBvYLMgzmEIVmMtrZl6/1t+3GqkdEZly0y2btApk7ZYZsIsTNb8bH83WEuTPj5wetHe/hfU1579gz9eJmyWaFEN4R4kRUT5qpZ3V5aB0GqdGynWTZ4tnQlWYylh6GVVEwLVvNFqWdqS5yTosyb0fNLjd6vhS6+1ch7S8mxV5lmhCvgGRXTY5b2+5TgsBpT8dK7WAunqXgh1UZsPH2jwAAAD5Bn15FFSyffGVVSHdSb2LfjLSUp6GX0WfYywKsLovtWybvi56QWoaMzSn7o2XmUV6WjcC/zKrkS2qukaOC4QAAACoBn310RH9/iXZSQOIbeTW0680W96zcWiFcp4kIDofTn07/nwFewF7sA6UAAAA1AZ9/akR/gRCXiXa4/WbZ+NyaVYeW6Pf/aBVzqs5yIgtGflctsp+WlKrincEJHtYZiVyZ4kAAAABrQZtiSahBbJlMFExv++BEqXFF1Gh1OTCwItMBlK1vXU7Y1ChujfwgDSEjxp0EbQ9CFRRs3Ld4UN+pKee//cFrSip/MsXQzRJi6a1nf5wlbHjxkRq5Gl8WS/GeHRd97LN6POgir71VTPszaxAAAAAoAZ+BakR/dzl93GFUqBjx10gSJvD19gv7sp63PpHZI6Ie6pbZTy3WoQAAAI9Bm4ZJ4QpSZTAjf/vgPUVXdpG8vg4rlnY/ft0DGCD3fGKCJUJtOkcYpkI/rGxid5A3QTlupDhXOLGx7FAh5xXChqo0uaNM8+rYrg+39wO/wMNbPlztdpk9fui2dj0P/mWfeH1Ug09qs5t6x/FiWlyKrutmeEX6/v0IBu5vp9sVb+RmDgKXu8CyfSKKiKb4rgAAAHpBn6RFNEyfgOol5DugVzX2orzM1hG+r8MU8NyTphLAcR7riuJhLCmXsqR4r+fP38yAn52AQJ3oVcR0wEHGBUJOmK8pqozKbzUBCcuWr4/sv4PosC3bNQ7d4cLNL61+4Hajc2av2ig15vb1u1wugeDCXJaiz13UgQDQlgAAAEIBn8N0RH+KbhTd2l4QxwqahFOEir8WO4bHJp8S/K7MsINQWNL/SSuhWUuwzya5TSxVSamU7UEGgtWToCyOldc6FVEAAAAzAZ/FakR/hNb1HPFzoSznrKuNC2BC8XbeOVatSDLSeW0TBFsdg62GXXU9E7NOBwisegIBAAAAsUGbykmoQWiZTAjf/AOiKyFAmRxUiMbEE4XKUlvaqkfdnPFkuSrv/uZ3Z2K7U4O9pzkfIf7vPupN9AbVD8Dwu0MYNhQUfSc7G5a1t/833BIPTo9W4fFuf2qMVYdA1azZtGYexCZGVNJ/9/42W7lwA+c3UjwmvDt+IQ8gN/ySJxefGd+6V4IysnaZg/17TFedcsedScJPSmsaAWJV2YZr9PQrnZWmT023Hkz08fPsg3CfQQAAADpBn+hFESyffGW008aR19uICCzq5NQWZ90NfrmxIeDOiit/PCFcxaNQKPjDjQ7qd4CRyqeYilJwmJpgAAAALgGeB3REf4BsK4JrMmvsK/sAAGwlDBfyHnOGZN3YuTYe9/CpCnyrjEBzHGVjUCAAAAA/AZ4JakR/gRHB0sI2Gg5rX+0O0q2TvDPSc97UgUfQvJFcWSBSyl1AmJl5+F9Odi9A3zNqS2n8PseQabVvy6/BAAAAe0GaDkmoQWyZTAjf++A44ria4Qw4WCiSmMPHSR1OYI8sMHbiuDjzddiGMt5FFfQsG3Xg/s7xrPsrUAb7CZP61yqPvjLICQfvhRYvztPo6E8M402MdjGVc+5MoikSIp9hwj+gs/Mh6ONlkOtd6lmuN/ed6YdaokhXg6/p3AAAAD9BnixFFSyfex1K7S/556qAF2wZ6eiICEv8kLtY3rwO+XcsSPv9lHbtQilvH7t6S2w93Av2M0H+BnDw0AQGkaAAAAAyAZ5LdER/dvu+WAiZLVxl/UCxr+jB0mUBX88+mWTBAoHaD9tT3RLSGpwK3dfZUneAJAMAAABMAZ5NakR/inDB4mCq76bkuBs3tkF946bT823lmJeD00/zi7mcXVT/glUaS6jNfGEtB5R2xabRDQfa38sKu7jTT8gvmqqlDa7KdrZ/cgAAAI9BmlJJqEFsmUwI3/vrPjhLnY6p+6D40pzcI3EESZhkQEBCdNnAeRIPFFVSkNeEcVI8X5XssF+l+r0Xo+G2YobSzOdbqNWcfU6YbitpLAmr/TX0rnj2EMlad7XNyf0cLZ6ug0PPBXULiz9m54l70LAnrx45mASDWufLQkxiUHCgzbJ5tLv8P8fLTViejM1LgQAAAGZBnnBFFSyfgOolVYNR5FvP/wKasCCjnYQlxD8XXZsDY1OG4dlU0osuRUdZ9269GrNsNOazGKWnVWEXEQ356Raa/y7lNVGogDCtXx7LDbgwD5Dg7QfELHis1MwTs25skxKF8NR6CxUAAABSAZ6PdER/gwyV1/gGl4DoPPqyLS9sHc3sEMPzJO+wfSsvfs+SXVSm/M8+KeWiKtOiUBDrSQJ/dV0wHz21x3Nlknh5MfWPAzaoA5JbIyBOBuVi9AAAAC8BnpFqRH9/nILJGk5gad+eLsoOm/85p1qMApGsq5TXcltUPCfAAuJzBXY34TxlyQAAAMtBmpZJqEFsmUwI3/vBmqSMhNsPuU/Iade4QHot3LB6WrIq1REVRUWuTSSkVV07RY4SBhsJu6bBxcmmu/7p7/XlQZ81iuMoEWB0fIuqz+0F9hRriMm+Zu++c5UoVrMVXoe4liL4u9dlcciYCOKVOu20Yyvw0sSQzjqY0OXZVZDApK7OgpTk3QL5BbxEbzQlLUIc9ecQXGeD9042d6e5Frg8c5maepHMbf/XGm8v7g+lOh4PjQiOo2rPGPN5ca44oRCfG6hjuP4Xue0SoAAAAFpBnrRFFSyfeJlWxHrflwbYugGcTSy6U0LQpmxil0xIJFwT8XFx7PNwczoCH6Fhbo0X15NUcnc1rsuxZVBBQnNcbPv70rYrdQG974QEotol0nT2TDHb+Qv9E50AAAA6AZ7TdER/gw5gGJg+c863gfTJmm26qPpKYjlbgotHYmhUqPnWNt7iyfWEOKe9buyfpql
qllaZlsDSnQAAADUBntVqRH93OX2EwrJauMv/aX8LwP5QXIKtjPfqQBiLL4Nctt04kl+GBFA8tTN8WaPF6LkgQAAAAJNBmtpJqEFsmUwI3/vrP1FV2ybca4m7Qi+CcCwEN6Sm6YngKSVvXnUfM7sChF5+nlbWXM16e6m0eP3IRF/opSl4/wJ8Tmp/j912lrTy9yAirrx3u2AijNyh/O2sWcv5ZF3bAQNrmKGJ5rRtgcXJ2EyIs1FHVEKdlaUfrkIuoLml7BajyLWU7EXhV8KiyXp0EkB1IWEAAABIQZ74RRUsn4DqJeP16izBqJDziqWfWcM1ER08s1jzQXvUMa1oZMXncVBGC9ZI7nV/tGPht7tK805Y3P+T/QfODA/eDIltS+iRAAAARgGfF3REf4puFOCunliFzADLMuSGFg8oLzH6dqAnVVV0V/5aqCM05ibhk+ZkUat4TgBxzdHsk8qTta9BtubHdd0z1ZwtYcAAAAA5AZ8ZakR/hNdbIqL3l1+IwTqfo4tGdH+aq0/IiizAMo318AwUtAm1jEwsljCSWs74wPi4J4g263FBAAAAjkGbHkmoQWyZTAjf++5XDiz2R9bMf+PSkj74lbvgf+CLlKO2lnAEeCo/BovEKd8MOME92netTHHgj2Xxbi65aAN6935tsknj3pq8q8prCwziwqggVz1hNBwrQ1X6JhikyanOkZST2ZeJMNQ7Y9f7rNXp1lNxIE3MyU/nHtKei86tCmCb/jB4vfQ9xWLRyxAAAABBQZ88RRUsn3xlxPNhKxMxOguiBxRkNmyuAT/aa6QxM1PpGwiqB8JRZtraexcKZFHXdXzPREf+tWX2PRuHYEt70OEAAAAuAZ9bdER/f4l2UkDvsZGNAf/yJYoKrKSiCC4Thsosst1DE6Zt2qgCipQ/85ZSywAAAEIBn11qRH+E18A2k13OofglWp1hqldV5ObnQoJScCal5gPbTIjyW5ljrk8khorjjE0JhwAgDO8DAXOtPqLGK9BvShkAAABuQZtCSahBbJlMCN/74DjhLgyOzpkIzRKgaw5w5N1Q9CI2CzFs+eee7UKL04QlzU2OMinPtJrKhxVooB9zLXIdj1wU1RccAL/RK5Wir3p4Z5Rhz1udVe6K4shR4iHm7EJ7ua8uxY5WA3d2NucMe5EAAABDQZ9gRRUsn3sdSu0vuq3oSzNlAM8OUuryflbanNQhQdg2gcKDlBVHwMruBd8g/Ot4PFgifoE/KP6Ohum7YS9dcg57gAAAADEBn590RH92/rZMJhVKmtJ58r16K0MJGmMWg8xAuD/vkLLPx78bVItQ7HSx+t9TsrPYAAAAQQGfgWpEf4puFOCunxZgudv849O8E79Kzy8NTAnO6eR26TxQmY+gxXfNtnCzUB22XkPe8BFERNAs2Mkr21PBuWuBAAABDUGbhkmoQWyZTAjfx6pjxyne0Ht21MeCz9epidJeHVRgt2OzsUDZ9fGji/m34qapaDq3peBBbvkhLBPU43FEf8C/nr02F/kqRsg4pwf4rJ9PEH4DvxUg6fBJQ7D2PXTuvDFf4X/10q3JA3YywimHAzkd07MRAHpUkRpMLgBL8pLEMwRhbPWVpnT20qZSuLPZcdFVIiFASoAoRw2NZovY8cyAqYzbIPWrSsq65p4K76JrBR5ZOJlpYoaGRw5W0g2bASg9QEtRnRj/wfIJlboNxF8URYCsrV90PwYzTaGML8kxduJ1HEhaPv8aDti8cRlE7XHaGXPFi4RmNryyxl8CqT02GdWdUGWMt1v+HtWAAAAAUkGfpEUVLJ/NSlfleNWMCRxiLP05rGSLgkzLFEKRmQxMuJmvzGpyeNPxT3UwwrV3v/0iFzkqNLK3bHJnTrwhtjxlzfd3LB7TBGgwkOn3N18GCDgAAABaAZ/DdER/zdG63bG7KwCn5wCGa3osZLctRFh023EneSsmn3MzWC9KqCBDdw8pzx3PPwNQzEg6Gn5QdiDUHnMAGDZ3RWqH8/GR/hNAU8HnZ5GQH7ni/2AUHpBBAAAAIQGfxWpEf3+cgskDvsTAlIkk3Qv4QtRWQBJxMPzllMwDnQAAAPxBm8pJqEFsmUwI38ep3IcnxuTWqQj8g/rgSxKei33dMugniCkcN8GPABf44Tw8t1Qy3eEsRn18PRXxXi7KFfT3IERwMDz7zpA3QCQSPyLpyVwXLhM7PhboSU8kbfPO1nIY8SIm+UMfGoyE0RMWGKM0JLzYaywMKx9vosxmymYX3DSB8gROvjqj5bAU+Je92mJJE3ZzVPQnpxpwyoFzVDoLe998j0tIsq44A33luvrT6F1Iu7OaZJWo7H1e64kDqoUz6nvhWH05mQLuC2M5Y6ktRKSEcGarBYzmLNzmHh7vugEWRyOLzOV3ZQVWiOAToNJ5ySrGU9bIN9xzr1kAAAA5QZ/oRRUsn81KWB285AKVSC7RFY+doT+AHMerKL1ioOnmHy3pPwHyRwqUvphrWUAjK3ENTLIIi9eYAAAAJwGeB3REf57EiBPUsI2Gf4tmrnTNH6no8dZUfNSMntiCE3jpMKWXLwAAABwBnglqRH93OX3cYVktXmd/7NLucyKFso25IGF/AAAAl0GaDkmoQWyZTAjfyghBljfFGc2weQw5uG12FPp0eBzDiQcuXdbNVOSPW3hp8N/oJG5bAO3Zb373SP/YhHQc+p7lx0UOuTmVc4HVR7yuvu9I8eIWq8uvvC2yoCvXDnsvL6Ml7y1Qbsl4kWfJSSnIKW682NKdd78et0c3c2fAD5VjJnXMrm9zYiL5tXnYF3cZwssrIHOcmXwAAABdQZ4sRRUsn4BJuIyq9eUXdd8kUEVxBXvZDstOfWiaasOOGXz6d2BweVCpPVueVKNIzwQFXuFUU9XBKpnS68hOjBnpCQT4QYBqFRqI+fPbQVzfi6cOp2mU7sSvCMpmAAAANwGeS3REf4puFOCpcPJ2WonQTLkhtA5MCJGSAixNk7uE8XY5myUHB0SlZsop1BQ/gkY1XDSgyIEAAAApAZ5NakR/hNQq4wqUtHk0A/hBBQEMC8kIK/T0jJcco6ztGO/MojFLnPAAAAB+QZpSSahBbJlMCN/77lcOEOGGqjflHZLVcFnf06lApNkw1dyuEV9YbzJBRtMBgx2f6FglR2zHhzFYrtQrtFgGZ+DglcCrKvwM3YU7JhUBsKErOd3vij9YsCRfhH7aSRLCbVXwbVZgzfTpOsK2Ik5aDiH8Fbx0ZTn2TUf8Z05hAAAANkGecEUVLJ98ZcS8h1iXBUMZDLJz9G659Jpfnv5AxC0q3dUwbyNGgKrSkrRowhNA2+4791C3SwAAABoBno90RH9/iXZSQKtBXKXppdd1hcPZsBwcUAAAAC4BnpFqRH+BEcHSwjYaDmtfG61R47GinqanteSqsWgtBpO/fwqOA9xgYQFYm1iBAAAAbUGak0moQWyZTAjf++A44TvouN2t1o3r0TOX19VkdemfIkJQNjJtg2asYKxFD5FgdXiAetHtc+GXPVttB06G314qyFnyAAJOZ29a9/0H+3xR6ynmfY/umA
2SihUcWtibuBfPpAHobrBH25owHGAAAABTQZq0SeEKUmUwI3/74DjhPbILSOAstfUwFE4bEuwQsadfXqn7plGI0oW1t1Y/wn0dDyxQqMCm+V/gNYrsp0t8yc1kYM4KZZ/Ep47KXKTj0fca5jEAAABqQZrWSeEOiZTBTRMb//vgOOEuDjiPT6VQ+iwpFIBPfM7Vc576QnIa86wIKthdGtodrF9dyzF6wRQCkAGjjRZKD9pc+qeVKQR+yM0R2LkRny6G0uUgLJfUm1EaS0RgY974VyZDjM1LWcR5wQAAADkBnvVqRH+KbhTdyySQ5ZqJ0+qpbnfk0JbeSoqS8GFf39+Y3CXtR+VXfNwhOcPWv7xsKzRkAg4LHFAAAABwQZr6SeEPJlMCN//76z44S52OVALIq71Pd2EwQJX7/N3fXhfhz3oxlU0IeuIeFSImpqXiYeUYQ8aIei7lagfq9dJeClxCf3/FRuzxdmLsPWEpBDNcG4b7eaMqmz6BvHDqlmnYyHJrEZ8IZKKB4lJ/QAAAAD5BnxhFETyfgOahWDclf403X4Twewl66rB03AAAR9ia2izX/w64YBBi7Jz+yLB2fmcHmEV1G0PRn850sHSM2QAAACoBnzd0RH+DDJXX+AYxXI2kR0rdOucxMrUy+Pe9iIFKs2gPngM17pEs73MAAAAdAZ85akR/f5zgScsRbQaQhHGzw2FWBvNSvTqWvH8AAABYQZs7SahBaJlMCN/74D1Egm5pgj7LpdrUXh+DK2shFxcZendNqyb3B1+J2NOtJQLO0BY1+ExmI8pwJ6OXctPqDeFKHLYODoMFdurTJF+a9y3NPu44TpmggAAAAJFBm1xJ4QpSZTAjf/vgOOIGX8Dywu5b6HQtuh3ftgTaLAoHUTu8zVtxvvBrwf8pq0n+w4fJtp/GtGwKM5hmO9cHCkxx0FIpHiSkAHgDSjdJ9Bu8/qv30MANfg5dNOeEtmWEqPoExrZqFj8JsKEB0TfzZtNFAYsTE01g9Z2gRAsp5MxV9DO5F2sdgezsL8ViWTohAAAAaUGbYEnhDomUwI3/++s+OEtQtJaAtNXXgd4+LqdFPoVeYoa3U/EzzhI37OlznPU7rmyUThBJLF421rLihpccajuYn/QNJFilPGf3pTu2b7OjUvIJM8X3UCvvvycXDv1blPNG3IoYzzU+0QAAAC1Bn55FETyfex1K7S+6rehPNxk1X2rNShPMmLN749hOCs32PoHwwGQnuRiu7dEAAAAiAZ+9dER/dvu+WAiZLV5nf+zUFr8AEzE8J+xc6y+E6EtaiQAAAEYBn79qRH+KbhTdyySQ5ZqKHVStxrf8zUErUDlRLkpwXJGWM5A57aENWOIeQhlA6EA+NMSo4Nf2axnu6UPFdOUMdBtyzDxoAAAAaEGbpEmoQWiZTAjf++s+OCFJBaqu5RyJqCI5ciFy+QfdViKCVpnQDNKhtjLsbBDTAxQkGtiA7qEUco1DRu3mLPro5hMkwOFSJ8jWf1cptWx5ZqYdGH7hd58lsJBKjBOOxqM2gWtMZwhsAAAAQkGfwkURLJ98BvQr/uX5kKHT5/lStdRB3EpNtMxwioGAW9Yj/IJ6BkSDVCrLUVYjVeHAMS+JXklTR7CAUKq0auNYPQAAADQBn+F0RH+DDJXX+1g7QyFsak3Ft1Oi7eT/WrVLikgNs5g3WmkArqxtoNirEsBZgIMNFCYgAAAALQGf42pEf3+cgskDvsTAnaZI7vQ1BYaJISL78CwSAW7O8MKN4TJcSzKr6QpQQAAAAI9Bm+hJqEFsmUwI3/vrPjghR6aRbs42PAWhfKG9hWLBQBYO7VhzdwkmyTb4OhpVXnuVoSIE4n58p2LyiEkgihsMcSjpZIoAbvLWcxcbbUxw9yvpC4qcgM9TIHk9bnA/m1AEfoKukgxmshP3lvq4sLFHjIb1Bb+gDAKawHICIFXSjngCSxCAgl6WAKcq3XBRUQAAAENBngZFFSyfeJlWxHr/xodW1dt25DUmu/wD5rm8TWMxQj7U8N4VJthmBHDdc8Y8kUy0uyF/QVRg6T1Nw3dOMMsOpcspAAAANgGeJXREf4MOYBqx+AWriBGIRbd89ihAU0EQj7cqlqBeUwwo+Hl7YF8BhJvIxqmsu0cqLbvOrQAAACABnidqRH93OX3cYVktXGX/s1aluoN3CrEK5rAEtDv1gAAAAG9BmilJqEFsmUwI38oIQZY3wS9iq8Ru1ZuwHge5YzmrwHbkgMduq9QhK+TbcU4LXoh7vtGatuH+orZwfaI43jayClVBfzKYUUCnemRjycGZN57cgRK9ea1OwkugTmsWQEppkBNh/JdFjjQyZ4JpTcAAAABSQZpKSeEKUmUwI3/77lcOCE/ywLc6XOt25/biPs8yLMMrhhhyESShoB+zNzfnIF0ukeMr6Ju6bgeroNShV6SlG50RR9jhmJI53KndzULEpskTgQAAAGNBmmtJ4Q6JlMCN//vgRKhGBrYv0qobhWAfXvyR/FlCNuqZwJVrpTIPYrZL+bMPL0iiMNcUQ2zWjrP0PVXRdSCVrfqzazsxwiJ/Zm9QLkBsqmq6bw0xk+Bar3U6aKO2MHQ7w54AAABjQZqMSeEPJlMCN//77lcOCEpHhvk51D3BWHT5CL5wZKLK8ZPkfOIV1GODU/wlBTykgfVfM7+jBF4bQaduzQ/+3A+AvwaK0CE5cHbjVc1pJ53H+StJxG9Ua8/DrUCHl8SGEn2wAAAASkGarUnhDyZTAjf/++tBKhInjrA/ex2nOLzwJ3o+r2vWQ3+t3tlTqjSvEVovxTIc8Jr3NY0ry7qzN4IljCb1qcgZ7YvzWMn0aYd5AAAAREGazknhDyZTAjf/++AcgmTgdQc6/hgQ6BptGAKfJDuajnh3DwMwZaWGpVqb7Mh/5bvAT6CAEuDJtJ56z+N9ZmTYlY3PAAAAZ0Ga70nhDyZTAjf/+8HNT5G8Rzz6vkY3Rq88twFJVOYwb1Xg5/k6UGi002nITY9rzS8PPaCmc5nqS0Vh2hJ5DZv/OV29OQcMFyct9AKS8kL3wc8YwFvLGykWfhhLsilALVhO0EfbQJkAAABbQZsQSeEPJlMCN//74DjhLnMdk0oGHhcQFzV4h856+SZ06+ZzX949gBdXZGd9E6QP+F7soJy+XXtQh6tUmGDVgsotechBGEMrKDbwe5/+7BdVED/4EqLPYycb4QAAAD9BmzFJ4Q8mUwI3//vgHIT56s8EtfCC7Nx7BLXzRep/pBVMb6qXu2U65llvCElOC5o5lAlBAWmxsuQ6hG/bP4AAAAA5QZtSSeEPJlMCN//74ByFT5oRDGgg8nQkCHzopVZlIYNDj50PxOrh5uMMbamN5s9P/sz2hGglU1JfAAAAWkGbc0nhDyZTAjf/++A44S5za/LdFJ3sHpSo66b88l6UQOHPKyMuIn8r+SwswHCeYG8PNQSxT+5oV0IxUUsMcQ2ooBv+/Y/FAlBB46wXopImuyCaT/Y0EtV5MAAAAGJBm5RJ4Q8mUwI3//vuVw4S4OjJsHVijyDrIVPe0J5xdGasN
[Large base64-encoded binary media payload omitted.]
5AZ65BM2cdcwhRN/7j+yRSrtO21pzFo5NGhAw7XafqedlhzTYP+QC5ZhN/egGmszEX1+2B4unJ0IrXIAAAABBQZ8uRRUsn3iZldpf/xqA7EEaL/5ZgaVWGAVcD315eHZ0QAYXXrP2jWx1qG9PMHNp4FYUm6nfg0+ci6cjMQFda8EAAAA0AZ9NdER/gw5gGijNiQz/5kp5JhG8oUICnRs7uFapasLfsMKPh5eAEO7oRpw9QfNO9PihmgAAACEBn09qRH93OX2EwqlQCa65akqIwjXCrER9aHr25SKikoAAAABtQZtRSahBbJlMCN/76z9REFwH6H8EqFmA4qxAVHo6OWo4ZywYj7NSM+cdJHCwf4LAdrJSFZD3glZ52eoTwrY1x19p1bxCO7ICdQGhzEiQ96Xc9Y2SETJOD2Vn5GWGsL7J+gJXntXk/X8jEs5a0QAAAGNBm3JJ4QpSZTAjf/vuVw4IT+j8dsgkolcVXPJAv/DV7tMKPOhKW0Eb6GkMP3S2+4Qotkl28worhzeCxOdJSjc6Io+xwzEknX0Ktf9XBtxbX0nSYFhFqBO/1e1O40UYGwmBSjkAAABiQZuTSeEOiZTAjf/74DjggZKH2CCRdBnEYQWZN54htqxWZjo9aIG1TMzzTiRe4WNKcKB8WgbTaefOnNjdIP2vt10+UjnumK7PI2s37KTJhbyphwMl0U5kDcZg1Yv7FQFx5mEAAAB9QZu0SeEPJlMCN//77lcOB8tM/mnq1/U/2ketb7E4WfIqxx2WSC2Pl4TSDg8o3v0A5Vjy93uLHMTZs+BBdk8ByTQwMAo1JaoqmZVq8af7jmwnevUWJQ1A4a64zgNIiFZUXmvCBDFKozh+l/Zy9LkBa4jKOr70skQOcFAQ9YAAAABGQZvVSeEPJlMCN//760EqEjYhsbGdgimO8QcnqaVCP9qbOEFZrTmsIf2JpB4TSeaxpXl3Ulw1dscvV1EvErHcSHNSQtr58AAAADhBm/ZJ4Q8mUwI3//vgOOCLONnHJ8xun5qHA3ZmgreRBzw7h4GYMtLDZh/dmKRwhM+8ymTIp4LzwQAAAF5BmhdJ4Q8mUwI3//vBkESMggDx1VvVtLMss+aF3SXvKnVrCTWWcSrFJBN0jS1tZHuusb/RraxhnYjj1+/yAsYA0rgXmW+bCEUISclYRiHSnHWCOd1ZBmdTg6KC1ZbAAAAAZkGaOEnhDyZTAjf/++A44S5zbJqFTmyU0bct5evsZEZnwCITG77hmWTV3ATPGRlTvfx5fU+QbyuvFKr5KrASAROTbHME0sqsyGlQ6eQe5/+7Q4wQpIcxz1EsaApgkTsdRh+yx8JzHAAAAD5BmllJ4Q8mUwI3//vgHIVWfpo379KAL+bC1vac0rQQmCNEFSc2ChJ69qz7GJwQcygSggLTY2XDCMS/LkX2TwAAADpBmnpJ4Q8mUwI3//vgHIVWxnyGmcsFcA9oxoBl/brkT10aH4nVw83GGNtypUTAP/suuF7gk3SK4d4YAAAAXkGam0nhDyZTAjf/++s+OEudjscNzZV5CCe/aEyxqBVLFh5VRKVEDzmEB9Dk7ScQ40u1uCiy9OuUrKIzx+gIl6P0NG1LHJzJ/qCDx1gum2N47t9UWnFNio6Aoe0ovYEAAABcQZq8SeEPJlMCN//77lcOIMTUMrHZOr0gSKOJUf3LozHu8r1h9EcXvqCO3mOg+d/VC0z5ZYdgFjL3CZniji5n4XXibsuJI5k3acNamysNf4eKfIu8h6JjblX+LUEAAABhQZrASeEPJlMCN//77lcOK5kGh6VLT1xLqj5X9bM3RBN5M3gOtGKjSsWOVbXT9w5waJDJuT3NaaL21k2UzbmVyf92Io8RDVoRP2XUDjiYmvZj67mQ91VySU1YVL5C9Sw30gAAAEJBnv5FETyfgOahWDUeRv7uaiJL9EdNC3IbVzoaH4e6aSo1n7bs0FRXQ4JWRcXvz38ocIB8WqDlMJ9G3aBO6MMl5bkAAAAmAZ8ddER/hRLKDHq0j4d47DM5gwud4/DkjlPlVFYGld+N18XNz8AAAAAlAZ8fakR/f5yCyRJHv89kYcozAl50ypnHlpKXwY51Kj7W0wQ1HwAAAHJBmwNJqEFomUwI3/vgOOIMTVxkqPJRLQpQUqKkxcew6gejhK9dEgRRiWKoB/5wIFCYAVMLtY0lhftvZnZTOruxFhOlh/7mqhOygkTIBTqYT8F+//eGeuYR96Ww48vtz7cCy86ywFxOdvLxYRAQgapIAWkAAAAwQZ8hRREs331UtYjmVOEob1vuAVgoDzC2PbDUkYLtXfYTEoiAWJS2xEv0kGt1b0TAAAAAOQGfQmpEf3/pQAeIEEpwh7Q6XdldFbTTk0g56u0PL+bxWoNXHexxjGmHvNW4Js/aa9dUwMqYLiqLmAAAAIRBm0dJqEFsmUwI3/vrQSpSocjmBEaZ+guniyl7iU1d1HS6ptlCPKacTBMZFdsmw3nQ/2SNFSQYbakk7FMBdPhxaD1VZVNg6EmmIShve7vOeawjwMsQ6qn7zs+OxVipM4voEUAHDO+9AdjYKvS5OsP1RkpHK5H8rsOaGDiUxa546nHEZkEAAABXQZ9lRRUsn4DmoUnrvS2j5/oh09N4LvXKgJJsJGF3dTgk9NBZHHzkIto5QN/1CwwGkGBV6UhxpaON36OliiklObJaIH+XV0IYQQBpF7sgk1BWy0nUlXZ4AAAANQGfhHREf4R4tlBciUsqFFuxA8UwpTFd6S8jYZCqRb3ZWGJYP1ecFzDRMlHObUR8+QdZwRJhAAAAUQGfhmpEf4n36qqIKnWUd9Njiw0QSxoVB/oX/yBhz8h1/xIEQ55PMKdy01KoAx+nvEYqAmAd9ad3OBTPQ9pOmJCNVk/X7WrPcvXMd+GCzMVi+AAAAM1Bm4tJqEFsmUwI3/vhUdkOqEdG62jueiHqqB7YeLSYauyhCAzqdiH19ZLvxGPaZ6tgKbJdVHcaPgr406CugQLvd3FPbtzm5AxeSWxsCek54+8ViKC5KSPZpGstBmWc1F5Fz8YmM3pXrKfxiE5SEFNGyFV/qkcpgvPYbfMJYpPyMe+Mok3YdDm7tJqwEmFuxpR/c7HOjEn/q78jrYgJECTdXUMmYN4j5En5F/wwQM8kA2ap5beuNPWS5eYz2P2j4BU9tNJrSFla6y+1AItxAAAAO0GfqUUVLJ97HUqt10psKM2AYDKr6M37fOlTxCJJ2VrMROQNURvJEmBHrG4u6Jh8f7q7B191ZIjfgM19AAAAQAGfyHREf4BsKlO/qXBAryvwFqs5i0G98z4g8HSYOeOjUiUe2uLVZBQrxdTZrbxwSpFGxJ6EKPGOOvOBc2c9rsEAAAA2AZ/KakR/hIfoLL7/BzcvpolKljM37OToCQ/2FbQB5A84l3GTkJR4Rl+5ZoIT+HkMEnzHYh3BAAAAg0GbzkmoQWyZTAjf++A44rmWNhGWCL72npbHi2D0rm32fIO2vw38MGXgB7/2AwMnd/lvD81EFlh6sV3nHxYoB0OZtYyq9r2rygD71P2o6DBvoG
CJwkGKOl4eFiEYvK83H9emCgm3wnANdrFoijHM4zU0e2RMJh7A5zMM4OT2vz4r/00sAAAAS0Gf7EUVLN99Ta+j2QwAR62o44atyjblHXj8qcvZZupH136TPo8NbBq8vABIfCMtlgbq+SEavzCvqU9ljoozFu3hOJeYHrkwhPV+IAAAADEBng1qRH93OX2EwrJavM7/2lua/dEitfWcARv61hjXJUf8VE2rGPeKPTvdA5ULh6n5AAAAnUGaEkmoQWyZTAjf++s+OK5ljSUOA3TFai4RJE/jHgw0W4P9CLmTx1ROtoE1Ct/u5aZKpqL5UFk907WKqU+Fq87HMy5CDtuyG1GYY7+oe/qacQmKPQggXqXzfVJZxonCiuPjr8IvShkyg9jrNQn4JR5rA4cMrT0Zg58ZdIL2DxBO+Ztm7EZbuwpX6hiLDW+Fu7jZRlkzHZVGcLW+A4AAAABoQZ4wRRUsn37EZ8k4yrEJ3gXZR38ckSC4E/G9JeY6nvqw8cni+XpetBPoOPSLCtYJ2bS4fCf8qRBknduGn/zENrDlaOySIiDPyNSjlsUVOY3Be7kM82KFFJZ4p6pqGLRuc8L/WHBJbyAAAABOAZ5PdER/gw5gGGHvGKdek4JOkgzZuCPm5S4Jttsi7GrvSbpquZEjT+l8DryKb1bKgXccBTGldbxppBvtraeOctJrix9CJg+uODiF1InjAAAAPwGeUWpEf4TUKvadqH/skJdonVCLe4CSymbvWcZPcHMhqYKXxCXq7uBXK/nUvzCA7bRhXe4Org/oKbRblHoZbwAAAIxBmlRJqEFsmUwUTG/8IvJDIWLLGwa/ZHF/KLUN5/pBVYdMU+ZmVpOULfEv18kUVSeOAd3niURef0hBEmg6OBZ0i0XF+KXTRz2y9KRZZglHmsDhwytPRmDnxl0gvYO++Jtbp40k8mBlBmde7Jtob6nZkW75fdtnP5nQj7Bc2x52eeW7AOXILyoBAXy3wwAAACIBnnNqRH9/nILJCQH8el53caksCb78m2K8PektcjdV0nLgAAAAtUGaeEnhClJlMCN/++FTgKPVy25+n5usIXnTztuq86gMcZAv//lXvQMR4OFxAH4HHTYRKq4DSRdcbN2fFwL+DmtKdirEql7t068169vI3EBCCne05YqLPOllLz90IOSrfUDiyyb5NfrDwLg+wOh7qHwq2BSxmHlV8XQlZWmA7Q1K+zPZSUHgjhs8Ax1n9XnxVLQtfRLLyA/dBKYlK9a5jC+wfdcMLbf12+YuD9YhzRWeXQP6UjIAAABLQZ6WRTRMn3iZlRG9wMbG6rulz+i3xcFmicnkVfPi3vI9sl/efAzv78X6GnDlL27msiMoZuydUOS3pIwXyOEjvXuFAt/zUCd6+qXJAAAAOQGetXREf4MOYBiYRsM/4Wn9J79PPYoQFjb6GuY8jqTLyI2PeCs+yCv34RJsSxBkwViDKVa1l5B7DgAAAC8BnrdqRH93OX2EwrJavM7/2lqioT5DC3FIJv7vm//z26CikO4tVaLBSjpb/LkdYAAAAOVBmrxJqEFomUwI38n9ga29JPKIOBHSx+YsFh8iUW27g9I27uSlq/OZWnSYhIZM7ODNk5Eyw1T9OF5rr+J2dy4gl+9RDUCQuNU7dxcC1S9rJoB2wj0ZCKt9fx0BJCscbIz9DE60T8UJcZW4IXI5PQ04rPvV3fOyHU1DTpW5sLAkcKbZY9J5we4BPqx7YVed7ploglm/l2SkYkxjUmxsPzRVLOnRXmV9hlstOpUJ/aOYKohirbc2wOqCvDKXJPhu7uw6sdlwNg7ct2PF60e8zKjuPUQcgh0U4w3KxPWl+MIuQ3Ce8JUHAAAAS0Ge2kURLJ/NY7qFXvgUaFUOZCNBf1AjC4hZbIHjRp6oas+6Qqk70vXUDe5rKS9H375z+RHnXGdynCNUDFHftXvoHTlj4VbCTJvnaAAAAGoBnvl0RH/T29sxF44A/5fnAIOAgxy45Xst+FBoNPddunOVgbfDRXfDZMjQfcvkgzJf51llDHGEOPZdOqXjlDVUZbZjfZl0loJS2YOt48R/pzXYFOhFufNJzmM0UhgMuQR/zyOciCsNb0DBAAAAKAGe+2pEf4TXwDNrCamp/ew9bCI67/4OVldsPjdTTI086/CfMW5DjVUAAACXQZrgSahBbJlMCN/5KYRFpdsBNJCPfk3shMTTbZ/+m1g3BC6EDF3hvpzFAan1Nwk0GhKdlNqVrfEyKn/lndpE6h/0uKfhJLNPRapdj7uQNAbjcketIZV80xhYt/ITlWMCxK8VuHPNA3X0fatIH0MAuSlD9/rmnTCGX1rX+/0XG+Uq7kJlIC2ViD50CtLv279/vtSKlXHogAAAADNBnx5FFSyffGVVR+wTASuTnuC0n9aZFyEaIux3eFAxaKRoR6o1P4kyonm7L6VYQyuDgUEAAAAeAZ89dER/f4l2UkCrRbpOuJ2cpJvutsxXLqDlkAIuAAAALgGfP2pEf4ERwdJRbH63u/+0Ksgh/Pd611GWPgQqLOAjTiLs6CS8xHLR7EU2U8EAAADGQZskSahBbJlMCN/Hz9Z+6Xzm3/q7oYd0G2hnycvD/dneZ7oEkBQficNkv3sQWvRjB0O3J0i7f3L9TFI5xTtQclRChNrb+NvDDZvgNHtISAsf4R3ClMzNDi8Hti27fyjTiY/1PVoJovYx9gmD2NorCNGDW85eiStJX8zbjsBw/hmP8sN8jZiXmeBRjX7HLakmUV39clUJgwqNnh23yHABH+5ni2vm8+lQKHKUjJ9gK7V4lXqq7mt6OcgdnKFMyG6IrIrsSQiBAAAAOUGfQkUVLJ+WYb1tdpfdVu0F1k4kq5RhUrdGhAFDHYPp01TbmPpwo7SUbGx5vDZ6pujdLsJlfZM4VAAAAB4Bn2F0RH92+75YCJSoGPEHs1BaPe1nKVFLx3Wq4NAAAAA2AZ9jakR/19HKwpKb3hDHCpqEpgwPiuiuAowHjiceMfYzoQjP7bagp7T/MwoVg8eb8n/Krh+xAAAAREGbaEmoQWyZTAjf++s+OEuc2vQLdK/i8ClDkp0+Xtapl/Upi4Dlsp5sQ9tLIcelXy8eqVNGGFIhyPtEhgrjW/Ytb9bgAAAAMEGfhkUVLJ+A5qFYNR5HWQmzD29FNpojTcXVb5py+ZttjK0a0ojZ+6HbGv+AY1mIwQAAACUBn6V0RH+DDJXX+aBtjXyInrteZCVnwVE2lOmdE1eqH6zby3RAAAAAIQGfp2pEf3+cgskCrQfAkzm4yXvXEhlAALbstuTsEqLFPQAAAH9Bm6lJqEFsmUwI3/vgOOKjnj2kz6wE/ERkEFrp9mvc/FnvGTzuEvBxPM3+7YFlsfMrbeAOREsmq2wIOZ2exoKE6AbcyGVU2ti5k50trMZUI2gNhJRzN+j4GQsPgGo8hxfd6F/BV5QxpjTfZWDWEBGtlz/y9p06ppT6Q14s6X6BAAAAaEGbyknhClJlMCN/++A44rmWNeEapfe5ojbjO3gLbIo9IFs/sem9m8C9hrzgTXFpAoFFoez7AxD/a
fkFQ9GAOeRsTI8AKIit2NJwfsgAJBBdBVBQwXTcfUZuy+cNB/YDXD9y1MyNM/ElAAAAS0Gb7EnhDomUwU0TG//74ByFSpwKu/yY/TR+8rXmM/1ctJUphchJ0QahBg9/drvLVbB1uEUK1tv6c0ET7S2VOcyfJsGdD03uaOxggQAAACQBngtqRH93OX3cYVSoBNddIFIHoxaPZCAQcGLR8c5RXh9QJpAAAABcQZoQSeEPJlMCN//76z44S5zbHmusf8GcmWpiDjGPSa9PauHTPUthM4Tq2k7DqWOl1BucpTi8VQ6775shA/8Z+1n8HnIUpmW/CGfGL/3h8FtqksT/NymcW9deRJAAAABNQZ4uRRE8n3wHW3NeA+wKIZuyYfA4eD0aAfsQ5xdegmzrUzJAr6D2LS/mkx6XUnbpQmRhSBMw8zIWgJzltDGt3NdjhobjGWShq0kKsGEAAAA8AZ5NdER/im4U3cskkOWaidPqqW535NCXBHUD07pfk065jmmezMmm4OngZila6PBDQ8SA7tNzFMqQHG+AAAAAKQGeT2pEf4TUKvcv6iTIVCcF6BiozlPa/0aj/QBDSf946iRfptC1PLluAAAAXkGaUUmoQWiZTAjf++5XDhLnNr5FeZsxlZSRTEFl1hqtFA4oqhfVWwHju6NzP7Gzpz+tHjEfnDW+Oh5YrQeKavE+wrgFqrG9jsjA3/ox2THTdhWkSqy4aSCdsW/k608AAABAQZpySeEKUmUwI3/74ByE2ccN61JFkvALjEXs6KRRlHtA05Cr+i+xeGCVyVXxdoLYm1mfGZR+dej8GUfR5noE2QAAAGxBmpNJ4Q6JlMCN//vrPjhB4PuVF9LGgPQZObH+9xBHiekie60xU2mBEcs05OcSNiww67eP7NZPCqU7vyMShW9O7t9+XYJYXFlwEsy9Uhd8xicrYd7APjwDqdkyyfwfbzlstFm80HL6Q1TQ9IEAAABvQZq2SeEPJlMCN//74DjhLnNsXLgVjQBiBDF31YkR99oQqK3IUPQox0FL5Ngyqz0vr3e7zX4D7T/IzH37YlccmItA415mW5YgU9nDtlwhJalm+PnovRivR9bYdL21zzDZV93jsLfIFoLu62yIgI+HAAAAOkGe1EURPN99TaQSGVFDP8erdHfbNvCl7pmTW1uunWoHKJh+8IYvX3j4p8BO0arrZ28AEjtFV1nfwbAAAAA1AZ71akR/dzl9hMKpUAmuulsWOvCbvqAh1l1Ee3e0VIjtX+Pd6HNzYEzwaHU95p93feOAOeEAAABQQZr5SahBaJlMCI8YVKCqLqWXOXD3Rs2/i3L4bZCiXxdmPoXkYSJoYDVtTiH8gKixVFCbBh7U/5bLigBo+Lh/IlcxdgTekBHV7sq/sMm3KbwAAABJQZ8XRREsR4puFN3JW0ydhYi4T8lJqNEyweZMIfVIyGncYAQYhG3u1bYLctfKK0mJXCFln0Vl2u1VXLGsj4tNzvwBhCF9HkDygAAAADgBnzhqRH+BB5KrHlRVwgrrBwaKJDrhThiLTrF6/6zM+QOaa7ng74x86Si4GnXs5RovnBY3UKlHgQAABX9liIIACf++DLeYgN9XYj5zg5B5QJuOQipq5DurWEjnW3UorRPL+Gmp7W/eIApq4hA6GO7MXHIyDHsVucz/aMFYtKTDM8z25ZLQfiNZJqSiDN9723ESzhz79Cwt5M+8vOWuuGhG+Tvq9foXjrYoVXiVDwEsVfUmlk5TXbEqyYI+MZpo4mDJdCI6FifaFbOBypE2MZf5XWRt96wDzE+1vc15C8Q37MxkeCbaMkDN/t8mxkCWv8440Rlwk+ofcevu9fMi+Y2lGAjdq67Z7L2nX4tpcpo31hZ+kf6KZeyBIu+wE7zfBEWJkrDn+nvizONx5DVOkgfozhAUOvJbujzhGRN82gJwvYpRNAkkauwRrAkj3H3NX7/9FmB7s42z43E2AQejmKa6top87jsooGgL2yW/vkquH26p1/ZTn8L/bLpPfHN4mI2HsSxn8Xasz+QZbRKng1fvTxkt/mpfmK/kCyCd6bnIW6xeE5iW4Je98X1bSpyRG4ZEwQQRvmmA6Yna0ada8GP1pcw4M/JCmmV3O88PrasibHCahaFYs9akqjV5DdyOsA7IxWcVi1KwMrw65H0WqhITj2I/0392KBRdDeMYXdHrGwNEJhUIlQHIAJ5ZcKZWWU9TtKJlEqVRqCAyqMi4WjV04NQ1QEfwgz+T+hEehHkIC/Uc5dPLxaRa5f0rBpWg/CaNuFXLWTTpizOXU12VDPjD0PueUI5gcFxjM7UZKqEXIER8ZxBIM2uWna3B6TinIB//JF7wnKWG3KqQAoRJfs5WvYaG3M+PsmcpsEDOU5YG0Kdgf0PU4QKwIo7CjVGXnuOCeJP65DynJB1TaFN/Ec+KGgl8BEvS9O6qZ9x0NRWtKyaKyidn+wHMOsyvRqTPd0T82Zhih55cQsPNDIk78tfmfJbq26dnj6niV7W4C0RM63t1oa/61b6ejC3GPDzgPht8QbEWOmMkGWQaqDE1JlXaV0FU3nPF+k2FhLclRGSF12k3fSd0B8xrBHuCYayWiuH8ryJrtKZxJ5MQghDgBuHJO6SC1EDjEfxZGD/r6VzWAzizHGg8AmGx2xLAaw9rsvxLPUF+/7yYJueeBW5R93rr0FqleLihl5Xj2Aq5SkobABmcpWGhmxzvsHrBwKqxvkuHcaxY3VOBxUoDZhnQfEv+mZtl/1NlDKLlxwKt0ekKf+H85FMDoy087GajtU36Yx+6OhUz9cVW+TW34lW5zJzNRP+Hc8uhJDdo2lXVyYYbqCDCYtOyMVhQIpdZz2jF3WMV035t+ZxFpEnL8ijjN9Jg8JcuWyjfxs5sP97Y4nlx4+ag24HnZ+axqomYDumc0gNVyEUz6FZ9JbgAuwpXoA5ORKsBxrFSQHm0NhO5MryD+P4NGBrI8OniGQEltNCWxKO4CaPEW/ayYPnua20xl5iBviCOZOxGovBGA5TXYq/DBpZKzwKVYDEuxwEmec9Nh+C+oW4OqD94kO3pzQXD4WNt/q7X/PFLl+l9H8cdZ55u+BvzP0FXMoLDoRw7LJfWY5c+wzINL2XuOAYkfJCpNNcv1s6D4asebxSD1Oa3fRtN5j1N6T3U1HHUr8DPuALIo5ZN8ejEA5QFEH9MczGJJOxYHf2Ea6JmC49U20+9S5lgbbfIDilnnqaUN4P9cfr0GozChKSlAt+3fH1bY8x71jdkn9w2hQSZUYbU2pB9vw4KlfdleH+KnBRal9A8sdEAByt88ulnusl0zuMQqjLVJjZRTm+bVZlUB1A17g7C1j51Ik9g4dNmA5mrD6GgZNkTb5nGM3aTVSZBRdQchSdPGxKj9iMzKtky9yCgMnQWxUthLxXktBy3hagbWow9t7gvPd5DjMYYCoMNWUQ53ExMXz5i8JP+AuTFV+WVNG8AAACJQZoibEb/++1xYQAAcZSln5nDonHJ2FY8IUkCHV3Sy+GFqZcQ6LZxLbyQ6zhy/gRLW0pWGMhKeooRyMTxiJ2j2Rfe
55qa3amF1vmYBco00/ApFnQ2DT4KOcgnT4ciTok2qz2OCLOIFJiYrYM4viFJA9fmIGkJAUIM20h/dudGJ62seoc2Z9AEofEAAAAnAZ5BeRH/gQeWpd9/pOade/rN+isrvDcxP1hTmlgtc64uAfKYMixRAAAAZ0GaQzwhkymEb/vEgWa+okZYoHAhrsRiqhh///5/poi1W1PBAeIgbreMG6vERAWN1PzvCg5/c1bRzTEU+q0vqY4p4w0N29AZHonm2TuYWlH/CYj3AyJ2CEW+WckK1sxFQX9Vkp3Do98AAABmQZpkSeEPJlMCN//74DjhAvudrQV/Von+LqXSU5EnN5Ici21RizZEycc6yRI7HW2mkSAr7OhvSjQAWm9VvWhylF3l+5QdacIDtyQ0c0VNiZE8gmdnua+vNHpLu+/zui78ynwbUg5fAAAARkGahUnhDyZTAjf/++AchVbGeq15oxbCanNPtandCKIk5UFi2McwnOkyw6FFIwJZCyx9BQ6ETJstOW6MqH0KmE3QQ9ScPBAAAABQQZqmSeEPJlMCN//74ByFT6UQhh76pV1tl2CE897btCzIgaESnlCrkP/rn7NokgPeW5v5Tlj6RqOhWJLwnxuEYh/nbkWklEpVsp02UeI5hcEAAABmQZrHSeEPJlMCN//76z44S52OwRnQ4IPpaB5d9PcQ3xV3g8rMCFmB5bsXTfJyvJ25sgvf46GpdxXXkPPndvnlrxgCG3+15DsD6MgtyhVrvxbNr6GQUtl2LeFZjaZQES5OWM0rLLlBAAAAVEGa6EnhDyZTAjf/++s+OK02js7X6gKTlrPgSr7UwuS/819DrDrfrMwhlkZEIEQkkAjuhvWd6ibuHYrJ9ufrJG3PFqSfmXeftKX8TepwZpbdTG4a1gAAAFxBmwlJ4Q8mUwI3//vgLpU+4B85rBOb//EpYgZFZ6eevNZXP+wPe+bIMaDyfbTJHzD4Kxd9xRs1m454YoGfxrw/BKC4vlUK6tcXIpJFyYby74nDDHCTcY26K03YRgAAAGZBmytJ4Q8mUwURPG/74ESpX+uSC7HOS2CPeetPFAoB1Gux145k4OcRwOiiCVIhzw8zcArn6HvJ2afHpwjimmjCVOlWLWAXs6/dS2mQzr5Tg+USVx+WhFO8PWOsAE5kFw2v8j/ac4EAAAA3AZ9KakR/iffqqvm8Jc9gUquRMvf9rGuOKZnd0efesGD02uCoFuLecT1P7NzuAbHBt4lonQco4AAAADNBm0xJ4Q8mUwI3//vgHITeeM3FIGYUnOaFjLcsMmQoftZNP8Bb606uLDf3eMjrSo7Sf4EAAABQQZttSeEPJlMCN//7w/CbbaisfdZegDj0M9TyOi5+HYmOurwQpxoiBko95zRoR8EUd3PPFRCKdewA0ZC3VL2iuZhfJK/KwG4Yg0/9XrmYb5AAAABtQZuOSeEPJlMCN//74DjiuZY2PsMDIvgZTyBAM69A3+mzj1NB9ReQMa5lpIjzkgpFb30Nv8USZUa4L6VDLhd3GOAcEU6/2m71HhKIVVQxQ1IYgBzydJ7TaPB/Lp71eKZ3cme7OXKbCXDl4TYBQQAAAFFBm69J4Q8mUwI3//vgRKv7oyEofCjyt3vsnfsah2kHIdELq19J2RoKoC1XL0zZTv6sAPxkSDJ4hua2mqjbC4lGiBfWgM8EmDQTaVR78G9JBOEAAAA5QZvQSeEPJlMCN//74ByjvhR0RQiScU1Fdfdc2SHU4FOuYJlXYGqlgNrIdNY2a6p25gOjejiWvqYHAAAAZEGb9EnhDyZTAjf/++A44rmUqQo2o78x9zImFz6ycgjd223QsrF0bQ9oStMycJKa04q4jD45hvqUv+ExtcHS0/sB5B/uzSniIvGiDyArKszGSLTNmQx4zT4DAS2BkYC1Ww5Py+EAAABFQZ4SRRE8n4DqJeP15Rd2F1lVcxp2jzwtUtQ9ScvCBuIZmxQraJl3DozW8juXqxWghEa2yC/V1oAhaIA2kP/2/bNYpFpmAAAANgGeMXREf4puFOCtReQubvl/VjVVRARk0FI0mb4PyIwlMKV46WFsEaeKjqMI1+R2+DtK1F/8vgAAAC8BnjNqRH+E1Cri5ko22NzWPuDSPi1danTYsunQ9icYtQPRSlq6m6AnOlFA02op4QAAAJRBmjdJqEFomUwI3/vrPjidIX8jzxcO5M8WgNUVjZvTZqttR/yezTvM/BnIL5HWxcNwKvfguadDkLdYtXjtonK3zpSm1H7UXsq/0YHnzRgNDAYnqnNKKa3EfIz15EOoRIhWAiTMZDR7m4p4cwv3/77a/ATHMyY8Lm+4XtVBteQnl4516EwPOY7Hx9ZJsncgz7c28+7ZAAAANUGeVUURLN9/4NxQllWwS6YjGgAHhfIlm6iYyMZ/pi9Hb6DSetZgTUH6s+U8uHcuB/OfkzdgAAAAHwGedmpEf4SbRPB1qbWDavfsuHn8SwfaNy3M5M/M8YEAAAB4QZp6SahBbJlMCN/74Djh7MxwQMXA4Xa1gvMmJfG5IpGYMILlDBGG9SiDcf+miA2uUriMuCZPDw2oL49uTVSBzN1sGzND8Fw8Hhrs/70fxGPxNhJdvqFB0w+DC+aj849W7UJcjUb7VkYGXQv6/CDjihFxZP7ZmzrBAAAAUUGemEUVLN99Ta+lvuryT/+7Fb855I935eQ/NKyrIpEljglVrbW4VSwnmC7RuPcxJO4HyTlFvPN0kQgJ7FjETsgCU0+IAsrdcNinbd4czNuDZwAAADABnrlqRH93OX2EwrJavM7/2lXT+mo4pwFqeXYue0BFgaMHvinTIiRN9nQe6i5heOMAAACVQZq+SahBbJlMCN/76z44rmUqw207hd7OqeG95zjobXO1BXc8GYszEgUi91ZFitoujcqtv53F3MqCQuvkILWF7cYeyLidHTm+UXkyfQp7XBSq9+bk8rzweySNLOjwjQrKhpbObcN64LF//YEwizj60QETaAs6xDcg6lk3U1uH0HDFd4ifBkn3nk+cTzIVsSPERF68h4kAAABhQZ7cRRUsn3xlVUbBHfvOCCo61ctk1vvWUTe2GOke7+ZMC+0wb8hMaFSQHz69cBeyCx6e99M+HncZK5sJxhMcQG3iUMQJQG5qp90WppVxek0grIUNEg+nMZ/OSgHVaYGxCQAAAEgBnvt0RH+KbhTdyyn2mc7b0qS5IBg49T6i1mEqW9SGK2rLIsvaSKB6hosMeUz8+LtQpC14bO5miLujSI3IhMrcLsnaQe1F9UAAAAA4AZ79akR/im4VKaciB3zfJVSkhw/Z8IUNrw+u/pOSGKeH2tujCA/SRAiTtkOg1ajX6LWUHYFkQrIAAAC0QZriSahBbJlMCN/74D1FQ4s3n7j5n5Nuu3X//9TPET7aPn8s6EoF8yfvc/MfvxyY4pGK+0qv1Wm/xldOtP5Q+S6fps0S+m/UZnQ7EwPsBycEwP1ZVg3aRa1YrfiIsrKz8S8liNH46Caw2vDUSR+GdL+B35GCtu4RO0FhgZFHJ26mYluh/0OmxLZskzny1sg
m0Ygl0PCSjypQA0smNK9TeZ6vZ59Qihh8Ae3AfAr1QlZ6NWXTAAAAPEGfAEUVLJ98ZVVIdZdMwtzfMxEdTpKGRFWgnHy+yhWW0NeLWoaMzR3O/Z2SNXpaNidD21Q02s62yRuP9QAAACYBnz90RH9/iXZSP9h8cdeKVvwRfd8hZRQQgN0TPngGS7Fwab7aQQAAAEQBnyFqRH+BEcHSusAq258vWb8w5iZsvh+pHv8c6GNrxfq4gOcru6UctZ3xaies0Y6hBXGyrHqsGGLfpOkCH5M1jDKmJQAAAGdBmyRJqEFsmUwUTG/74ESpBkuxG2KSXcpc2jk8koxEI5uEi5yoS6e9xpdAt72c8MSnXbbConlHP1BzUnJ/+3z1Aoy5efyGWrhfPczqcoqfv691EvI+hHCd2LcBKTSXuxUnnL/0bEnkAAAAJAGfQ2pEf3c5fdxhVKgE11tH5m8PYCgh4Qjt0i/xdPUGMftsgQAAAINBm0hJ4QpSZTAjf/vgPUSWjRx66POWg+1VeyDdo17n1pRGCAZv0EnMYI22P5nBPtXTAZp6Xq/Jn7fkYYL6sJtWyYB/PJ+yuGPOHKNGaRXXtSKv8ZtxjnL8aUG9NP/FDr8upgFotXBQY8KM92B40pAj4Gj1imdJHCvfbU0iETKVBhp1wAAAAFpBn2ZFNEyfgOol4/XlF6ea6k7mZM3z9p4e8X2Jj46Hpmlke/qavrQOjTwS9Z47MV26uYG6NPmmXkDbOOyLBafQL9eNMNmRUBMDuSM10MkN86FV+v+G2vJvgoEAAAA6AZ+FdER/gwyVRAsFrTv5yQfi12mftmfw7u/eMLjWn2w/45OdHFn7ezFA89/eGqdczYXgEIo3louwgQAAAC0Bn4dqRH+E1CsRGdq/m1pty/GwHijW++zyvH/Gdpk3chKR+bVbMoyVnPmV0owAAABrQZuKSahBaJlMFPG/++5XDiuZY2N6PL+cwxD3ivJ/8tkqosMlZ2kuhgf0q00kNIyHwl6OQiCcnPCaq6e7v2Qvnnewqy5mzqYUh0ZzBmS09jJQtDXRECUYUTYcu7s4jMLzcEOBfNCz9U+yd4AAAAA1AZ+pakR/f5yCy9StiwNVmr5zLjqXbDKcDqtWtTICUwtkcR5hRgZuV/4Inokvtcj6Vro4MnkAAAB3QZuuSeEKUmUwI3/76z44S52Ok28EOzD5G2y58sz7giS1bNt4y1aleBVYPkGtESwSGS22z3kj7PNRXWIQBXSfnJ9k+hqAla+toGcJ/TJgevZWzmQsCjk3toczbqwsz3LPlj/Q7tpVb/p36b3WNDT2urx5wyQ2u8AAAABDQZ/MRTRMn3iZR4RsYALI0qaMPz15tf/PCJjXSCVLzyIjWEB6NIVZuFQd9IHJUmnlM+pGzTuq0jqVN7pbBnNO/83bjQAAADkBn+t0RH+J9+qrQkFHiucZngCfYVC6NrC2q1CVtl2NFIiI9/8efIy7v0ffzAkuTuoM22cIjGJ9hWAAAAAwAZ/takR/dzl9hMKyWrjL/2mAOYr5XPeIQqNEnZH84kJ2zn5AugiKzfdzkWMm0Z7BAAAAf0Gb8kmoQWiZTAjf++s+OCFJBZyyg1Y/s//daL2cmKlc5TOgqXAetAsmctqz5P823VwTaTcwPX+/sUmF5uTMjeRSbn+NbF5LxJ6JtQsih24gjtIz0ieWd7148vl0dKSJkfFHV8qbphVazHMzamX2zJ5KbX5iaHfn+RVsiaeJeH0AAABZQZ4QRREsn4DqJeP2RDmwj76BdzDR/efXogJAcIU8SeSQHCj0dqnjrDVpTgwQJI31BTaAG6INqkujzWTGU1zd1DjtXiaRFbASYt3GqCB6jgT8nxWzUXikMYEAAABKAZ4vdER/im4U1zc+vVyb5fhhMvf/N+sn8Zk5BtnQgFm5gyE+ybdRK46vySho5BSu7O5YawlwbRtBqouRndC7tT/GyEW3yt5IFoEAAABPAZ4xakR/hNQq3d0NOvTMPHejrhYTBJBnYWLUeub3UrmhxZK/nEFqJcD7/UnWAqDmNwXcWBRsIOSn8sxa0LKyMIigPgimkAATId2ZpEAtIgAAAMZBmjVJqEFsmUwI3/vrhHZDqhGUnyzSHEzswaZ2RxqKGRynYBXMBuEuEFfHmUDj9ux0sbihOOVmVeYWgA9LOoKwJ+rMjiSQ6ZUAyFQsCjjaDAUTNJtjWai9Rc+L83o1bWP1S2yuQUsJLNLVuFrlgQAymRGcNkyxEtGUkanf24vl9rYEtUPRoVcJrHcCRxamMNaxH5ea8jkqFJL3PStn/7XHDDku9uliPPRnGOvdudC3OTfeJhZuBI5KZb1w9wEivL8R2sp5CIAAAAA5QZ5TRRUs34GrPAdCUxfCqZZ2unlDcBy8LcrzCPU4ls+ywf4Ue1Eh5IMuWUEO/crgik+To25i+g7hAAAAKAGedGpEf4RorykjSfm5OcGVQs/wa5q5I3WVt0A7JQcZioy+6Cpd3cAAAADPQZp5SahBbJlMCN/74DjhLg4zcYH6PAnbkTY8iUAl5ul5AGL6HlLmdMJTDc6cLh2fLVFs26FJQNh8DpEVTfPwRt/7mERhKyfDeSIWFU5yQMCYNHv59nvZFp/a8WegCK4HUMfdTe9BBOJ+XPTTUutM2EKDdwEAeJMZxuSKyplgd9tOuySGUaYtDrIicCWJFC5wXwmU8vxDFfE04OEohATSR2A5oLuSDW3Mvf1CjoKTHqw+jE9AVMQbxRI4vzb1WUJejTxdtN9Si/S5yQWiKU8cAAAAUUGel0UVLJ94oE82Csl+UbsHxzVXEoxsMq4cxym8W9nAZxQ5JIcYizkCGXhABajDTcmqAIpfxDxLPxOswTcGYTAAIZm4AfUvHOJyB+CdHlrCgQAAAD0BnrZ0RH9/pbSDSQoUYXbJ3rSzc+kRtLUqkKeKWWa+GfiCNMxON4cEELkIjdppVpoCwBlXx1ZSOo1lcke3AAAANgGeuGpEf4R1fILWoVH4wTcI9HBEHVSvB4bqbjhbmfh2jcyxVPpzF2b1jR4X1zi6YlYACKdbQQAAAGBBmr1JqEFsmUwI3/wDwzHCe2QWuq6WGF4P0C2Zsdw46Bt6zA57Rc2TiTbjW4H7HgKfR8Gqh82t98tMHubaH8epd0G9TvymJA0ATEGErg/d+4p+TcjdLCZHjYjKV6SX5pcAAABNQZ7bRRUsn3xW3JFluUBopv+0Jh109W67tBai2wVCw1/Fli28auwIi5iiXJElON5pRsqKGOlx/dZECZCgJwmVdX4qfM5L7ofdjxymz8IAAABAAZ76dER/gGwpqdu2nr4gydT/It9EKEbgleYAPE+8ztHAa3x9WnkO6FBi5Veh1TP1eDEtNkuvOywKMxIz1SR2hwAAADsBnvxqRH+J9+qq+bxiZHCFr0gnhkOz+ad8z4gs78ck69ZSTjmsIwQTbotwcaKIZaLcxmKWlHaf9l4JxAAAAKBBmuFJqEFsmUwI3/vEhS51ZOAPrFCl1ornx2QnJRuYe9kkqMhxGOaMMLj/bNXs6VgpAhRFmLwVR09/A7837H
fA1aN6O9OTeIhLPR21+RX9CY66WwmoIi5/sqTumt7qln29/+p0OIR7TaPCsms+zm3oiZ01cw+QI1KHDn7eVyJ6j4XcPg6S1r5fXKbly75nNvXLiFj5ogktvCp1SHaqEda8AAAATkGfH0UVLJ94oEqpOpYRxdsQCq36e1PDI41WDpJPcKamY+k0pWs2/KywiDKYOYI/MrTcoCQxBFm+MjLqCzLeTpaRiENFpLsVzukTIcUPCwAAACsBnz50RH+EVDZPSGusQ1m7R8O6wN1peQ/1jS1QCsK/+3Um/nowdX2k06ZAAAAAOAGfIGpEf3/ocg0UgjqvtSdJTqly3l/5RQ31A5IZqdimmnTC5MIJTLPa6Xa8Tb9vHN03oEBvLxevAAABDUGbJUmoQWyZTAjfx0V6DnpfBXTYnQk2bwKX2bwMtCO06TgUqyoBuPFXgEpXFMPAzWMC08ZZ56ZhxoN9TkSIiXVTokTYYuz3pKV7lAe/FQPey7N+dNtFiUjQWvY15ek+//wU0OU7OCIKAvTzIaRU2/1uvJ32Jwtf6MeG6udoHVMDrga2SELkoyovsSaeoA8+NLOPdIlZ9IkteJ1UZv0f5rSTvZTQ5nm5XNAnjE/38OYo1ggnhiEji+Cgk5Y2BrVp0F8ZOcAg3273NFXpfm3+x8FlaagyvTugb1BaDOo9dxr8EQG1u2ACk3eEl2m7Cw/3wccsdqmv1k5rlStFNyS7rxGwIGSghx6CdSt/U31jAAAAT0GfQ0UVLJ/Twd08p/VyChpS8T9IWptOSDvdLPszfbix+67WAXfjDNjm/bHE91nO3g6zlnZrZDZR0Gz1rsuN02ikA6mQRG4q5w3HL2ZCEhwAAAAkAZ9idER/hHi2UFyJjt4ot2IIOgG+Vq+0x5nM3Yla/iCdTgZJAAAANwGfZGpEf9fRGvKk48/Ke4CgTzeekpncWwo56eW/sFEV91i3WBPnz0ynsff3Tnr+fRiCryeLNz8AAAD4QZtnSahBbJlMFExv9Cvq/twdBueYn3Ee3pjczv7OR6i3WU+SGv/yONzlAdl0TMgBYUraQumePMz3mddrVgKe39/UH0dZMWofZ9Mqui4P/wvLxsUppGHrrazsvTR+f2rODWARjtdkCSvFbtfvMMzeLFsQ13oDAGG2vvjL5m95QTZnRKv1OnYK93SNtLC5O5AQU0iYLnxfpUikyvxOvC8fzzbFhAqGYgHL1GBJ3KgSybvxeNLFYx0n3LIy1+V+pUzTJ/EUuTURErNUiE759HI2TbRLFPHYcqtlrP1/TS0NNfBRxkAXAcx2r7lawk5xdWWia+VA11/z9cEAAAA2AZ+GakR/mJ002N+vm8JdEQuzLaLtUIOIHfz7FHdVElv3dJhMuXpT4IkFixsgNNJKwy7EeXqbAAAA4EGbi0nhClJlMCN/x0W1i/GN6GepRrlUQYVWJ5EJgT8Zlnuh2q0V4+3OOPM4/4/MCjdcBbKHis9PBt/yefxMxHgw7FScKctBHvFoR1EawxWUsaHfzfSnnalnY4m9Ak14jYSJiHQ5ToCi7Y4bqJeCGTsIs3t/Q3xTdD//Dx8soX29zf1N5xgAr/qPmK+dU6/y5sivZgEvOoh/r0LHDfok77DJX4jZRZR016pPj/OKEWHWFz2SRnFT4/7nXgVNFiCLYupDDD8vVEru70qqarHLzZrSjSEw3CIA0xIhUFDHlk5sAAAAMkGfqUU0TJ/TtTGaqT3w3TTw07mec9pdUO+qick9Inm7YO6odtgQD/RhYzoYrtniSB/IAAAAEwGfyHREf4TAtHqZwf6daDH8ihsAAAAwAZ/KakR/zdHy96S6V8otDkejDD6GAoE1dBa3yj/ms4XveSTIAKMwyhRIvm1XthnnAAAAfkGbz0moQWiZTAjf9BLVe/EEalTabFqMzxVTAPfnxJ/Zlk5dL1vhj03RqKWw3suOlU5fZYsZNbQTFyM5VPD5D34ws73lho1n4IFelgz9POTbop+0V7yvkVIdPx9z1ZXKiB6WJVdOUzkbyhc1ThMFffRcKHS2W7razGQSoY0BSQAAAFhBn+1FESyfgOahSeu9LaQmIfCcRMhY/HlRnhZaQPy6CAtyQ+LszE9EabEU3RQtsAcY3sdxq8ilGwwrV6BXotZB+DgqfBZjqOBne2bdeY+xHF3ipKcm5shAAAAAMgGeDHREf3p9slhDFwgFsyLLeY0fCtie4WUMzC6rVyKWDS2ciTpdi3cETt3ZfbuvDXChAAAAKwGeDmpEf4n36qp82ESoc+OmMW76ljVM0FgrkSvJn6xJsKLGiU64smxwxzEAAABzQZoTSahBbJlMCN/0EtUjX4qLA/VxpTkfRuOUpW531Ao8SwkqNXlLmRBGxN1IsecXQYmRrqrhzuWJINDZbuHe7a3EXT2PLlFrMUbyi7HcW331ihx5HV0pCo1UdCwPmKqK6tJGWYXzqyTyWsq03jrcYpi3wQAAADRBnjFFFSyfex1KrddINRpEPynlrUA1hgUxfwc/hqlijWK0BwD23AC6+DLASM32cUAM8GGBAAAANQGeUHREf4ByGO+by1cZ8ELLxtGybJtzqprjV2BYPrAXGxgEvx6Yxh6AyzUdErjXJBHKQFDAAAAAIwGeUmpEf4RorykjhEu7Dyr89cWrOYZIKdrrF5QUKCEwPBeAAAAAckGaVUmoQWyZTBRMb/vgOOEudjsI3Btv/VSW39BLCWXy94pOkzAzXgbOIzUOMjzvnTjsYbNLA9pAvZ9YoFxYYhx9h4eU9NZk38/IlYerTZE/6bkDdbgNWkejxbItyYDZBfXrFL08jsMcNfK3iihUtFthcQAAADUBnnRqRH9/6HINFiMdEuSVNpUqBczjZC+frF9/+VOzdYvwD3y/RUO7jtNMvopLiPWtfsfkhQAAAFZBmnZJ4QpSZTAjf/vgRKik2M+QxjuXx6xjPBvXPi+CYy/3jhDMFE5SsMyyTR0PdQ+GZ5DvFQ+4McBlrhdYQyRJoj/S1jsCJTCCYb8261AmPc/xNGhEYwAAAH1BmphJ4Q6JlMFNExv/++5XDhLmEjtL29ZAM9do3FyhDOoEv9H/h3RF9bsYANZinaVx61rvr6dcFSPOYKYVa0Ky3w6DZJzzBXVJRPeXQdWDKgQpImvJu97y8iZ+2lh5SMVTWZyUhJjoYcacZWmU6rIRgEt/EHai+6H6pE6egQAAADwBnrdqRH+KbhTgtKWIdHh3CgwGor5ORfdsM8dLegC9vy9X/NkeqmFpzE3INqVqG2xva8onEHyYu3G7LjMAAABIQZq8SeEPJlMCN//76z44IUjPWuSuXQG9n5T17mOUqH+IrXRyER2829XfIa9IKsVCuw/i9Uft+dhAIOotXMZgMVH6ntznsvIRAAAAQEGe2kURPJ+A6aXZxXJX+dIXeGpcopCzqg9ywRig3tyiB2DWkAg1Nn19JHMqupfHrhRdStTdinM2zM8a/E1bWFQAAAAvAZ75dER/hRLKKjdRBBNZ2S51o0KrXqTH62/XmFWxCqMc5WhSfqLQHzwmuHVfuIAAAAAbAZ77akR/f
5yCyQJ9t0mSfiBytXO6KNT8CYfxAAAAgEGa/UmoQWiZTAjf+8PbyScRYg1RKyMpfbNX/CZRmm7Hq5VPl9Kuk8m7UOZdAbbrLswPQZQjheXgSbH4nLjepdplv51yrDISiOwhOpa5KSaf4Oh7414o8y9zXZHdmfzx+QS1lrmt24zuAg09D9q0y9iOg2/VIJnCofp+Dcv/U7ggAAAAcEGbHknhClJlMCN/++A44S52J3BJbw5nJLJfrjdwX6x1tDYZqj16z7INfCLPFEvnVSYK+EgFy5iJXZMiPitdWn9FpnmJqvFHhXzj4TTJ0U4uxYajaipJlltSfuzvOgecTBXwsPWX+UIqDZy0B/uWYEAAAABsQZsiSeEOiZTAjf/8In/MgnTsdVF4hM+h4LwTtR11DWQhRcJNRvQrtL/nWVC8Fu9eFlLAp8tLIZNEfkIXRbXsvYnQqfL8udnCweG1H8XL1oX20j7WR86CHE+9Ke8Jhtdy1bbfdOfKjqHqgB/RAAAAM0GfQEURPJ97HUrykeGrxIFqUtgUr7SYug/yEk3MgZ5o4qOnfx8yJ6ewAL36Oxl32rzUXAAAACEBn390RH92+75YCJktXmd/7NQWvwATKbauC51Qh/fxn8EAAABWAZ9hakR/im4U3dpeEMcKmoRT6qqm2/JijnAse/V8GPlbeJNnpdMFOlnqWvqie1+Z5e2AUboKtScIhZJGToOZ4DYrujj6Kul4w2TJ9Hh7ENnXsHCbwRkAAABWQZtmSahBaJlMCN/76z44EJY/u4nS+fFAtqU8yWqvWTE8GR+nbQ2pXN+qpaQkJCk9IKk89H1bE//13lfA/iJjEiHwWexUi5OP/Wk+Wl82rT8IeKMB/agAAABKQZ+ERREsn4DmoVg1HkW87stKnuJEPWC6LUKIZAqca58O77RVjao/vMVW8zN5+SBSMrUHBYqA3TCqOVCAQjR32PKFDGNqCbkULzsAAAAxAZ+jdER/gw4tmzW7rzcxfEkNyf/onzmAWVc3vPO+lyds9aaITrQ3fCOV9PNwrTxg0QAAACkBn6VqRH9/nILJAq0V22CwA26I7F68ZhwvvzudCOtW9UTJodNhQc7hgQAAAI9Bm6pJqEFsmUwI3/vEgWa+oh2wHUBNFWOi0kAxeW/oBNECdxapz6X4Ulak31gejD9icbk8DhRm0Hs2hpnHJ2o0E9aOoy4Z+ZKHeuQbkVZ3BAG80uP6pot9Q7nbRo/Sr6mvqnH8mE8zpvT0kWpYJYh5VOQG6NR3X0yMAeVL+tP9aFjzEgYaAOOYI13h/D/pQQAAAFlBn8hFFSyfeJmV5SQCgqT7iG5bhfyYEe/eRaEbB7n8Zt1Ka0FoANimziTzHCgJBfnDtij3/czGvfJsLwpDb7MAjV+SrguqOiQF3BNZwA/WhjksDFwtQJpmoAAAADcBn+d0RH+DDmAasguYEJlS+qlA/VHzkU9DE5VIEBd8g6r1fmm3k/DZypjK98baTmgzNHfZVmKFAAAAHwGf6WpEf3c5fYTCqVAJrrpdDQdDLHRuhKusF09cRKEAAABvQZvrSahBbJlMCN/74DjghLmmO2mUnbfWaW5B8Mb92g3kEwH8rIP/BLmQ9SIIzp0Z/9N2kUyz8oEJGs/Ap+sPATqqFFZLVvfg+PmnCmip/KLJjlyWyzFDoI36aRp8zoUQbAabU90Z2YMWlc5ECtZiAAAAVEGaDEnhClJlMCN/++5XVEO1lWujg8lVyQxzv7cDyj9Khzwt+GmZK7MW+odXd1sxR2cKlqoYmmu2Vz7BYmxOdJPgwJSdZxassfDNt9Cqk8Ou7kbHwQAAAGRBmi1J4Q6JlMCN//vgHII+SiBmB1jZ92ZKw687olAG63mvy4JF68C/BtdOHNqo9oOcgCDap8yVUyFaCvNPKBjx5Y12mHem5KjGuR1HE1vwiWnlm7LaNIt7dT5FKWs5/g4ZnkYzAAAAc0GaTknhDyZTAjf/++5XDghKblpByuukqCy+P01EbidExQWS9H7GxldpGPDpK7V+tBUBN0kUkqNhKVgVQcUsvuBYPIwMAl+a8Q4xkIhl/9reTev/yuiRaZox+SduCU+GYb7D5dve6JG2bETSCoHHzLVhoOEAAABGQZpvSeEPJlMCN//76zcgkiFnycdhsLEw0asP63rfGuqZvt04llNZeVygNUg8Jr3NY0ry9QaO7iXi3konyubaDZ6qMEZf4QAAADZBmpBJ4Q8mUwI3//vgHIJlMhOje7UYw2E7Ne9jyk9kHh3DzUwY2oalRKALKuqLHKNjTZEyfXEAAABdQZqxSeEPJlMCN//76z44RALE0DW553rg5eSpXu4lPlg3qaIlhzcDVr/CwZnQ+H1LWa2hx8yylsTU3OCdaOErX+9UvWtlkHhNfA7/4IxFk9dSDJPX2R0DfM6/cnV5AAAAWEGa0knhDyZTAjf/++A44S52Ox7QAqqAU+wlZbuVvsj7Y8xPJ/oIVLMj3KgFlKtIHmn1xsMKiZMfby2S2aO0xU7D3AaY+dFVaek1REHuf/uwXVRA/9Oom4AAAAA5QZrzSeEPJlMCN//74ESopM95J7cOXH+Lihd7kB1WsgMIaYuk4/P93ZFft876I1IMoEoIC02Nlj1AAAAAMkGbFEnhDyZTAi//+98ri6ylM95Xz+FrnToIvklvE5kKaL4Gyoqmk5OFcr+nIm3Aq8lFAAAAWUGbNUnhDyZTAi//++tE3K4Iv8XvdFgAw9nY/t8iJypWqoPs8SfKSv1NUm5Aud8Hq91WXCVfMYTS48S1kcJQTJb7gwxOICnLQAyqaV+gSJ/vuLhZm00gZbygAAAAV0GbVknhDyZTAi//++5Y+Bvd0edhu05xaJ4bHAypArfk42W21z0Hw9aIvP5GIJ7U1/Z42XP07srgXfJu1+InIWELVunkmOM0UPSk9U0pMQXyEMc8Ph6NIAAAAGJBm3pJ4Q8mUwIv//vuWLcrkUYnrfzKvY6YyeTO9Rk4qyfjY77IkI55iiBIrunpAkA7045O9WmOwt1TUqx7i+IPJ9eh/xnWjecTeoRQNm+CQoQo46ThJC/lHxBJ3Ma6WMJhgQAAAEVBn5hFETyfgEm1lLG500zAj+otoaEuWn4LIfRFhyaAA9CwtVZ6mp1AZb89j27+RZtQcexyfGBpeuTXTM9UC0Wa3f0yZmEAAAAlAZ+3dER/im4VKaciB4AJOzHjFym3oODn6PO6EfFl40Bc8IqeXQAAACUBn7lqRH93OX3LIRNDM9tasR3koAjrPHMOiuw7i9TDEgskYPw/AAAAiUGbvUmoQWiZTAifwAj3GUsedIbwtVPmC6UOtk0BDMV7Fztcurfnw+Ukl/88z4rxasXNQ9PFoO/5nE8qDt2fJK6LMxkaoX4RjOKmCD3wJkQpTcQNJfK33lRLTK724nIXgRJ5xYIwCOoOErOutudRLF4PZ/ViMvI+XwSlh7aUB0gxVLJooJL+QGvAAAAAL0Gf20URLN99VLWN00toRsi1JSCmlbiDfGAKxMwuqu+ejxaDbCFeBpddWwXK34eBAAAAOgGf/GpE
f3+3wq4dDu7w/BsOHSau1FuQnVGdVhqVK3yiZQNkNPtat/J4UIGt5UWW8ZQo/jCP/r1Nr3AAAACHQZvhSahBbJlMCJ9uIMp0i44t0raDO9/qWe5SIVgIlo7RXxku4hPZq6uBGB4yOHnSn7I/dfcyQ7IQL8iwlPxXakCthkEjBcrQSK1+YKDZ1aeYZj1E6kswhNqi3qb+Xp8S04OXkru7TYYvfQY8IlY1dIAa1uyqVN5tJyc690K5tFol+v8sP3r8AAAAWUGeH0UVLJ+A5qFJ7VTSGZaxh09N4LvIJtQj3iBd9smy2aE0j4YJdcN+aL86nrecX6XJe9x9zozcD5zJ3+OWlY6VcS3j059iPKQYmFD66vdkRvd6PnTkL4nBAAAAMQGePnREf4R4tlBciUsnWu6mI8oSE9LvSXimcHaU1ew88XBcw2xze23nLBJw1I7DAmAAAABTAZ4gakR/gQeUyXbanD30/po8AnM4qaiU66bgB4fpv+VLJIx/KYTExAZQTmPLSKx1c76Lfl3wOXm3/JkWtAbgIRqsn6/a1Z7l65jvwwWZio2lQcEAAACtQZolSahBbJlMCf/mV66kDVGRg/gckK5KqDoGjG/Rc0jTpLhTmGpUH6p6+6Lg2rz4jmaD/nRZb7pbBze8wvCCpGUZHUhbf8B87j6IYVHLv2NWZ2HtpdsJpaK+aplUwQpQ2jyOwJc8FMrzBW66sb7D7eHQNNKX2TQDpjQOPBX2qpgf8m4FZujU9qlQGljaykfczBPPyrMOa6hPI30+YisxV90mdGge0/yWBpepB/EAAAA5QZ5DRRUsn3sdSq3KxWQxmQudZqrOmd/ZinGlzs6ATElOuuq0U0+75PEkS4CwMI3s7/bFIEfrMlMuAAAARQGeYnREf4ByGO+bxiuCCgeG77QV8cdQPoGcUaIvNKoixhEtWoRDrlt89Zpb+CvpUPEpQWLYG2sl7ltGjzmAbh7BeK7WEwAAADMBnmRqRH+EaK8pI0WvJO3oqC4LpNBdDqSQ/2H8yLxBYfp6xHMaIZZrOGpnEjIG9IZgYtcAAAB1QZpoSahBbJlMCX+Lwr9oP9EDpqimHn50oJzlNSRyBVnJc7Vs6Id8pb1K4EjPXOvWC39X4znnb5gA6pxzGbQIqrjG1MBa6dnTE5rY8vR7mZCBx+zMoAaGfd4sMr9WM2xq44dO8HqaOxPZ9945Sd/152ym2cFBAAAASkGehkUVLN99Ta+j2R91Ew92HLRzMWd5j8GehO+60tjrq8bx/jm0obpABIT+nkTNTJlaL+14ziVE2QZYV4VvHO4XQvOYtn92rvxBAAAAMAGep2pEf3c5fYTCskPjx10tNs/dEitfWcAR2pkb+X6knhPNb494o6ye6ByoXD+voAAAAIZBmqxJqEFsmUwL/waaFZ1EyQOlLmgdat7bD4DlnDKE5EZeA7wS8KuScmLjUp2EbKQvkspxKeGj6wCV4/o9afBe/9NGPieBtZO1xGmVIfxYKRK/87tsBmbY+CKUcMExGgKwo9oXid1IeYBEUO08HCareCjE9Ukys7EXjqpBhqzoxi7rDqSeDQAAAGtBnspFFSyfgOol4/fAqtD33XIsmEONOHHMdFPLHo8kgNBLKWb9Ym0a2pfYCaWEGCwtlD2bnmGnRxwnkF0/3ikz9RRWbpi5VR6qekgem9YtTpaFhaa4okfsIeRm5VyV92Bz5W5+vfvQGlzKQQAAAE4Bnul0RH+KbhTf9HyRxrjP7gTLkjfovmjjipEuxkhrSfiXFbTj+bdRKyxdbpt6VAaA+pPkrRqInGqSekN/aHLTqgaJTdQcGWwTKKS0O/AAAAA7AZ7rakR/hNQrERnav5stEZ3ol4KYJIAtAl5O8ChfhNtpxCXz+ANkDpac7UeKcUe4PCnSrmKSFibwLDUAAAB6QZruSahBbJlMFE//Ca2F/yEY9AmJs0u6nGYXXc/tlS+Ep6R9fuGO2/jOCP4GVYBDYfNYov80JOewYiS/s/vrHJa0Qz01zjkBZYXCzefVUhU29LngYko5T1hwFRoyeH8iTqTzThvYOCNdd/EJ3oSIlNm/FwdpeovOsTgAAAAwAZ8NakR/gQeWyVZmnrxk/Y85AYHkLo/YCh9kxyYfbQDb4iHTGTcQHbUUBaOiMgGBAAAAU0GbEknhClJlMCI/FWGmIo6t5jEJYH9imbohlas+DBV/nvXtxmYQEaER8fEGt1ChymPpiTYpdh51Cmh/YiLHIUzWJgypPkYhnUaomR/HCwbaiXipAAAAWUGfMEU0TJ94n01vVRnAY040HFEBoYfXPVz/vno2VX9xlArIIXFkMxlTVQQFmfCo6f94H5UswTk7N1qNbBjEf5ti7RycrMWYZI8EZmojies4I8dHu6teptD5AAAANQGfT3REf4MMlUlILcS/8oa2RUszwulzz8mTh84lHFOM1E/jfwpW/Z9ANgKIGdGUmnAXkT5BAAAALQGfUWpEf36Uk5AyZXqKVAUafPJRrljZB7JJE29//0psy6/KKcUFyP4LnFh/YAAAUAttb292AAAAbG12aGQAAAAAAAAAAAAAAAAAAAPoAALlBAABAAABAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAABPNXRyYWsAAABcdGtoZAAAAAMAAAAAAAAAAAAAAAEAAAAAAALlBAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAEAAAAAAUAAAAIQAAAAAACRlZHRzAAAAHGVsc3QAAAAAAAAAAQAC5QQAAAgAAAEAAAAATq1tZGlhAAAAIG1kaGQAAAAAAAAAAAAAAAAAACgAAB2kAFXEAAAAAAAtaGRscgAAAAAAAAAAdmlkZQAAAAAAAAAAAAAAAFZpZGVvSGFuZGxlcgAAAE5YbWluZgAAABR2bWhkAAAAAQAAAAAAAAAAAAAAJGRpbmYAAAAcZHJlZgAAAAAAAAABAAAADHVybCAAAAABAABOGHN0YmwAAACoc3RzZAAAAAAAAAABAAAAmGF2YzEAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAUACEAEgAAABIAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY//8AAAAyYXZjQwFkAAr/4QAZZ2QACqzZRRP58BEAAAMAAQAAAwAUDxIllgEABmjr48siwAAAABBwYXNwAAAAAQAAAAEAAAAYc3R0cwAAAAAAAAABAAAHaQAABAAAAAAwc3RzcwAAAAAAAAAIAAAAAQAAAPsAAAH1AAAC7wAAA+kAAATjAAAF3QAABtcAAC84Y3R0cwAAAAAAAAXlAAAAAQAACAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQA
AFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAADAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAAEAAAIAAAAAAEAAAwAAAAAAQAABAAAAAAHAAAIAAAAAAEAABAAAAAAAgAABAAAAAAHAAAIAAAAAAEAABAAAAAAAgAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAAGAAAIAAAAAAEAAAwAAAAAAQAABAAAAAANAAAIAAAAAAEAABAAAAAAAgAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAABAAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAMAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAAGAAAIAAAAAAEAAAwAAAAAAQAABAAAAAAFAAAIAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAACAAAIAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAASAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAIAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAQAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAADAAAIAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAAGAAAIAAAAAAEAAAwAAAAAAQAABAAAAAAFAAAIAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAgAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAEAAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADA
AAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAABAAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAMAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAA0AAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAACAAAIAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAwAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAABAAACAAAAAABAAAQAAAAAAIAAAQAAAAAAQAADAAAAAABAAAEAAAAAAIAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAcAAAgAAAAAAQAADAAAAAABAAAEAAAAAAUAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAQAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAgAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAEAAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAMAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAADAAAIAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAABgAACAAAAAABAAAMAAAAAAEAAAQAAAAABQAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAIAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAA
AACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAgAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAARAAAIAAAAAAEAABAAAAAAAgAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAMAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAYAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAQAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAgAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAACAAAIAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAwAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAgAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAADAAAIAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAAGAAAIAAAAAAEAABAAAAAAAgAABAAAAAAEAAAIAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAAB
AAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAIAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAANAAAIAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAIAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAwAACAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAABgAACAAAAAABAAAQAAAAAAIAAAQAAAAABAAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAACAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAIAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAADAAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAACAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAMAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAYAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAQAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAACAAAIAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAwAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAA
UAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAgAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAADAAAIAAAAAAEAABAAAAAAAgAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAABwAACAAAAAABAAAMAAAAAAEAAAQAAAAABQAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAIAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAADAAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAHHN0c2MAAAAAAAAAAQAAAAEAAAdpAAAAAQAAHbhzdHN6AAAAAAAAAAAAAAdpAAAGRwAAADUAAAAhAAAAJAAAAbwAAABeAAAAIgAAAE0AAAA1AAAAMAAAACQAAAAxAAAAogAAABgAAACGAAAAOQAAAC8AAAAiAAAAbQAAAHYAAAA1AAAAcwAAAD8AAABwAAAAigAAADIAAABOAAAAYgAAADQAAABqAAAAIwAAADsAAABoAAAANAAAAFQAAAA5AAAAUwAAAFcAAABkAAAAXQAAAGMAAAA7AAAARQAAAHsAAABtAAAAYQAAAGQAAABuAAAAZAAAAHkAAAApAAAARQAAAEsAAABmAAAAUQAAAFsAAABSAAAAewAAAF4AAACHAAAAKwAAAFcAAACBAAAALwAAAIAAAABQAAAAPwAAADMAAABSAAAATAAAAGUAAABEAAAAPwAAAGAAAABjAAAAPAAAADoAAABfAAAAXgAAAFsAAAB+AAAATwAAADcAAABVAAAAbQAAAEQAAAA0AAAATgAAAF0AAABsAAAAIgAAAEcAAABtAAAAKAAAASIAAABWAAAAPQAAADcAAACpAAAAOQAAADwAAAAvAAAAvAAAACoAAAB8AAAAPwAAACgAAADAAAAAVgAAAEEAAAAtAAAAOgAAACQAAACSAAAAbgAAAFQAAABLAAAAYQAAAEgAAAA+AAAAOQAAAEQAAAA4AAAAaQAAAIAAAABbAAAALQAAACwAAABmAAAARAAAAHcAAAAyAAAAOQAAACgAAABaAAAAXAAAAE4AAABfAAAAUAAAAEcAAABjAAAANgAAADMAAABkAAAAaQAAAFIAAAA2AAAATwAAAEQAAAA1AAAAMwAAAEsAAAAlAAAAkAAAAJkAAABOAAAATgAAADkAAAB9AAAATgAAAJcAAAA+AAAAOAAAADgAAABsAAAAUQAAACsAAACFAAAAYQAAADkAAAA1AAAAawAAAEkAAACIAAAAeAAAAFAAAABAAAAAOAAAAH0AAABJAAAAqQAAAGUAAABLAAAALgAAAMoAAABZAAAARAAAAD0AAACEAAAAQAAAAIMAAABAAAAAQwAAADUAAAB0AAAAUgAAADwAAAAzAAABCAAAADwAAADNAAAAQwAAAJgAAAAkAAAArgAAAEIAAAA1AAAAigAAAGEAAAA3AAAAMgAAAEIAAAByAAAAIgAAAJ8AAAA4AAAAZwAAAGIAAAA9AAAAVwAAAEgAAAAtAAAAJwAAAG4AAAB+AAAAagAAADwAAAAoAAAAVwAAAEsAAABYAAAAOgAAACwAAABsAAAAZgAAAGoAAABCAAAAWAAAAHEAAABmAAAAcAAAAE0AAAA7AAAAbAAAAFUAAABFAAAAQAAAAFoAAABuAAAANQAABO8AAABjAAAAKQAAAKIAAAArAAAARAAAAIYAAABbAAAARQAAAFYAAACyAAAAQgAAAEkAAAA4AAAAdgAAAFUAAAA8AAAAigAAAHcAAABSAAAARQAAAGYAAAAyAAAAfwAAAQYAAABqAAAAQgAAAD0AAADAAAAAOAAAAF4AAAAyAAAAZgAAACEAAAB8AAAASwAAACoAAACaAAAARgAAADUAAAAvAAAATgAAABsAAABqAAAAZwAAAFsAAABPAAAAXQAAAEoAAAA+AAAAOwAAADwAAABAAAAAaAAAAGwAAAA9AAAAOQAAAHIAAABxAAAATwAAAEsAAACgAAAAMgAAACoAAABlAAAAUQAAAEwAAABiAAAAaAAAAFgAAABUAAAAPAAAADEAAABUAAAAbwAAAFEAAAA+AAAAVQ
AAAEUAAAAzAAAANAAAAIsAAAAsAAAAKQAAAKEAAABQAAAATwAAAD8AAACJAAAAUAAAAKsAAABIAAAAOwAAADIAAAB0AAAAWgAAAC0AAAB1AAAAUgAAAD8AAAA1AAAAZwAAAEoAAACBAAAAewAAAFEAAABFAAAAPAAAAHoAAABOAAAArgAAAF0AAABKAAAALQAAALoAAABXAAAARAAAADsAAABrAAAAPAAAAMcAAABCAAAAPwAAADYAAABoAAAAUQAAAD8AAAA0AAABBQAAAEIAAAC0AAAAPgAAAIMAAAAdAAAApQAAAD8AAAAkAAAAiAAAAFgAAABGAAAAMQAAADwAAABQAAAAHAAAAF4AAAA4AAAAWQAAAGQAAAA5AAAAVwAAADwAAAAzAAAAOgAAAKAAAABXAAAAigAAADcAAAAoAAAASAAAAEwAAABAAAAAOgAAACwAAABxAAAAdwAAAF4AAABLAAAAaAAAAGcAAABmAAAAdgAAAEoAAAA+AAAAYAAAAFMAAABFAAAAQwAAAFwAAABWAAAAXAAAAE4AAAArAAAALAAAAHgAAAAxAAAASgAAAIoAAABaAAAARAAAAFYAAADDAAAARgAAAEcAAAA4AAAAegAAAFIAAAA/AAAAjQAAAGoAAABUAAAAQgAAAG0AAAAwAAAAigAAAUYAAABWAAAAPQAAAD0AAACvAAAAMwAAAGAAAAA0AAAAYQAAACEAAAByAAAAQQAAACcAAADKAAAAWwAAADoAAAAvAAAARwAAABsAAACSAAAAagAAAGAAAABeAAAAVwAAAE0AAAA3AAAALAAAAGIAAABEAAAAcAAAAGQAAAAxAAAANQAAAIgAAABcAAAARgAAAEYAAACfAAAAMQAAADIAAABRAAAAUgAAAEsAAABiAAAAaQAAAF8AAATrAAAAUgAAADIAAACIAAAAjQAAAFUAAAAzAAAAVAAAAEUAAAA0AAAANQAAAKwAAAAtAAAAKgAAAJQAAABRAAAASgAAADkAAACGAAAASQAAAJwAAABDAAAAOQAAADcAAACMAAAAYgAAAC8AAAB9AAAAeAAAAEQAAAAyAAAAbAAAAEsAAABmAAAAeAAAAE0AAABDAAAAOQAAAHUAAABEAAAAsgAAAF8AAABKAAAALwAAAMEAAABVAAAARgAAADsAAABzAAAAOgAAAJsAAAA7AAAAPwAAADQAAAB0AAAAUQAAADsAAAA1AAAA8QAAADoAAADCAAAAPwAAAHcAAAA6AAAAoQAAAFEAAAA2AAAAiQAAAGAAAAA3AAAAKwAAAEAAAABpAAAAGwAAAH0AAAA5AAAAYAAAAGsAAAA4AAAAPAAAAEQAAAAxAAAAIQAAAHcAAABeAAAAaAAAAEAAAAAsAAAASAAAAGUAAABOAAAAKwAAACsAAACGAAAATAAAACgAAAApAAAAXgAAAE8AAABiAAAAawAAAFsAAAA7AAAAeQAAAFkAAABKAAAAQAAAAFYAAABeAAAAWgAAAC4AAAA4AAAAoAAAAC0AAABbAAAASQAAAH0AAABXAAAAPgAAAFYAAACvAAAAQgAAAEkAAAA3AAAAfQAAAFYAAAA3AAAAoAAAAE8AAABUAAAAQgAAAHUAAAAnAAAAgAAAAFIAAAAzAAAANwAAAR4AAABTAAAAbwAAAC0AAAA+AAAAJAAAAK0AAABBAAAAKgAAACwAAACfAAAAWAAAAD8AAAAsAAAAXwAAACEAAACOAAAAawAAAGcAAABQAAAAagAAAD0AAAAtAAAARgAAAEAAAABNAAAAawAAAGQAAABOAAAAPwAAAIcAAABOAAAAQgAAAEMAAABdAAAAKwAAAHAAAABjAAAAXAAAAE0AAABtAAAATgAAAE0AAABkAAAAMQAAADAAAABaAAAAggAAAFQAAAA8AAAAVAAAAEYAAAA0AAAANgAAAI8AAAAuAAAALAAAAJoAAABLAAAASQAAADsAAACRAAAARwAAAM0AAABDAAAAOQAAADcAAABpAAAAXQAAACoAAAB7AAAAZwAAADUAAAAuAAAAggAAADAAAACHAAAAhwAAAGAAAABIAAAAQAAAAGcAAAA3AAAAtAAAAGYAAABZAAAALAAAALEAAABVAAAARQAAADsAAABoAAAAPAAAAKgAAAA9AAAAQAAAADoAAABrAAAATwAAADsAAAAwAAAA+gAAAEAAAADRAAAAPQAAAHwAAAAoAAAAoAAAAEcAAAAyAAAAWgAAAD0AAAAvAAAE+QAAAEMAAAA2AAAAYgAAAJ8AAAA2AAAAYAAAAHYAAAA9AAAAQAAAAD0AAAAtAAAAJQAAAIAAAABkAAAAcAAAADMAAAAoAAAASgAAAGAAAABJAAAAMgAAADIAAAB/AAAAaQAAAGYAAABQAAAAXQAAAF0AAABqAAAAiAAAAEsAAAA5AAAAaQAAAFEAAABKAAAAQAAAAGAAAABYAAAAXwAAAEcAAAAoAAAAKgAAAIgAAAAiAAAAOwAAAIIAAABVAAAAPgAAAFsAAADQAAAARwAAAEYAAAA6AAAAhQAAAFEAAAA2AAAAjAAAAEsAAABTAAAARgAAAGsAAAAlAAAAgwAAAOgAAABNAAAARwAAAEkAAAC0AAAAPAAAAG0AAAA1AAAAXwAAACUAAACQAAAAOwAAAD0AAAAlAAAAlAAAADcAAACJAAAAJQAAADMAAAAeAAAAYQAAAGsAAABLAAAAVAAAAEkAAAA6AAAALgAAADoAAABHAAAAcwAAAJgAAABhAAAAMgAAADIAAABgAAAAOgAAAK8AAABNAAAARgAAAC0AAABlAAAAVAAAAFQAAAB2AAAAZQAAAF4AAABWAAAAPAAAADgAAABYAAAAggAAAFIAAAA7AAAAVwAAAEsAAAAzAAAANAAAAEsAAAAnAAAAlwAAAJMAAABWAAAATQAAADoAAACIAAAASwAAAJ0AAAA9AAAARAAAADUAAABkAAAATwAAACwAAACGAAAAZgAAAEsAAAAsAAAAbgAAADQAAABdAAAAdwAAAFYAAABQAAAAPAAAAFoAAABKAAAAqwAAAFoAAABJAAAALQAAALEAAABVAAAAQwAAADoAAABmAAAAPwAAAKoAAABAAAAAQwAAADIAAAB8AAAASwAAAD4AAAAvAAABTQAAAD8AAADGAAAARAAAAFoAAAAdAAAAugAAADYAAAAnAAAAeAAAAF4AAAA6AAAALwAAAGAAAABOAAAAIgAAAHYAAAA0AAAAawAAAGYAAAA0AAAAWgAAAEQAAAAvAAAAJAAAAHkAAABZAAAAYQAAADUAAAAwAAAATwAAAGQAAABEAAAAPgAAAEIAAAByAAAAbwAAAG0AAAA4AAAAZgAAAGwAAABgAAAAXwAAAE0AAAA8AAAAUQAAAFkAAABaAAAAQAAAAFgAAABcAAAAVwAAAC8AAAA1AAAAfQAAAC8AAABnAAAASwAAAHEAAABUAAAAOwAAAFUAAAC2AAAASgAAAEgAAAA4AAAAfwAAAFcAAAA0AAAAlwAAAGkAAABJAAAAPgAAAHkAAAAnAAAAeAAAA
FAAAAAyAAAANwAAAIoAAAB+AAAAOAAABWYAAABIAAAAIAAAAPUAAABhAAAANwAAACQAAADrAAAATwAAADkAAAAtAAAAtQAAACkAAAAfAAAAjAAAAHMAAAAlAAAAcQAAAEMAAAA/AAAAMAAAAEYAAABAAAAAXQAAAJAAAAA7AAAANgAAAHwAAABVAAAASQAAAD8AAAB+AAAAOQAAACoAAABoAAAAVAAAAFYAAABrAAAAUAAAAE0AAABiAAAAOQAAAB4AAABhAAAAhQAAAEoAAAA2AAAAdQAAAEQAAAA4AAAAMwAAAJQAAAA2AAAAKAAAAHgAAABLAAAAOQAAAJoAAABjAAAASwAAADoAAADBAAAAQgAAAC4AAAA5AAAAbwAAACwAAACTAAAAfgAAAEYAAAA3AAAAtQAAAD4AAAAyAAAAQwAAAH8AAABDAAAANgAAAFAAAACTAAAAagAAAFYAAAAzAAAAzwAAAF4AAAA+AAAAOQAAAJcAAABMAAAASgAAAD0AAACSAAAARQAAADIAAABGAAAAcgAAAEcAAAA1AAAARQAAAREAAABWAAAAXgAAACUAAAEAAAAAPQAAACsAAAAgAAAAmwAAAGEAAAA7AAAALQAAAIIAAAA6AAAAHgAAADIAAABxAAAAVwAAAG4AAAA9AAAAdAAAAEIAAAAuAAAAIQAAAFwAAACVAAAAbQAAADEAAAAmAAAASgAAAGwAAABGAAAAOAAAADEAAACTAAAARwAAADoAAAAkAAAAcwAAAFYAAABnAAAAZwAAAE4AAABIAAAAawAAAF8AAABDAAAAPQAAAF4AAABmAAAAXwAAAEcAAAAmAAAAIwAAAJYAAAAuAAAANwAAAI4AAABeAAAAPAAAAFYAAACzAAAAPwAAAEoAAAA3AAAAhAAAAFUAAAAxAAAAqQAAAGgAAABTAAAAPwAAAJMAAAAnAAAAqQAAAFEAAAA8AAAANwAAAQwAAABNAAAAbwAAACcAAABHAAAALwAAALQAAABHAAAALQAAACQAAACoAAAAWQAAADsAAAAzAAAAdgAAAB8AAABxAAAAdAAAAEgAAAArAAAAeQAAAF0AAABPAAAALAAAAFYAAAA+AAAAhAAAAHQAAAA6AAAAOwAAAHQAAABZAAAASQAAAEYAAACZAAAAOAAAACgAAABfAAAAUwAAAFkAAAB+AAAAWwAAAFUAAABpAAAAQgAAACEAAABeAAAAdAAAAFIAAAA0AAAAbAAAAEsAAAA7AAAANAAAAH0AAAAxAAAAKAAAAHoAAABeAAAAMwAAAJ4AAABmAAAASQAAADoAAAChAAAARQAAACcAAABBAAAAUQAAACsAAABSAAAATwAAAC4AAAWcAAAAiQAAAC8AAACaAAAAXQAAAEoAAAAzAAAAYwAAAFMAAABMAAAAUgAAAKMAAAA/AAAALAAAALAAAABVAAAAOQAAADkAAAByAAAARwAAAD4AAAA7AAAArQAAAFMAAAAzAAAANgAAAQkAAABSAAAAKQAAADsAAADTAAAAOwAAAN4AAAA4AAAAHgAAAC0AAAB9AAAAXAAAADsAAAAuAAAAcwAAADUAAAA0AAAAIQAAAIYAAAAyAAAAVwAAAG8AAABFAAAAXgAAAEAAAAAzAAAALAAAAGQAAACCAAAAggAAADAAAAAjAAAASwAAAGsAAAA/AAAAOwAAAL4AAABWAAAAMQAAADUAAABHAAAAYwAAAFgAAABoAAAAYwAAAEsAAABIAAAAXwAAAF4AAAA/AAAAOAAAAGAAAABUAAAAYgAAAFAAAAAqAAAAKwAAAHUAAAAwAAAAOgAAAI0AAABbAAAAOQAAAFYAAAC+AAAAQQAAAEYAAAA2AAAAgQAAAEwAAAA0AAAAlgAAAHAAAABQAAAAPQAAAHgAAAAvAAAAmwAAAF4AAAA9AAAAMQAAAUUAAABmAAAAeAAAAC0AAAB6AAAAMAAAABwAAAAuAAAA4AAAADUAAAAqAAAARgAAAEkAAABIAAAAJgAAAB0AAACVAAAAbgAAAGwAAAAtAAAAVgAAAD4AAABHAAAALQAAAFkAAAA5AAAAYgAAAIEAAAAyAAAAMQAAAH8AAABVAAAATQAAAEYAAAB7AAAARAAAACYAAABbAAAATQAAAFcAAABsAAAAVgAAAEoAAABsAAAAOgAAACYAAACAAAAAfQAAAE4AAAA/AAAAcQAAAEYAAAA6AAAAMwAAAKcAAAAyAAAAKgAAAH0AAABUAAAANAAAAJwAAABpAAAARQAAADsAAAC9AAAAQQAAAC4AAABGAAAAbQAAACgAAAB6AAAAYgAAAEYAAAAyAAAAgAAAADsAAAAxAAAAPgAAAH0AAABAAAAANQAAAE4AAACWAAAAZAAAAFMAAAAxAAAAxQAAAGEAAAA/AAAANAAAAH0AAABJAAAATAAAAEEAAACuAAAAQAAAADMAAABIAAAAbgAAAEUAAAAyAAAARQAAAPsAAABSAAAAXAAAACYAAADkAAAARwAAADAAAAAgAAAAkQAAAGEAAAA6AAAALQAAAIYAAAA7AAAAHQAAADAAAAB2AAAAUgAAAGkAAAA9AAAAYgAAAEAAAAAuAAAAHgAAAGUAAAB/AAAAcQAAAC0AAAAiAAAAWAAAAHUAAABFAAAAOAAAADIAAACAAAAAPgAAADUAAAAqAAAAZAAAAFkAAABfAAAFYwAAAFIAAAA3AAAAlQAAAGEAAABCAAAANwAAAFkAAABaAAAAdAAAAEIAAAAoAAAAJQAAAJEAAAAwAAAANwAAAJEAAABdAAAAPAAAAFUAAAC3AAAARgAAAEgAAAA2AAAAegAAAE0AAAA2AAAAnAAAAG0AAABTAAAAOwAAAIkAAAA7AAAAhwAAAFEAAAA9AAAAOwAAAVMAAABoAAAAfAAAACsAAACCAAAAMgAAABwAAAAyAAAA2AAAAGUAAAAiAAAAOwAAAHQAAABOAAAAKAAAACIAAACYAAAAewAAAEYAAAApAAAAYwAAAFkAAABBAAAALwAAAFAAAABHAAAAcAAAAHoAAAAvAAAAMQAAAHIAAABfAAAASQAAAEcAAACZAAAANgAAAB4AAABiAAAAUQAAAFcAAAB0AAAAXQAAAFUAAABTAAAAPgAAACAAAABhAAAAgAAAAEsAAAA0AAAAZwAAAEIAAAA4AAAAMAAAAIUAAAAuAAAAIgAAAHgAAABWAAAANAAAAJsAAABrAAAARwAAAD4AAADBAAAAPgAAACwAAABGAAAAbQAAACUAAACDAAAAWQAAAEAAAAAyAAAAvQAAAEsAAABLAAAARwAAAHMAAAA7AAAANQAAAEoAAACXAAAAcwAAAFEAAAAxAAAAvQAAAGkAAAA+AAAAOQAAAI4AAABKAAAASQAAAD0AAACTAAAARQAAADUAAABGAAAAbAAAAEYAAAA2AAAARgAAASEAAABZAAAAZAAAACcAAADjAAAAPwAAACwAAAAgAAAAjgAAAF8AAAA6AAAALwAAAE4AAAAfAAAAhwAAADIAAAAyAAAAVQAAAHMAAAA7AAAATgAAADkA
AAAuAAAAJQAAAGMAAACPAAAAdAAAADQAAAAhAAAATQAAAJwAAABLAAAAOQAAADEAAACiAAAARQAAADgAAAAlAAAAcQAAAGcAAABmAAAAgQAAAEoAAAA8AAAAYgAAAGoAAABCAAAAPgAAAGIAAABgAAAAZQAAAEYAAAAqAAAAKQAAAHYAAAA0AAAAPQAAAIgAAABbAAAAOQAAAFUAAADRAAAAPwAAAEQAAAA6AAAAhwAAAE8AAAA1AAAAoQAAAGwAAABSAAAAQwAAAJAAAAAmAAAAuQAAAE8AAAA9AAAAMwAAAOkAAABPAAAAbgAAACwAAACbAAAANwAAACIAAAAyAAAAygAAAD0AAAAiAAAAOgAAAEgAAAA0AAAAKQAAACUAAACDAAAAbAAAAE8AAAAoAAAAYAAAAFEAAABAAAAALQAAAGIAAABEAAAAcAAAAHMAAAA+AAAAOQAAAFQAAABNAAAAPAAABYMAAACNAAAAKwAAAGsAAABqAAAASgAAAFQAAABqAAAAWAAAAGAAAABqAAAAOwAAADcAAABUAAAAcQAAAFUAAAA9AAAAaAAAAEkAAAA6AAAAMwAAAJgAAAA5AAAAIwAAAHwAAABVAAAANAAAAJkAAABlAAAATAAAADwAAAC4AAAAQAAAACoAAABIAAAAawAAACgAAACHAAAAXgAAAD4AAAAxAAAAbwAAADkAAAB7AAAARwAAAD0AAAA0AAAAgwAAAF0AAABOAAAAUwAAAMoAAAA9AAAALAAAANMAAABVAAAAQQAAADoAAABkAAAAUQAAAEQAAAA/AAAApAAAAFIAAAAvAAAAPAAAAREAAABTAAAAKAAAADsAAAD8AAAAOgAAAOQAAAA2AAAAFwAAADQAAACCAAAAXAAAADYAAAAvAAAAdwAAADgAAAA5AAAAJwAAAHYAAAA5AAAAWgAAAIEAAABAAAAATAAAAEQAAAAzAAAAHwAAAIQAAAB0AAAAcAAAADcAAAAlAAAAWgAAAFoAAABOAAAANQAAAC0AAACTAAAAXQAAADsAAAAjAAAAcwAAAFgAAABoAAAAdwAAAEoAAAA6AAAAYQAAAFwAAAA9AAAANgAAAF0AAABbAAAAZgAAAEkAAAApAAAAKQAAAI0AAAAzAAAAPgAAAIsAAABdAAAANQAAAFcAAACxAAAAPQAAAEkAAAA3AAAAeQAAAE4AAAA0AAAAigAAAG8AAABSAAAAPwAAAH4AAAA0AAAAVwAAAF0AAAA5AAAAMQAAABRzdGNvAAAAAAAAAAEAAAAwAAAAYnVkdGEAAABabWV0YQAAAAAAAAAhaGRscgAAAAAAAAAAbWRpcmFwcGwAAAAAAAAAAAAAAAAtaWxzdAAAACWpdG9vAAAAHWRhdGEAAAABAAAAAExhdmY1Ny44My4xMDA=", + "ok": true, + "headers": [ + [ + "content-type", + "video/mp4" + ] + ], + "status": 200.0, + "status_text": "" + } + }, + "base_uri": "https://localhost:8080/", + "height": 501.0 + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 24, + "metadata": { + "tags": [] + }, + "output_type": "execute_result" + } + ], + "source": [ + "play_video('pong_pretrained/0.avi')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "U-SyGcZBCmPn", + "colab_type": "text" + }, + "source": [ + "# Train your policy (model-free training)\n", + "Training model-free on Pong (it takes a few hours):" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "id": "WIQazd5aCocc", + "colab_type": "code", + "outputId": "0a440c18-affc-4b2a-d6e1-c3cda84465bc", + "executionInfo": { + "status": "ok", + "timestamp": 1.553254256733E12, + "user_tz": -60.0, + "elapsed": 19957.0, + "user": { + "displayName": "Piotr Kozakowski", + "photoUrl": "", + "userId": "01014928596539690143" + } + }, + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1516.0 + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", + "For more information, please see:\n", + " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", + " * https://github.com/tensorflow/addons\n", + "If you depend on functionality not listed there, please file an issue.\n", + "\n", + "2019-03-22 11:30:42.987149: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2300000000 Hz\n", + "2019-03-22 11:30:42.987392: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x30323c0 executing computations on platform Host. 
Devices:\n", + "2019-03-22 11:30:42.987491: I tensorflow/compiler/xla/service/service.cc:158] StreamExecutor device (0): , \n", + "2019-03-22 11:30:43.082876: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", + "2019-03-22 11:30:43.083442: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x3032100 executing computations on platform CUDA. Devices:\n", + "2019-03-22 11:30:43.083493: I tensorflow/compiler/xla/service/service.cc:158] StreamExecutor device (0): Tesla K80, Compute Capability 3.7\n", + "2019-03-22 11:30:43.083843: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1433] Found device 0 with properties: \n", + "name: Tesla K80 major: 3 minor: 7 memoryClockRate(GHz): 0.8235\n", + "pciBusID: 0000:00:04.0\n", + "totalMemory: 11.17GiB freeMemory: 11.10GiB\n", + "2019-03-22 11:30:43.083879: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0\n", + "2019-03-22 11:30:43.475526: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:\n", + "2019-03-22 11:30:43.475601: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0 \n", + "2019-03-22 11:30:43.475629: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0: N \n", + "2019-03-22 11:30:43.476026: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:42] Overriding allow_growth setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. Original config value was 0.\n", + "2019-03-22 11:30:43.476131: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 10754 MB memory) -> physical GPU (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7)\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Colocations handled automatically by placer.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/rl/envs/py_func_batch_env.py:122: py_func (from tensorflow.python.ops.script_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "tf.py_func is deprecated in TF V2. Instead, use\n", + " tf.py_function, which takes a python function which manipulates tf eager\n", + " tensors instead of numpy arrays. 
It's easy to convert a tf eager tensor to\n", + " an ndarray (just call tensor.numpy()) but having access to eager tensors\n", + " means `tf.py_function`s can use accelerators such as GPUs as well as\n", + " being differentiable using a gradient tape.\n", + " \n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/utils/t2t_model.py:1358: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/function.py:1007: calling Graph.create_op (from tensorflow.python.framework.ops) with compute_shapes is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Shapes are always computed; don't use the compute_shapes as it has no effect.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/layers/common_layers.py:277: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/models/research/rl.py:598: conv2d (from tensorflow.python.layers.convolutional) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use keras.layers.conv2d instead.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/models/research/rl.py:602: flatten (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use keras.layers.flatten instead.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/models/research/rl.py:603: dropout (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use keras.layers.dropout instead.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/models/research/rl.py:604: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use keras.layers.dense instead.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/layers/common_layers.py:2887: multinomial (from tensorflow.python.ops.random_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.random.categorical instead.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/rl/ppo_learner.py:479: Print (from tensorflow.python.ops.logging_ops) is deprecated and will be removed after 2018-08-20.\n", + "Instructions for updating:\n", + "Use tf.print instead of tf.Print. Note that tf.print returns a no-output operator that directly prints the output. Outside of defuns or eager mode, this operator will not be executed unless it is directly specified in session.run or used as a control dependency for other operators. This is only a concern in graph mode. 
Below is an example of how to ensure tf.print executes in graph mode:\n", + "```python\n", + " sess = tf.Session()\n", + " with sess.as_default():\n", + " tensor = tf.range(10)\n", + " print_op = tf.print(tensor)\n", + " with tf.control_dependencies([print_op]):\n", + " out = tf.add(tensor, tensor)\n", + " sess.run(out)\n", + " ```\n", + "Additionally, to use tf.print in python 2.7, users must make sure to import\n", + "the following:\n", + "\n", + " `from __future__ import print_function`\n", + "\n", + "2019-03-22 11:30:49.903512: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0\n", + "2019-03-22 11:30:49.903591: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:\n", + "2019-03-22 11:30:49.903620: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0 \n", + "2019-03-22 11:30:49.903639: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0: N \n", + "2019-03-22 11:30:49.903898: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 10754 MB memory) -> physical GPU (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7)\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/training/saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use standard file APIs to check for files with this prefix.\n", + "2019-03-22 11:30:51.335217: I tensorflow/stream_executor/dso_loader.cc:152] successfully opened CUDA library libcublas.so.10.0 locally\n", + "mean_score: [0][0][0]\n", + "^C\n" + ] + } + ], + "source": [ + "!python -m tensor2tensor.rl.trainer_model_free \\\n", + " --hparams_set=rlmf_base \\\n", + " --hparams=game=pong \\\n", + " --output_dir=mf_pong" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "FbSjwVAtCvLY", + "colab_type": "text" + }, + "source": [ + "Hyperparameter sets are defined in `tensor2tensor/models/research/rl.py`. You can override them using the hparams flag, e.g.\n", + "\n", + "```\n", + "--hparams=game=kung_fu_master,frame_stack_size=5\n", + "```\n", + "\n", + "As in model-based training, the periodic evaluation runs with timestep limit of 1000. 
To do full evaluation after training, run:" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "id": "jppi4FE5C2nB", + "colab_type": "code", + "outputId": "a10afb7c-edd6-4a93-eee4-e3876977e825", + "executionInfo": { + "status": "ok", + "timestamp": 1.553254412202E12, + "user_tz": -60.0, + "elapsed": 15104.0, + "user": { + "displayName": "Piotr Kozakowski", + "photoUrl": "", + "userId": "01014928596539690143" + } + }, + "colab": { + "base_uri": "https://localhost:8080/", + "height": 4083.0 + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", + "For more information, please see:\n", + " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", + " * https://github.com/tensorflow/addons\n", + "If you depend on functionality not listed there, please file an issue.\n", + "\n", + "INFO:tensorflow:Overriding hparams in rlmf_tiny with game=pong,eval_max_num_noops=0,eval_sampling_temps=[0.5]\n", + "INFO:tensorflow:Evaluating metric mean_reward/eval/sampling_temp_0.5_max_noops_0_unclipped\n", + "2019-03-22 11:33:23.214052: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 2300000000 Hz\n", + "2019-03-22 11:33:23.214294: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x2d07020 executing computations on platform Host. Devices:\n", + "2019-03-22 11:33:23.214335: I tensorflow/compiler/xla/service/service.cc:158] StreamExecutor device (0): , \n", + "2019-03-22 11:33:23.309948: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:998] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n", + "2019-03-22 11:33:23.310546: I tensorflow/compiler/xla/service/service.cc:150] XLA service 0x2d067e0 executing computations on platform CUDA. Devices:\n", + "2019-03-22 11:33:23.310585: I tensorflow/compiler/xla/service/service.cc:158] StreamExecutor device (0): Tesla K80, Compute Capability 3.7\n", + "2019-03-22 11:33:23.310991: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1433] Found device 0 with properties: \n", + "name: Tesla K80 major: 3 minor: 7 memoryClockRate(GHz): 0.8235\n", + "pciBusID: 0000:00:04.0\n", + "totalMemory: 11.17GiB freeMemory: 11.10GiB\n", + "2019-03-22 11:33:23.311027: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0\n", + "2019-03-22 11:33:23.707039: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:\n", + "2019-03-22 11:33:23.707114: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0 \n", + "2019-03-22 11:33:23.707139: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0: N \n", + "2019-03-22 11:33:23.707459: W tensorflow/core/common_runtime/gpu/gpu_bfc_allocator.cc:42] Overriding allow_growth setting because the TF_FORCE_GPU_ALLOW_GROWTH environment variable is set. 
Original config value was 0.\n", + "2019-03-22 11:33:23.707523: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 10754 MB memory) -> physical GPU (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7)\n", + "INFO:tensorflow:Using DummyPolicyProblem for the policy.\n", + "INFO:tensorflow:Setting T2TModel mode to 'train'\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Colocations handled automatically by placer.\n", + "INFO:tensorflow:Using variable initializer: orthogonal\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/utils/t2t_model.py:1358: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "INFO:tensorflow:Transforming feature 'input_action' with symbol_modality_6_64.bottom\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/function.py:1007: calling Graph.create_op (from tensorflow.python.framework.ops) with compute_shapes is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Shapes are always computed; don't use the compute_shapes as it has no effect.\n", + "INFO:tensorflow:Transforming feature 'input_reward' with symbol_modality_3_64.bottom\n", + "INFO:tensorflow:Transforming feature 'inputs' with video_modality.bottom\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/layers/common_video.py:495: py_func (from tensorflow.python.ops.script_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "tf.py_func is deprecated in TF V2. Instead, use\n", + " tf.py_function, which takes a python function which manipulates tf eager\n", + " tensors instead of numpy arrays. 
It's easy to convert a tf eager tensor to\n", + " an ndarray (just call tensor.numpy()) but having access to eager tensors\n", + " means `tf.py_function`s can use accelerators such as GPUs as well as\n", + " being differentiable using a gradient tape.\n", + " \n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/layers/common_layers.py:277: to_float (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.cast instead.\n", + "INFO:tensorflow:Transforming feature 'target_action' with symbol_modality_6_64.targets_bottom\n", + "INFO:tensorflow:Transforming feature 'target_policy' with identity_modality.targets_bottom\n", + "INFO:tensorflow:Transforming feature 'target_reward' with symbol_modality_3_64.targets_bottom\n", + "INFO:tensorflow:Transforming feature 'target_value' with identity_modality.targets_bottom\n", + "INFO:tensorflow:Transforming feature 'targets' with video_modality.targets_bottom\n", + "INFO:tensorflow:Building model body\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/models/research/rl.py:598: conv2d (from tensorflow.python.layers.convolutional) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use keras.layers.conv2d instead.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/models/research/rl.py:602: flatten (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use keras.layers.flatten instead.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/models/research/rl.py:603: dropout (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use keras.layers.dropout instead.\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/models/research/rl.py:604: dense (from tensorflow.python.layers.core) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use keras.layers.dense instead.\n", + "INFO:tensorflow:Transforming body output with identity_modality.top\n", + "INFO:tensorflow:Transforming body output with identity_modality.top\n", + "WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensor2tensor/layers/common_layers.py:2887: multinomial (from tensorflow.python.ops.random_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use tf.random.categorical instead.\n", + "2019-03-22 11:33:24.564271: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1512] Adding visible gpu devices: 0\n", + "2019-03-22 11:33:24.564350: I tensorflow/core/common_runtime/gpu/gpu_device.cc:984] Device interconnect StreamExecutor with strength 1 edge matrix:\n", + "2019-03-22 11:33:24.564376: I tensorflow/core/common_runtime/gpu/gpu_device.cc:990] 0 \n", + "2019-03-22 11:33:24.564410: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1003] 0: N \n", + "2019-03-22 11:33:24.564687: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1115] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 10754 MB memory) -> physical GPU (device: 0, name: Tesla K80, pci bus id: 0000:00:04.0, compute capability: 3.7)\n", + "INFO:tensorflow:Restoring checkpoint mf_pong/model.ckpt-9\n", + "WARNING:tensorflow:From 
/usr/local/lib/python3.6/dist-packages/tensorflow/python/training/saver.py:1266: checkpoint_exists (from tensorflow.python.training.checkpoint_management) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "Use standard file APIs to check for files with this prefix.\n", + "INFO:tensorflow:Restoring parameters from mf_pong/model.ckpt-9\n", + "2019-03-22 11:33:24.985295: I tensorflow/stream_executor/dso_loader.cc:152] successfully opened CUDA library libcublas.so.10.0 locally\n", + "INFO:tensorflow:Step 5, mean_score: 0.000000\n", + "INFO:tensorflow:Step 10, mean_score: 0.000000\n", + "INFO:tensorflow:Step 15, mean_score: 0.000000\n", + "INFO:tensorflow:Step 20, mean_score: 0.000000\n", + "INFO:tensorflow:Step 25, mean_score: 0.000000\n", + "INFO:tensorflow:Step 30, mean_score: 0.000000\n", + "INFO:tensorflow:Step 35, mean_score: 0.000000\n", + "INFO:tensorflow:Step 40, mean_score: 0.000000\n", + "INFO:tensorflow:Step 45, mean_score: 0.000000\n", + "INFO:tensorflow:Step 50, mean_score: 0.000000\n", + "INFO:tensorflow:Step 55, mean_score: 0.000000\n", + "INFO:tensorflow:Step 60, mean_score: 0.000000\n", + "INFO:tensorflow:Step 65, mean_score: -1.000000\n", + "INFO:tensorflow:Step 70, mean_score: -1.000000\n", + "INFO:tensorflow:Step 75, mean_score: -1.000000\n", + "INFO:tensorflow:Step 80, mean_score: -1.000000\n", + "INFO:tensorflow:Step 85, mean_score: -1.000000\n", + "INFO:tensorflow:Step 90, mean_score: -1.000000\n", + "INFO:tensorflow:Step 95, mean_score: -1.000000\n", + "INFO:tensorflow:Step 100, mean_score: -2.000000\n", + "INFO:tensorflow:Step 105, mean_score: -2.000000\n", + "INFO:tensorflow:Step 110, mean_score: -2.000000\n", + "INFO:tensorflow:Step 115, mean_score: -2.000000\n", + "INFO:tensorflow:Step 120, mean_score: -2.000000\n", + "INFO:tensorflow:Step 125, mean_score: -2.000000\n", + "INFO:tensorflow:Step 130, mean_score: -2.000000\n", + "INFO:tensorflow:Step 135, mean_score: -3.000000\n", + "INFO:tensorflow:Step 140, mean_score: -3.000000\n", + "INFO:tensorflow:Step 145, mean_score: -3.000000\n", + "INFO:tensorflow:Step 150, mean_score: -3.000000\n", + "INFO:tensorflow:Step 155, mean_score: -3.000000\n", + "INFO:tensorflow:Step 160, mean_score: -3.000000\n", + "INFO:tensorflow:Step 165, mean_score: -3.000000\n", + "INFO:tensorflow:Step 170, mean_score: -4.000000\n", + "INFO:tensorflow:Step 175, mean_score: -4.000000\n", + "INFO:tensorflow:Step 180, mean_score: -4.000000\n", + "INFO:tensorflow:Step 185, mean_score: -4.000000\n", + "INFO:tensorflow:Step 190, mean_score: -4.000000\n", + "INFO:tensorflow:Step 195, mean_score: -4.000000\n", + "INFO:tensorflow:Step 200, mean_score: -4.000000\n", + "INFO:tensorflow:Step 205, mean_score: -5.000000\n", + "INFO:tensorflow:Step 210, mean_score: -5.000000\n", + "INFO:tensorflow:Step 215, mean_score: -5.000000\n", + "INFO:tensorflow:Step 220, mean_score: -5.000000\n", + "INFO:tensorflow:Step 225, mean_score: -5.000000\n", + "INFO:tensorflow:Step 230, mean_score: -5.000000\n", + "INFO:tensorflow:Step 235, mean_score: -5.000000\n", + "INFO:tensorflow:Step 240, mean_score: -6.000000\n", + "INFO:tensorflow:Step 245, mean_score: -6.000000\n", + "INFO:tensorflow:Step 250, mean_score: -6.000000\n", + "INFO:tensorflow:Step 255, mean_score: -6.000000\n", + "INFO:tensorflow:Step 260, mean_score: -6.000000\n", + "INFO:tensorflow:Step 265, mean_score: -6.000000\n", + "INFO:tensorflow:Step 270, mean_score: -6.000000\n", + "INFO:tensorflow:Step 275, mean_score: -7.000000\n", + "INFO:tensorflow:Step 280, 
mean_score: -7.000000\n", + "INFO:tensorflow:Step 285, mean_score: -7.000000\n", + "INFO:tensorflow:Step 290, mean_score: -7.000000\n", + "INFO:tensorflow:Step 295, mean_score: -7.000000\n", + "INFO:tensorflow:Step 300, mean_score: -7.000000\n", + "INFO:tensorflow:Step 305, mean_score: -7.000000\n", + "INFO:tensorflow:Step 310, mean_score: -8.000000\n", + "INFO:tensorflow:Step 315, mean_score: -8.000000\n", + "INFO:tensorflow:Step 320, mean_score: -8.000000\n", + "INFO:tensorflow:Step 325, mean_score: -8.000000\n", + "INFO:tensorflow:Step 330, mean_score: -8.000000\n", + "INFO:tensorflow:Step 335, mean_score: -8.000000\n", + "INFO:tensorflow:Step 340, mean_score: -8.000000\n", + "INFO:tensorflow:Step 345, mean_score: -9.000000\n", + "INFO:tensorflow:Step 350, mean_score: -9.000000\n", + "INFO:tensorflow:Step 355, mean_score: -9.000000\n", + "INFO:tensorflow:Step 360, mean_score: -9.000000\n", + "INFO:tensorflow:Step 365, mean_score: -9.000000\n", + "INFO:tensorflow:Step 370, mean_score: -9.000000\n", + "INFO:tensorflow:Step 375, mean_score: -9.000000\n", + "INFO:tensorflow:Step 380, mean_score: -10.000000\n", + "INFO:tensorflow:Step 385, mean_score: -10.000000\n", + "INFO:tensorflow:Step 390, mean_score: -10.000000\n", + "INFO:tensorflow:Step 395, mean_score: -10.000000\n", + "INFO:tensorflow:Step 400, mean_score: -10.000000\n", + "INFO:tensorflow:Step 405, mean_score: -10.000000\n", + "INFO:tensorflow:Step 410, mean_score: -10.000000\n", + "INFO:tensorflow:Step 415, mean_score: -11.000000\n", + "INFO:tensorflow:Step 420, mean_score: -11.000000\n", + "INFO:tensorflow:Step 425, mean_score: -11.000000\n", + "INFO:tensorflow:Step 430, mean_score: -11.000000\n", + "INFO:tensorflow:Step 435, mean_score: -11.000000\n", + "INFO:tensorflow:Step 440, mean_score: -11.000000\n", + "INFO:tensorflow:Step 445, mean_score: -11.000000\n", + "INFO:tensorflow:Step 450, mean_score: -12.000000\n", + "INFO:tensorflow:Step 455, mean_score: -12.000000\n", + "INFO:tensorflow:Step 460, mean_score: -12.000000\n", + "INFO:tensorflow:Step 465, mean_score: -12.000000\n", + "INFO:tensorflow:Step 470, mean_score: -12.000000\n", + "INFO:tensorflow:Step 475, mean_score: -12.000000\n", + "INFO:tensorflow:Step 480, mean_score: -12.000000\n", + "INFO:tensorflow:Step 485, mean_score: -13.000000\n", + "INFO:tensorflow:Step 490, mean_score: -13.000000\n", + "INFO:tensorflow:Step 495, mean_score: -13.000000\n", + "INFO:tensorflow:Step 500, mean_score: -13.000000\n", + "INFO:tensorflow:Step 505, mean_score: -13.000000\n", + "INFO:tensorflow:Step 510, mean_score: -13.000000\n", + "INFO:tensorflow:Step 515, mean_score: -13.000000\n", + "INFO:tensorflow:Step 520, mean_score: -14.000000\n", + "INFO:tensorflow:Step 525, mean_score: -14.000000\n", + "INFO:tensorflow:Step 530, mean_score: -14.000000\n", + "INFO:tensorflow:Step 535, mean_score: -14.000000\n", + "INFO:tensorflow:Step 540, mean_score: -14.000000\n", + "INFO:tensorflow:Step 545, mean_score: -14.000000\n", + "INFO:tensorflow:Step 550, mean_score: -14.000000\n", + "INFO:tensorflow:Step 555, mean_score: -15.000000\n", + "INFO:tensorflow:Step 560, mean_score: -15.000000\n", + "INFO:tensorflow:Step 565, mean_score: -15.000000\n", + "INFO:tensorflow:Step 570, mean_score: -15.000000\n", + "INFO:tensorflow:Step 575, mean_score: -15.000000\n", + "INFO:tensorflow:Step 580, mean_score: -15.000000\n", + "INFO:tensorflow:Step 585, mean_score: -15.000000\n", + "INFO:tensorflow:Step 590, mean_score: -16.000000\n", + "INFO:tensorflow:Step 595, mean_score: -16.000000\n", + 
"INFO:tensorflow:Step 600, mean_score: -16.000000\n", + "INFO:tensorflow:Step 605, mean_score: -16.000000\n", + "INFO:tensorflow:Step 610, mean_score: -16.000000\n", + "INFO:tensorflow:Step 615, mean_score: -16.000000\n", + "INFO:tensorflow:Step 620, mean_score: -16.000000\n", + "INFO:tensorflow:Step 625, mean_score: -17.000000\n", + "INFO:tensorflow:Step 630, mean_score: -17.000000\n", + "INFO:tensorflow:Step 635, mean_score: -17.000000\n", + "INFO:tensorflow:Step 640, mean_score: -17.000000\n", + "INFO:tensorflow:Step 645, mean_score: -17.000000\n", + "INFO:tensorflow:Step 650, mean_score: -17.000000\n", + "INFO:tensorflow:Step 655, mean_score: -17.000000\n", + "INFO:tensorflow:Step 660, mean_score: -18.000000\n", + "INFO:tensorflow:Step 665, mean_score: -18.000000\n", + "INFO:tensorflow:Step 670, mean_score: -18.000000\n", + "INFO:tensorflow:Step 675, mean_score: -18.000000\n", + "INFO:tensorflow:Step 680, mean_score: -18.000000\n", + "INFO:tensorflow:Step 685, mean_score: -18.000000\n", + "INFO:tensorflow:Step 690, mean_score: -18.000000\n", + "INFO:tensorflow:Step 695, mean_score: -19.000000\n", + "INFO:tensorflow:Step 700, mean_score: -19.000000\n", + "INFO:tensorflow:Step 705, mean_score: -19.000000\n", + "INFO:tensorflow:Step 710, mean_score: -19.000000\n", + "INFO:tensorflow:Step 715, mean_score: -19.000000\n", + "INFO:tensorflow:Step 720, mean_score: -19.000000\n", + "INFO:tensorflow:Step 725, mean_score: -19.000000\n", + "INFO:tensorflow:Step 730, mean_score: -20.000000\n", + "INFO:tensorflow:Step 735, mean_score: -20.000000\n", + "INFO:tensorflow:Step 740, mean_score: -20.000000\n", + "INFO:tensorflow:Step 745, mean_score: -20.000000\n", + "INFO:tensorflow:Step 750, mean_score: -20.000000\n", + "INFO:tensorflow:Step 755, mean_score: -20.000000\n", + "INFO:tensorflow:Step 760, mean_score: -20.000000\n" + ] + } + ], + "source": [ + "!python -m tensor2tensor.rl.evaluator \\\n", + " --loop_hparams_set=rlmf_tiny \\\n", + " --hparams=game=pong \\\n", + " --policy_dir=mf_pong \\\n", + " --debug_video_path=mf_pong \\\n", + " --num_debug_videos=4 \\\n", + " --eval_metrics_dir=mf_pong/full_eval_metrics" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "id": "mDoR0C0ZKCOn", + "colab_type": "code", + "outputId": "aba41a4d-2957-4ea0-d511-eae7ea4e238e", + "executionInfo": { + "status": "ok", + "timestamp": 1.553254513355E12, + "user_tz": -60.0, + "elapsed": 3908.0, + "user": { + "displayName": "Piotr Kozakowski", + "photoUrl": "", + "userId": "01014928596539690143" + } + }, + "colab": { + "resources": { + "http://localhost:8080/nbextensions/vid.mp4": { + "data": 
"AAAAIGZ0eXBpc29tAAACAGlzb21pc28yYXZjMW1wNDEAAAAIZnJlZQAA6u9tZGF0AAACrgYF//+q3EXpvebZSLeWLNgg2SPu73gyNjQgLSBjb3JlIDE1MiByMjg1NCBlOWE1OTAzIC0gSC4yNjQvTVBFRy00IEFWQyBjb2RlYyAtIENvcHlsZWZ0IDIwMDMtMjAxNyAtIGh0dHA6Ly93d3cudmlkZW9sYW4ub3JnL3gyNjQuaHRtbCAtIG9wdGlvbnM6IGNhYmFjPTEgcmVmPTMgZGVibG9jaz0xOjA6MCBhbmFseXNlPTB4MzoweDExMyBtZT1oZXggc3VibWU9NyBwc3k9MSBwc3lfcmQ9MS4wMDowLjAwIG1peGVkX3JlZj0xIG1lX3JhbmdlPTE2IGNocm9tYV9tZT0xIHRyZWxsaXM9MSA4eDhkY3Q9MSBjcW09MCBkZWFkem9uZT0yMSwxMSBmYXN0X3Bza2lwPTEgY2hyb21hX3FwX29mZnNldD0tMiB0aHJlYWRzPTMgbG9va2FoZWFkX3RocmVhZHM9MSBzbGljZWRfdGhyZWFkcz0wIG5yPTAgZGVjaW1hdGU9MSBpbnRlcmxhY2VkPTAgYmx1cmF5X2NvbXBhdD0wIGNvbnN0cmFpbmVkX2ludHJhPTAgYmZyYW1lcz0zIGJfcHlyYW1pZD0yIGJfYWRhcHQ9MSBiX2JpYXM9MCBkaXJlY3Q9MSB3ZWlnaHRiPTEgb3Blbl9nb3A9MCB3ZWlnaHRwPTIga2V5aW50PTI1MCBrZXlpbnRfbWluPTEwIHNjZW5lY3V0PTQwIGludHJhX3JlZnJlc2g9MCByY19sb29rYWhlYWQ9NDAgcmM9Y3JmIG1idHJlZT0xIGNyZj0yMy4wIHFjb21wPTAuNjAgcXBtaW49MCBxcG1heD02OSBxcHN0ZXA9NCBpcF9yYXRpbz0xLjQwIGFxPTE6MS4wMACAAAADkWWIhABvrNdXNvEPmO7lwVl73sPl0EDBzzvrz1O9Sgfa49FGnVhGNj4PrUzIEjAsiR14q5boH034au6fMfeHzW8BQIdLu5D8GWFcvhnUQvMLIDm/5fDlJWNI1pLZ0KekyKgRZvEg10IZZePvLcj64kGJzCMbJi6QZbX4WMzyM/ZwsXoWWPBmmlKBzFixHWdkptjcAYhpgDpXSILlIffpBFr5Fmv8Xdrl5eZtB/U18q6RE0tX2BrhekKyOZ5lJWnXZWIEICkLYIda8x0l/aAug9zkJAN2UJ5v8AfQgXgS7iPy41I11UQneH59QQ6r2Fy+bXVz7hKXvFUUQUW2NfwyAHSubAtKRV8FgrIBnKXwxAjc8zc/00LsdZVdehIaL1eI9qZtyap5GmVpF7ZJdkQbo7j2k9/o8Ztr6lwZrODqoujHSJK6V9bK0u9Et564zU+wWgftergJVAEl4m/D3N6/lD6Tni/a6bLzIcdcVjnfWLPAUBwAoj19NpxhAbe1VyiybbzF11k65OpExrnTpeyfXnWi2YKXmv6NMcvP6YS8WOK4pM7nWhyKetjJvO69p10oeh7Pv3PuQBq3kARIBKQ+MPYmymnbhgmxG/6w3hJ2A2Urz2k1DctVq7TiUCWnAReHSDqSpYcdQwxCm/lIpIwtl/dffgss5v+hhFs6NSNe3zqLc+wa/P6fKKBzHBPA6mZtXbiaJH0Y+5hMHtf92lFc+6I4pZ1q2XpI5Nr1V7em9lfehnp6KwZCFUTCrCle3ZgVn3/WlL0hiX3HqF/qGx1rSRBE7lqG2nGEQXx7BFJGNLF0vFVi0j2agV+lVqGOVlIxAjK3E9wGWVM0V7xAFGXQtxAYJ6qA6zMuM1AzlTqoEWcy+zkYm6Z2/Vn8RMHtpHaCW7GF05Wujcn0D05dR11MQem50GDiKxlzGighyGKWmfeex/qNBXelV1apol1nwDCUiSbC9fPUu70YI94kit+OnCdHe588u+9o5tqmvG+4ju2D1U0YtGzBJLwNtKIxTj+ycim3c7lWMz9gyNpdcRw85nQOO+UebN2j6KDuTy4XNxidtzFIcvo67EYGfl+q3WaPfQzFQLuOqvybvDRViyMxNwUidf7UCNcjzUMa0RFtd4HPTD4pR9pL0oOHG0XMOwvMlfvI/0tUl0nH8gKjxa10D+0pCAG9Sq76K3xNRb2QQ4PhDp7u3P+U7CY7JpR9qHasfUEAAAAxQZojbEb/+8M7YXuyASh7Kplen5y7UyO14JIrbok4XTbRQe3ORruR41lvOoDou8ClgAAAAB1BnkF4m/+AoGMvgzynj7iP1aLLDUqgN6Jo/suYQQAAACABnmJqRH+I4ID6PupU5REY3sf/aa003+ohQ9ie2tSAgAAAAa9BmmZJqEFomUwI3/3q7CUD4uvUkflAlpSidF3UDZUIJZsuBftR/Ot+q3GhwTn9egJDu/Q0u302gbAsB/IV/AFE9llDfK6lUW9694v3+SwMl6mllWP/WwEuWwr3bvYMZvmSan6TfqkNjfFQ2gIUyqpE9/WshNO04YsB0gSlJqYcVRMCqNuW9LOcW33er/NiW98OL0en+UGHVMigjfMRwtvQwEBx2TCEvqNHiHUZK47Ql9EBEwOC+9RX+RfTR+dz/gd9jggR1VC/W7S6VbwMD6OLZQ6pAbUGRTytAu/D3nUIHJpw2KDyU/GpGdKbj2WQLEoSWl/G1erWEOohpWvfDxHkGcHJyES1MZ1DvjVLlpUz1LfdDNETW9u5oytpQuaaon1mmF0VFggCZuK84NA/jjnYLjbiGa2mQVNuCz0xJcY1SlKU0HHZWh4xyJUcesClYPnBpMFzn1F47bfO2af9hq6jk1pG03lTPGyovR1gotc0zYA9fMfxEfYVjeBaSNZ7jIelcTidTkL9rr66l4XIEEMH3QknOAml0XZzRLxPdNkS67lJssdtIGefZbu54FuzpwAAADRBnoRFESzfiNYKja/0xDhfTAiTpHLhMdYxhLBGYN2yGAaE7v0DPPtKPN4N0Bh9SMs1vQXBAAAAIgGepWpEf4EHlYJarHuonGoEjcLs+tCeMaGFsuILlYL+uWEAAABrQZqqSahBbJlMCN/oMK3pi6Gt530QeupQ59ezKe10FpZIms3to+JEJWnpAJhBGOAA9pffTpSVzrDTkYei+dF/0XoAHErlseRXMtx8drWM8lgtra4QnL5SnQQH5ZVFNtwaHUoOeGKot2pc/HkAAAA2QZ7IRRUsn36T35XM0R9+0te5g6nnf972YKyQEsNo7mvZhGpibk4k6GYi5Xw1pjTrITOOCgNgAAAALwGe53REf4pxDGIcj/61i91Rdmi2jQszTK6GJmg9LxPIKqXtoMSsvmXH+U1e8ZQgAAAAJAGe6WpEf4Z1uDH/dTIseAsGQv4aVaLug7WXzVJtvrBkNRseHwAAADNBmutJqEFsmUwI3/QsGvLB67IEGcjaf9jQmmp2xwadCZqIc+QsmxjXnMZRrdlnOWXkniAAAAC0QZsNSeEKUmUwUVLG//1hfa7H0q8GiMhKKARrmw6Z4xdeyr7ujJme+W
FMGuspP/+ISBWO3g4rVjHKMZczL9N457VGygcsdj7HL9XSQg7I9GyDcuezylwimiV/wFo/tnsP85jKo1N9u1U/c32ZRoJ6MLzOYkUW9bgpdQwc/vwurbPO6DqdSp7R4DyF3mA4Y/IOgh69Sz6dcgXGl/wwDmRo72yeNgYcXZp2lUk8XqHfEbj36uBFAAAAHwGfLGpEf4jgOwq2BPvs/N5sWutoQxXrlD0LcAJHkzEAAABWQZsxSeEOiZTAjf/7wc1PkbwRgr+fjnyNR8pfsXICQZIWSD29LO9OdoFeaCGC7kNQcI6QVUvVoNv//1awF0I8OAUiwMZgFzApKdTCW820SqjcXXuKH/0AAAAtQZ9PRRU8n31uIe20/lZp7IOycKeMuVxD421aM+Nt0XYWYsoR6FTF8OFoiDbtAAAAIQGfbnREf4ew+VpnMnXvUfVz1z4//JFTxQ6bZXAMaAhfWAAAABgBn3BqRH+E1CtCDqsru18CnQc9Zgwu5C0AAABbQZtySahBaJlMCN/8A8MxwHsdgYxkG6fD5jfkGVPerg1XibHNvThF43JXYYlxb2Qoy9V2WnWwkQ/OqnQwi/YBBYBEZaAn0hrUVg4yiuTQbTao5OXYZIcHYrYRQQAAAFRBm5RJ4QpSZTBREsb/+8P3/LU5AR1llSwpTA2p+sot/Ap2Yo/gxiVR9YF8xLoAHZjM8457AqBIx7NgPEKnQG3AmhGkbhUoZZDnZk8SW2DttxUzXAwAAAAmAZ+zakR/h60AWug79Ot7YB0Hk7/PuwF00L7Iy+IMt5Tq30K+2UAAAABTQZu1SeEOiZTAjf/9dtYVwOWMQEFcGvjV4Syzg11kPr1s9125wYmt9rvuf3mx7RYJ6jF6ErfUzdz0/3v9ae/ANOnW4wjLgPCIol0QSIsPYy4da+kAAAAwQZvWSeEPJlMCN//9fMnM4gfdWuAOqRQ2CfzqA5eQQ8IrF1fJShsXtjQZPj/+EEk+AAAAWUGb90nhDyZTAjf/++FR2EKSC1to8pc6YSO3kyM61vFVTIqwBRLCgvhTW2uELbv4P1t6j3j6R5JEnH93R+6ccuNOfX9oCM+dXHFLeM1xbdq83XfyShy+a/CBAAAAXUGaGUnhDyZTBRE8b/vgOOB3OQFcxg/ZQD7brmyDZG+0lanuFt5eWNnI+DlTWOnKhAAySzB0u8CurDZF8oIWX6RlKX6AlFm3XANEhpl6OfqvPFvE4O+fARgP5ChhkQAAACgBnjhqRH+HsRNxvly6e2k2NMUXC/vU7UPeiHPyDV2qBx4cRcnapKmEAAAAOkGaOknhDyZTAjf/++A44Ilb6dcZ4EfRuTLTuDvo3J2im59tzroCzqfsO8SsZ8WggmVkE0/TN30peYEAAABDQZpcSeEPJlMFETxv/AOiKyBDYvuQQEynViyLAUY1Ib85NXoM2W5LW9IU2aIRq+bew64gtHymArgzox7IataLMEs58QAAACsBnntqRH+HsR7W7gx/uLfAQSDqjarp3zTZOCfEh+6Eb54T4PDy1sS+MHUHAAAAREGaf0nhDyZTAjf/++s+OA7zprt9FalqM30xdIjlcH2DZkkFtbEKP2KnwPRDHCudTxGL9Qj/prFtXdQZd/8kyvQgWVbBAAAAGkGenUURPN99TaObeIE6LTTxi495G3R7GPmQAAAAMgGevmpEf4k5No92pOx4d3tVe/jt/92QHqWDJ1gLa+IuwyWHPUwqtoP8fV9Ym9imBVegAAAAVEGaoUmoQWiZTBTxv/vhUdghqkJVKiFoDxnkwJdI5EfJ1rXs+3IWgFyiLClNqrjfdF5LRESoN+/5vYqeA8+ZDoM0LHM+p62tsd2sj1HySk6QgTd2RQAAAB4BnsBqRH+JNh+VxVwUJ+Y3lV+ryJZRr2sjHdcr6hMAAABLQZrDSeEKUmUwUsb/++A44DvMfbctqjDdsKqNf0xOA39UYyLAzYIZyyzAIzc51s3jIS5ItuDvEtTcwkuqq/36WLfiOMPxF1mVVotBAAAANwGe4mpEf4EPa4byEQdMkh18OfNdNbI0980SSbNKs1ZKN+RKPSfGS6ySQe8jTiCjRi2a1L2bI0AAAABTQZrkSeEOiZTAjf/74VHYIlYiKMdyQZmvfSW5HcZkgM1RFiY3gEH71z/3fVBzC8liqsmHHacePA4zlFgRGs62G32VHnZQ/DXMUNXZSe6AM+37MoEAAABCQZsFSeEPJlMCN//7w+5d2niwPbxnXcPGjmeLv2gdbVqjCSUoEJNhCpLEDmpftWkeZV+eoSYNoxZWdF8ucDxxQt1tAAAAUUGbJknhDyZTAjf/+8OhtVOQEg0JinGp/3aOKBnRKunbXz9YVqPZNGUQc6qywYwzQClzX3GGGmt+n+j6fKijedTiog/HS3groeaf1ZLhN7MSZwAAAEFBm0dJ4Q8mUwI3//vEgdmkZAQXhaU5Ka/mwO9DUeQ8nbhvyYsA1HsjVV69Yl5uJ7Z+5cENl5oKK+xgTk4zv6uteQAAAD1Bm2lJ4Q8mUwURPG/74DjgQlevXVZC8R8NNBU5lXz/5evdLx/xJ2R3nKEWhnQTJ2MhCGZTRB2wGJ2TPpnAAAAAMQGfiGpEf4exElW+Y0oRl/FZgSZ/zHaDUN2en4eI/45oExvCn0eIC8T7xwpFCCD22pAAAAA0QZuKSeEPJlMCN//74DjgJBslHpy/fK99xLLgEoSJoPeLiwa05OsKQgsTdDRtpBJScyBfZQAAAFtBm6tJ4Q8mUwI3//vBcoj4CbBCQT2RExz0aNVXfJZgEMRjG13pvbjo3lZ6TLZP87/4osMgfYBX228XW1pfmU8k2l4pXWkF2ONrCPtLhUtLA/Hya2TerrONeACzAAAAUEGbzEnhDyZTAjf/++A9RB901G+fHcU/WnLCfcsk74GCTFIVwJ+BLzEBzmOXsONP0vNgTAP2KwhTXJSx3MerL/qd/oYBMUklPzb3D+iSqTSoAAAAQEGb7UnhDyZTAjf/++A9RCB74KMJGvAkjJTldXY5HMHa5jfjN8NHs9hBd2fEIGPYdqirJKwimKLpiuNnGPY71cEAAABDQZoOSeEPJlMCN//74DjgQ+tokgudjQu5NRNWobxjhH6Kh3ypTnRXjwJga7RHFZxm4LSd8nQrtx1GfqIwsNleIlTJwQAAAF1Bmi9J4Q8mUwI3//vBnCJ2LA71GF25ddxrRcaDbZ6JrFKdXAbBcwenVA97XR386r26bMwGidvp2PmbPeLJeVTrLs40YO/0dkENxamY3tNtYwvFfXlRKOo9yyo8A4EAAABRQZpQSeEPJlMCN//7w3v8sjICMILXR5ECQLilrI3AMHhNoAl8fjML6XnsQAd3GcNYf/0Vg/WkUZMoFlfh0iQSiC4/lahyE8PO4Mqsxu0If+cpAAAASkGac0nhDyZTAjf//APDMcB3mPxGGZH0fTP3ycONiP81zu+xkY8BF29QMHZMJZp5xyqLnicMHS675xoi5qplhfLQsVW1n9dVmaldAAAAHkGekUURPN99TaObeIE82Q23ingUfeUAohKj+
s7K1QAAADgBnrJqRH+HsRJVgDz377tnoNimjhTZvPY9yHf2fmv0wsOMF+wp88Zp9A47UCY9LcsoRhIHvx+VqgAAAD9BmrRJqEFomUwI3/vgHICtV4/N+3TLBHjt+Fcn7DWjz8soUTNhnfkqkcMHOJu59OaFxhuVf+ti1/1ybHdmF1AAAABfQZrVSeEKUmUwI3/7wf8MHhAR1o8LZVW6pfXYDg2dpGT5Rvg+Cl2lQj+IQxqhhQEfFhAQwlKUGoZJ0yY1+WX5L6BuA3zmdozX8D1TAHaS+xXRLIKO1jwkjAd/JNceRMEAAABNQZr2SeEOiZTAjf/8ESF/UQffCua7E+r+NL8HxgRR3d+rdy5oOmnMWV4zw6CD9Z7mbT2yUeu59zFDbP46SRq9klvWZ1ZbOSW+2vZf9fQAAABHQZsXSeEPJlMCN//74EfgEPLRNeKMrWaAkB9b5moj+s74rojfFold/VwZwZnjRlAT49c0fDe4ZV3UBIGBK9CK3ytLN0Y42oEAAABDQZs4SeEPJlMCN//74DjgOFPapEipwNQ5JLf/iFkT7vTC27zO0O1Hhr7Be9x5uGF5a9SD1TJTW/IfLAT7Nh7DJMuMeQAAAGpBm1lJ4Q8mUwI3//vBm7tPFge4qVrTYzB1Rf4j8SwaiUd3Vn91V8IbddfFnSdsq/LMm7+bcOxpHHp/+sF3L+oZtRUH+68r3BQroy9k+62TGFClrSWs36FP/9yav//StMk6n2lq+8O7DNsoAAAATEGbeknhDyZTAjf/+8Ohm1TlEJHbNU/mWRKFgYzLN2yQ8jXAB9wCzOsfbVP2lU1EY8QOgEzKqXo0R7iSjvfThP3Ejb6DhTio3axVsWsAAABXQZudSeEPJlMCN//77lcOA7zH4jDF8og00F+0xFfHR4VDZOJrgOjozJdR/bpg8saNaDwaxnfv4X/eRhAQ/CW8F20/hfAqOk7ganOxPkKbRibKABEgisd4AAAAIkGfu0URPN99VLWJAgSGMMD0PyKXcxUT3cFaf6NQH7WvfiEAAABEAZ/cakR/h7ESVb5jSubSAV8FnD8x4gGanIIpXwVIvISOGwpzao9rHxOAHmz8XBzh60OGZXgi//qWtWwRLulsxilCnuEAAABsQZvfSahBaJlMFPG//Blf8ICQaEccs93WPEeRPsjPuDf4gDC1lzaA9vG8/MDesnCCrO+qwrWg0Z041Lq6MnjtnVfjvIt0qF1hF5tlhQF/0d8e9aqpY2UcQgjUy9Hd3+CTWS0I7CWSzCsCaZnAAAAAFwGf/mpEf4jgOwqq40N+HXrqPTGH7PlEAAAAU0Gb4knhClJlMCN/++A9RBAtNsNSCFkg711//JWWvGFsLrGyK1H7zXz+lTaC4eBFVxUDg/Op1j5vlFjWLyVGxDTLWQWt7v06Slq9bNE1cmz/5OS1AAAAMEGeAEU0TN99Tam3C0rJYqxDWOyBOLJHIckz5MqYD+DjRZpqPdIIw0nQ9LZT+KG3YAAAACABniFqRH+BB5WG0Yk+JGFIqTc/MKybQhljSSoiM7L+dQAAARtBmiZJqEFomUwI39P1UQmO/CHKa5fG9B98DyBd2q//xdlw3GXkmq/tlCxKXGJhBrqImPX+Tw62HdRUoCKQH1/ixC6DIupugrK3/kza+ntSNKpOnTIb2YWwrbEfjsAUf/8Csikkf00Yy4JNpq0NT3r3G73dUSTcvN2hSQC1jpvA4PpqiSqoyt1rg4K5nEb2X3Ta2J5m01MrHsJWKap7ArXaL/fag2Yxz1DnrBGNF0N64688FzTsfy3aMFyBNTwHxDcOM5VxEYpgPbQx0UoRaoEjPSOfPfZWW0CeOiVTmPWdE40N3Fq2/t5cedvB6NP7Q1mjnmICrga1HKXTUnIgZVEERRT/lQNzJZZSENnwR7l7OYZdbPaS+0iJ0PvAAAAANkGeREURLJ/NXH5jrirbiTEhY9Y/tlMwwq1venEm0Vym9wIwyi7kBVs/cNPG6n7cE56ATPcwWQAAAHcBnmN0RH/N1YmW42fk1rr5dWrvg0hpI+7/lxaqBQ9u9qPMWMe//elYZNo+NLjg8cYBUA/d4tfmi8Qz2UXcEo8Lvkc59K0YQAe1JOntXnqOjM4McR1Gx6UrMCz14vMSat1IO5CZ6UXFyvmTyeshwVAiRewmoKI5gQAAACABnmVqRH+HrQBVU2y7OiaX26aU/4ah2bLxWm+Y90YzwQAAADxBmmdJqEFsmUwI3/wDwzHA7nIUz9EUj2ygWhqxgWMh9LPrZxgjmvIJeUBCqf7Hl5oCspr9BQrYw3VwQWEAAACvQZqJSeEKUmUwUVLG/8dHUh70tVYbmJAP5Yav75x8KgRRrVZqGBAEqyIrK6LRnrpZlSZkXNqRr0UFWwbqNCFOpYqLeS1RQ2PyEkC+dBVplJUfZiuhOQAvvreTkE7X7a6IRT6oN2yIUD3gEVfN9FOVcWA6Z8kXNa18/OFVTCJp5CwGEmxWr1WMNJva0wrVNHOS92/kbWx21BB21E9sgNNFnB189Z9RulpZSpgE8SAYOAAAACABnqhqRH/N9J+2J1XcPpDCijwctlpIFM7gAfQRhV49uAAAAHJBmq1J4Q6JlMCN//kgYHc058NIdrA4tZt28lo3D3meW1zCAqwidHRRteHMGFHB/D9M0MCX00dwKKmnXGyIjSiIo/6dF3Oz1KEMYwbbPPtUPovm6LbF9t2EyWlXZ2AhyytqaBHYUktsnfMGjBar9/t8GUEAAAA3QZ7LRRU8n36t1fJ1HlQISEIEfpaQSyOL40QRE9Vs9zvdbKrtZIXDJfPzuaeN4LIza42Lez2eIAAAACcBnup0RH+HsPlbmN8Ea52A1bNIOYqtlLjflwD1gV/uybFUQRKpUOAAAAAdAZ7sakR/hNQrQyxiUawcG/LrF0fwKrQNveY8OGEAAABTQZrvSahBaJlMFPG//APDOoh1u+/wA6/FZcjU/jvz0FUQ/BieNWgawp32ZtUYldJMEVjFO412vb48n9u19LQOw5J+xi/1yuC/hu68Dsm1661iVvEAAAApAZ8OakR/h7DkMUQbpFoFWKV9q8E5k5LkELQbh4pZCjhTQyjdPLum4gMAAABAQZsRSeEKUmUwUsb/++A44EJXr11WJCw7ECvX60Zrh07UIdiv7aDfa0prRgD6jv/0Wc4+K9FDJsKP/KqGWZJW8AAAADABnzBqRH+JfH18ZqS57WtbAe/IO8nxI2HXbfpO2Nkfx7T6UrglP5FtwO/Jq+dpJWAAAABuQZszSeEOiZTBRMb/+8dnvoPCAeCb2/rWCrRRMAZhYjQsyCGOaWbOiXjQ45Hd4KBD69SXBo/G1jQPyXicID3GsLjYV+7vn2Od6v4G+WQqMHMoRfr8vCzfPrEsm0u23BOlwjpYpIOmf//S7gManfcAAAAYAZ9SakR/iOA7CrDrXGjeuosMdCPQVofsAAAAY0GbVUnhDyZTBTxv++A44DvT323Jx0SqVNJ82IBpyyo7DXCG+RxgT1NMsMLKQax8K4yQWP/71kziHHgGscwzo5L2jLizRX09
ig4TiJCfNOrU3b1tM7YaGwugP+oXNvCezwnqQAAAAC0Bn3RqRH+IvbTI3y+IN9Ohxp8NHgN3aoRoF0CyJ7Kizh4lPfrWVzPrEIihMfEAAAA1QZt2SeEPJlMCN//74DjgQ+tokgude4jclMN4O+jlwgqt2WyQlDcvOp1uGtyVo8FNYf8DxygAAABYQZuXSeEPJlMCN//7w+5d2niwPcY4fRW/HK6IFjrt0h2pzlDEeWsYZI51tgKX93CPYsiPqPq3X5B1aWcUk55oKJGxvjXmYgG2872vqJP978jdfl4d7IIZYQAAAGpBm7pJ4Q8mUwI3//vD7kjX2UR2WhZ7zwuAd6E2YZpVsaXqf/9ms8pdq2AcVeqHU+2duGYw6XN6gJJPmwjaRjOqn/963zMAo7BbLbQ0xWrehzgNm86QkFh5F4CJPtfJrXi2qfvexEjl9SQZAAAAPEGf2EURPN99VLXrSyKjUixfkxdL90ti/7QhhH5aJuIgfKR9AwA9eKagET/7LDiGl5JEDu2HE71HainiPAAAACQBn/lqRH+HrQBVU2zahqo4JDrA0Wsth2bQ6i1pu5g6vv10b/kAAABIQZv7SahBaJlMCN/7/bCFg4EB8mDknVh3Li4nF+3GJT89JkDhouv2updeuOWXeNe5p0zRxNBaBS36FPmUKGpjSDMCDJPbtDwsAAAAREGaHUnhClJlMFESxv/74VHYRK6xf9nuJ/mW8vBkEYktQx5ng60k0zpkEVATXt/+Il4+t1Y/gWE4ff2KkBugUa5RlCmVAAAAGQGePGpEf4k2H5WycQ87dp8VYGfF0/IU9Z0AAABiQZohSeEOiZTAjf/7/gOFgQm+EEcUp0ZlHiPiNyhbnVoqZ+8senN2fT/GHn0i0DJAHVHDeAAPPdb6+08ywf9ggCftEN0KTRcMP6zvyMME1GrdoLnzWjL9J+Li8Z7zUjdev4AAAABAQZ5fRRU8n3sgptQ/K33IpJWP93HYo6zTTQPcVWgvT/tt2dtXG8r9XMctnwTJYMXBZrnQX8wcobNb8TVLLKjwQAAAACsBnn50RH+HsPlaZzJzUbRAKINcL+9T91jJ1Gd5JZXKKMYQPHi5kXraXljhAAAAIgGeYGpEf4TUK0IOqy+T8lgawG4WgIsZVeplQdYOkTmpMeAAAABJQZpjSahBaJlMFPG//AgTwyAUQ/9Od+649emXNJgblrSImgV1dWIgYyUeX0WkGz1oPOdc6UKgLvfxF2udL9r+3vyOJ3zOsvfmOQAAADQBnoJqRH+HsOQxRCCMCTKsMv80pUcMEWMaBnG+bZLo/o7gAggPnWMrK71nR0MaLVpDsOS6AAAAMUGahUnhClJlMFLG//vgHIBhjoEviMguc3ZwqTYQjvB+dEBD+tTd/LD/U79/aezoU08AAAAyAZ6kakR/h7ESVYA0yC7NG/OtKYdKqWLeGmJMPcZhbIRuFc+rxG1R7C8fxWQE6y89g50AAAE9QZqoSeEOiZTAjf/V0GkzH5jjHogSzwV8Ur8f+Ec2ZqF42vRmT3+0XBKgmcsowSJtIfdoeCmqACWCkOKlAnt3SySxtVHTz5bNRvdUm7O7h2Utc/NeW1ORiPi5ahyiTp6pzvTB7gpoYxSr8OJvwgt07bwjElPFl2/DXqzJ7mlN8Ko4FuS3th/HSK3m8xjdy9Pd2HG/G/ifzmmcbp4pvEyDSaJK1W2uFie5GND/WdfCyg3jjBDikafVsZZsB9db1ELXgOz6/mtW1PO65LJeMpnL03zTJn4LERpRc9KcXLD8eihY+pbPtstJ8ymI7vFDm0fdHk9VglqCfCuOm5LOR1Xte0qY4v0ZF497FEg6J2gXBmk0KeY7+LdmgHYEcz0qRZkyA93gRlWQCcg2bdHG3R1RsZFfPnVqyGpeaMJQsD8AAABgQZ7GRRU839CfjYe8gADwWSBEZ0dUg8ycG32E01qLxeAqrHnZPUJ5Lyx+8BUnDWj0XZtSKfpUSpAhT/szG+CsJq3bkVF7YO0bZ1YvXCQjDz1cgeQteorKqspdc1Fi4XCBAAAAGQGe52pEf6jUd6g4XFW5qqvsTJAPqVNfayAAAACQQZrsSahBaJlMCN/UtXwfA78SpsbvqlpyWOlTh6QrFvdRJjjmVnCQz861RBv+whwL1CGfpCv6GfEMxGCpC9HYQQUE0dQXgCNgcSS6mD0fn/+UCPvjhBVhxi7NZ4U2QtlGo9RKKkNi+HxgY6TR164zEIP510Utju+3WmWp8LCMIzVG7g95lqRKQwTZXOyoblZEAAAAPUGfCkURLJ+megsfuYeXW7Xti51RtkOkbffzL/ASgnb6g1Hr5xJiwX7/7XlPkdirFvrO694xVrxkHWvRxaEAAAAdAZ8pdER/gGwq2QxVj1MPQwZguW2S81h08qjXqxAAAAArAZ8rakR/3Cspb5/QadRcLmNZSh6g5dIyMVdtWcxCmcKN8VSOxyyA+VX1BgAAAD1BmzBJqEFsmUwI3/1ikeaEjHOd45AIQeOuawfhOqyYDx3N0zAkiYWZShFSr60ONAwyK1zoY65cTb4j02w5AAAALkGfTkUVLJ99jGQnHgDM8ln+aBB+E2IcxAM5SUB34rzqVNrLOXd+6bM2xuY4uoEAAAAbAZ9tdER/gw4tjgY4RdE2+ViqVUsAWJkRCr/RAAAAFgGfb2pEf4EHlYPO0DBg6RcG870j464AAABUQZtxSahBbJlMCN/OlXvvG5yLTHBTaPrAMVQaCfuKj0j5S7bXX6yX7qQf5RBo8No2FXkM/qWMiQOljutkadwskYcyP7bS28Hd6oB0nybXNXLow16IAAAAR0GblUnhClJlMCN//R5j7oKUrIUIBCbU00bLa+xJg3Zc+8xHG2utLSrY8Lzn10Z4sXQk4HLrp+BfVVniEmXvgjPxoP1/+SNTAAAAM0Gfs0U0TJ9+tCxJ1Gn0jDb8mTrU/wrX4mC1oGx1U6eFX27MccNul90QPfzPIXxTBWHVoAAAACUBn9J0RH+Jee66QMi55jOocyx//JCSfKN8+wOm/wGvvJAu+yXAAAAAGwGf1GpEf4TUKzbRXPU6nraxj9iMWSV9IgbTbQAAAFBBm9ZJqEFomUwI3/wICGIsD2OziqyuzNNpqzvABec/jYnGKj/psjvJAnKx6B88nuodchjeQKU8/Sf4fxPeI3Bur+63wb/oqD2JcdFb2ce6gAAAAFFBm/hJ4QpSZTBREsb/+8PuSK8cBB148viFWw/NHqyGs+rm2ceC8lLcGprCfFWoP4uRxFnVQBO6X5Km47W1B7f7HR4MpVRMW/C/8Bu03QteuBkAAAAjAZ4XakR/h60AVVNs3VhXxKqVaJQ3kpIsIMaGLSdIpeJrRq8AAABIQZoZSeEOiZTAjf/7w6KlpQgJLKb7CVQpIKCX2GtUUOMn6NT46JbGsSJDqhqGAUlsdEP5mjnTYaodhrLt7P8/yMykEazbfVizAAAAM0GaOknhDyZTAjf/++AcgTWnNhxEl9OeuJnrXVsKR/mhF9lVb2NCffZ/IdUboGwovrVQewAAAFlBmltJ4Q8
mUwI3//vhUdhAycJLcpdZgvrBHbuS0ELA2qR1GWV/sKmX9lqbykcrOwXH6Qp5yOSFSCovTOHfR4H66+O3WFhrSEGUCxI2T37abEC4N7SLdnmMQAAAAEpBmn1J4Q8mUwURPG/74Djgdzkgaa9RAdjKXG4f20PXgc0gPoX4Vj0Ucek2XSjMdrwZw3mpyUgcp/uoOq3e0VHPfh7Zubi3pdTi5QAAACoBnpxqRH+HsRNxvnokvr1IBRBrhf3qfussoHJ2FXwia8L0og/7L34YHvkAAAAxQZqeSeEPJlMCN//74ESoSq306qyBH0bjlOnxLGbrkGJ2Ws39CdVP/9jJZ6U/XdORQAAAAElBmqBJ4Q8mUwURPG/8A6IrIENlUdGDC2rGxYKvs0Zv/PoytQYJJmfkiM01RixdLevQCF9AnGptUZtShDxvMVuLKVNUIVwk3Z75AAAALQGe32pEf4egL3/1jAGor8/AQZ6N6eggOUr7LqhLAXV7FfOYIjSYAD8KRrCokQAAAD9BmsNJ4Q8mUwI3//wC0LvCDOPxGGZH0fTTrqCs2EFRlT6/L9YpDrTovL58/OT2vGOyWsqXu1aV/a7fySUgV4AAAAAeQZ7hRRE8331No5t4g23Ywzxi4929Dj8IQsijl2gJAAAALAGfAmpEf4k5No92peKr/htFkwdNuhonMp0Ro1YlnQcnCewvp2SzmqBS8yiAAAAAXUGbBUmoQWiZTBTxv/vhUdhCtFQj3CF8I0Z8KP7PO2SHsB6lgIAgPVTE9oxFKS1kdE31Ian66qP+yqf9hh734mZwin70dY7QE5chfscqW3JQE3iewy17sVw7qZM5wQAAAB0BnyRqRH+JNh+VxVvSpS/NWVqjqTRtbJ2NrINTKwAAAQVBmylJ4QpSZTAjf8EwsBpzW3+EphZT+sMjYK72pqFmMagiJ/6RxdCOrdIwWTYN0R3uDTSb/XRdzEteeehW0X8cK0bfRzlGLg54Bih5AVBjp8JIdS+frbm7i9b7XCr5CBhzbRO0uWlX13WMTPZSuvGoTUMrRVgG628K7rQSE3qBkBMgw+CcCNefjGn6Zkx+q5pfh8Mt0QqMzqj7mnSuGudADa/xLYXydiZCYQ7/ewnsEpGkOK9yVq6rK50LcAUK+I3NgBMWmbuoV9kh2ewa2IxwBTrP8/IrrKTnIq0axfEx25XaS1gBYRb6kDVOAPsog/1jZSwYZ4GatQjuLuVTjthxX68wVF8AAABLQZ9HRTRMn9O9y352oflZsxy6BAxflZu0pxOibLVcNnE+wS7cYiWlcJ/txGIxRrd+RdfvjrOjPfDoy3gMEaOA1ktKWZzpAcG2KsiDAAAAKQGfZnREf4ew+VpnMnMtuJjFWqP/yRU8Vfsm/SA+y58AHgHPfjY6Ic5gAAAAKQGfaGpEf9fFlYagcVz5uBzD/QIPWYbzU9vkHWdHRQQyeCK9nXQ4I2EsAAAAikGba0moQWiZTBTxv8gMsVZymC78zcwH990XdZZmZzJ4wtrIvyzMXIOULxsiqy3MuQ/I48r4n72WrGhLg75YH6WEsBqONLX6BjtJ8TjNYAtjYKLrQKQl6vGS9ZfD13YsDE3Jvu673GbAbsZboZ/13vUZzF8zSZOV9IwR2roprdQI0feIeu7cnUYSMQAAACgBn4pqRH+o0ZioFg2l6PKcHO/zygwvcEiwx436ZjD+vu4Dcnlr22pAAAAAQUGbjUnhClJlMFLG//vgOOCFHL75XjgbP/nLa3NdHqfl9wTp94oAO8T7lQOqg3O0Mx03n3NRR7pYlze6hVEf4hs4AAAAMgGfrGpEf4ERwY+Gt77r+yEJ4SvPAYi+RMSplok3KwSDGQpSuB95O2ZN6aV0x9y74RwxAAAAZEGbr0nhDomUwUTG//vHT9FLCBA2nxris8BwI6TDKNzQGtw992rw5prohMGR5qiMwrOY4KxeyFVh4Z5nsHZDDLBJCMqHd6qBQyfBqvHpiFjePAt/3JK/Ps50Y7xjr5QVjDmLLnEAAAAWAZ/OakR/hNQrQmu509DykZGMqHC4wQAAAJZBm9JJ4Q8mUwI3/8evureZ7lc3KXdmxnQXgMijUWF3ZgneFpdm9nM3WOKWFmTpBzrecoyXK1AK3/TzKgoznmiqaxTbi+HkZzv39k56Xd8Rwloi2NmfZ/xAvKYkrgc7Yj9489acf/vZrf+rHiwEDuf4P34wfjU87tMB0gm0+ejzliNtxzSXYbeAmmP64RveFZ816fJhWEQAAAAuQZ/wRRE839BbCo5vTqlWMLNw7JP9qz7O6uD4nu1gs/O9P9c5x1S1ytTVzTI2+AAAAB0BnhFqRH+BB5XFIVzyB9wJwDYiu0A8ZNtKI6DGeQAAAEJBmhZJqEFomUwI3/18y04SEQRzJO8XoEYWKQTqvMsTVgq6JgxNfx+qCvDs/Eb18miZtc9keAkyj4LDbnj+9YUtWYAAAAAzQZ40RREsn32aKglvf2C9/3iUkf/5fFdsKyz6B6M9ZyCIDP+BGGOtWUef8hos++ANyp2AAAAAKwGeU3REf4exAhsvjHgSnTXrK0yG7hQMOWbFoITHQQMMz45J+HIhTgvG3EEAAAAiAZ5VakR/hnW4Mf925RATlfTwJxSqw6b0//nC+VfvnPeXgAAAACpBmldJqEFsmUwI3/vuVw4CBhG+4lg3Es59Okupr9RDias/wbkJjrkoqxEAAAAuQZp4SeEKUmUwI3/74DjgJAb7CXLbzf5nq0/dretTIIHlFI1cAGN8WOxHbKwvXQAAAH5BmplJ4Q6JlMCN//vBGT5YQEgiNI4YfqwxY7pw7Y2LpcvMCmuuwZiEyCprEorTJzgFnI1fTLxSf+rSP+efSERrkuP/Gkp1S5WCPd1cY+R8nU2/Ecise1u7zn7FazbzJRLufU5Eeprb8Vk57B7YxKp6pppaYCg0Zui1dDn3N58AAABvQZq7SeEPJlMFETxv++A44DvMfbdU1hKfbdntKLt5PPLOQy9ifPEv2J3HrARgxvABi0D1MxB+8N2XXdutIkzk+EXbiNPhxpbHQRvoMb11pMEpF0dzIxjytdVY2pHHvt4vqc8m5kctbxoCGPrGzq7BAAAAOgGe2mpEf4l71gLl090r4xvvAf/rji6zKN85FiDpcbWZht1n1na/FcE7BBvTXNdoIy6yLTUk5BdVxIAAAABWQZrcSeEPJlMCN//74DjgQ+tokgubHJTmcS2bnfPkzdAbeefDjnKfV7MFzXTULGE7gZCZQWfJ6qOVkkF8AtxRAEmR69//H0c3PlHntplsbW9FUKAYTRcAAABbQZr9SeEPJlMCN//7w+5d2niwPcKLWmwiVkBeI/d3EBe4uOe41g3mCl0o4kirBlvctec2F+QjLJhaKWtN6qGqc6uLOLJ+QX+s7b6lWU0b6H0eQNGAmGh+ktnyUwAAAGJBmwBJ4Q8mUwI3//vDoZtTkBBrgFeRatJUfKwOw/xfRodt9aPbXcnC8nF6plsAvzoY3VEtvh6EDBUsUk66y8/erm9TY2A5NAgUOAFC29sW8XnubtkD7UGheu
C3NYe+ACX3zAAAADlBnz5FETzfgcr6UK161fjdbXl6k8H/DlXPiuIwC6b9ntAFIpHPdd+z6GlVWgcCRI2u8oYOOGNhC4AAAAAlAZ9fakR/h60AW7YGdQtuoFCe7a0uOvlc1NbK2CvlDazwoRCj+QAAAEFBm0FJqEFomUwI3/vDoqVTkBH89pCmTZspbW1QWm3JfZZ0UQobs65Mf5LlrQRMt+qH8hg6HnjvEpO2ssyZ0Y2DKAAAAFRBm2NJ4QpSZTBREsb/+8HrVh2DcyHMmT3dUQJoZsKjKvT90DkkLfs6f0DlI9Vo6Noy8G4U70dLwbnSCcJHA+s7ZIbEbAz5i2KHdv5BjXfmYaICxMEAAAAYAZ+CakR/hNQrQmu509OcsGZ40siB/cfgAAAAT0GbhknhDomUwI3/++A44Hc4/O2JB6m19DmjiM44bKO1947whNbBWqUtCS8/5jm7dHXzg/GKHpWAQzxNsp/1CPUSWvt3lZLPoW2mRgyciF8AAAAtQZ+kRRU834HOcQ6HnuhEEjNaVKp2xQF7WlSmlAM2ebIQcM+HcEqhiiFkUxiZAAAAIAGfxWpEf4EHlcUhXPIKbaQkC9FdA677TaPsDtJppmm7AAAAa0GbykmoQWiZTAjf+8HDrHeCOtIL6oV0eH0f5bCxgOtFfLX4aVpxgeHP3XvPB/PFVi0PRBD1cFMejAR7FtsN/v1VPGtv4Q5R6r3IoLYn6BFezGJqahiRYmgCTMTFmr0X4+/IlITNyh6ZhGBjAAAAS0Gf6EURLJ99PcOlKzJo8mgZiaFQ/cRdc7LruQ9PEd+7B2bhHdjrU2WOtGZMYKSDh35QT0pidHubWQAg3xDn8inXd71eAkmkAv1mZQAAADMBngd0RH+HoC9/1Oz3bWZ3fidkWX4f6J+c3kNLc5E/BUMQnT52L5poTTPp/CeS+I7NQNgAAAAqAZ4JakR/hnW4Mf925IfVrMHCbM+zJMcVzMO8153EB9P9wj2YcBm3d5ATAAABFUGaDEmoQWyZTBRMb8Em7juH6SagJhLTIoH9PuCUpPS4Xhl1dciUnk7fRxj9of4QGmTPb0WIBMBKQd3a1O1Yc1N/4kQ5x2J4nCSJXbRNmnX5rUdd/QcMYbapL5XWBGEC8Ck5/94+EirqJFZDiSW1qDre4E/cm0Cn57dgTQx4ezV9WnDqeA8qNeZT1cxuSXLNrFWIMllZp/93Xxk4j6Boviamv91IgB12vJyIkQkUqR+e7+pn8A3J1nM0FWyTrzkO7GJqjke9Z+zeEFS+E9FCk1HBH1Z9GwPLuDpVRLSp8hvtMrExkYwEqFJ7b8rI62OQzrSoZT3+EHwb1gJEd+fqytM5WmU4uf9eEg5WPacy8CyFQCjn58sAAAAdAZ4rakR/3Cdw0sHnaByzNI/DvCcZ5/JboTOZcBgAAAC3QZotSeEKUmUwI3/IDapfcEaIOp2yhhwVS+etbAWw+lDUW+XfaSyWSVIb9I+3RUZO9EZN/RG2xvytqz5YiVBaQOmzJiMDTSFB1apCB91XRO3SZTj1OyHGJzMKmR0UueNYpWGgXCDTBd912jNrkePuUoCV/pwa+tGQblTlC/7Mntowg9/HoHjvG3CnAA6FIBd1q7bFjLrs2BRVwa6HPk3/SE5LLvUOVOCcaqjJwOK9HolGgmBx/uLZAAAAa0GaUEnhDomUwI3/+SpVI7R3snQyETVUSNJhOfrvBOYWFqawEPOShEdffbNNhj/UU5hbs9Q5x/bRP+L8yTojPljMi35wxQSoHRofUyY5upTIiGlJUlsx7SzyXtNALaPJF5KQsK7ukuM7gVr7AAAAMEGebkURPN+Wev9EAG9OqVVseJN+Srxy0ekOaNhA4RLaQQxTYVhStfIpPBuX4Jw3IQAAACwBno9qRH+BB5XFIVzyCkOiEfbn4C8Yo2klpwwoQ9Hc0Mg62lIXOy4KcuI5ZAAAAKlBmpRJqEFomUwI38epbng7UbhTYq8oWxUQirssnQMSb7P6LX7rQapmaLnTKX/YQ+mvBQNdrilXpDana8P/ipoHFw1c/59iIYR8R4DQJ3J7cU7mE0EUO/5HJiOmapC64SNTZLeKkcj/wYvW9CxQjNGOHbzdRsVXG5vsdQcdxC46I2UJ+EPeA96UgnLDLarb/ye4UGev0CmekJlKpnTVesJsW30eCABpwrUgAAAAPUGeskURLJ/TvhMx1yLmtTPxEmDZ/zbA0Szadw/YOlgT6D9Dej7fdLCUUYjrPMIemsVWLNdcv1K6VaGlVIEAAAAvAZ7RdER/h6Avf/WMAZTJgkbiUU2GBEoThRTgMuuNR0WaohOWxAZ8OhULFKorD9YAAAAqAZ7TakR/3Ck9CeH/dWEJcswcJsz6v+24f6f0uV9+xslbm5g70jXAFOIQAAAAWEGa1kmoQWyZTBRMb/wIE8MgEocd6yv5CokI+/G0GUpxgPsND4Bh5ENvi1KT8dhbDutyYfbbj0VildYU+yRxq6FTnj94kjyaTRWbO1POXfK9v3D656X4I0kAAAAtAZ71akR/gQeVg87SZ4hSEgBWkj+ZBwvgo+Jen45iep+n2pmujJBvuAKIzNpAAAAAW0Ga90nhClJlMCN/++FR2BweCG1oRIssFvf41wc63i/NokGGJ2DfzyRKo+/2yaVmu/JJW0gloXeQ5hZa5thWPWMobrf/C7m5IKYTrfpZbz3D6/1t8557AoqeOYEAAABKQZsaSeEOiZTAjf/74D1EHaAmN4EWir8LxdLPbAZPO0M2+PY2QoG/X+gqbY2OfphX+3ikmCHy9qBRinr117Pf3bBw8DsA034PWVEAAAAvQZ84RRE8331NqbcOYV8ovUPdy4IB1YpDWJQ+wSvnfZOYkxxmYn/OzBmtYmp2A4AAAAAeAZ9ZakR/gQeVxSFc8i68ngUmW/lZdC6KspEQb2WPAAAATEGbXEmoQWiZTBTxv/vD7cBveCN3NVCXGk0YJpTWI6S27wxEVPlc6/1wnjM+kCdJnnhPo14+G+H7r7GYvUMBsla4e7HrvMr8M+H/u8gAAAAvAZ97akR/h6Avf+N3Bj9G4GRPrOabGjKMwWGQh7C506fOxYn6ZCu0xmofqwUIi4EAAABTQZt+SeEKUmUwUsb/+8WeTlkZAQajP8sPFAhMqTrywyxwavokyr3SLrM+i39/I0saTn/n2QD6z2f0Tpf5o4KXxn3mPA9D17rJX36vYMwHwHjQhLEAAAAoAZ+dakR/h60oZYAy4TexgRX9qffNaY7PWMw/K4c8McCTpcC8j63SIAAAADlBm59J4Q6JlMCN/8oIQcmggO56mz4fWIMz/u1LoEQTzV5bZxKWzyYuNw0VUour5rIsDiU90umHFkAAAABPQZugSeEPJlMCN//74VHYPx3QaH7LO6y4C4GizeyIGkLKZnJtuowmRMNw+Y2sUwAe7deUwGTWQ/Q5vriZGTkIsv3ToEeu8MQHLbiiuEa7gQAAAGdBm8NJ4Q8mUwI3//wC1hCBKrZCFEPTQr0tZ5jK1ORrDiuY02WzVUIDXNEWNY+1UnXo/FFfdn9Mb
oNzrW4gnI9f6iOOt0mZ3nsie9H+/u+vO5CY8Fj2uueEyKX8U3QhLU3/TcKrIh9gAAAAHUGf4UURPN+B+FaPyFFuXZbgYj0u945+bqtESVlZAAAAKwGeAmpEf4i9r/hMofsoVdV7ULdGYi0qwviD0eMbDVTnrmwEwqTMGF7q03gAAAAxQZoESahBaJlMCN/74DjgiVvp1xngahySXAFM70j0RNfE5vodU8e8SsZ/26Vt1OWDKQAAAEhBmiZJ4QpSZTBREsb/++5XDgeRcDFSaseJcUAQLJ6v6GMzRDVOjaHtSG54x4li8aIVzsEHeZQB/Ro7xKkMpXenDvr+1lP2ymEAAAAvAZ5FakR/inEMYhyP/rt01Hns1oE+sW1Ax0NveAZCmuIIkSUdWkHaVhqEgSKY0rkAAABIQZpJSeEOiZTAjf/KCEPOyTOHP3Ga0L1Cjd69G+Yy89VyBy1dtUAy8MuVk4n3KI+TABbfvtiT/pPhtktWSGJDgIe68yWWzEpvAAAAHkGeZ0UVPN99TaObeIE6IhnjGIo8WCgt9DhhPUr/BAAAAC4BnohqRH+HsRJVvmNKEZfxWUIT/qJm6pPNGuzqg2ax/lk4U+jw9QOaXPUFfo64AAAAj0Gai0moQWiZTBTxv/1jexkZY1WGJiT87bz5jKDKBIPIUpqiOKoFqMU9xuIH4219MWUk+QJUsSOay780Yn710gUCq/8q5pJ2Qna2z31AIaW/7r/7nw3VuvmUI84z2bsgf+2HLYAKweSx9T91NAbzXY1OYhhH/Rhoq7G9wO4NOvgpiqkSQhrt64wDHDxeNgBhAAAAGwGeqmpEf5BXF/8DaTeNmUt5+Es7Qo54X0Q20AAAAGVBmq5J4QpSZTAjf/1jexZqwShLchuDwYT5GB8R7EoFbWKry1qGHy2qItJHoYSzhYHpftREHwxHK/JzKBYIGEaS6NFr4w6720GZ4EwFtOtZcCzxJ8lwWGRhMcXbe390w6J20G6KwAAAADRBnsxFNEzfiNYJXadE4qFM8TYpcvRqNU9B/tg74vA6eRce+4rX2wUXpty0yO3D1hzy0b//AAAAJAGe7WpEf4EHlcUhXPIKbzh/PPJ8bNtdgqDOQTgASBJuMgwAoQAAAPtBmvJJqEFomUwI38HgcMuPufQ+1zSgVSxP44j8Xw0s65lEdmNlEJPiZc5zpqLY2ASG2JgZLJLCVojwLtyVBJ2Djl4lz8/HuoMrqAhvSAdjKQWYwWvku/PylYEHSBWY+TA2i9oIGaNmjGx6LG1Kj+E8emazrbQW4uXowHf6FzN0wi4FUL7pYGixlMvdCCV20iEhjFpxLFL7zMrQiJXs0FePfh470wQo3VD0x5R78tx3albjYGEvRrqiobpY8RLCYk5k9JRliWcwQKwaEOM4PxOill4WkG8tUs4AocgZR6pVN8VW+TjxJGdBFTt3zP5oF1jV/E0tbkUBVpsIQwAAADRBnxBFESyfzVx+Y64rGY1MwatWM/mzoG1t/TCEbE6JSTBCJehjrlgVbP29VWiv1os6uGraAAAAcwGfL3REf9Pc6p27yHwywlkNj5DchvZ7/p56d3iHqPJJy0TZlJpjk/4Pz69n5WmRBfZn9bTbyeeOj4mF9//0Kaye4vxJwYkk6gt36PZDZ4v1fnhhsDUliUbwZt3CzWRiFHU6yepOVv1u7QQgijOhrLbxJsAAAAAmAZ8xakR/h+Qj8Y/7ryVtMCNL7ETyKPTlzq4JQ5ZB/FO0LOIakpEAAABFQZszSahBbJlMCN/zu7OUTa9XLb3eW75wn7Kv236GCYDyaFkEi5prdIBVAhBOujVHZ91Dr3M5z7YWFsFIPaTIAQW6IvvAAAAAcEGbVUnhClJlMFFSxv/8EDLdkB8H1mLAmhRVDhMcY2mJZi7xKwfk8PJDVJGGU0diyt/1T05IeVjQbyr4y3cfABc+R55miDu8jv+ke/z5Wlghc6LtRzgp8YEXwp4pBzCx8KkBF/UR9Q5Fhyu0T3/n7sAAAAAXAZ90akR/iOA7CrYE++zmxFeP15r8ZlkAAACYQZt5SeEOiZTAjf/HqZ9anCKYumtLnkGnytBtxceH+Tab7WDld8HDYn4TZQTl+0UF3pmdah258gjA7l14kM0QSfHRYvwKPR5YYx3eazaokeo5MLOsjv1A7a/dr6fcvO8RrTfuoTK4JG2FPtdS+FD/W7GxeAJSunV30h3c7FLYa2Yf/MWZdi1pjZRZxjRsrX+SukflJqQC/GIAAAA2QZ+XRRU8n6Z5caH3IhlDPEGgwBap5BKO3zzvTuKfuLPQfl8w4HkfPygPVlVEzHop4sE0RruBAAAAJwGftnREf4Bxg3qaOZGuOib2Lt1Wma/ikk81jKfwGZSD4w4Dnqo5LQAAAB4Bn7hqRH/N5HsNDLGJUxpLAK9mT9gX6FWVFLp4DnAAAABhQZu7SahBaJlMFPG/8pGuAUUgcuCRH/OwAPFSZJ3zCRS17ZFc1C9yqHmc8/pVK7IEw/oV7yJdMcMIavNsCKHsF1NBZJBQvHr63RBRk+tra9PDpuRrNWg1DBO3hQlugGNbeQAAAC0Bn9pqRH+BEcGOCVJY/QJGN/iSBG3l/ieze7n+BOFxVwCwANjCJKx4MnZIv8AAAAA/QZvdSeEKUmUwUsb/++A44IUf87s3whHef/K9eDWAMU8ldp4l040VzlBC65n7nr79QcRWreur7WcWsMhEQu2fAAAAKgGf/GpEf4l71o92pOxqkP3CQfV7f8+wwfDv0Vwj8bTm1R7NQsii5g4zwQAAAGJBm/9J4Q6JlMFExv/74VHYQpILXs6KXOmEj6xleFO7YS9ET5DIDmczDCW5+M6UZA4DyHmCTz5FoaK0i4R5P7y599cypfH8m6UCrK8Bf/vgLazNz1wC/16tdStMHxBIjZ4P1AAAABkBnh5qRH+I4DsKsOugF12qomW/SycjPJtcAAAAZEGaAUnhDyZTBTxv++A44DvMfnbE2rCH23XL0djoBAvFeci3xno+OtABO36XM5KyvnpfzxKvle/8J3308zrG6JVwhHh4rsYQUFRysfHizQb86GKutGNCt+OY9sPNteGFKlv4dXcAAAAqAZ4gakR/h7ETcb56JL659u40VH/5IqeKyAkelvuqVBBk49Saqe7VmZuAAAAAMEGaIknhDyZTAjf/++AcgTWtokgude4jcmomud9G6Ir5iVSH+0BGgJZK3LZX0JscoQAAAFJBmkNJ4Q8mUwI3//vD7l3aeLA78Ouck9tQPwA8lswErUECEWb2BfP9eCGIuUSrHm2tubC/IOXP44SUuC0wRBPcOUZ6UwEfr45QL2uWpkMDOFPAAAAAbEGaZknhDyZTAjf/+8PuSNeECUbzFqDoHVeVghCqcIHbUioPt8t0kMbXawr+fm53In7aCpqLDhgohy3fxI7sPPRi+v4k4BfCt+ArbIn/7hr/fNgQ82kiJyjRCR88G6lkZxT3zjOqC80cK2gQDwAAADlBnoRFETzfhHJ+XgcrH6mSiINjyp//kgke8KUWO3XO8bzOwBXQN1PZPssK
bb2Qy1CNfZzQq7z2F0EAAAAsAZ6lakR/h60AXDOWhD5PePsAKefz0nC6cwFfCc9G9GK1nNdoyU44ymIpUb8AAABDQZqnSahBaJlMCN/7w6grIDgahJq1KP+AGMxDG43cWoTqfOrO+8pS10UHq7xCbX4a4pJt454b+M0H1Eo62OjD2EdzxwAAAFVBmslJ4QpSZTBREsb/ygqpP0TAwFGuhMe0H+LKq01NhOFINpQwf2KjhEvSCqmG9OMZE2/Qqy0272Gt0nHLi11coL4E4EoCisDk2gIBnTbHmELqhvwwAAAAGAGe6GpEf4k2H5XFXBQn5jeU7JdWsFUrSQAAAGBBmu1J4Q6JlMCN//vBm7tPgAJMRPqV++IZbHFD8LyLSaFG1Fj3xnwOAwhoQ0X3rcSTj34Hin1WxPUwQOdGW/+/l7yZtILXAd5Z9Wgqf/aUbVivhneizf3wbIgcVChpYYEAAABCQZ8LRRU8n31uIe20/kHBd7snCnkEo5gkxddxXoKnJ5QnndNiDb5+E/ApR6QfVh6OvB748Hzqtcox/cwTiLlMZwRAAAAAKwGfKnREf4ew+Vu++ZOvbsHGmKLhf3qdqHvTmJsewon5VlSMTQ0CLsoGQswAAAAgAZ8sakR/hNQrQyxiUcxzYxkWBaHD9jMxUSoH24CdrYEAAABJQZsvSahBaJlMFPG//AgTxcoiqckUP9wRELdF44LQ+J6zoeYQmvRkGAschCcjUxhb6c6E9RwD3q5AdXgn/otCYxPZzTf5u3+JwQAAADEBn05qRH+BB40VhOFmnLMmN/Rt6KeYRcltPwhBw6Yj8gKW5Be+d4emYrBg0QKVVh+BAAAAOUGbUUnhClJlMFLG//vgOOBCV69hkAISts4hjQjQR4n/taVosJ353hMn/bQatLAc7vxjVVNjpBAL4AAAAC8Bn3BqRH+HoDXutdsAXaf9vPXmz+vq9nHMZG8uHAniuS2VQVH0wdR1pyOJp2ZkQAAAAOVBm3NJ4Q6JlMFExv/BURmgKmQ2sem8M/cS5YiFuDEKj58ESA2Ioo541n9schk3ne2Kz27RHR2Mtl7sfZ6Tk8CR5fodz+unZYWDbUKNmf3Gr1+o9Rxy9lYH9eJLD5VCN+c3svum1sTzNtsJGBK1cHItHWUqHplu24Ckc7vqxHOe0ZtkSJdUThqkuknVrcKcY1ewS9SB7XN0a/r2y0t00YwIYLkLxxLwPmaULQNo/ape3s5adm82wYXnY+bUcZi6E22itPWO92rOH7IP21AdVJhrsPeiqRAczmuYnL6vkPzP4adcC7BhAAAAWAGfkmpEf84wSggSH8mtdkNj5CRqWoGie3lqD7CWgy3dy9dUCO//W9YZNfE6Xiu5GIKYFfDcUA0T2ZBvdwLfngiXt+EDkfDWv0K7BOXReZCjX9nSExoLgSoAAABZQZuXSeEPJlMCN//74DjgOprY+5Fmn26ag8A76dO6JZsciGMnQ2pYcNEnea2hGeomPmCENu5nJeGhNjlWP/GqpX7N4n3KhFSvZLLumw3qoPc7g3O8DwTNGcAAAAA4QZ+1RRE8n309hYNYZQ7+HfhECBi/K0X4frDrq0X/KbwmZ8BThyQ2+fhu0YmOJCJCHAKCHj32xcEAAAAlAZ/UdER/h7D5Wmcz5GDrLvRDAsCvzGdqHHvdNLLc9bD46lxTMAAAAB0Bn9ZqRH+E1CtQOK5zZksA34p0LQK+zTMJUM7eYQAAADxBm9lJqEFomUwU8b/KCEPdL34SY/510oR6jdtPc2spsXUY6gpzzsPQOWCHtIKBf9RsoPGI0GagH7nZSdUAAAAlAZ/4akR/h7DkMUOCGBJh+lP5pSg5ck9EFnrQLuSi5xBprxkDQAAABQJliIIACf++DLeYgN9XYj5zg5B5QJuOQipq5DurWEjnW3UorRPL+Gmp7W/eIApq4hA6GO7MXHIyDHsVucz/aMFYtKTDM8z25ZLQfiNZJqSiDN9723ESzhz79Cwt5M+8vOWuuGhG+Tvq9foXjrYoVXiVDwEsVfUmlk5TXbEqyYI+MZpo4mDJdCI6FifaFbOBypE2MZf5ZLIBH7g8U1rXJzlzobXWQwBVEZ3H50m7+bGCm/s/TO02Npz6eSKcRnoIL6uejcV8scEPRiZKvQslaZioRDQNI3E8UN6+/c9cAekW3/GkyyVmNiyROqcKtiIhBHi7pAk2+Q/TQP/YwlHw0o4KIS19ytAyX2tYiRc1FzBcAlpBmEdwA9/TY2cHx8Zwfg5om1WDRf7F7wfrHwkDprLxXYIXoXvv2/us1OW+b+Xd99mIui98EnMS3BL3wkn/NUHomZCIfEFUCZVjLemRQTcW2M7/WlzDgz8mCOUhc7zw+tq0JUXSMkS1L/cOl+y1DmBMgMzcbhhNbzDLeKWkrUH5XaHyryAkCyr9U1Xo/mAgTLsnExuVa/P4R7zZdXvaMIk7ek35j2b1yb5zOOXYieE3ARP5O5ZjAZBq4ndpSi6jkftKCBVFGUAJHA0yJ3SOwTwLlD33y2ioFahycU0S2U1an8v7S5ljSJFgUt4xqxRzXPLMm5Q/lf3uq1K+w5f2w1oQlGAPQWlYXk6xSti7jEz02BQEwIn/1VSW1lDXiIsLQ9/68Lmyq7+KqScF8M2ttOd6ikUYm63rAEQ/1uwF2tw+vDoK58ZLDjGGY+DsvOCVmKReWQXo2mJU3Z/+dukASHHoLCtd6cudmKCocwPIOFIrZIpuWFPYP7N5w3TQNI2J7uOoaE0UlH0FyMvlyJKCSxK2KXdZbEjdWLzEYIhD/rSHAXZ83AHb4RVPmbBpaD4FR9EhjzYQ5kM67XQ4jgiM+83E3jJ1tuOTwVjEUXOTiZCx8wy+C0dX3lqduBIJOztDCxzLLUgy1pDJ11agmN4hiYEyREcCB9PLz7Gu5NIl+dxSpct15aKEYrYOog2sW/3UjxYhz6/+6vpRxNYFBe2YVFfEse/JAPrCdAPUQXwCIpcGvCAqIe9H6yayOc2/eUy00kYB9Hqrr4iHiednpjGYIa/wBQn2kawkd5eYwvCd09N//TBsRiPySS7TyptSNcNkl3kSijqYAzDU0cTjAlo7gSKsBb2500bskV+Fa6qC7WYdX+uHeAkkUfK7djG3sDH96VRFsoJGnc9eiCjPYyGt4n1nklz/pm47DKZfzdVcEQVVqXfQk182IfGwqQ6YPNZNc85BxPoa5UG0Nc7aBMBm/EEqGdEA+1bpcQALD7p+Xw3OdXvS2i01L2TZwR76H4FwPsv/kg5K4Q+gtukTkAv8aXBY908KoaNfjcQLsJI9nUsxo8CIfcaHaV3fe+aAg0mPXJT7WoUH3EqFX9KWMCffuLgk141zvZdeWb9G2OAayn61mciuEFWUB3unf//3UT9ZWH3tY8Odtz5knWw9WcA/7IUpQHs7AGXSZHFSuMbAJ7IeW5YgcJ4/2xVyVfqWwGS8VXHS84Y1Oc2iGWIYRH4DJUk2xI3a7Xnxr04yGYzMm4F14EhpEJRU19w2fQAabSuyr93+75ace0n91G5DJsmYh8MQGBLpKZioZADEdyO
WOL/b3dH1hKiEdZsQsnfs/yhdAAqZAAAAPEGaIWxG//vtcWB39W5QeWKwGeLr7PMDikgXstlWyUqYOYjnV7wDPmyxfP1O0kL6PoxR7zo1Gkb+C4TG+AAAALJBmkM8IZMphG/Hqe1RFY5rt5egm2nn3JQcBwuaJRr84/ddLi9/76agb7ejonTAqd67XVYfnqN4BuUaC+fAk+1lcholPMzUb84TknMEw30GkmM7GgprsKqCgwcaNTAOWdS32AuH1x/E2QaetB+/mEk87Hq8PlJqqlYpWPshK+oBJK6KuTSGIwg/9Bv+LVgT1pALr72MIHMQXmCPnTdlDJUk4bEuuLIqk2to3LjxIlmPkYtBAAAAGAGeYmpEf830n5JzFkX3j1Qd8Q+AUGDVgQAAAEhBmmdJ4Q8mUwI3//vBm7tPFgd78v7l86Um29hR9xxADhgMD9LzlgdAOtozhLW7UEkoFYf5otHinj6odtZRtbm/yoQHLKnpnZ8AAAAzQZ6FRRE8n31uIfah+VmyyuHZOFPIEoZWWjwctTHilRP/EBaLGght8/KC14RVIcYtmzmgAAAAJwGepHREf4i5UoIN1KmuYE1Jdm1Hq0jw330ncp4Hx2M/vKMq5Z4tgQAAAB0BnqZqRH+E1CtVyjEqWYFcr+nHuyCeliO2EXDZLgAAAE9BmqhJqEFomUwI3/wDoisgI4RECc7scuWX932y6q/Gcps10izFusxOOvMniGaXl24boq3k+UZg8rPMUT3iQxdNcNfEd4A2W68ovwz4f+7zAAAAVkGayknhClJlMFESxv/7w+5IrxwHtyq44n5qzWH5tokp7F/MIurN4zLf3qsBy/LP1c33i4OFn3CMZ1TBLIwXvbEHj14Vcb6Wt30UGVr0GhXroArD8I7AAAAAJAGe6WpEf4etAFVTbNo8fLWftyKUew1Jo8zvS8XWIpeI7UlrUQAAAElBmutJ4Q6JlMCN//vE9pkb6iHbDeYYscsOWTLED8gfFsN2l+iXqUwnGAUp7SGWRWf+Tj1MgMpeHoo49mO3nGzGBc/JZpGbbwjNAAAAM0GbDEnhDyZTAjf/++A44EO2R9S7lNO7TCCSBTAoqxeSZe0BGgDXxryEEgFwFd7IWXy8YQAAAFVBmy1J4Q8mUwI3//vhUdg9fr4QWmZ38DJvZypVj9XjHyiWE0qfucSUpcKpbBOOJb2lRZHjFXqbLB/AAyfZJCzDiBXSwSe2d3nKLM/QAOEJb8mw3ifBAAAAT0GbT0nhDyZTBRE8b8oIQ86YuCDOQEpyy9UWaSGWOxDO7wiSCJDYIhWcih6YPXwwChxsQuyPkj7FvLL/pgLFAoSeOsn9+9F+L3cY6vDMfiAAAAAqAZ9uakR/gQ9o5WBUzd28w1hPr/+56V66bgLLEK74aalrWqpQrcrjTCoOAAAAM0GbcEnhDyZTAjf/++A44Ilb6pCfLUOd7CmgqVP5gaCrD6c4HElsj+qTov37vq7C0KSjKQAAAEZBm5JJ4Q8mUwURPG/8A6IrIENjLKcjAdSvTZ0AUa2HabW/T6JywFoZYZ38XBzMVTWT9Boz5CqJWPYr9zgIZJzhObYT/U75AAAALwGfsWpEf4pxDGIf1f+tYtz0i7E23tkVLGF0sTK0M/3IyamfbQgfSCSb1BPWNUB8AAAAPkGbtUnhDyZTAjf//CJ/zIEGcd9wsgkXNNOYwvx4RPo8KfLe4zM2exTFz5+cPtcVtktZUvjRO+buNFeyS0d/AAAAHUGf00URPN99TaObeEbxmT4L02e3ocfhCFkUcu0BAAAAMAGf9GpEf4exElW+Y0rm0PJ/wJM/5jtBqG7M53mIMo8kEOBG1R7FTsO3uy/8wE9rgQAAAIJBm/dJqEFomUwU8b/74VHYNYZ3gTviajBNW4911XFIgnHuPPvf6PuINmTDgKw6tl4y1gHxr/o1Eoc443t4pg43rfeXg457+frQjkKJwwuU0Dg28hgOuxXzoy3/7GNwWDiho5fxpVul4JYlW/s2wZ6+FIFz9RB3Pxla/HA9ajISXcDsAAAAGQGeFmpEf4eNe2TaTV+is9Z+r3dj/4M5IrQAAAEtQZobSeEKUmUwI3/OMDPDLD7+9LV1rvQO8dIyBgu6gBZzKIHOqwdcYW0mj4/p+KVMgoNINR6fpQkbB+e7J43ZN6Thfy8C35cIcvE6ulfkdYr0ZP5p+0EdUB5C7vhx0r6X9DkZA0F/t8XcJ1Ay1jkXzIafX5I9JMZyOPPT23eliytMDQN3Nmm15DczfsII5lelhwcySIf4ku6wzRULjnVZWRMxcaOWnCVmvPK0WKnXS6hAGrBovrD5c40CFPHBrLy9JtUVvwuELo3rnQUyLyChZip9Wl/I5F161BBhavdM3/GZPS1ybPzPYbRmbIdt1SZq/ybmP8py+DhwJs6/zeOqKGDQvWpP3NX7JQuvuIBzABOHx7UbDGk+BICall/zPVb7YjEOQFeJndkK8GmaDQAAAEdBnjlFNEyf079+wBmd8pEhgLG8Xc7r/4rqUnzQQEv0ohGUAL7kLhkvedlsNVqrHka/nxHyyL78FF+w3UmoM16jfBFXZPl4yAAAACoBnlh0RH+AbCj0Y5cPjkp7DicIuUO2PQpkoPz3B9y9RBSHaE7ieceKLYAAAAAtAZ5aakR/3CsoGURp1Gbf6xetbBvbdg7XQAomNjTNFLLYoSnN1T9WTArX1a5xAAAAf0GaXUmoQWiZTBTxv8gax37l9sNbrPn6iWwUf8RxgPTzKiAAtM7FGqChNHUkeqgcz5deTS6OtdOTIhcU10sqfJ/G7PWcIpym3Fr6VN/qN2kQwEQKlPOHuE2NC5vUo2j48TIVNP/sgwP3kyf759nBR/KFqquMTQDl4oofPV/0HkAAAAAtAZ58akR/qMvbvw4JUlkDvjD/f4ktFYfXNpM4bGfm2nM37h+CiGSGIelwUC+BAAAAQ0Gaf0nhClJlMFLG//kqHGZpISriFH/O7LMfxPF3levzQUoyR8IdbCT4Om5XQ61r7CJghz72IBV8PYi+fqlXXR+phCsAAAAuAZ6eakR/iXvWj3al4qv+G0JMwV4sy5KJ/S3ER7xINYVeI2qPYr2GLSVtouwGmAAAAGBBmoFJ4Q6JlMFExv/9fMnXUwOYhp70FTBXdRxtKOUP+0QmwlUd3WHIaIY4UFj8Yp5d9xxsyZ9jD2OI0QWf2S1Zcu0qWkg1dQDmX7aUzQyVEEdscleAHvns65gIbvQxIesAAAAWAZ6gakR/h417ZNpNX6Kz2wcSbK+6JgAAADRBmqRJ4Q8mUwI3/8oIQ86YuCDOQ6JMWiYf/iRlpY+LK2B96JBM88YxxpmUYIE8NLeGUY5dAAAAK0GewkURPN+D+9PfvvPTrHvZhI32OEWXdhkGCruwTO5SkxQve5GtKHjiB1sAAAAdAZ7jakR/gQeVxSFc8gptpCQL0Wxcfx1JTwVzldcAAACEQZroSahBaJlMCN/HrvlPtuKGl8J03skzn9tX3N+UGKgPtt5f1yW1mhtqYtpj2nU5x+dydduAMrmraSE5zwuU/z
BlhXh5g8x8c38YYs2MriPKi55w4mf0Kwy5yX9U4VJwu6z5gBqEnviXVdXlHGiM76Z3+hcdzSXdSABOlyYkF+z/YctAAAAAM0GfBkURLJ/NXH5jrirbiX21Z3CtP5YcQMlKXKnVSfznQBXI1IR3YSzO0uaD7W46BIwddQAAACsBnyV0RH+vXBhX1AzTfh+ibFtqz6eGMQ/DTS0BafSOlfYuavjTxTNGlR1QAAAAIgGfJ2pEf4Z1ktqM7YEw3wBj96oC8se5g/yL18ispByD6DEAAAA3QZsqSahBbJlMFExv87zR2otPwnC4OQ4HEYtHszJf56ewlbVNYDdRLi7DaNM07knXvZOBJlAsDQAAABoBn0lqRH+BB5WDzvLCGv4OSr3ZyhW71ibSYQAAAEpBm0tJ4QpSZTAjf/vhUdhErrGwgYAPXwOxHLcvgTv8A/Zx9DjubfQaVDTVMebAKQ+RkqueYnX184COJ7NhYucgTvasYPFK4F93kQAAAFRBm21J4Q6JlMFNExv/++A44DvMfnbRI+5Ux9vnvb9fZlh0+yV0sGFmgEwKg3swNSCp7N5faAgb86AHOGr8Z8DiL5UcrnP4kp/lO962eXNLUHqjhHcAAAAmAZ+MakR/h7ETb+sxp7t4uJPRUf/kip4odeYTuzhEga5MwAislMEAAAAyQZuOSeEPJlMCN//74DjgQ+tokMdI2I2w4XB31uWN9xpSf/b7WfaAjQAslbjZLMjZGYAAAABOQZuvSeEPJlMCN//7w+5d2niwexjKrUXs6RzXh1b13XA1usrXuo4Q+o9ouvQ1D2Q2Wm0PpqZJ/HVh07u1x+848SMV9TQUPFzbtS13xBB6AAAAW0Gb0knhDyZTAjf/+8ParGvCBK2RNZj2l1x+jAR0apeuLZtFzhogEMtR9DC/p+fqK8dILLP+IbGFXeex19kT5IY4p8S3kRMYF4bXDgihVw55Ypd4XX0hDtrHn2EAAAA5QZ/wRRE834ISoy161qatNvHcx2Sov/bKprDXTckBV7PTdQhKXDndGlf3VqRlNangjqwTorzS3wjwAAAAJwGeEWpEf4etAFVTZCVL4SV8n9Kg6fE2aMda+98WXMvpKkUyXESP/AAAAElBmhNJqEFomUwI3/vDp1Hm8IcqtaXumQqIJt/IdZM/LaCqQDp5QDE0HJXXxYKzqroGplUEOXiTI56df1TOqn/+xhhrsArE1GyBAAAAX0GaNUnhClJlMFESxv/9Y3sbc/g1iqUD5e5Uke9bb1C6YGB9z3awZ4zQI0M0unHOdJnHA1rrnsh4SimxV5KpEdhAQ9gFOWZITFvHKgu1okFYOADb9R2Rozp0tWuEyuxNAAAAGwGeVGpEf5BbOR6mhp16rXzs2+e4d71TyW9KQQAAAGZBmlhJ4Q6JlMCN//vgOOB3OR7sTCb+I0045cZJEQ4U+L3XCpcnHOJwFb1e+Q0W3Q7uM0aQ6yv11+YTtxoOdsy7RQ+LYYJDbR6Rd7L5XFyxpKDB+0OWkFw+YLFioVUh95SYqxqecX8AAAA8QZ52RRU834P7bhmByo0v4xgbLCddxYxYbspCwfmTeE4h4m6gzc5yXll0xuqHmzfhwYk8ZpP+UzAb50WNAAAAIQGel2pEf4EHlcUhXPIKbaQj7c/ATAoSkel2SJNrDV4kgQAAAGxBmpxJqEFomUwI3/125QhMIg/aNp4kCmUKfLpf/QrlLIP5B/oHjyqjvBOioUOoMoL20buyufrF/UIt3sv6YVyWShlKuej/5mcBImU0X9mB2jkyIoDrWUZBjFA6haFPIB5+QhgWaDpI61usiRgAAABMQZ66RREsn32aKglvf2C9+Bdon/+X9WLOPlx7m1yaoyHmMFFCEI61yhs2ICjfrbx1xwg+UNHTSx3O+VuFL6oUO/x14wF/ouCv7NddvAAAADEBntl0RH+KcQxiH9X/rcK9KC9kRdt+HJ+G/iNZPTGZ+diB+2gxK2Dg5bXzZO90e7yZAAAAJwGe22pEf4Z1uDH/deStpiTEc9tyHnyaVWKk+d3zLNz5R2rfH3mWEAAAADpBmt1JqEFsmUwI3/wDwzHAd5jvuFkDKQEVOoZiZuzsM3wXcA8F3ucQwarCn13TkhsIYJMiT3kh+1+BAAABTEGa/0nhClJlMFFSxv/BJAf7z+EOU2CWqKManlZ0LpOsfJcBa0PMmIuvVH9dbvAcE5UzEdHVQbTrz1N1jb9WiMOxu5R2cy5A81OxB+5ZBnN5eAsC32urtdTTKK7QubeiVOo7VylTo1bcPy3DSeA7L7gshvKbqb6yZRCFyi4whKSJ7RPi0+A7uR9tDdfvNWqXsP4NZEdkTw7nJJscByribz64QmP/ICkD3ZxZqkoqOTVn6dlY+P0SHMy6N2fn26w0kDbCjyPfS1dZblvZxWjU616LYFDlf6NoIU40eHrlDbvSs4WEm5sqj6lAdmcaMt1R2DDkFv3lJfP04qVFUwKkb4diB0AgVCwqoCaqflufpSnzlUP5zpGT6XXwKzdvxhIiAGd/HKUkMg36acC0SDhcMpk2kz57Q3kYvM63o6ggTKO91qvRby/6+hO4M9+BAAAAWAGfHmpEf84wSggd11AiFSufQxM9Luw26hRYDDKsE9vSwAn7Z3kp0GuF0Eqpea5n5unP9KN0c3L3GkYNAsaXrACiTeM7/yz8z1LY9wjvbK9PNQeYtR6wMIAAAABfQZsDSeEOiZTAjf/zunZ7sMLNRD1X4hTb1ZFO384nK86xDwxI5Vpgs+jNIRsolIEfvY6lsaVpHcZvcrFP1//9wB89zGosviWcqEVLIZ7/+WmTYZWLoPL9M4Sq38lHkYEAAAAyQZ8hRRU8n3sgpbafys2Rudk4U/87NYQWWpO4rxUES/JyvoT/lfbQ9OleOWWIZeQm2IAAAAAoAZ9AdER/gHHeAx5KQ6o8Bkl+v/9P4zpRB6XdjaQVIDAbYhBhxO6UwQAAAB0Bn0JqRH+E1Cs20Vz506WBTrufbbvFjJQQkJ28wQAAAD5Bm0VJqEFomUwU8b/KCEPdL34SY/52bZ/rGfsXPf5yCTDido8G77Go9+K1BJRLL67CRLKDtmUDogd5/yp2dQAAACcBn2RqRH+BD26tG3o2sOTqJd//LOf/0exfYfKLxm7J3qgsoJYfg5gAAAAzQZtnSeEKUmUwUsb/++A44IUf87wFjEcFriJhVhBdOObtZMRA7H8L7O85Qi1Ir/23xQWBAAAALAGfhmpEf4exElWANW8vMvakG0lwp25Oh8MHyFQGujxVcqIRtUex9+Z6rsJYAAAAUUGbiUnhDomUwUTG//vhUdhCkgtD/OKNA6dG1osSc7TN7EVmJ8mHYKHJo0kuzmVJ2OcDMUMCsDRqChpG0X62F+dsmKA1zveQZSPh2EvbMNzsMQAAABUBn6hqRH+I4DsKsOugF12nJUDYjiAAAABoQZusSeEPJlMCN//07ziWU3i/kpvzHFCOn2CcItTi1yFM4xbCfhV3Hl7kKDI/9hW4d+LvO5FlZCITgCrsyrc2JS6CCKYTrtDuaRaAvGVwN
n+giCSu/6I71EcdF6AAzs8YoRZvHfx+r8EAAAAuQZ/KRRE836foBN+l6dUqFIqLR7fdXGqeg/3ZJ/83TbhEn5pkA8K9kzYNkm5sAwAAAB0Bn+tqRH+BB5XFIVzyCm84fzzyfGIzavoEg2o1vQAAAFdBm+5JqEFomUwU8b/5u5inuMF7Xq8EOtQO4YzuHe2WcPkEeIzleyoHzUSRtXdZDfWefPY5FxPuebWlVj4LVrBtGv0RP79YCO7uGRp8no60SRyqygnK4c0AAAAxAZ4NakR/mJ3s34Q5H/1uFexTk3VIjFEo/fwfhqCP78QfAaOXWnodXpVWh/EbwDilHAAAAEtBmhBJ4QpSZTBSxv/8AoC0RB/Gq3N+gqfQl07//0asYn4GjHzgmabjVHeCeHimF9bt89uzqXPNoLLVfk5lK+SEWZBMH+jdIMD7NJYAAAApAZ4vakR/h+Qj8Y/7ryZBazBlCQSff9dCgZHsO8153DG/1Iu47qZ9y1EAAAA4QZoxSeEOiZTAjf/8A8O+HA7nHfcLIGMIzn/UI6nxZeLzvaZpNm4NR++zRTzqakzFTJjhdFc2sWYAAAAvQZpSSeEPJlMCN//74ByBNNlS+GOnj/iYE7uR3e+eaDjQFr415CBPX8resUSqg9AAAABEQZpzSeEPJlMCN//74VHYRK5/s8oAjSJlNV182JSgSuYDsNFFvPml8Z2TC06yUjgNGaIT+MGdO85RZn6ABwhLfk2G8T8AAABHQZqVSeEPJlMFETxv++A44Hc4/O2I/eN0x9t9al1x12ezWSFYh9FLhxu8aTceePsZOcOf/noBS072io579hr6GIKYfVDIPsEAAAArAZ60akR/gQ9vOA+UWsy1Mn3l//+f5lQBPWWuO+aJJugzYNwOMU04nd4nwQAAADtBmrZJ4Q8mUwI3//vhUdhCQoq5E2kM7tSSvPdRsTqN2Lqyx1YynzDOfYXeFZW5mbzBzov4IZCErRmMoAAAAElBmthJ4Q8mUwURPG/8A6IrIENlUdHg1vXava9lPZfzExXzXgqZrbgK0CpfLrF03F1J4CM9W6Uc/jlRwpOi+VZSpqhCt8/y775AAAAAKwGe92pEf4egL4A9rxjL4OGkdU1tP9jDyD8YL7cVkl98cgRXNXC0itKOXK8AAABMQZr8SeEPJlMCN//76z44Hc4/EYYtcgPBH9RgO0Iwi8OXMzKNpI9izDnv5+poRBie+02otvR+9Zng2UG2aLxEiEAQa2EyHRWuU4x0ZgAAADBBnxpFETyffSt6tLJT1q5nVhquTgXJSFs0fGTiWZMvJijPSmPEtnIL2xGLEmLILUAAAAAjAZ85dER/gw5gGJbwM9dZrSXxrWmk7SS7YSzCpHXtnb7SsiUAAAAXAZ87akR/gQeVg87QMSja2dC/csmeH6AAAABxQZsgSahBaJlMCN/74VHYPZq6tmtxLh1sxyPaSRfT8XGQqvC/Q4343DJxZXe1Sjk7F3JK8Ga+iJBjybN1BsuRRnEENt4FtlTjDMiyslD/QE2S+ie1BjE0ye1VekieYTcbeA5uWUCc9oAS0oHJc7DDprEAAAA9QZ9eRREsn3/Spxn+3+PADN8tr/k8dm4t2BxiP+1thpygNS5/1ulcsz7YQxXLmulOZ9bSD8leHIfywjbusQAAACQBn310RH+HvdOD1Xbhj4zYGbutZl5v6HqyOaqspaMKaK5qWeQAAAAwAZ9/akR/gQeVhtGJPhygpFSbn4CX0RYdxtJU+44KMHn0Oo8JLVH4iFlYBJyRuWktAAAAhUGbZEmoQWyZTAjfx83w+eo3udPz/wWGJzqAxttakOUQ5KIzV5rlta9UDHrdCKjDiNALKgEJA6puZDaqChxqlBxSo1lMmdEYH9x6+Iw/G7cBXYYH/vbSuijBJl7Iv1SdG2SxEz7WKOVaXQ7edDngcN7BTtsHxNL9kKKH2vdnV7h3Up4bBiUAAAA2QZ+CRRUsn81bxhrs9aLlJLIhMRyrlgrjTzbZxY1kexOB+js4nC73oqm2oS8KXxFXxDDK0y6hAAAAdQGfoXREf9PejGW7yeA3K+Ashsi9mc5rzdirkZ1i8F7plCYqBBB3h2i/Y5FflOyMyOlgUqYL/G9xIzGl8z+0IWKUH+x+Fv9B7/aREDydtwyQInEGxGayCrl3zIGiZ6TFkkOalquRg4SIEHzOpZAOwkut3sA6cQAAACYBn6NqRH+Gdbgx/3XkraYElmuoUqexzkujzvNedwvPC5NXjL0QwAAAADlBm6ZJqEFsmUwUTG/8A8MxwHeY77hY7su4Js5ePegnvwLeGgOmnRghK+XuEJ9p1ZqTpeEmvxKPv2gAAAAWAZ/FakR/gQeVg87QMT0JOabD8/BfTQAAAFtBm8dJ4QpSZTAjf/vhUdg9mx2D7TmdFz8qn2yy4xD/XK0YWq6bXtpAIDSMH89fnlAnLzoUK41nLcHaRGQ9VYKHwPVgSIejbid4zu0EWxJ7r5+L5ivAS0Wc7KiKAAAAU0Gb60nhDomUwI3/+8Gbu08WB3vy/AdqEakb767uJfNyZA4+pjSLzSTlkccggQWj3N8llt825rufA9wokyOuXzQUeglOEArfFlY1GPqmrtZTf6vhAAAAMUGeCUURPJ97IKW2n8rNmOXQIGJer6mMWBu7bBqazEhQPzV3PBt8Hffu6rEFJycnZ/AAAAAiAZ4odER/h7D5WmcyS/Um4Sed4v71PLKjq2xzxVZO257qOQAAABwBnipqRH+E1Cs20VzmQO2GD7g9FZUkcZEXqdpxAAAAR0GaLUmoQWiZTBTxv/wDwzHBCj/nXShHqN21NSADJU/Jvj4ZToB/1ZTomGKrdxyAf/iOIoo9DdUgNWCwuF0p6LBGjn9Ou8lBAAAAJgGeTGpEf4EHjJ+LiOn4U4Y39SmH/rJxyXApfMRHQp7a1U3JiFaDAAAAMkGaT0nhClJlMFLG//vgOOBCV69dVkGYj4aWcO7kQz+XVCeG9zDp5MQEvFDCbSD/y8jgAAAALQGebmpEf4n37kAo6ubDhZOY+8dpADGCpKbr5jcNPPpTe79afnKaDNgbJdD1QAAAAFpBmnFJ4Q6JlMFExv/7wetWHYG/jzvTdZyGtgH/DXdI0sh+SMrklDv0lY1O4TvP0MR8DIsygALU9Fs2WbnRqnA2PULv/h+6+vnPyCATdnYbIXw/4lIPbTxVj/MAAAAYAZ6QakR/iOA7CrB7i4FdAnsFNG/HYixgAAAAVkGak0nhDyZTBTxvyghDzpi4EXYDnbEfvG6Y+2zMQ/Hb9ovvg/0chC5MpSxOmJq9zFtkv9T+YO8D9ACgwbrIT3dU6sd/k0sYujXXO5r3U7FCF0bBVqsgAAAAJgGesmpEf4exE2/oQsw3fbuNFR/+SKnih1Q5+0sivYgWYAQIdsxBAAAAMkGatEnhDyZTAjf/++AcgTWtokgude4vE93Jud9I9whdUFDZJTfR+J6YiF8XdIzfNb/FAAAATEGa1UnhDyZTAjf/+8PuXdp4sDvx77lN0KgU/fpK8+IINR1opM0Pu4CozS8l
0Ud/us9fntwO4IG0oZYvNBRI2N8a8g/0sIn+6f7QQekAAABZQZr4SeEPJlMCN//KCEPQpIQEI+hBvIFMoU+XSr3pPEb6tM+0KTmzi2hUR7j3nqTXSRaGGG+1IgCzYffvSdjj9k3eSImzEFiL89xBSAuBHuAan1qCm0SbL2AAAAA7QZ8WRRE834Ryfl4EPlupkoiD3UHAn/tyNF9cYKC+bqyZpWmU1htKRBn8v1v9le4Ox3FhOYCg5NB+ZdkAAAAsAZ83akR/hNda3bhOJUZIUUY2jVhVdrmxzmILX98ciz/mLhHdJSmfczV6U8EAAABFQZs5SahBaJlMCN/7w6grIDges6Q6N8LLKBBR/r+r01RScaGlerMw4mNRGZeM8kNv5hlT2J2qEHTq9XsVBx6y0AugLYMoAAAAZUGbW0nhClJlMFESxv/74VHYNZiODju5GRyMj9wlHqF1Lr7pV20VHiAMrHfGA6p0v/Zr0/dcP6yMHwgdbTTN5Pj6VJC/71UkDQHF2SgOkPs+SUevM06PMV4Dc/8UKpkKq5MzAfyAAAAAGgGfempEf4k2KJe5dxcZpOfMZDO0cRrAfBG/AAAAcUGbf0nhDomUwIv/+6w9u1GKETDvOv56IQEhONuu4MWW8U5koOsb6z0DMrcU8frYGb496r+0cR0j8/9YT3bwj6KnqNIzzKxPxz20387LsrGHGueynDGovxAkh6APLCz5JN0en8GfJGJgIpTpHDn4oF+AAAAAP0GfnUUVPJ99biH2ofqCeWdcOycKd3M3sDTTOXcd9oRK3Tz1jn02+G3H+x161bG+i7O4manzYReZ5zgb4/OHSQAAACoBn7x0RH+AbCaaY6JFZrmbJvY5TSCOfppisvf7jhPHXneiHxtzcLARd1EAAAAfAZ++akR/hNQrQyxiVLMCu/aBfsIZgv1fU2C7T3Ir9AAAAEZBm6FJqEFomUwU8b/8A8MxwQo/52bZ9Gsz+sY4O2RS3UuD8X0CNvCqI3uAP4dESewlMKkkQ0sYA5go9F7wlfSnLQu+tTWBAAAALQGfwGpEf4ew5DFDghAjpjv5rOLhqU4poz0tQCo2nvYk1MNk0SbxLDlMLbaRYgAAADhBm8NJ4QpSZTBSxf/730txLjWi6sL/8WkZ0SxdX+xgko4N3jmSClvhOh2sSC6GI34PiVLH5H6DQQAAAC4Bn+JqRH+HsRJVgDVvKzmQczvZIZd7ldG4fIdAbM1M+rxG1R7C8fxWQE6gkEOdAAABb0Gb5UnhDomUwUTG/2eshp/CHKaTLMVIv8yljz56U5Qg3GnKqoZs4y9mnt42jrRlYQiQvaz8jEhuvC1SoxRX3jdtVE7CW2y7mknEG5FfETEOLay9NAEvDLcXz3Kvp5o6r1Ecn5DKuRfL94ZwgC1vfOYsAR70mWBfVzPYmO5bWIXHrsivWz4TmSfJX8BTws++We/rWq+eEIZTOpQXO18Z4TBZJoJNh+NQKQ888eOePfvsvOd7ExGYc72AMj8IB5nA3qIIpZ26bWxdEigqp5WqA3CPmQ1ArglG8O23AW9a0mGskFVKIURyO7SDrMfvuSpM2Ufg2ko07dDeVY280dgvmukE3rlhobBVZ9eHnJpB/rDydTtHtcgUHxLRRN2WPaWUwfFExO2NlPxDNJNoifurDRR1TOE2xSpff831Zq9dXYJqU1zKACLUfsS7gTYxNNWcQ7fT37PoiXtJMLOR39uru4L0h58owlQKSE0dbvzeGUAAAABVAZ4EakV/6f4hhh5j4pezXGmc10booa3ca783iOnUPxnKk5Y3HfubpAwe+uDDln7x2eceSurPShjo5iSao0hBxT60iJArFLPcDKmG1k+1SrnGiBiThwAAAKNBmghJ4Q8mUwI3/2esg9XV1nn6OqvkurteiZEF/cX1g6vwMjx6DR+rv8oeaoyhuEGc4M80j7FCjt+C+gCKRU1EONaFpssJsG4r4wx6++Vl4OFVBDT83lw8NrDATf3vTsciLFGM8eGrhKs8pvOB4zbD2z/wbkYod2zVrCHakD3w/D1EFZvcK9tKs1Nr9e2ax3We6/gOaYcpR2bYNG1etHQuzTshAAAAMUGeJkURPN/smxOo9ZgcqNMHlkhDvC9wqzrKarKnUnQpTSCQZIsbYKJtUJkWtnazMvAAAAAlAZ5HakV/hwRZbYwm85/cCb2d75a9rssTV2GyXh2ERuMyRAZOQQAAAGpBmkxJqEFomUwI3/vEgWa+og3x1dnJ4PoDdBe3/3YoMTUoB3TyU4Ej/zJxdOfoW4aHRQ/I+nHKZhqGdhrBqoC886/vF7jEJplVhBx/pyxBeWOV/VwqfkCrurEj7aiFkmd0eJF8WUso2Z6AAAAALkGeakURLN+CHmGW7/MKreRIEd/8QfLllKWohrf2XGUEjDty3V/zICAlat/0cmEAAAAmAZ6JdEV/jY0N0RG/BawsJgKYUujP+rhnqWcqJ30YzX0c3+Yd3EUAAAAkAZ6LakV/j/S/wWIll3CGdQpmmnlHJJJMDXC3Dufc23w6T4+RAAAAUkGajUmoQWyZTAjf++5XDgdzj8QavWtNM5/v7SWzbYKyz+gr1lvvDdabJmin54HFA6FS5USZmz7Qe5i8GrS/OeBhEFUi7sU8j3do+iQgzZ6NN4EAAABqQZqvSeEKUmUwUVLG//vhUdhErrGwMwT2ApJmJaluur8WZXKPf3cxs7c2sP/Xbjfjlt+FJSs7mUuT13MbzM+TrSACCZSMBoDthPFifoGGtkxEnj+e8h73JprN7phM0yaCdgAO7cfoh5J18AAAABgBns5qRX+OkjwIvXb1vb5y58tHlKJ2YgMAAABUQZrTSeEOiZTAjf/7wZu7TxYHcd1Yn912Q4e6YwUip/A050OaFjhX6O2qQXp5VjUL059lIsYYgSQ2jExa1CDIwChKJheC4ssji0HjIrmDdIWl9Q3JAAAAOEGe8UUVPN9/4NuyfQi2HxhNnY/bPczyhQbbExzc0zWVB0vRCS6njJQu39UWTWCDaVRAWBLJEnaAAAAANwGfEHRFf452ZgVPqCEm6Wp074F/bJrDI8pmAHDsFuIJwlvPUbZo3jO+9e0mbvECpBl/WwaROjAAAAAkAZ8SakV/isvpAhCb6whUJeGpnwag9sUF2UycKFAAIxuGur9dAAAAVEGbFEmoQWiZTAjf/AOiKyAj+gnj0a3Gva9CNJ4js1U2x0aCm2qxyr5dY19k1pZuImuv/8peW/h4jD+21kSZLMKNT8K6Qp1NQB1Oz3dayRj8a04cwQAAAFJBmzZJ4QpSZTBREsb/+8PuSK8cB7lWURgB2UuJkbbsVfmYf6uGo0ihSHCGRIUeOkGwszI/RafeeJLVFb72xB56AeE7RHwA6KDK2FMZEiHn9cDBAAAAJAGfVWpFf42Vo2qm00Dddqve1Cwh67h8F7hLVO0VUOGCP37hFAAAAEtBm1dJ4Q6JlMCN//vDfCqSMgQ2VRj3pLESWqFJI9xvPW4RklANZ4xZHF71Z6iQq6DzlYFf0+Q/xmZ/fOpxmMDAoxgM2EezYMd
xGYAAAAAyQZt4SeEPJlMCN//74DjgQ7ZH1LvBkl9O69N9KO37jJz7QEaANfGvJaL8Blb2Um8UToEAAACFQZuZSeEPJlMCN//9Y1z/dD7lReDd/jih+Ld0YtRndvMbeeMTxfw1CN28oAhAs3BFnwK+WZTwEg2f//cT8FjRJI69DXHiU0hpAJ1L4ZcWUkQNn9/F3WTm780WFIBn83SpljWZ8Vra05F6qUNyjAMjVTOd4yHsZRD4BpeH6ERAEEj9uWg3dAAAAEhBm7tJ4Q8mUwURPG/74Djgdzj87YkIIqfbdVCHiXmoEJTcLeAuy7kDg1f7oEo7JIcrQiC/U+8t/yN57HX2mDX0MQUw+qGQfYAAAAApAZ/aakV/jnhoov57JnRJzUQ582/A+KqjcuQgc99QQQxsklKC4RUIecEAAAAxQZvcSeEPJlMCN//74DjgiVvqHRYbDnew3k/WoU8O62m6cW1f4z4f2KkM/6uwtCkoygAAAGNBm/5J4Q8mUwURPG/9Y3sbYDUySihk0ZwYHPZc98GT3bkkuNK+kxG9hw6QhCPLCTOPu0cTsYadgUqI051fWxMxsY+qGfVFtfK/8hGLOVkVoaB0SjGYoDGFSY+reuTO1+te1tMAAAAvAZ4dakV/j/S/kR07/7I2ONOQxpj1EA6tGW/N30/w4dgSvirINWao4hRz+KM4mYkAAABMQZoCSeEPJlMCN//8AtC7wRdgH2cIbrCoMs/ZXvDYX8DxXmXaY94n5P78/TKQXUbvgpq5JuPge7/U+LSYd+1eMFPL7ErSRU/SQWbYYAAAADVBniBFETzfgcsI33hBxsPekc3ZDBzYdeklj7D+yHKGhCeLw2I2cmn91BAltpq2zXE35Ex0sQAAABwBnl90RX+JF8eO8NhiMY7e3XCBBxtbicm//Ab6AAAAGQGeQWpFf4cEWNCU5dbW4vKx29yJfhj97H0AAABhQZpDSahBaJlMCN/7x1VfiFwgJBqttUqkagOHWBpqzu3I3Z1ug1iDen9RqMLuWwSJns/FSjmbZC32wpaRGK22xjoTIf8DNdWNE7Xda1dD+nvhdspBUZZ+XrBk1Hqe2E0wKwAAARpBmmdJ4QpSZTAjf8FqjzHg2B+Dqry9XK4oFY4kpye5WzL+gZiytMQXb5+G1o3IxzRZhxW9aZpjLbUsacNdqbjHaYZ9CmpTlK7FK44jbJ1larK+ACOQSr42285xajESkZG6raHieCShaT5/xNHmQa+kW25//2rjWsefjIOVuKFkvGla54JhR+X5yYFqm/bC16PSKR/h99ZwXzxEyHSds+3pr2CMYM14vyO0YKx2HjFF2rVw+hoZHoyQ0JcL/ReGkwZ2fNhkLNreyaduEAjHZAAX9t8IFb0Oz30qFk8O7KgS9ZdqStAL+KUDJIhD3HAAp3xmSs+BqTgOrQ5PS+Ace+fz0i3bKIPCU8i8AJwZcWiUwBhyv7knwrO1Y4AAAABMQZ6FRTRM39foEqx/3JF1qqZd8W//5kxNE6DYERz3YbVOw/1jyVC2gk+UmHuYnrw/mUZrr6ik8rqKlUQKXKQUw5GLsaQyVoSX08YJgAAAACoBnqR0RX+Gfrvv7yJuu0tSB+P/+h4yK0TNhid5Xn9+DciKgmECSbWRRssAAAAtAZ6makV/2fIPxtyYTe2F7AUed/9kq/9NVDnFjpKyZwUbJZWypoABH7rTJteAAAAAnkGaqUmoQWiZTBTxv8gMvylDZaeIJIsMYkC0ZGm9WiRczW/gYZr7/QtVjFGHEuTg4I7VYpSYfgz+C64TnuAQXBvRwRlD7R7BrUKTMlAKgwmktl2YT4IHIivf+PBMSAKaN8pB9Tfiw85AUHSJGSwlVnsXlyHQ+LCtY7tNz+KOQQRHBQtvFemUE7/owFlAZAQX4ZhH7H8hq2bMUPNDHTHxAAAAJQGeyGpFf6zmbbzG6yAo5agB815PT4NIYmF3dRluJ6VKQoZFLbUAAACKQZrLSeEKUmUwUsb/x6mJAIU6OOaejU6jOCEaJq93ye5SDx3rwHIMSwuPbeEPkFOdU1+52yWbGid0yNY2LO00ed66IioKOPe6doVimRketNB1H1k9nKGLy24SyswHs7o8zUFs33/dimS2jZD9vA6qWcVwUdD9loJxKlVTdWWCY05iyBBnRI+HStFBAAAANwGe6mpFf9DFBS+peC/yI0i5BOXZ0Kb0Jtornv1yHk4b9Hwis05VfRcTVsB0+56ooWThOraMfVkAAABWQZrtSeEOiZTBRMb/++FR2EKSC17OilzphaOnJfb+b2v5DMpyB0LE4N5XbFR41vnnYVmX71a1TgMbvI4lkgGITPRwjYOjeSXOpCDC8xaYEVB5J45bl6EAAAAXAZ8MakV/jpI8CL128HDcV88rEWHEY1EAAABRQZsRSeEPJlMCN//7wZu36cWD2WN+3hTaxcsivWm9AgbVBHu/qp/N6ueiOGMMY/MGj/ljaJqYSQxBu7OQessdVRPOqKh231KwV4eUePFzdTULAAAANUGfL0URPN9/4LLaSW8U2BK02dj928jKfghGK1PeCa/SOciAikbQHsE/9VxvCM+EdP4P3D1QAAAAKAGfTnRFf4Z5voDwlwQuL1OkS/asB8/ACxEz3gJmWfQAj0g1qyhfBTkAAAAfAZ9QakV/isvpBhS2mK1fAdwbtVb5YgmyDpirrorcHAAAAEdBm1NJqEFomUwU8b/7w/gmRt4SJHtq/4CnNuN3iEw3xnwnXZMEbxFj861j5ny0cBWKgDehupwGrBYXAeK776eEXeWDNQa8lAAAACYBn3JqRX+NmH8JJKZfXQ2CWI5tPFL8LiOYvVF01NUI0yeIVFZaRQAAADBBm3VJ4QpSZTBSxv/74DjghR/zuyzH8Txd5XruzDOHv9RKkKOyGWsN/oiFcRlf2fEAAAAsAZ+UakV/jZiVZ6ef8q5U0fD2jgZreGAFGFOTGZSMcvYdOVWw2qIqwnqo5s0AAABPQZuXSeEOiZTBRMb/++FR2CH1+D8+1PRw2eC7cPzHLlSr4N5kdbXkTkaLMOtNZYDvN0G+BcnrfD919fNbcNCPlQajdZJesGGRw6UvTyq3mAAAABsBn7ZqRX+O5cRG3XQzJOw4cZkp7na6PmOyn1AAAABTQZu5SeEPJlMFPG/74DjgO8yArnokfcqY+3z7vgO3k81KQrKISGhM7SavcwZDj+Lf/9hb+KMuOUdKKwHApugri91Y0dc7DkP6jYD4KNCu/ZrJqskAAAAnAZ/YakV/hww5FAKjLlfq6Ljf/+1coHIsbogAeQqZ3SIn7qkD8EJiAAAAL0Gb2knhDyZTAjf/++AcgTWtokUyBqD6lhgvnfnJDd8Cx3z95yizbJFdyDlHg9jlAAAAT0Gb+0nhDyZTAjf/+8PuXcpxiwPb1g8GbiEE9IET2gffzB52f8EdNr3u9CTCJQDIJOrkhycCCgNaBG2HkcB5oPyRivqaDJ0+sBXnYINEToEAAABZQZoeSeEPJlMCN//7w+5IrxwHpn4mcnp3qC8i1PB17ucNb58sAdjpq0E8ox
6I6nyB4tsTME3vaKjnv2HOUOKh1xJET0cCW0OPsSokk6kSsHBKYZjYC7L8lyAAAAA5QZ48RRE834ISoy161qatNvHcx2P0v+2schVH4rQF94qpR/q56ZD/z6y5NPUUhWN3Zgno8YGf8JZBAAAALAGeXWpFf4/0v5DikvANg1WUUB+NcTU/sSU3fSaYjuWP6ked8ltU06WZ15X9AAAAWEGaX0moQWiZTAjf/AKPrvBH9CwqRiXu1U258fBKSfqjqjBQd2g4Hb33Y/0j/xa9Lykql+WIXGWh5MAUZG4/duV0oRy89osMMauqyiZdhBQPluQQB/BxcR4AAAB9QZphSeEKUmUwURLG//vBRkdJOweuoGYmpH5xFZVLyGMWYq+kIwnpHpVXYdaSzJAWmGTPK1sFf6Znkp4MDLWKTIwxvdVJ52cC1rehXCiJGg1PAy47lCc5h0zQ7J3gBJjV4y3ubnbO8ylb6w+rjLP8+xSx/E5wYWiF5YEZn0kAAAAeAZ6AakV/isvoz+8v1Hb/P0q6S2yOKt6t2F+aO/ZDAAAAfkGahUnhDomUwI3/+8SDUbxwHf2NVhEpR83q6f+XXT0emIrLG/GjTA0ZQEsuf4ilRudQYS+kcaQP6NH/n47oeDxDgr+9I1JylC2mc1pGpx+CT9kp21tNNfIR9M3h5hKBoKhhH2mmDCHWwTlWZrU8XQRkROFjiGHTXpnI2TQPfwAAAE1BnqNFFTzff+Cy2kloXh3IfGPf7xzw0VsS6ephFl5SkLNj4sIWol/tW/UCeEA0PDXCAF0fB2wHJQCr+HvgtkKvByBuJ7pSQYYcBBA06QAAACsBnsJ0RX+GfrSKAZRt3+eUjEP/tYzGZCzSsy+iSHSWdDdvkdj7BDVckq9AAAAAHwGexGpFf4rL6TwoJvceXyel974RIYNEAeX014ZGez0AAABJQZrHSahBaJlMFPG//Xblp7QgISvXq/43tYQswnlCiPCtggpbgu8Gcd0XnodgB573BnhWBSscJCb+QLXOkHexkQ6chMI3P2elzQAAACoBnuZqRX+NmHWKNnyOWkElh6MU5PvSNQ8zujRaeaaC8dcoGksLYZPxasAAAAA1QZrpSeEKUmUwUsb/++A44EJXr12WY/1R9NcvW6Qg+sKr/ahi7wg47WJVdC+uliZS+rcEC7sAAAAwAZ8IakV/jZiVZ8+203fAhmP6SQ+/2wesW6W1rbx/GmEeycqK/2uzkQSaEM7KzV5AAAABAUGbCknhDomUwI3/wSZXK3peGISKDGuEfaX/ruGdURoMWCqwPVNEpOv+ST4slfLn0vfGnqFijtJwvKuTeo4gMwMlZ12WAx97eksVcC/xZyoYY8gEMuJoVZGXraJXsn2AWjQmljY++Yn0UGbvnrgKZ/T7K9u68fYiopDXks1XdSedLDYdCPWc87cOG2LaWaYKA+J/Rcrt93JbbJphn/RaqafF3b1J+VrUGnuLXN10mq9Ji4vhD2gcCVDgU8QP7fngj8k++0v6zvjDx4MM30Unq0Ro37cLz9Fp4dYmyVWi9EBFqKvmSgFrSgSu7qNhL23/SWg/5DKBGi7SSMBZOnX9WLFZAAAAs0GbK0nhDyZTAjf/8pF6MHN8xvhomJb9VTbd1oufHXVFvOUqiH8wtZul84FilJh+DP4Lre0QjSMquDwJB1Cd2xjlRf6f/EfPHC1oSOv7JiC13e/9H5Er1hNYw4FigKpqQ6OH9tzFrQITEYT/f3gAuicOt4g9cvzkdFsMYBTJT4tnA5QEdXJC8nEAXWHq3Dr4PL5tNfHtaPyUqNoDKx0fsKN4D/mNRoRUv/gxlV5JxljuF+HlAAAAnkGbT0nhDyZTAjf/0Y/fwfBPYIt2Ih+oVDWysEi1BfcGsNc42DZ+iX7NLlxL5cxymUJMWBhkKF68OtNiy9cj9+WIlc5Mhajqm9tEk7Q5HF5llNkQ03EIGx1nktYjyHoOPCRnfHGTijsZ7HClYxAmgkK+zsXNRz61pxBrMmmixPZw347116XCKl706m6iOHYY0uNsJzXzWYUyQe9L9FINAAAAMUGfbUURPN+sXvx5/CiDiEacNaS9U6HfVvnuP/a5UQJlGkFDYR2J/hU3JICcol6Lc4EAAAAnAZ+MdEV/jylvx+CPXBU+8gKTE/8VIElasIGY33llIOtASe54iQcmAAAAHQGfjmpFf9C2JxDQM2FrtfcernQzbgQo6cOgAiXIAAAAPEGbkUmoQWiZTBTxv/vFEDSrIEiP+dm2fRrEThE5lxbkgROmjfPtCbfhnR3G4ID1iPeJh7HY3Y16Odh6bQAAACMBn7BqRX+NmH8JJKZfXHhkLwpdR6P4XTfNRE6MatJ7PVwVpgAAADJBm7NJ4QpSZTBSxv/74DjghR/zvAWMRwWuIY0ILoghoK+2U2wGoNOA06nWHBwAcUpagAAAACsBn9JqRX+NmJVnp7PpUa8FkOcbCyJNunfz/M06nD3e8NZpyq2EIfrhbd7BAAAAT0Gb1UnhDomUwUTG//vB61Ydg3N6LfuunSF3AkvNLoQBhoTQ7aoVG6tsYzS+JiPd3mYuiPkzCyWnK401zAoTSSFfgBeqE3OpyvPlsMfUq1EAAAAVAZ/0akV/jX4K9aDUHTkIn8NGrTJxAAAAOEGb+EnhDyZTAjf/++A44DvMh5EHnmI+0qZng7g7n1G6u5/Aqyb8TemoaU8nwXBzvg6L4ceEadnwAAAALUGeFkURPN+D+24e0TinWPeueGoG4sapy+ObB3xDR00TdCay97Igcx39n3zDVgAAAB0BnjdqRX+HBFlCUE3nRNtGVVInyej3t3vz7uZ2wQAAAEtBmjpJqEFomUwU8b/8IvJDICKyxCcdzT/mboi+flPNWjFbInDCJbwHXv4CBeLXeVVCSD4rO9Def9jVQKiZ3a5b5Bie9xheg2SLH+QAAAAvAZ5ZakV/jY0N0MOVPt8h6WcoZV1S20MbM4DsMfLQUbNE8rolkKvz6B5iwJ8mvWAAAABXQZpcSeEKUmUwUsb//CJ/zICOuOx4SMR3pARtPhD3XWj3rBkO6OrvomzLoOGgKYE4i0wu3UHRLX77PtaCBTI4Tfpc3gtTPNcmsPDp9zCw//wDxv5JJCJZAAAAJwGee2pFf4x17dolDZL3wxmEFIgT8nhorh3Yq+I4oAacvjWdHciZNQAAADVBmn1J4Q6JlMCN//vuVw4Hc5yyvTItaaZzkkkNHLJkI0HEoJdIeD3dJbzkXmvtq7jUQZd45QAAADlBmp5J4Q8mUwI3//vgOOCIxgOqHGQ4dPM9u73R4ALKr9BQr/nU61eDXxry868wQq2NQQB2Cw7y8YEAAABaQZq/SeEPJlMCN//74VHYNYZ3DjtyB3KfJ/ehNVbvQ16Epn16L7ahC4CDOiOWjEm08H+S+lGobDN1/4ONf5WIIOky9RhRHLV6H2eUZ80HGf0olbdDoIPuaghAAAAATUGawUnhDyZTBRE8b/vgOOB3Ocvj1S4/IKY+GUDepFZA7JQ6D5zJgHdmU0gV4AyG3PnUUlwhcyp1sj/2sUr8E
80bHPfsNfQw3EPqhkH3AAAALAGe4GpFf4cEPpZ+ayGHCrzmIFkFJJ5iOG3d7ko318enKcteEWyK6vDU0azwAAAAM0Ga4knhDyZTAjf/++A44Ilb6dcZ4GoPqU601qIovFnKC7lkIeBkBzqp//sZLPSn67pyKQAAAExBmwRJ4Q8mUwURPG/8A6IrIENlQqhLiwoaJhrnWZk18jQ7ylRVYz4q3Y43ERkjNUVHICbJU3W2fPPQUmUyKqzovlWUqaoQrc8y075BAAAAKgGfI2pFf42NDdDcJbtDdK7SWJH5HwS2s2t/lM7DAAfmUTKKRr+T4pF7/AAAAEZBmydJ4Q8mUwI3//wC0LvCDPTwaCjaXZ2btf9j1UTqkuJrlFvJUAop1+VQkGMNp2GtkRPC5V/i7vZhSjenabTSd4ZI6Z77AAAAHkGfRUURPN99TaObewep0G/CenyT8ihy+yfmI16uhQAAAC8Bn2ZqRX+NmJ1nz4vAdigsChyYtloC5NV8EcFNRJflhn+2noyyo44OPhMN8LgKTAAAAIpBm2lJqEFomUwU8b/7wetRFII6Zm6QidC0oo+c0Ns7riq1h4ZBWxZZD6JzeHG5pZuCUPFJCaRVGlMRP6GhKFXz9x3DH2KAgdWA1aMZo1pkrtviY17lc9l1KlNpbFrOjLf/sY3BYOhEQqMpFO89k6PKn8sRKcq75K2ccdT4r+p6kGpG4mZrOF/98uEAAAAZAZ+IakV/jX4K9aBpZl6QE2eOe/h3YVFcgAAAAEtBm4xJ4QpSZTAjf/vgOOB3OPztiP2ULSp7XrKrWw3l4Bc6H2W69Q6oSoF8O1hPrRXjY7we9rAJKPYETi17uxoi3v25T98McjvFdpkAAAA0QZ+qRTRM331NqbcLGtWbMNY3WScBZQDj4EVhJKd+eBPd5akdHADpGT8hVb2bsA3vvcUMnQAAADMBn8tqRX+HBFjWKW0eRGEWy1dqsM/Zh2+r0j8TMNGs/fPMii2zXF0N6KhYyAEYzOLy8UEAAADpQZvQSahBaJlMCN/HqbEHRtdjNTKn+1Ks8Fd7U1CyW72yxoAL49N0mQ/rxd6hIp3QUmPFE+CBT6W6w2fg+GvkDoA5rxlfTH5iWO7O7X1aIQrUycIpZ26bWxdE69CfvW5Ump7bO49JM3Ojp1ba+sFXZJgF2H/6c/uab0RGBZJFVPWM6/2iD4z0L1ee78rMROduc0TjmUMv4F5e8iqzbVmWZxa47HR2pX34hnX7bqBD1Y05md2DIEIMx7XWDS2zFGguo8fbZRpICoATJ0iBpYXY65NY+onyXag/KvGTn9dqOkIZN5bgNwodskEAAAAwQZ/uRREs39BT+V5BdRxLLjvRBZ/8QK0LAnU4ryUT3nxiHiAhHRZibTVIPm+0xrfhAAAAbAGeDXRFf9BX5Uw8x8UvZrjTOa6N0KNc735HzeH8dCL7LysfJAtHc6CJl64MM87h+XsJUfWPW3okcUNQiLbRiB7rS8Stpkk7rAmn2NBtaNQkEAWDZlBKfeymcqvzqnPiuE+on+yiIr3BB71tTAAAACYBng9qRX+NxKann1KePz6kv+QI+9/UjpccvAs8AqKrEhiFftqoFwAAADRBmhFJqEFsmUwI38oIRG/MCA7nH4jDF8og0z98ssIrvm0ivszdpDx+Nnn5Qb+Dz0hEkbmcAAAAj0GaM0nhClJlMFFSxv/HrNHv/cjBycDl2vih6keQw56fCiMs0YdNmpp7imPBF1y/lLKBnxMxn3M9E+3yBzgA4qoPmgxAa8dNdml0VORbm0KUx6CwEuL013ZY0JgiO+ELoSSDqSlaWOxREn4lWygn7nk/erfNPMDpvIC2jYz5OlVHNcLkNnKjrSDpnPb4eSDAAAAAGQGeUmpFf9DEtYTuNuuc8MFIfiTupN1BdFcAAABQQZpXSeEOiZTAjf/5KwLantfVy3VySM0SMHvsr0YmBrv0UL6DlEtaxQQHzUXE30YsdNChYm2O6V+hEk4F96/91dLwu/ndfGK7GATkcf47DIEAAAAxQZ51RRU833/gstpJbxTdFgmzsfvEjjhs3dxWp4ZZ4kPG8NZeKU7GYzN2OvKw1tlnqQAAACUBnpR0RX+NmIM6D8IxZFFGEALyP/EQHxDjOvu6mfZR+X/XdtvYAAAAHgGelmpFf4rL6VHGE31hHDEGQjtVce1yEtA1rtWqwAAAAE5BmplJqEFomUwU8b/77lcOCFH/OzbPo1iJwiLqg8angbnwoKqYau4/Tr0/Su9hg7+dXmJB7llgNXjWmAvP+6nn9ZyA2iVrXntcHxIEctUAAAAkAZ64akV/jZh1ijZ8jl2T9B54JFoOBhttQMJEZVln9G5lKq8BAAAFQmWIhAAn/74fcEJGW81yyeQ3yjIJuOQipq5DurWEjnW3UorRPL+Gmp7W/eH/ZijezhQ2AIPIQl4aHdQM9A6M5/MKi9xGlN69wV4R5VA4TQl0Sllg8188tv6vFr5S+P9cmRKtZwlxsQrO/PUH7ZGWYYybHVXFlEMY6Dl5808LYw/EiDr5JaoSWsHu4ebowTZo7v7qaSld+s+T31N91+CVDr6kktqJbNFOPAo3BvxQkltR4XwG+/rxvXbxkWnnZ4k7z5WpzDWYcBFPvUfTgrd7n6SLHLTp7bJf8LFMRC7GnoqmMyaPAzg75u8JidsVPIR29wsvpk/Mt9R6nnF5IJistydnX1nYc673c2swNm0ubNLgupd+k5ttRQ152rHLvBca63qjaEYz5bO6uRdZGmNbrdIR9DOFJYzEJ3T0VRVhLzRbgQhmzINEkMybfXp29t1bR5qya1mRWgLdBC5l3drBLQCcVoq19LY64FqmtYeURGFQ+dxCLISD69Irv9a0uyOxS7lVS3qeo5/gPB9YhIYgKQn6bpUQv8jq9Ep36A1DYX5VFcN9XqGkifkeY7ZbzJGH1tSe7aDoRQIiDO6HnmttmwDbytG0SzChvNEtecWmFp6+Ng7jqZmJ+LpXMqaVGcBbl1ehkdm5Vr84GAQKEq97RhEnaEuoPQsiUhi7y5CzHQA/q9/VpbR1zqAOP/78KNEfdl56fss7Y4UfTIZBj/Ef7ScMR78inLAl3j2HLP1BrnE2c1Zfp3fmi51t7IntWomhhRLlKfnfJrB056GmNbnqvtmZCvlymbrND2EqUvjJ1EJ+n02dodaM+1EsP8F2MT2Y4tjt1aBsu9gTs10CfVny7PzZrIe7ZNf33r8LTwpXSowlA4nQt9opFQ/ZJxaz8Upq6XgXe6Uqzu7x6Z4G+LEevAbdMu5y5ewVRMuV7HqXwlZX3FmRKJTwfg2u/onfLxKJGGPlejg4w6xVcIvQWpCCY94rqqRweESq52TT21mJ3ZWZPYd/bW9E4HoKBWndnm57+UuTSHIPQ35BT9W1Xnl9r9CViMvIQ8AUlOp7+ZZmJtCkSP14cMcCJhNiLGBPwta/Oyraa2eYe4xSkWML2OCXs+aPKBwLcn9R4ao5D1S55sxD7n6+Mg+z9jwufgfWX3u9ii7EiGIFM7wV1HcRH3cSIQZVgHx3bNHeN9Hr
Mdz5R4HKc+XWQ6nfPa5qVL+jrteHlNOWPkjRWBZgYim28+DOXpSL63VQKM8wCalx/7UFzpmPLYi2k25Tn4L4RY2kTpMyq852ckgqQ/JuHNjmCM0OZxxusMdxMp9ba0q6z4Fov+fjMAzn8vKAAJKzFCfdFJg2MeZhfwlKaitFkiyEY8mnat72LBWhX8y2pFqa9wBfrk6vSpsSIPf1ItXOV5dsbkmQaOOvVHpr39w9kGJ06MPgDiO7B/qm2Bs3AlNEuL+WG8zu/vIRJdDz+RTRIk7vLQr0Rq8jEQWZ/euf8FD6BUNHSXZGM949sWmTK0eoMcmoTzlyGGCQgdVdRYqeVPDF7SHBzjIXvdTeaPeF8rdbHCdMCUN23hsQBtwTi/WxUcz0MCULeSfoCuRD9EomV9UBsyxfCUqSYYFieuAbg/IP3y2xj+T/9E9kqvzPRPQa2y1WxDCMsnMIy37FnZ/7LXXOsxfbVeew3U1YASM9XkT/y50YD3lR6WGJ8MYfYM98D8exb8s0epkXWDnGeNbeKqayRpRsYTvsgzojgHxpi5+wGkEyHCLHk/j2PjOYfUfWmKhOwj053UqWFuvky53Z+AuX9yWhjuzKAAAAOkGaIWxG//vEnVTkBINY1xBnYqV2GYveJxG6pO1ytvZdPbEXt5LdA7TGVX0iYnM5z2Z+J5AJNCh79n0AAACQQZpDPCGTKYRvygg25A/mod1FrlSV9Bg4fSRIk5ApsXKnR4Lw1hYENtNp2IdrhL36xanS2cwpV3ghaBQ0ENK/+61iJnFzrglvYgyAeUPfvh4+H91OONFJ0BQNQP7S+T1dX3IH4RbbT1dKFwTvZ3ZM7M9HZq2WxWBFVnr9ZP/zxD+foUgnXU6TFFBNN/llQnxAAAAAGQGeYmpFf5XT+htGpxm+ttzhrI2RGOltiWEAAABzQZplSeEPJlMFPG9uJuUZbUPQJIkFcwKAtahG4ohsmW9O7pJ3QXEf2moUd4qKRCCL0ZKGjcHbIVUkGfh0CDhI2ibslmbDuzOMAhXG/zv/6v/5h1pF6Fnk4OU0BdEQ7aG8XM909Bk3XHm49yHVcqbBFgXZxQAAACoBnoRqRX+RRVaIVD1YrnHAZf+m1ih7YZmVdEVTUohPyoJ0YoX7wwFZW8AAAAAvQZqGSeEPJlMCN//74ByBNa2iRTIKCh6OJLaoh8ps3rY7B+aDjQEslblmkDmrpFMAAABYQZqnSeEPJlMCN//7/0a5QmmqHJOeKUzB2eNo5H9glW5oLygWzEVYpridRvXn+2AASXaouZZC4r0hn95olSpqHwrE6msLwa+V4Ttq+yvbjDu1Y6/bkwQegAAAAGFBmspJ4Q8mUwI3/8oIQPYmOP/wRrPxM5PH+fJ0mFWAYBFh1Pe6xJT8//aEn0KlIlhE0/6putAFmsffwcLGQqaabW0OMe6iwCBaM3Kq2If4TnEQ/lSd0QI6JJrQELGDib/jAAAAO0Ge6EURPN+Ecn5eBysgKBkhDY8qsL/25Qam4wDRxrYU5Di/5y+wAXkIetqyGrdnqZuLCcwFByy7VklxAAAAKAGfCWpFf42Vo24OWiO+wC6Bf1+vC57IXLwDKAPdiUIokWez79lHX4AAAABIQZsLSahBaJlMCN/7w6dR5vCGznNtXd5bO5Zy4LxzmfFyTxf8VH6vp60ELz2qk/FRPVhaQsbXkKgrOqn/+1roIltiQuL600R9AAAAakGbLUnhClJlMFESxv/74VOAECNfo3TrZkzqu0KkrOZ9CVw2gjMRNMj1E9vxW5V1Ecvm/9qH28Rc3ffR5r+zqVMsLdHEwsswCE/8g29m7hFIyoSbEObNEnebhqoXvPeZCDCweyiSrLwsRskAAAAXAZ9MakV/jpI8CNGNvrae4VovZPXKoXMAAABnQZtRSeEOiZTAjf/KCEDu+rkuXwR0ltpBueVY4aihZ6XD3gGzDCyVqxs5vXh7SVn70nLyVRSeCD7n1+k76tXWOU8Q8cvlYby2Hnvldl8uqwEjayKEwX50N3/0maF6ny5kGq0f7Ao02QAAADxBn29FFTzff+Dbsn0Iut+RPjHv9yMXPXi4QWBtUuz84rCf2ne6MMkRmFjCTOGYGkE76dyp3SNBzeilrswAAAApAZ+OdEV/jZiDOWM1Uy9Vu0nWL0V/4HEe7fSSKWekx5hQYtiC/xgqvYEAAAAjAZ+QakV/isvo1lhN9Vf9YXwem/sU/GB4tK3zn/fFUPpEfgUAAABJQZuTSahBaJlMFPG//APDMcCEr16wAPFV1G7zGhko0fBO1LBEbSo51CNZnpYS3MHF50L4MAKNELOJvnOk8fJx9M+aEnAehC4L4QAAADcBn7JqRX+HDf42NiA1e6IoNY9dd0+pse3XDs23QkJmYymj6mByBQBlbsNBH/n+C/Zf0yypZhu9AAAANUGbtUnhClJlMFLG//vgOOBCV69dm+FVLfTWcUGQr+lSqCflwO8IOO1iVXQvrpYmUcqC/Y4LAAAALQGf1GpFf48rV2va6Z6CX7HZbnuYgcin2b1jZsuvhj8zrWfr2ZgMZiNJsrNVEAAAARJBm9dJ4Q6JlMFExv/BJu5KzDErXKYS1RifWXWjNyUkZ7LuAhIUnAIaOsVEMMQXit7waW63wzdcUIeRyXc0ns6O4xhmDwZ3ukGLdi9EU7w+8z26SXTja3sGWB+uU2PQaFr9su4rpTbgme55GdSV5Hium1b/ZY7wn0ziN7L7ptbE80Ax28UKDugysLJcSwfJeTU+FsCjXuJC35Kp+sq+nt2e0C5pikCgY4m4RGYJRGW+kGDU+yhW9YEtPxxAt90wlj4Nc2+02VtndN2Z1aSOvivKWQperyfJK+RiYH2Ua4M1Mbvt+F/gIq/S9qIVS9LvpsrQ+7+gxnXyJvs7t2yv/J7ljPiE5MYPyl8h+jNIjrv8R+MhAAAAUwGf9mpFf9Ei2Luv/RNJcaZxTnDa10UNbuQPAlqODVB2VxsIpmJnfjPkMJPHAq7sCkJAYLQ9TOcWdYY/w4T9TToTqOJl5XsxpaKH0nZtSdWfNN6EAAAAUEGb+0nhDyZTAjf/+8NYd2niwO+rMxV+zWAA+1JtHTaumj+P2U+PhhfgTzByUQbNihyr6FdaXSQST2LLyoRUr2SQPrdTZkfv31QscDSS0oRnAAAAM0GeGUURPN+E8DKIQ4/14uOVqEAMADp0zauP0LtNNslqo9KyDu+ewIdMlXYKjxCCIZ2RgQAAACYBnjh0RX+NmIM6D/Z46G0zEmw183/eHh4odUNrt8tMZebEjulWUwAAABwBnjpqRX+Ky+k78Xe04HkuIKPhhbbazcKxG4nlAAAAi0GaPUmoQWiZTBTxv8epXycoOFMLM/d1FqVzTzgCDwFATJBP1U6ge28/eu//0BsTgIFsoE4M0gMczwnHzJtkURt64dKffbzBHukviZ6wJ1DD10/gHeIjw54CCzmpIwySvy0eKrn1sxtgjxrAjk40cMg2w3WH2SwB2gwRBdRsg0MrzvNgn9GcNQO1ZTQAAAAnAZ5cakV/0J4+WxsQPcekdWueGIR
EStNdZlzFmXh0KEniThU6N9wlAAAASUGaQUnhClJlMCN/++FR2ESuhE+RatyVDTTzS0MEydH3+Vpq2cFMFPi5QKgi9czxuhaGemqsD7/JcIUhVdt886og+ljZbrvCIWEAAAAiQZ5/RTRM34OpqLzLMBpZQ293KhCxlhbjxhPOxHd4q2TqwAAAACsBnp50RX+NmIKsD88pu+EZBTc434H+uuuvMbHJAB0tuUvJ1rP2bn0fEH47AAAAFAGegGpFf41+CvWgaWHE5ho1aZOBAAAAOkGahEmoQWiZTAjf++A44DvMfnQGvr5aabtMPFRgZrO0PAPTbXug3OYp1ediQc1+3GfuUR2A+YYULrgAAAAtQZ6iRREs331NqcrGn20AwIJmTxeb5XHCMknUBifcKi/SGXaUJ0L8hQfqmM7vAAAAHAGew2pFf4cEWNYpbR5EYMikZF8nmrBIkdnpZdsAAABOQZrGSahBbJlMFExv/AOiKyAj+gnj0a3Gp6zI2CjNAAJIWgyCThvil0kvhxqkZ6Ht1BxxqXfrjYLInpyi9Q1SHRusH5zdhyC+Nu7rF+RwAAAALQGe5WpFf42NDdDDlJ55Cx+Tl+h4NevQbGGML2wsKXdErYWj9A80AousAs0tNwAAAFhBmuhJ4QpSZTBSxv/7xrXt8jlCNDUuxflUiLTs/g36A9cpqCK92W6XZsUT9pVvqv4iI3XXl/siKAd7P64Kbe2lHxIRZwTXchYXnFFv6PVahPdtBRwdIEdgAAAAJwGfB2pFf4x17dokp5DlQDbrpLgfa2hCiFMmKnp8wA05fGs6beVH/wAAADVBmwlJ4Q6JlMCN//wDwzHAd5j8QdbEjP/MD54/nxaJsM3etRkV7PtoitdWOIiK4oua7/OJmQAAADRBmypJ4Q8mUwI3//vgOOBDtkfUu7/INxPIL+S5L2s8+0BGgDXxrzOfaBdvcLkOu9kv7y8YAAAAhUGbTUnhDyZTAjf//CIBv4QEgt0jB6sYApnNKebF3UduVNlxp8UDV72R8VAklM98n5lOWEw+T8M4WRe//ioMQECXsBcH5k+vJghjfT5jqlmoeL/l20XeVcwcUIuafvxnJkzhBlcn/d9IuK1j3bCY8QS0oTVT1N2fu8R5Tv1RgAbc5tBoWuAAAAAhQZ9rRRE834H4ctY9gURT1dXKSAIxLkivCpURU347a3c3AAAALwGfjGpFf42YllmapQL/etyKspbtOBGV8f4EVMMX0hi4K6l6e+YACOn1K36gQd74AAAAMkGbjkmoQWiZTAjf++A44EPraJILnXuLxMWdODvz9zlGHYLFuS/WCzov37vtypupywZRAAAASkGbsEnhClJlMFESxv/77lcOA9jsDFR8Xdn5jdZFKfM583oBslJ49SZxnRGp+moI9oSjotvQRWZD8gOf558xXUMpWRXJ/cWn7ZTAAAAALwGfz2pFf42YnyFLdokqnTK0mFdGbVl5+BNzXz+jPJsmLNM19mnectkM0k+LwyFJAAAAPkGb00nhDomUwI3//ALQu8EXYDhpIvttv8TbZDOP8OLBMAO70WexTFz5+cnteMdktY7P65A4tZlIDdklK0GBAAAAH0Gf8UUVPN99TaObewetr3iFfIM29vQ4/CELIo5doCEAAAAtAZ4SakV/hw3+jEmnmFEtcwfltCMFzrX9aTK9FUCy1cKpO11Befk4O1gAKZFBAAAAe0GaFUmoQWiZTBTxv/vBGT5YQIcUNUZpARyxqc+rS5mmS+x2rMYuZzxaaNzjVUhTcMNlid1G47Ff7Gber1siDAOxLdQ+mOx+lSM//HIlVdSI36FhpMg3eSscveUe/EzN0NXsxq+B/YFxJBXKltyUDxpD4j5KmVec/I5HTgAAABoBnjRqRX+O5cRFOX/rc19dAGfoPBoZZ9JRwAAAATFBmjlJ4QpSZTAjf9RUJUwmPx6d1qkD5IiUog3fdDYG+lhD6PhB6357xt0eAhnE79OehpzS9hNg+0rld2gpMfPN78NladMHIMJnu+IgwcdY5qjgIx0rHEFP382Z+0LcY8oZUMOK+1SYe1NyN3/A/j9w4eoQ3PNSo4Ux/LOL3AlYQIiar3GTf0B95Y8t+So3rBxsftentdD21ZJ7hAbvgAAT3Nv7jjPdzJ/O1zUUPXbzz+p7nvdFPTkoO1Dqocj0bjwXKlp/xDJw9QeJ9FCKJkVoMOYhplLX41md7oQ/Y+qGl5+5p8L8BxRb1OTUS0Pldi3oqvL9d/Zrz+oryE/RRst8Em/0e7bxnWHG6Mx81D5GpO0jZK4XqbwX+2UQU4boZy2cX+Sjuvn0MsMkR236Z2K1zQAAAEVBnldFNEzf1+oL/lUB43iexxWj6oMO+zCK0ceAc+a/F/PKiFsvR2BDrZ2PrBeRSYr2NA9GNN54oqHc2LpSV/DC5yGHm8AAAAAnAZ52dEV/jZiDOV4WjRVuqfuRLr/L1bWuM4bOVk+yHw8xIsSbICVWAAAAKQGeeGpFf95p6Ti9RhN9YQqEMUYPvg2cqLZ+fsaBkkhn7KTT2jvNMkWBAAAAgEGae0moQWiZTBTxv/KSaxhdVGtNZuOiRf+JqEcNFkbAv/3toQnjkNAaWg4K8+iRR1zNbt1yIR3tJg8Hh+JLp9Fo/JoyYU3FI5nF35wWUzt3yx1MISi809oOoCzZrj//fWbdhPS2477QVN0UR6uqQ2+uEBXxub0LmMRP6aOCVtOBAAAAJwGemmpFf6zfg2l3Ygdwg0DWPIpQmdHx99XujRSHBNtWXm69NnlO/wAAAEJBmp1J4QpSZTBSxv/74DjghR/zgYSVgQLW28saHEGZfNPuk6gQX9d7Fl5UDqoN11nbQFj7zXG9wSS5vhSv3gAKUUAAAAAwAZ68akV/jZiVZ8+10ObdgxOx3183mwEhK7HkZqcvSQ1ycqtgp8jF+U3zQBWvpsMwAAAAU0Gav0nhDomUwUTG//vB61Ydg9A/wlrkMlWCPDd7QTOqj1/1NiCSa8ruC8UBK8w3Ph2mInOfYuW+cn88B5LL/toEaKTUnUAV8fIMLomlaMvI1rt7AAAAFgGe3mpFf46SPAi9fOZie0tFVOsqcMAAAACWQZrCSeEPJlMCN//HqYcZuGVy+Xdov07HU6KX5bS+1WmnLQvbAvY9UlOzUvt+HiM8nLp39Z11G9J72MQOlYTjkOf+SeXNLe5vidAHlSjT/Ore0st82H8qHv9wvch4z033oRI1qboMoq9jVzWQ3z/b49jZCWWEEv8uwxf9swldbmArV1EwuuF91pmeCP9WmkCZPwED0CvAAAAALUGe4EURPN/QWwldz0TioUzxJwTL1DjVPQjAq9Y54uZHmIdkd8NkQOEM7KiMsQAAAB4BnwFqRX+HBFltjCbzyK+FRKnf/mLOjOcjzR76r8AAAABFQZsGSahBaJlMCN/9YpHGMYUb5j/NfcgCQnnnUME/+uYEFab7R5PJRwSPzdjTXR8fgJDcETNoWNTQyIj4LAz2Alc7KxLVAAAALkGfJEURLN+CHmGW8iX1nC8et2f/B+8KsVycqqolFc
dZbi0GJHLqpjMjlxYW8t0AAAAsAZ9DdEV/jZiI7Ev5h6E6AXxVvyU+Xec7Xzf+Ujz4nooqJFL612j/wKfuy7gAAAAiAZ9FakV/jZXGc7dkFSNTQS86kNC5UhVk5V114XqW+n6aNwAAAC1Bm0dJqEFsmUwI3/vuVw4DvMd9xLBIuaZ/zxsUAT+oe+xkeDynj6QmOuSirEAAAABtQZtpSeEKUmUwUVLG//vhUdgbBTvAngor8pfCekIPa/E8Y+CM1csCM1vgERqhAjVxRBDaVo8NX0IcDkRQM7Fc+VrjXHrEEHSAqDVNvfJe+IHgsFDNZNns8smESREry41It9enn5zKZn+/bjH+QQAAABgBn4hqRX+Nfgr1oNIjL185KPI+d0MY3WEAAABQQZuLSeEOiZTBRMb/++A44DvMfbdU1ETVNwuYBfSvlXeOFbA6GbqsdQXHZrecjnlZBPP40eDLe0BA3X8hhdWV4c0xWlDkOf5flpvIBUQRHYAAAAAoAZ+qakV/jnhriwfjEBC0Cyoph0KvuuH/nOiAglQQP/F8Pzo5XgfuSAAAAEBBm6xJ4Q8mUwI3//wp75kBPbfTrjPAkRSSgH3g76N+IPxHc4xKee632cn0Is7nuf/sPmuSHgjxKqRUscLPVxNhAAAAWkGbzUnhDyZTAjf/+8Gbu08WB32S1i5TV1gbt9xeW+dzaP8gfVQwdBZ2FJC6dNZtf8f0Hg+4rzvoWkTdTJuDVTqcnkDXxrzOGZQBT4SetMErEEP9qYQ3b8ROgAAAAF9Bm/BJ4Q8mUwI3//vD7kivHAemh1CS8p9HXZNoOrlrUrukxAJgTBabe3Uq+xKUtXAoE33EFB1Jjy8zf4fiuWPtUn1WH8f/CY8QlZPFmMmcQSG4fjuXN/B+qNi/1vyFEQAAADVBng5FETzfhPBPL0/8xgE055y0cUfkH7gbD0hvkO9CUgUIYF9ILO4onW0KnZIlS4Q5+PTxIAAAACgBni9qRX+NlaNuljs9U7M/SW9U7+gZciTVHqQBZINn/LlwAI3T67KhAAAAQ0GaMUmoQWiZTAjf+8GQRIyA+EC0i53Uv0awLgBrmCYaTtddMfGzDz0tG737dau/+uhw5YMVYL7inRfSxfcFiKBNgykAAABfQZpTSeEKUmUwURLG//vhUdg1hne5jlt8Hnsw/yzG9BlYA+2BCvdNQCaqq/vU4NJus/26Yiqay8cHVINjOHjrvNVNNQMyKgANVegEHkn0Atv5BheYp5J6LroUHacyJH0AAAAaAZ5yakV/isvpAo+X7EhbLzLMVxMoH0gf/oEAAABsQZp3SeEOiZTAjf/7wZu7TxYO5AYsHfwK5RdvkSVEEn21E6ZLxQXmV2KUm/dt5YseWbLYmF6cycseQ/zRHyWaDXSPfsWYc9/L5FXY8bkH+03r0tQyCwxscnMoNSAc8JSQ1LuDhPTTMWEyb+wwAAAAQEGelUUVPN+B+HGW0ktTlKD5U7sftibyzFU0itTwzBOsr8jinvAm3X+xweVleORM4VRCN4J1niSdrhdyNeGimYAAAAAoAZ60dEV/hn60g6kYGvRqVwcf/9DU0cnE/S71LZRHgHSKCWLRPYzu+wAAAB8BnrZqRX+Ky+kGFLaTu3V1h5vNMrHepiZHt++d3TYwAAAAUkGauUmoQWiZTBTxv/wIE8MgSI/5zkdt2xF39AwC2NCzH/ix26Qf9Z2is2kaMgsh4st50ymHALuUDhcWz8HLl5Mv7cFHUPup58bPZYY2bD/A0L4AAAAqAZ7YakV/jZh1ijZ8jlpBJYejDv1xMTNJL8wwhvpmAwdAZHfqRUgGccktAAAAwEGa3UnhClJlMCN/wWJmEnMThj01oz/8HmHqSy1C8Xnv2nkeHB2q5boSEX7n9tutZzT+CbMC2woNyw43AypzG01uaeEWSh0sldqT/FFT7HItLnN5/sxDby4ws3/+X5ukIG6x07NMTkKBgu9pujusd2uIIBTzr4TtqDAIoj5L/81usPjC+yPrZ6BqtdjHQj00WB66maxuzdpr2LURBzdggU7/6dRUoibhHrHu27B3o2Nx4NpnulOOXMgFyxfruOZowQAAACxBnvtFNEzf1+r+1FYhZh13lr8fS3v18bH9CE4mbV7h417LGGti+gpJ8stneQAAADEBnxp0RX+NmIKr+C9nWHyDrxwMHXm6plPWpdYz9mPCwis05VbBSSZGa7pefK68Un2SAAAAYAGfHGpFf9E0GDu+ktvFMGyuyP+/q8a08x7N3S8hze5kbadJPQ4J/VcgfzmVJ+wGbfISiHM3iOG4kNfnUv4V+1AHLRdp1XpMOeQfEcujnC8EllvBq6VYdpBNSXI+IGkPIgAAAE1BmwBJqEFomUwI3/kqHHwSjggzj87YkHr4a87jSRladhRJ0sPf1vzUl8LaQuieyIjiu6qDqBhKWXMid6Xg8mRn7LGXUL7awd9GqdtR0QAAADBBnz5FESzfg/tuGYHKjTB5ZIF5DlRZ/mG18mLNwoqUKDqUjlTWk6UxETTkhiQI1o0AAAAmAZ9fakV/hwRZbYwm8UV+fNHe+WvQ7LE1dhsl0Kkl2dgkJM2xQPQAAACVQZtESahBbJlMCN/HqjpQil7yyLFLFu10qfJCouDtF1Xmx2Qi5MZ8wiTfvWELZh8dDesXTIOc3puWHO2V2s4k4HdAKOR4ngqVZUA0LYM5DJacVR8U9Naj7I7kVfa6bltDE6u1WaZn72qoZRJsNFn5BmTYbmEO/mI8pJzVOUzeRtqmdX+yIakKz1bUKdoGm4/J27cXeasAAAAxQZ9iRRUs36xetRXUJJO0H33aYF1R/wgPQCJEkQv6Oc57u5yk1XK8xotcqZMjal7waAAAADIBn4F0RX+P9L+RFJf/ZGxnwlZxxRpnlAdZ4MTNBABowPPX1SEIZbQRP1HKfV32CEsflwAAACUBn4NqRX/Z96jvz6lPIdxMK5ZxEfdrk4JnwEbzrXk8OtLZKf2BAAAAN0GbhkmoQWyZTBRMb/vuVw4DvMhdaXt49Ohno7dC9VWUVXFssBv/tAEi1GxHEtwjRSRy8OIHuSYAAAAXAZ+lakV/hwRY0JTl1qnyRaLRk+dkPKEAAABqQZunSeEKUmUwI3/9Y3sZBuQxneh/rGW4lE9nuaCObjg+K6/Ef2UAcSMCaL0//e80Z5sMPf/FLcue0CI5qB5M2c2QWFN1gHozimkN8YvezsbDlRHLpDuEB8yJGwaUJGSNmiArYmFEwpDpPAAAAD1Bm8pJ4Q6JlMCN//1ikeX7HlErA1F/NBuaVPRthECIky6bnR/zbhvjqszzH8XsGKtqLqtEjwRPCnmhPsXXAAAAMEGf6EURPN+D+24ZgcqNMHlkgXkOV4kgZTIIkxduFFShDLukcqa0nSuPAX02qo7ZcQAAAB0BnglqRX+HBFltjCbxRX580d76sSrub4Ej3N0tuwAAAEtBmgxJqEFomUwU8b/8CAhiLA9lUJ1ZhKmJlcycBCqNW82vUrKjwfRLn3brEJAyFEiDnxP6vgG4feHLTjXsLOkn44268wtNI
mWMf5AAAAAtAZ4rakV/jYz8hJwlu0SinQC47vIeaSqHA97ANM9n5sjiQxvenF11Aous9wBFAAAAUkGaLknhClJlMFLG//vDoZtU5Qj04CcZokNlVJpbn+xpxn5vnomGzZJoOL36cb95LTn2GQWez/KCm44eCBSdizIJg/0bpdMf7OAdSKNj8pTKjsAAAAAoAZ5NakV/jHXt2iSnj8+pK5UPI/1KAk39osMlGVfJu+lmZ0egJ6vDwQAAADRBmk9J4Q6JlMCN//wDwzHAd5j8NJF8og0066g+rXTd5exDKM2MCnecCF4E6scY10T/iZu4AAAASEGacEnhDyZTAjf//WN7F6EssGLL9aRq9GMDYogP8Xzv0eGCplNOy5xCXRSUZ3whgiU8wFDYxIZ/nwDS8NdljQdsca56LkdmQQAAAHxBmpNJ4Q8mUwI3//vrhHYHuWOrKSbgXljf8SHjA7pt3GOVNXvZVrQnA9aKMhM1s43sy2s+zopKgZQuR3OXh19uu03zK43qCzT1h20dR/R17wlmMK6XGC4UZ+neex19jtPD/686wWjM+ofcRmWrGJuZFnTOmBpsxXPMq7oNAAAAHUGesUURPN+B+FaPuhgDkzcf9gPd1YURPpDElZWBAAAALQGe0mpFf42Yll2maqZeq3XThpLr/MMrxEP4wtMxgfA47nZIAB0Jg2xgA4T3wQAAADdBmtRJqEFomUwI3/vgOOCJW+nXGeBH0bjrWPxLGdQkrwjNTNgde2mMiCqnPmK8M/7dGPanLBlAAAAASEGa9knhClJlMFESxv/8A6IrIEOZqgng5VRuFA82zmJM8KL0rKYQRJ7Z23FpDej7QMQVdrodIfe0IE+0l4nF1TNAXWq6QYt1HQAAAC0BnxVqRX+NmJ8hS3aWVfP2c1jQ9YmStz/ocuUcfXzyBUtM57gT1Y3KU8wl/ZkAAABKQZsaSeEOiZTAjf/76z44DvMd9wsd+BXDr5TM0gGx6Y5mxrdaiR+xZhz38vjjy0wqFQLRtVm6z4tKSoPcbC90J+/nHLhkajPTm4AAAAA5QZ84RRU834HLEsIWvPinvfHxN11tVvEp3U0V5MHr+u9+xSzIWHqzyYWqp/rDE7G1eS8KR5LYfoqgAAAAJAGfV3RFf4kZCgkjcPXSmeg5eQB5cdFgpKJnWyRjJ0LC8/6RsQAAABgBn1lqRX+HBFjQlOXXoyp+tUCmnUXoUiEAAABUQZtbSahBaJlMCN/74VHYQpILVM82IUwsR3tP1BZNdPf7nkM/rWUZ7Vjytb4wtxQrFU0OUMs2AySifB/5qpsYFfAbi+7wg9W7xVeK6NYvm492GehPAAAASkGbfknhClJlMCN/++A44DvMfbctm/iNKou1bR3qaQPdbYPAwRr192Z0c7+EQI4zvYW462uiNmrbmQcLA9GkcsKw1co4BTWHqy2AAAAAMEGfnEU0TN+D+24e0TioUzxNily9Go1T0H+2EjDCXB2dGC0c6xM99Lg6Jduz8xAlJwAAACEBn71qRX+HBFltjCbzxryJ9mcJ8tdrhFmiT0jMMVk0HmEAAAD/QZuiSahBaJlMCN/BI78lscgq/Rbp6OxwvfwmtoGweCC3o0q532h4ICIwjwcptqf9EK/+79F9P8RvOlrjacSPiqIg7tu0F2k6OuQHzHZomSpzp/xnEFVc3YpD8D7mVeTAU9Xkf0URAjJ5lfHVioB1x0ui2g+17Y3BGxACMJ3vWNmNMsSB6yWO0rku1Z5c+jFLkY2xMy95ck9Xi4MpgR/rgU5kie+i3djqs+2ji7H6EQ+ogtPCUzbkhOsAieuOLwevqHshJvQds8aT+lB97TlMWAfZprX9Mrv3feABeyzfKAEPhJ3BSA1JjxJgX63hdba2srRrtpcoYk81dOBi6AEYAAAALUGfwEURLN/QU/leQXMu9VvVKf5/iBcuPswhbCCq/njMDoMNrlzDXkngFpZCXAAAAHEBn/90RX/WyxZvpLbxTBsrsj/wSTqMIPKPLDvNeh/1F4bmWrWmiWrQyZcVh27RFCrcMqxQPKxSxC/gK2atA4KwaYxGxim8cYsJdnYB2lEgpZEZ9RrWqfitTU8KyDfSoH8YyYLFXyK9azzCP++iehmJGQAAACUBn+FqRX+Ky9y9AWM9M+//Zr43fS7wb55u5yXHFaE5OBuT0DsMAAAAREGb40moQWyZTAjf87uziMWmfzdbE8J87clOeHoYJgH3Cx3Zbf5fu+tjc/rFA2s6Zalj2+4xEwd5fOTh9i8yAEFuj/ORAAAARUGaBUnhClJlMFFSxv/74VHYOCF6yTT6YYA389+T2IXKVeD0g4f5YiUfdwqkE9LXLSW0Wg9bBFbTjG9iud0FpzmEuq0RQQAAABcBniRqRX+OkjwIpFLTj3OvmKtI9JIoYAAAAJJBmilJ4Q6JlMCN/8epy8Hmt5nkvHwUaGRnirECn1F6kQRmAK+NpNIq1+wKlemlXjXEhlgMZ4GsP1pm9vvJhG7Qc9ADDBVLkvTUXiuLhFe47rYgESCog4vHJ3mr48vRHWscHlSyjaWuoADQyZ2fVR3rFtDDYeIJSS40gQKiB4RIui2G6WIFhug4v4SxqnhvpgyxgQAAADBBnkdFFTzf1+NzzhJSSLZXInxj28fVa9sZ3iOeP2ue1tve6YTVpohEK9SBnnnNjSIAAAAmAZ5mdEV/hn7CXSp3/tuQ/gVx//v4sxBQVVQo1NUSFtg3nNLgRrMAAAAgAZ5oakV/0LYnEaxS2mK1fAK4ZH+xIFCrT1VZklQbIOkAAAA2QZprSahBaJlMFPG//APDMcCEr16uk80rHrFFfOOZRse1TbxZVz1dCEySWEu+EN2piNYDrHViAAAAJwGeimpFf4cEPjJJVPrWo1K1z1x0dfojX58CSEdNp6MuCQeaYl5k4AAAACxBmo1J4QpSZTBSxv/74DjgQlevXVYpZHYgY5ILdPK/4Pa1MNf4OiS8dl1R5QAAAC4BnqxqRX+NmJVnz7XQ5t2DE0nG/9r+8NOf3WQ8rd79EUL5OVWw3o6JYHnJty2AAAAAa0Gar0nhDomUwUTG//vBQYpYQEgvj00Y4sfEOWfwyh/OqKfKANvwZZWb7+Zzvu2GXqL7tu9BEtBF1qEI4yW/avkwQegNVC2ROsxmqz3xMUT7hq6sPN5B7n/7jXSLByhN4LaYlMrIVP2Gme+pAAAAFwGezmpFf46SPAjbqplSkyFjtDCI/KdAAAAAUEGa0UnhDyZTBTxv++A44D22Hq7hOPzCa+Q/0uZ8jWnVjeMaXFL05CiAEmr94qQQKh64dWL5oOBuv5DC0B686Pn9ihyHP8aNBy14cSqPoOJZAAAAKAGe8GpFf4cEPpZCBxpCqtM230lpYDo4d21o37/cFMIRIifuqQPwQmMAAAA4QZrySeEPJlMCN//74DjgQ+vwaqsgahBQipjp6SW8umNWKvqDeH4JFmwRnU6qYAsliRyGwuOB45UAAABOQZsTSeEPJlMCN//7w+5hE7FgdvuOKx17H1EIu8P6dhdyNR3Bp44IixcplMPwMmRm
TbkZKRLRSf3U7jRY95x56Yr6mgyfOofav4khhB6BAAAAV0GbNknhDyZTAjf/+8UQAxkZAR4MbGRlJS3hcrKcA7AnvEOlxAMEgQ++P4BRPl+kFk9FtV5R/aKjnv2HOToKh1xJET0cChc94nwhXYYLk8OsF9FeDw4hcgAAADhBn1RFETzfghKjLXhVTVpt3odhU1/9thHbD1IrQDd5C/7EbNj0Cg37PlRRDbIVjd2YJ6YmirkTIAAAAC0Bn3VqRX+KzvglsL6ygpfSqzjv723KBBWB5nY6qSoX/1oYogCyBnZuxHtpF3EAAABDQZt3SahBaJlMCN/KCEDzIZX/gifClN7W6rOMfzBCSLAvaxS3ajNlnvJx6yl9zKnNgrw9/FvayjPOvsK8P/j0jTsPuAAAAClBm5hJ4QpSZTAjf/vgHIEFoP7SF8/0hA40oETmSgI6eYrw0ApjXdIPoAAAAGJBm7lJ4Q6JlMCN//vBGT19kCHNXXJVpZIVW2dOsxg+4nBxXYKH8lgiSB4xOvJRCAiVFRsPHQ0NMqcJypOPU3YoZF/tZgsowrc3AiXB3rAlfqaAL7OjHoF9SqtbiXfhpU4xMQAAAGJBm91J4Q8mUwI3//121hr05LoQDvslraTvUwVhvuZbxxeVmAmt8i59vy9EIHQrpGFHmbLnzjIT69y4Yxu/aorB5LH0qX9SvpgCyK/xkpHH537XWLW78sVsfAh6XcctCpaW8QAAADxBn/tFETzfgfhy3ZPoRbD4wmzsftxxUTshAaOeG2I0yWL50v57GYufV/HImcKo1kE6CdyqZME4g3D6gpEAAAAmAZ4adEV/jZiDOg/2dUiFLHxw+J/iID4obTfYetWmDphQQWMG2L8AAAAgAZ4cakV/isvpPCgm6QyWAb6SK+HCMEJXEyPb987umxgAAABPQZofSahBaJlMFPG/yghDLjqnQ+U5Iog63cQgD0Zbv41jiJ1Bb0LhUXR3RIPLhvolGP5fCuJk+gAJDaNEK0tSdXgn/kDyA9nx+m/zdv65gQAAACgBnj5qRX+NmHWKX61xnqOPLP7m1eVao9/Jf8Igt0BHxeMZyj2uGarsAAAAQ0GaIUnhClJlMFLG//vgOOBCV69dVilkdh/3zGtCpgFkyYUdvr7GH5COYLGX7YS8knmvd86QQESRdt876rG5nJMdX+4AAAAwAZ5AakV/jY0SA6wrwXjljlqnIHhE5NeeEAmpe3+gSytfIFQvKbbmEuo179PCD/OBAAABDUGaQ0nhDomUwUTG/85HJkVD4qSGOwcA3K5VwP1c6xtTR4VzmQ0nBu1gOtLRFBdIfSP3an9P1gmsInCPe79j4zuUsL7DK//x4TUhKV8qs9aaEd6tCVykWUn5Gopj46/K++Ykslr+v5UodumlbuShmmcRvZfdNrYnmbVICpyTGYIOI3oAkq6vllklL4brVgHvyURSStEFd3HHzSm7Ib3PtR4E5I89jUSkUoRqP6CDkJUiXvc83P9RW253rviuhvzqjem5Q+hPFgGTABoRk2ZLNhrce5zzCxaYC6gToCoWrl4lzpLNEOsFnUNIz4XxjMda/67iz7TaE3NHffWa7Y/p9uDhqQF1fsC6zz4vKXmUAAAAWQGeYmpFf9Ei2Luv/RNJcaZzW23Slv1beS/YB+iADhqJnO8XGwim1UkZ/5DCTycTZV+kD7aTYfAV6mID48v5zZgCA1mNEtUUWkqJoLo+Ei7fqexybID5ptuhAAAAZUGaZknhDyZTAjf/++A44Hc4/O2JB6+GsntZ/QVS7FVbo5y6gb6CO1Sl0lysiHpH6tBqj4+RV0j1AkhNZawWUW7mw1Ydkn1vMjA/KVOpoHVQwnkQTNeO2ZHz8y7tU10XuEBdXu1VAAAALUGehEURPN99TaxHbgQG60QG5CYHG0EwwVTLS+WEIzdD/zb2JngNyA/ValMZgAAAACIBnqVqRX+HBFltjCbzxqFwcAaE+pOC4nnIXAWqYK0KnVEZAAAAWkGaqkmoQWiZTAjf+8SBZr6iDxTgWdHsgrtUrFBwt9rOo3XBJP8ewDzX8GtwlWBgrC6irWNVrTLY+WKlSir9pSwV4EguL0xV3Wcj0/zw/APuEjV4PAH1Af4Z4AAAAC5BnshFESzfgh5hlu/y+9zSPV3/9spsZbsjm+DaIPLk3e+H8QEI6F/J3CJy6zchAAAALAGe53RFf42NDc3tk/BfLpHtYrknWRGdfCfWGF2hHwQDNNRoM7TAEbExLqzBAAAAIQGe6WpFf4x17dokp4/JyHzCbIpQYkeLYHCEqpiCFaz+wAAAADJBmutJqEFsmUwI3/vD+CADeEGcfiMMXyiDTP3ydcyGMsECu6OwPlhc6nEUtvJobaLBYAAAAKdBmw1J4QpSZTBRUsb/x6w8INv7ObYOML18RRf9FGE1XcGr6KXtfZJPIpBQ3Zddf/xcPS72YLTyF1aF1OgpE73KGCe4EKRaaP6+28u2fFYqgdEy089G2c5o40yj0KCcaF2ckf3qEd7wZbN7tzArbJ3ihmBK1aiIZDAZIELsAMy+cnsNyfdeXk6vZoOzvGLtwNlOVFAWfFH7l4z8o6rut3fiMJEBlecPgQAAABgBnyxqRX/QxMwwr1BpDDFywfg+dwOr6eIAAABJQZsxSeEOiZTAjf/74DjgIHS2PuRVzH01Z0BzuHiEMxkS+es2RQpgPq4cAkl78+syHU05veIX5apNghHwp2dHWeQBuTkHc9KPKQAAADRBn09FFTzfgfhxltJIut7cibOx//YvRw99xcIjnvocl0u27U4ZQtRxhVb+2ZI+NBhFvmHQAAAAJAGfbnRFf4Z+u/AqLSLb8iSXq//onmci1775jnjU+EnU1sAD/wAAAB0Bn3BqRX+Ky+lRxhN9YRwoFckJ8K7wgeXkfQ8c+QAAAE9Bm3JJqEFomUwI3/wDoisgI4JCqEuNC9+tPU5T6akQg8IizB7OrAsSlrnkH+zL9hIrZLMG+0x3pM9DfD7xIYuRzu7XLhI5qDban3u9P/d5AAAAVUGblEnhClJlMFESxv/7w+5IrxwG7Vm0o9WyHVPRTEK7sPH3Tlc6BafENz9DO3jdvcZDYYjxJ/xezZD/wVOgNlWr1ovR60fW2n2QPDIrXRl7wbJGuBkAAAAnAZ+zakV/jZWjbgdMtmBO6kL9v/+iv11IkCbeIE0lsf2Q4YI/jwZ1AAAAREGbtUnhDomUwI3/+8OipWXVEHfdmUcs3voSdMpPrWh0B7x/buw/7bfWTU2SnRyzDOlwhY0AHOKdTjMVTnbawMzXUYswAAAALkGb1knhDyZTAjf/++AcgTTZH1Q4wq20ZHYluqF2d5yizb76mgyembcA7BYepeMAAABaQZv3SeEPJlMCN//74VHYNYZ3xEb5VWTtwzMtJdYmcfzq/85J9tfpgIKEqieYPeY/MEIfk5XhJ1koykG1DVrGfyuRE/hx6dLKPysfIvtARn9KJW3Q6CD7moIQAAAAY0GaGUnhDyZTBRE8b/vgOOB3OPztiPZIA+2600lWyYtu5etLlp5Z1h7gvaM
9dZAwSh88F2MaIP+Gb/x+JzyfKCfhrWJAkMuh2vjzRb7d/N7ALJMAooNT/8nzsWXFsQPjIw4BwAAAACsBnjhqRX+OeGijP1BCTddBMbj/ecV0Or4a5xYPj/vYTAUTsVBHuE5kBqvhAAAAOkGaOknhDyZTAjf/++Acgl11jEgudcSnSrhTGl+N28Vhdn1HYhWUAuZUXRLf5VeB/cE3pIjWL90Bv0kAAABeQZpcSeEPJlMFETxv+8PaaJ1sN1uxRNIGqNtU2k3406usYCk1bEtXHnFK6NYx2++uiXE5dDpIBIRXDOXY8QkTWaxAEokog4eXK891HbBLLwiFuVsjJhcQDmLu/1BPQQAAAC8BnntqRX+NjQ3Qw5SeeQtnahQCGRD+tBtlepn/thplttEshgUAcjhCMocOSDNHoAAAAE1Bmn9J4Q8mUwI3//vDpMA3iJWBxGCoVqH005LOHvO3JnF0uPAF/1BdrAmcA6ADeTiMuCgJLUTG5+vRnVljzqS04Al1sJB4spczTDLw8AAAAB9Bnp1FETzffU2jm3iDcWTT8O+Q1NZfwE4BDhgPUoUdAAAAMAGevmpFf42YlWfPtdDm3YMTsd9f+YkkWkcv8aPIfR7QyuqOLTk5HzNqkb26HGVg0AAAAGdBmqFJqEFomUwU8b/74VHYnLGfY5FAo39Th64FzFtgZ97sVu925ubsyqHBRHBm16zii8aJWj58APV75EudDjr7xLleCLLq7/RtBuOXcv77e28R66Sg0C5+ohD0irjcvKrApu8KWKOAAAAAGAGewGpFf46SPAinGc0bJk9Hz2z1Me3AgQAAAQNBmsVJ4QpSZTAjf8fMy8FXp/MBuvzFgdo0Sga+yREhRElJ1uA9sIsY4ra6c6fc00xs5DNAd8Y8yfothafu7uajsv9X2+oe8VfvZheOdZXtqkEIPdjDT49hK59jghSaTBGRE3a2q4B8vzpyVLIVGWiucmJcTr7iO+Rs1GKykq2Hqpg/7wGE9uYxrJNr28ZDTVBHR/4CyCAPQk6jWNczPh78rCn0z6G9a1RjzePKZYANdrkvuc+8zCzahQHbPyJ3wSnp4GBi0FZoLY5fHqWlr8b307+UI2CHj0yzFgHzBe7+BMVC9y83a86LgcLsFvxDSFgPEYch+RqKUMFJwfV/KNr0okJZAAAAQUGe40U0TN/X6axmThJSSLrDjCbOx+2e33lIIgsTG2JtszOBl/PYEZ41qb1VwqSpMkCnW2Br5kIl2A3Uu0IPihiHAAAAJQGfAnRFf42Ygzli/toiEQB9cbIH+nvDxV+o1J32C6oS5Fi80REAAAAoAZ8EakV/3mnpOLtzNha7JYFMdaGbfLAP/sxzAhGKBTfBtRpxTI6dgAAAAIFBmwdJqEFomUwU8b/0EQS/wgjd+gtxcvUsjBZ/KvLmmxO81sipaPVWNzgoVqy31nJce7cJ78sV7DeSUW+b67IBOE8jk8HFII54ZV3rWpp/dqQpuWbfoc2fWtU2v+/yK+ia43/m1sEOBWkif02dhVPEQ9NMhk2dwH5CcI/zvbk/eCkAAAAoAZ8makV/rN+DaXdiJ3Sfbla5z0prYIJJdn9L5fmjpAFnOrwj7T71gAAAAEdBmylJ4QpSZTBSxv/74DjhLmEQjvo7MOz/6KwI8CxsOXVk34E9cwzHoe+ypu4pyoHVVLROmPuXefirSAYX2k1/KeHKYNsg4QAAAC0Bn0hqRX+NmJVnz7XQ5t2DE0nG/A/2tAvwsk191IaIPxh05qExET5GLoDSl9kAAACTQZtLSeEOiZTBRMb//WN7GRlg7p5tdxIOcz4ktWOw25b+IeCEiFor+loK1sW6QJ67CqjJiEE34iTVIck02sn81tu+PGvjsyHqi2o+NT4inWWVtqWkpr7f/3c+vKVzlj1mjdhK8OP4T7BUoxX0iIL7Ms5LBKBxFY04P++ZvxW8cml6mweqdCiwy+yuvk365ebqBsgsAAAAGAGfampFf5XYaTriDUHbSM4/2rI012dTgAAAADxBm25J4Q8mUwI3//vgPUQ9+HnYzR0ReHsw8528FstVpSX9I30YzTO1HQX7vIeLO4umR1DvPGSkhtbazhcAAAAqQZ+MRRE8331S+se7kbW6AxPsrNhK/dtL9AZtyTBgH5jIFFvIWlbz+Y1vAAAAHQGfrWpFf4cEWNYpbR3VxAdD0L/IP/FPtuyuyqOpAAAAgUGbskmoQWiZTAjfx9CcA6khDjKpbsCmICblPVjONIzX1mXC3smr+Dq5fXiG9Xv8kFQyUglCZwie8jt9HTy9u/pPWV6Eg7yLHBH4vZV7UNGZW/41bil6995IKcn441hEAdqxiSHI4vuUl9rENOzJKWLV2U30qhtdcyYWhWRdSq79ngAAADFBn9BFESzf0IJ5kmO08++CFl4DaPf738KLIygT84teS4UEhuc0uV5bKc3qgHgZU8MlAAAALwGf73RFf7OB2xJd3tC/Pqn8/HmIdT3BpECF6yr/lbOPB+Zy1K11SUfd4VAGF2JZAAAAIQGf8WpFf4x17dokofjXiSEcvI/1LMfeM2Env+wlhWYh/wAAAEBBm/RJqEFsmUwUTG/9YpGqI6+gv8B9xK7sh6JffgqkidB+uyLMXamYT/v0gM2Bl0cBW+zOs7+meEYWSU7mTdahAAAAFwGeE2pFf4cEWNCU6Hlhkpwoe3NC1OR+AAAAfEGaFUnhClJlMCN/x85ZyjCkCh8uzK70/iv22/5Y3C+WTfpvUCrA+KzvU4pHDm4sP40BAbPg567zdr1Wu7EEBUuR9mVRdZyCPSUp6JfGVxbeoCt4oVKZm8KmBNWkYt6ZDzaPhh8MV+sn/5rn/+5/sTVujqfTRrIRFR+q1bAAAABYQZo3SeEOiZTBTRMb//vgOOB3OPztiPYLGqbk5d2BqIQU3KblqkAke1WFGNwX7uSXHgcpobdidZRPvUHXnKBlNvG1whZJdIUyHR2g5/kj6iGYMgjOKwfAwQAAACgBnlZqRX+NmJZdpmrtcZ0NTAN84Rf1N4fqx73Qed7w+gCCXwdxAjG4AAAANEGaWEnhDyZTAjf/++A44Ilb6dVP1x6RsYOzc76QVwcX2znv5faz7QEaAFkrcI7CaujLN3AAAABVQZp5SeEPJlMCN//7w+5d2nwAIi2iZMsWHBHowYX4aPQ815YaFI7qF/qVwMK0/Txt1GjsuoBrg0JYfv/uwERjL/ROpxdPOR5x3xoTlVPE5qTT470ToQAAAFlBmpxJ4Q8mUwI3//vG3LzDXKIqZ9TYmttr5CBr315a/0BUuW50ROufAOdVNE+12YSjQ+/9Zk6dVP/79hzk6CodcSRE5KWMo3xZZ6kBcB3u5L6hIx3KSQR9gQAAADlBnrpFETzfgcr6UK161qa1NGdzHnS/SFXblAfYkkTUgGALg0oSw2I9ZdDSKhlEZD82KSrfFpA+v6EAAAAnAZ7bakV/jZWjblZD8+kx6s9vusQNFqTkiIzV2G+Y2RxdmUUlRu/AAAAAVUGa3UmoQWiZTAjf+8OnUzwggj/qGfOg+4iB+MF4Y5
ta94QwdwgcgpS5V9IyYNFhVGlpN2bjMQ4DNhuvd/qEmbi/FxB9D9m8UL/JpA3iUwbQMpYlEjoAAABmQZr/SeEKUmUwURLG//vB2iYdh5ImpKHSwtHernzZCXrV5CEdMKE6oB+B0fwylF2lU+/yPss1M1i+5F7zxa3LLlnpb90pQhlGEO/tirOcGbuioWk+zu7MXqDCxg3B7BB3iJfYGJshAAAAFwGfHmpFf46SPAi9dvB8N1WaHbwo5+8wAAAAaUGbA0nhDomUwI3/+8Gbu08WIicdkBLuDqjcxTRGWFEHK1YMItB/Yxti7mXMwKkgf/cswQBjw/L2b1agYYBo9t6xn6Eetw4bxPxz38vjh5Ug36pxN1P+Jdu6y5/v+SSnY/wBOAf2anagvwAAAD1BnyFFFTzfgcrcmZwgJIut+RPjHv9yMW/NWZe0k8Bi3Zgre6X89gTtwG/OmXBXHUG6b/SiwXTP+8AECry3AAAAKAGfQHRFf42Ygzli/0aMsY2QSz83/bIH9oh1NSd2YfdN+fUQOgXqQV4AAAAgAZ9CakV/isvpBhS2mIvgYfwGKq3zW41GlvjW/evQpscAAABJQZtFSahBaJlMFPG//APDMcJc0yddeUYH6nOgY2NbwtKavnsY+nBe+fxVlILL0NiRXrOhPUiA969gXTMpBWKdBefNzN3yeWc1gQAAADABn2RqRX+NmH8N/4LEDrqsxxCUlA+/C6d/hrlaE3Eni4AjiTNfEyqi1KVhrZEtHyoAAAA8QZtnSeEKUmUwUsb/++A44S5pk6Zc8mpbajeY3FQlOl5eERU4NlZQ7KCHXnQqL1aUm+ZXyoGSi1CXUAu7AAAALQGfhmpFf48rV2va6HNuwYm2A5WJIWicynQ6BVYFp9FWnKrYE2sDGYjSbKzVRAAAASVBm4lJ4Q6JlMFExv/HSJgSC1nZxJ/p92h7iaNLjO1oWGnVb7mdLzqjQegiZZ8BFVoHleRS9opigDSoevdqf/HRbxPUT937O3gQ14DzaDIrquleqdGqhoMap2XYPTsxa/wSkDdVT0ssgNi0XqyHVXOCMqt31kydwNB3y5PU4UE0NqfePmpADjVY3oiJGyZaYPsi/X4ym21aC/XEA8g6cHPxA5l4ZzRa4mnIZk+87eHMPyVrzjVuzsYf3L5Mdth1vbIMZuHrbzmxDF4Id95FDUpJLeBKHbiVKXlKbVlORXdE3XDZHj4OQ44bzKNetjD2jssvKy2arGt8LqXFFRWGchvabmDpRlO+TJnoh6Qi34c/JAelcxWZTHtnFBN5/wh1aZMwTQYfgQAAAF4Bn6hqRX/RIti7sAopo312LMs22rFhBmyBC8ZUUUusB76jdtD7pxWchSlEaZcpo+z3LSOsqF+TjWIbKyoOjcJ0BvP5Un6zNu7CYyOjMKakeOmtBeHghnZ0Lt7ZKgmBAAAAZUGbrUnhDyZTAi//87FwbhKNgmMp3eNkrIgo+BdeIZ2RH7aVgRWSRB8VGoJF1H932TYjOcWyo9X8eYosRzKDsLYZ8OElkDGMElhddcoxE4Cuo16C400E76DzMxmRGQrfcMoXwPeAAAAANUGfy0URPN+DBf3EVFfJcIvLYYm40FJuHP41gUVpmbF9867RDcpbG+hUwmLp76j9ncDp8t5SAAAAIwGf6nRFf4Z+tIoBTm+ZzUi4P/L4NbInHV4wNqEejzvRIyMbAAAAHAGf7GpFf4rL6VHGE3tN/0VO4T4Y92cJmTGqjpgAAABFQZvvSahBaJlMFPF/+G0p3IyKrpYoACoREvabxZ2Sa1j/PX4Ig63AFh5hVcDy6lyqDJVazyv9k0A3UEbpQ4kElhDfQ/kDAAAAJQGeDmpFf42Yfwkkpl9ce7oX5djjxlYebpvmnM3if/h60uGBIJAAAAA0QZoRSeEKUmUwUsX/+99LcS41uOHpiqjPsmwGusCKQ6BF1AYtz/WtQ7b6AecL2sZs2DeFgQAAACoBnjBqRX+PK1dr2uhzbsGJtgOViSFonOVO2XvUQkyMOnNQmIoMiU2QLcEAAABnQZozSeEOiZTBRMX/++B7/JR3TzHmkys6OPVr3j60/Ducpwbn9LZ8EMRIp1AAs7+bDOf0XhisAXR+MHkgbVlZgREbbfkD3LjS6EKrXc0GPULfC2RHIyE230IZnBmWLkBNNdGdzIhkLQAAABcBnlJqRX+OkjwI26tNPqSi/HkmF/g/gQAAAHJBmlZJ4Q8mUwIn//Dz42spRFihMPfTkj2TNj0lStYXk3a+C5Wtkfypzo0uM/feYzEOqkQzy1NzoYXThBgSRvjrEcejioh4gzCm8rCbzMI2wWFHN/yLtF3p6q9/Ec94jMIwPBQYnzCY283aqNgEwQc+asIAAAAwQZ50RRE836foBRLD06goiSPhmYv9qz7O6uD4czg9Q3XDksNdfK5w0IuoG1k3GS0sAAAAHAGelWpFf4cEWNYpbQS22jK73/5IyrwB5npkP/EAAABQQZqYSahBaJlMFPE/+81+QTCAevbatPJvZfPu+5FInLiQmsx/vsDDnlk+n51NMYohIZisWJ/S3zrGQtmid1/VGv24Rk3j0Dp9U4U6eejQnKAAAAAuAZ63akV/jZifIUt2huke1h+nmbVlvSl8BsecDgLV4KKiXsuB44t0CtoWXXNbfgAAACtBmrlJ4QpSZTAivyCC9u0SU8fn1JmlqS4H0O/V2QQ8C23N8Hq2eJg+n3lBAAAEpWWIggAb/6zQYZLfQPgNc0iYMN3sPl0EDBzzvrz1O9Sgfa49FGnVhJ8qLkhVezFoQxJKbc0nUGNZR098qd7OS+y89EcOWVfDbC64tS8Y9KVBO5bX3i8BNZhBqqdfBpIeL6SUDjC2QpVPdT+4GdjwHm+HVEddJzn4mxN+ZHi1zAImnjZYMgtVcr/8KsM8y6i5AmAGJNcUB7ayE0bca3a3R6hdrr0XJr+AAAXHQ/kXcVlmP6pUrkjcKn/5MsrYsfvEq74eZ4HZz2CqbE+TyZ4bBsWkF/G8EsnWTdMeCaT0oSOggGj6F6GH2koqwzCw8EE/jxjH/fgpvYdskan80JjRYRAJC3SwAtTkEnddIDJhOwFLkmhXRf1WZtt88VL7WgZg+Do7We/RCfv4cfPN57fJ/eEcGoPIdjySktCXmVnqHKZZhx61mqXrYt2ccmalI5pFwxn7yIk4NanyDkRuoRQdzVy7V/FgNDX+/OG313omVQVM1D6TfnNy1isPsCV96//sVHChbSvRvo9Op4uXUT2Vnp2NbBUnPOlIQwYUmnls4/qAf1DSyr347ve+HuWjGSfL2uAJLxNkKGL7jaqU87H350pTIU4+n/FRrP8UOsWWuZ9oqEO+lS3qvWMNvTA00dfS9Og3CEQ1lhvXgqkfbgmWS7Q7BrqjPYxv+usZZhR4mTsySOLjIGUswJByOkD9sXOn034oNMHPSMTs90mvpwuhhZCES9QIA0MY4mVJAH4U1175JsR0JGrNT2cG+kevDwhB7L2Ps9sq35Ja1G1YIr1fCqBH1NhFZbh3r/JJ6IfmaKkwRe/x35k1btKb+
ZRfDYCZwD/U+DVYV4ve4/ux5EMulf81TrZ2iS/AeNl9oIVL7FLsYt0SbM4Gdwujxy/cVa/9DfeeCmTLktfb85Jq/PPzoWfkT+3MSEL52ahvl5dyZ3P9Gjqc/3V3E3UcMDLHXaFjLji1XATHryNndFbY++diGv2rJsu8jbVZYN/lqAhbLHtMl/sxkHV0L1mnPx14QjgEVCWYb8PE3T+6bY0rrXT6WTEzJq/uaxvgC3UliUMREWCB/LHkW4/FwMNKpTt+KgL7DKMzlTFtOl01XRpLhfNYIv4yqp/8/0Ljp/cAHvBSTd0o2zGUJLgBlXpkC7hkWNvVKuNH3gZDe7aBWygB4hHI5MtXigV2BDYnOAj1gC8+xGfGlAw3JTy1PkTj3bbr+p6m1jzipdInXtuw8i53LkL/+t/cDhYKza5lgkrWZvnpedOPwE+SDsrxeadOx3w6iN4kI3PIREK2JArXFqTIikdsxGeR30xC/R4MnKZUvO9n1HHNmqCdKQY/t8N97D5MniGESaR7uA4AEebTLOvxLuJghYdjoSxajwIKjHQQZ/HnrodJI5KNoj6gM3UJuupZ/7uHrjf0Dx0Tsc7GUns7O78xmwOt1zDx4hBNRbDjREElJr9eJlRZ3FMTZgL28DJXX2Ukq+M+ZYpT8CiXX8w7XSQxcMg4cJ6c0AGluEsCz3tAkAQZLU/VxMQpKTJR8+ULaCRGp3sYhJhT5mx6NspTrLO4jl7T5GLVFULkYWq1jSnmblAgEE58LcEAAAA9QZohbE//55dAJChmGePT0n0Li66IQjlgCcvSeXFOTWduetNIn5ncKj2uvzaGAWdM4YtqdWioqkA0TyHz0QAAADBBmkI8IZMphP/nR8JCz0aek9id9vrF4lLN2RgQjyZr432P7Kp14LSDqmMhMkWW0IAAAABgQZpjSeEPJlMCf+dJU6LZgkV02QLvEfeXB5dv4nCjjykgCXjsebUglI9rrNk3mZJmTaQFGdOlZR2+jF2hfcbd4HXQBsGGhxrxbbB//345uw0C7pMaLkJqkj5rLI3WN+aIAAAAUEGahUnhDyZTBRE8/+ZYKy4kVT7wb4JNhryfYT+xBrDfh8zeOyqxLHImSnk1hEFx+vGFOK6nSbea/LiCsJT6jX4eofbWT/4UXXlaQ7ns7HmfAAAAKgGepGpFf42Yll7P81LVQddgA183/eHh4rICR6Rm/TAP+50pi9D2H4nE8AAAADBBmqZJ4Q8mUwJfi8K/Q8CzpoXb9z2cRUhHfcB/4ijSNB3DB8kDQxNhedtHQQegeFAAAABHQZrISeEPJlMFETy/jBNpyKcpBz2UWq9NhKZ8OdqT4vlDJSja2L4KsxrGpSrA2yYsC6BONsZSjEWJ7nESRHswZooLCvZWL7cAAAAtAZ7nakV/jY0N0M9lu0TcpcLEt4G2uP1I1ad/p8M0BAfmcyRE+XWn8kbnbCP/AAAAQ0Ga7EnhDyZTA/8JnI6fa/XOcPoz6Eunvm3DSqWZut6xANl//x1bLCAKYLkthnFxJePmxk5tfDOwQ/OimiJzESuj2PEAAAAyQZ8KRRE834HLEsIWvPiTtw1hFA7vKrrccI3560FD1CXoViI/ZgtgMQkFL76i8/+MS6kAAAAdAZ8pdEV/iRfHjugOkdFKyjh/gRLx8g9LelE/H6AAAAAYAZ8rakV/hwRYbMohViYe1/b50qjneEmNAAAAWEGbLUmoQWiZTAr/DOcGwr30MLcyf4RisZLdUWnlUYvXJVKV7AncbyAoLODntBs0+K4WaaoVpZfqfI2XI0wXb+fJSpC5XehdM+mTV3yL+1xrM/enVV5ivWAAAAAzQZtQSeEKUmUwIr8dln8w8D8m9uEVJ8VaXrlpwtQMUcjOxV139A/A0FOJb3veKqfxYs2BAAAALkGfbkU0TN+BznEOeLmqf8Sjo1rSbw27tYVeLjYdb2CxT8VnvpcHn1axdLPZBJ0AAAAtAZ+PakV/hwRZbYwm6h1dIRfbz8pmPJ5q1aY4QcQjnAtS8tukkKVgqkBfhK/oAAAkC21vb3YAAABsbXZoZAAAAAAAAAAAAAAAAAAAA+gAASucAAEAAAEAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAIAACM1dHJhawAAAFx0a2hkAAAAAwAAAAAAAAAAAAAAAQAAAAAAASucAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAQAAAAABQAAAAhAAAAAAAJGVkdHMAAAAcZWxzdAAAAAAAAAABAAErnAAACAAAAQAAAAAirW1kaWEAAAAgbWRoZAAAAAAAAAAAAAAAAAAAKAAAC/wAVcQAAAAAAC1oZGxyAAAAAAAAAAB2aWRlAAAAAAAAAAAAAAAAVmlkZW9IYW5kbGVyAAAAIlhtaW5mAAAAFHZtaGQAAAABAAAAAAAAAAAAAAAkZGluZgAAABxkcmVmAAAAAAAAAAEAAAAMdXJsIAAAAAEAACIYc3RibAAAAKhzdHNkAAAAAAAAAAEAAACYYXZjMQAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAABQAIQASAAAAEgAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABj//wAAADJhdmNDAWQACv/hABlnZAAKrNlFE/nwEQAAAwABAAADABQPEiWWAQAGaOvjyyLAAAAAEHBhc3AAAAABAAAAAQAAABhzdHRzAAAAAAAAAAEAAAL/AAAEAAAAACBzdHNzAAAAAAAAAAQAAAABAAAA+wAAAfUAAALvAAAU8GN0dHMAAAAAAAACnAAAAAEAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAwAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAABAAACAAAAAABAAAMAAAAAAEAAAQAAAAABwAACAAAAAABAAAQAAAAAAIAAAQAAAAABwAACAAAAAABAAAQAAAAAAIAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAA
AQAADAAAAAABAAAEAAAAAAIAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAADAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAwAACAAAAAABAAAMAAAAAAEAAAQAAAAAAgAACAAAAAABAAAQAAAAAAIAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAgAACAAAAAABAAAQAAAAAAIAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAACAAAIAAAAAAEAABAAAAAAAgAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAIAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAwAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAACAAAIAAAAAAEAABAAAAAAAgAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAMAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAIAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAIAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAADAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAACAAAIAAAAAAEAABAAAAAAAgAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAgAACAAAAAABAAAUAAAAAAEAAAgAAAAAAQA
AAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAMAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAgAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAIAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAACAAAIAAAAAAEAABAAAAAAAgAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAIAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAAEAAAAAACAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAgAACAAAAAABAAAQAAAAAAIAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAIAAAAAAEAABAAAAAAAgAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABQAAAAAAQAACAAAAAABAAAAAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAIAAAgAAAAAAQAAEAAAAAACAAAEAAAAAAMAAAgAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAIAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAMAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAAAgAAAAAAQAADAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAQAAAAAAIAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAgAACAAAAAABAAAQAAAAAAIAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAUAAAAAAEAAAgAAAAAAQAAAAAAAAABAAAEAAAAAAEAAAwAAAAAAQAABAAAAAABAAAMAAAAAAEAAAQAAAAAAQAADAAAAAABAAAEAAAAAAEAABAAAAAAAgAABAAAAAABAAAMAAAAAAEAAAQAAAAABQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAACAAAAAABAAAMAAAAAAEAAAQAAAAAAQAAFAAAAAABAAAIAAAAAAEAAAAAAAAAAQAABAAAAAABAAAIAAAAAAEAABAAAAAAAgAABAAAAAAcc3RzYwAAAAAAAAABAAAAAQAAAv8AAAABAAAMEHN0c3oAAAAAAAAAAAAAAv8AAAZHAAAANQAAACEAAAAkAAABswAAADgAAAAmAAAAbwAAADoAAAAzAAAAKAAAADcAAAC4AAAAIwAAAFoAAAAxAAAAJQAAABwAAABfAAAAWAAAACoAAABXAAAANAAAAF0AAABhAAAALAAAAD4AAABHAAAALwAAAEgAAAAeAAAANgAAAFgAAAAiAAAATwAAADsAAABXAAAARgAAAFUAAABFAAAAQQAAADUAAAA4AAAAXwAAAFQAAABEAAAARwAAAGEAAABVAAAATgAAACIAAAA8AAAAQwAAAGMAAABRAAAASwAAAEcAAABuAAAAUAAAAFsAAAAmAAAASAAAAHAAAAAbAAAAVwAAADQAAAAkAAABHwAAADoAAAB7AAAAJAAAAEAAAACzAAAAJAAAAHYAAAA7AAAAKwAAACEAAABXAAAALQAAAEQAAAA0AAAAcgAAABwAAABnAAAAMQAAADkAAABcAAAAbgAAAEAAAAAoAAAATAAAAEgAAAAdAAAAZgAAAEQAAAAvAAAAJgAAAE0AAAA4AAAANQAAADYAAAFBAAAAZAAAAB0AAACUAAAAQQAAAC
EAAAAvAAAAQQAAADIAAAAfAAAAGgAAAFgAAABLAAAANwAAACkAAAAfAAAAVAAAAFUAAAAnAAAATAAAADcAAABdAAAATgAAAC4AAAA1AAAATQAAADEAAABDAAAAIgAAADAAAABhAAAAIQAAAQkAAABPAAAALQAAAC0AAACOAAAALAAAAEUAAAA2AAAAaAAAABoAAACaAAAAMgAAACEAAABGAAAANwAAAC8AAAAmAAAALgAAADIAAACCAAAAcwAAAD4AAABaAAAAXwAAAGYAAAA9AAAAKQAAAEUAAABYAAAAHAAAAFMAAAAxAAAAJAAAAG8AAABPAAAANwAAAC4AAAEZAAAAIQAAALsAAABvAAAANAAAADAAAACtAAAAQQAAADMAAAAuAAAAXAAAADEAAABfAAAATgAAADMAAAAiAAAAUAAAADMAAABXAAAALAAAAD0AAABTAAAAawAAACEAAAAvAAAANQAAAEwAAAAzAAAATAAAACIAAAAyAAAAkwAAAB8AAABpAAAAOAAAACgAAAD/AAAAOAAAAHcAAAAqAAAASQAAAHQAAAAbAAAAnAAAADoAAAArAAAAIgAAAGUAAAAxAAAAQwAAAC4AAABmAAAAHQAAAGgAAAAuAAAANAAAAFYAAABwAAAAPQAAADAAAABHAAAAWQAAABwAAABkAAAARgAAAC8AAAAkAAAATQAAADUAAAA9AAAAMwAAAOkAAABcAAAAXQAAADwAAAApAAAAIQAAAEAAAAApAAAFBgAAAEAAAAC2AAAAHAAAAEwAAAA3AAAAKwAAACEAAABTAAAAWgAAACgAAABNAAAANwAAAFkAAABTAAAALgAAADcAAABKAAAAMwAAAEIAAAAhAAAANAAAAIYAAAAdAAABMQAAAEsAAAAuAAAAMQAAAIMAAAAxAAAARwAAADIAAABkAAAAGgAAADgAAAAvAAAAIQAAAIgAAAA3AAAALwAAACYAAAA7AAAAHgAAAE4AAABYAAAAKgAAADYAAABSAAAAXwAAAD0AAAArAAAATQAAAGMAAAAfAAAAagAAAEAAAAAlAAAAcAAAAFAAAAA1AAAAKwAAAD4AAAFQAAAAXAAAAGMAAAA2AAAALAAAACEAAABCAAAAKwAAADcAAAAwAAAAVQAAABkAAABsAAAAMgAAACEAAABbAAAANQAAAE8AAAAtAAAAPAAAADMAAABIAAAASwAAAC8AAAA/AAAATQAAAC8AAABQAAAANAAAACcAAAAbAAAAdQAAAEEAAAAoAAAANAAAAIkAAAA6AAAAeQAAACoAAAA9AAAAGgAAAF8AAABXAAAANQAAACYAAAAgAAAASwAAACoAAAA2AAAAMQAAAF4AAAAcAAAAWgAAACoAAAA2AAAAUAAAAF0AAAA/AAAAMAAAAEkAAABpAAAAHgAAAHUAAABDAAAALgAAACMAAABKAAAAMQAAADwAAAAyAAABcwAAAFkAAACnAAAANQAAACkAAABuAAAAMgAAACoAAAAoAAAAVgAAAG4AAAAcAAAAWAAAADwAAAA7AAAAKAAAAFgAAABWAAAAKAAAAE8AAAA2AAAAiQAAAEwAAAAtAAAANQAAAGcAAAAzAAAAUAAAADkAAAAgAAAAHQAAAGUAAAEeAAAAUAAAAC4AAAAxAAAAogAAACkAAACOAAAAOwAAAFoAAAAbAAAAVQAAADkAAAAsAAAAIwAAAEsAAAAqAAAANAAAADAAAABTAAAAHwAAAFcAAAArAAAAMwAAAFMAAABdAAAAPQAAADAAAABcAAAAgQAAACIAAACCAAAAUQAAAC8AAAAjAAAATQAAAC4AAAA5AAAANAAAAQUAAAC3AAAAogAAADUAAAArAAAAIQAAAEAAAAAnAAAANgAAAC8AAABTAAAAGQAAADwAAAAxAAAAIQAAAE8AAAAzAAAAWwAAACsAAAA5AAAAPQAAAF4AAABRAAAAMAAAADcAAABQAAAALgAAAEoAAAAiAAAAMwAAAI4AAAAdAAAATwAAADgAAAA3AAAA7QAAADQAAABwAAAAKgAAADgAAACTAAAAHQAAAFQAAAA1AAAAKQAAACIAAABSAAAAKAAABUYAAAA+AAAAlAAAAB0AAAB3AAAALgAAADMAAABcAAAAZQAAAD8AAAAsAAAATAAAAG4AAAAbAAAAawAAAEAAAAAtAAAAJwAAAE0AAAA7AAAAOQAAADEAAAEWAAAAVwAAAFQAAAA3AAAAKgAAACAAAACPAAAAKwAAAE0AAAAmAAAALwAAABgAAAA+AAAAMQAAACAAAABSAAAAMQAAAFwAAAArAAAAOQAAADgAAACJAAAAJQAAADMAAAA2AAAATgAAADMAAABCAAAAIwAAADEAAAB/AAAAHgAAATUAAABJAAAAKwAAAC0AAACEAAAAKwAAAEYAAAA0AAAAVwAAABoAAACaAAAAMQAAACIAAABJAAAAMgAAADAAAAAmAAAAMQAAAHEAAAAcAAAAVAAAACwAAABEAAAAXgAAAGMAAAA5AAAALAAAAEcAAABjAAAAHgAAAHAAAABEAAAALAAAACMAAABWAAAALgAAAMQAAAAwAAAANQAAAGQAAABRAAAANAAAACoAAACZAAAANQAAADYAAAApAAAAOwAAABsAAABuAAAAQQAAADQAAAAhAAAATwAAADEAAABWAAAALAAAADgAAABMAAAAgAAAACEAAAAxAAAAOwAAAEwAAAAxAAAATgAAAD0AAAAoAAAAHAAAAFgAAABOAAAANAAAACUAAAEDAAAAMQAAAHUAAAApAAAASAAAAEkAAAAbAAAAlgAAADQAAAAqAAAAJAAAADoAAAArAAAAMAAAADIAAABvAAAAGwAAAFQAAAAsAAAAPAAAAFIAAABbAAAAPAAAADEAAABHAAAALQAAAGYAAABmAAAAQAAAACoAAAAkAAAAUwAAACwAAABHAAAANAAAAREAAABdAAAAaQAAADEAAAAmAAAAXgAAADIAAAAwAAAAJQAAADYAAACrAAAAHAAAAE0AAAA4AAAAKAAAACEAAABTAAAAWQAAACsAAABIAAAAMgAAAF4AAABnAAAALwAAAD4AAABiAAAAMwAAAFEAAAAjAAAANAAAAGsAAAAcAAABBwAAAEUAAAApAAAALAAAAIUAAAAsAAAASwAAADEAAACXAAAAHAAAAEAAAAAuAAAAIQAAAIUAAAA1AAAAMwAAACUAAABEAAAAGwAAAIAAAABcAAAALAAAADgAAABZAAAAXQAAAD0AAAArAAAAWQAAAGoAAAAbAAAAbQAAAEEAAAAsAAAAJAAAAE0AAAA0AAAAQAAAADEAAAEpAAAAYgAAAGkAAAA5AAAAJwAAACAAAABJAAAAKQAAADgAAAAuAAAAawAAABsAAAB2AAAANAAAACAAAABUAAAAMgAAAC8AAASpAAAAQQAAADQAAABkAAAAVAAAAC4AAAA0AAAASwAAADEAAABHAAAANgAAACEAAAAcAAAAXAAAADcAAAAyAAAAMQAAABRzdGNvAAAAAAAAAAEAAAAwAAAAYnVkdGEAA
ABabWV0YQAAAAAAAAAhaGRscgAAAAAAAAAAbWRpcmFwcGwAAAAAAAAAAAAAAAAtaWxzdAAAACWpdG9vAAAAHWRhdGEAAAABAAAAAExhdmY1Ny44My4xMDA=", + "ok": true, + "headers": [ + [ + "content-type", + "video/mp4" + ] + ], + "status": 200.0, + "status_text": "" + } + }, + "base_uri": "https://localhost:8080/", + "height": 501.0 + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 31, + "metadata": { + "tags": [] + }, + "output_type": "execute_result" + } + ], + "source": [ + "play_video('mf_pong/0.avi')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NQmZEVKGF4Hh", + "colab_type": "text" + }, + "source": [ + "# Model-based training\n", + "\n", + "The `rl` package offers many more features, including model-based training. For instructions on how to use them, go to our [README](https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/rl/README.md)." + ] + } + ], + "metadata": { + "colab": { + "name": "hello_t2t-rl.ipynb", + "version": "0.3.2", + "provenance": [ + { + "file_id": "1nQvfx1EzY3ElJUy-FVF1G16okSbkeUa2", + "timestamp": 1.553274233669E12 + } + ], + "collapsed_sections": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "accelerator": "GPU" + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tensor2tensor/notebooks/hello_t2t.ipynb b/tensor2tensor/notebooks/hello_t2t.ipynb new file mode 100644 index 000000000..b69fd48c1 --- /dev/null +++ b/tensor2tensor/notebooks/hello_t2t.ipynb @@ -0,0 +1,1482 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "odi2vIMHC3Rm" + }, + "source": [ + "# Welcome to the [Tensor2Tensor](https://github.com/tensorflow/tensor2tensor) Colab\n", + "\n", + "Tensor2Tensor, or T2T for short, is a library of deep learning models and datasets designed to make deep learning more accessible and [accelerate ML research](https://research.googleblog.com/2017/06/accelerating-deep-learning-research.html). T2T is actively used and maintained by researchers and engineers within the [Google Brain team](https://research.google.com/teams/brain/) and a community of users. This colab shows you some datasets we have in T2T, how to download and use them, some models we have, how to download pre-trained models and use them, and how to create and train your own models." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "s19ucTii_wYb" + }, + "outputs": [], + "source": [ + "#@title\n", + "# Copyright 2018 Google LLC.\n", + "\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "OPGni6fuvoTj" + }, + "outputs": [], + "source": [ + "# Install deps\n", + "!pip install -q -U tensor2tensor\n", + "!pip install -q tensorflow matplotlib\n" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "oILRLCWN_16u" + }, + "outputs": [], + "source": [ + "# Imports we need.\n", + "import sys\n", + "if 'google.colab' in sys.modules: # Colab-only TensorFlow version selector\n", + " %tensorflow_version 1.x\n", + "import tensorflow as tf\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import os\n", + "import collections\n", + "\n", + "from tensor2tensor import models\n", + "from tensor2tensor import problems\n", + "from tensor2tensor.layers import common_layers\n", + "from tensor2tensor.utils import trainer_lib\n", + "from tensor2tensor.utils import t2t_model\n", + "from tensor2tensor.utils import registry\n", + "from tensor2tensor.utils import metrics\n", + "\n", + "# Enable TF Eager execution\n", + "tfe = tf.contrib.eager\n", + "tfe.enable_eager_execution()\n", + "\n", + "# Other setup\n", + "Modes = tf.estimator.ModeKeys\n", + "\n", + "# Setup some directories\n", + "data_dir = os.path.expanduser(\"~/t2t/data\")\n", + "tmp_dir = os.path.expanduser(\"~/t2t/tmp\")\n", + "train_dir = os.path.expanduser(\"~/t2t/train\")\n", + "checkpoint_dir = os.path.expanduser(\"~/t2t/checkpoints\")\n", + "tf.gfile.MakeDirs(data_dir)\n", + "tf.gfile.MakeDirs(tmp_dir)\n", + "tf.gfile.MakeDirs(train_dir)\n", + "tf.gfile.MakeDirs(checkpoint_dir)\n", + "gs_data_dir = \"gs://tensor2tensor-data\"\n", + "gs_ckpt_dir = \"gs://tensor2tensor-checkpoints/\"" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "0a69r1KDiZDe" + }, + "source": [ + "# Download MNIST and inspect it" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1241 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 505, + "status": "ok", + "timestamp": 1512371452348, + "user": { + "displayName": "Lukasz Kaiser", + "photoUrl": "//lh3.googleusercontent.com/-CbWIwcQ_VsA/AAAAAAAAAAI/AAAAAAAAAB8/jloHVR1qOhg/s50-c-k-no/photo.jpg", + "userId": "109750154298538986950" + }, + "user_tz": 480 + }, + "id": "RYDMO4zArgkz", + "outputId": "f0f13103-a437-4b95-ac9d-38f2b57a5f4c" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "['algorithmic_addition_binary40',\n", + " 'algorithmic_addition_decimal40',\n", + " 'algorithmic_cipher_shift200',\n", + " 'algorithmic_cipher_shift5',\n", + " 'algorithmic_cipher_vigenere200',\n", + " 'algorithmic_cipher_vigenere5',\n", + " 'algorithmic_identity_binary40',\n", + " 'algorithmic_identity_decimal40',\n", + " 'algorithmic_multiplication_binary40',\n", + " 'algorithmic_multiplication_decimal40',\n", + " 'algorithmic_reverse_binary40',\n", + " 'algorithmic_reverse_binary40_test',\n", + " 'algorithmic_reverse_decimal40',\n", + " 'algorithmic_reverse_nlplike32k',\n", + " 'algorithmic_reverse_nlplike8k',\n", + " 'algorithmic_shift_decimal40',\n", + " 'audio_timit_characters_tune',\n", + " 'audio_timit_tokens8k_test',\n", + " 'audio_timit_tokens8k_tune',\n", + " 'image_celeba',\n", + " 'image_cifar10',\n", + " 'image_cifar10_plain',\n", + " 'image_cifar10_plain8',\n", + " 'image_cifar10_tune',\n", + " 'image_fsns',\n", + " 'image_imagenet',\n", + " 
'image_imagenet224',\n", + " 'image_imagenet32',\n", + " 'image_imagenet64',\n", + " 'image_mnist',\n", + " 'image_mnist_tune',\n", + " 'image_ms_coco_characters',\n", + " 'image_ms_coco_tokens32k',\n", + " 'image_ms_coco_tokens8k',\n", + " 'img2img_cifar10',\n", + " 'img2img_imagenet',\n", + " 'languagemodel_lm1b32k',\n", + " 'languagemodel_lm1b8k_packed',\n", + " 'languagemodel_lm1b_characters',\n", + " 'languagemodel_ptb10k',\n", + " 'languagemodel_ptb_characters',\n", + " 'languagemodel_wiki_full32k',\n", + " 'languagemodel_wiki_scramble128',\n", + " 'languagemodel_wiki_scramble1k50',\n", + " 'languagemodel_wiki_scramble8k50',\n", + " 'librispeech',\n", + " 'multinli_matched',\n", + " 'multinli_mismatched',\n", + " 'ocr_test',\n", + " 'parsing_english_ptb16k',\n", + " 'parsing_english_ptb8k',\n", + " 'parsing_icelandic16k',\n", + " 'programming_desc2code_cpp',\n", + " 'programming_desc2code_py',\n", + " 'sentiment_imdb',\n", + " 'summarize_cnn_dailymail32k',\n", + " 'translate_encs_wmt32k',\n", + " 'translate_encs_wmt_characters',\n", + " 'translate_ende_wmt32k',\n", + " 'translate_ende_wmt32k_packed',\n", + " 'translate_ende_wmt8k',\n", + " 'translate_ende_wmt_bpe32k',\n", + " 'translate_ende_wmt_characters',\n", + " 'translate_enfr_wmt32k',\n", + " 'translate_enfr_wmt32k_packed',\n", + " 'translate_enfr_wmt8k',\n", + " 'translate_enfr_wmt_characters',\n", + " 'translate_enfr_wmt_small32k',\n", + " 'translate_enfr_wmt_small8k',\n", + " 'translate_enfr_wmt_small_characters',\n", + " 'translate_enmk_setimes32k',\n", + " 'translate_enzh_wmt8k']" + ] + }, + "execution_count": 4, + "metadata": { + "tags": [] + }, + "output_type": "execute_result" + } + ], + "source": [ + "# A Problem is a dataset together with some fixed pre-processing.\n", + "# It could be a translation dataset with a specific tokenization,\n", + "# or an image dataset with a specific resolution.\n", + "#\n", + "# There are many problems available in Tensor2Tensor\n", + "problems.available()" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 306 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 21361, + "status": "ok", + "timestamp": 1512371478309, + "user": { + "displayName": "Lukasz Kaiser", + "photoUrl": "//lh3.googleusercontent.com/-CbWIwcQ_VsA/AAAAAAAAAAI/AAAAAAAAAB8/jloHVR1qOhg/s50-c-k-no/photo.jpg", + "userId": "109750154298538986950" + }, + "user_tz": 480 + }, + "id": "JKc2uSk6WX5e", + "outputId": "7e0cafb5-d035-49a7-9ff4-7f4150c905c7" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz to /content/t2t/tmp/train-images-idx3-ubyte.gz\n", + "100% completed\n", + "INFO:tensorflow:Successfully downloaded train-images-idx3-ubyte.gz, 9912422 bytes.\n", + "INFO:tensorflow:Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz to /content/t2t/tmp/train-labels-idx1-ubyte.gz\n", + "113% completed\n", + "INFO:tensorflow:Successfully downloaded train-labels-idx1-ubyte.gz, 28881 bytes.\n", + "INFO:tensorflow:Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz to /content/t2t/tmp/t10k-images-idx3-ubyte.gz\n", + "100% completed\n", + "INFO:tensorflow:Successfully downloaded t10k-images-idx3-ubyte.gz, 1648877 bytes.\n", + "INFO:tensorflow:Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz to /content/t2t/tmp/t10k-labels-idx1-ubyte.gz\n", + 
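The cell above explains that a Problem is a dataset plus fixed pre-processing and lists the Problems already registered with T2T. New datasets are added the same way the built-in ones are: subclass an existing Problem class and register it. The following is a minimal sketch, not part of the original notebook; the class name, problem name, and sample text are hypothetical, but the `text_problems.Text2TextProblem` and `registry.register_problem` APIs are the ones used elsewhere in this repository.

```python
# Hypothetical example: registering a tiny text-to-text Problem so that it
# shows up in problems.available() as "poetry_lines".
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import registry


@registry.register_problem
class PoetryLines(text_problems.Text2TextProblem):
  """Toy problem: predict the next line of a poem from the previous line."""

  @property
  def approx_vocab_size(self):
    return 2**13  # build a ~8k subword vocabulary from the generated samples

  @property
  def is_generate_per_split(self):
    # False: yield one stream of samples and let T2T split it into train/dev.
    return False

  def generate_samples(self, data_dir, tmp_dir, dataset_split):
    del data_dir, tmp_dir, dataset_split  # unused in this toy sketch
    lines = ["Roses are red", "Violets are blue",
             "Tensors are fun", "And so are you"]
    for previous_line, next_line in zip(lines, lines[1:]):
      yield {"inputs": previous_line, "targets": next_line}
```

Once registered, the hypothetical problem is used exactly like the built-in ones, e.g. `problems.problem("poetry_lines").generate_data(data_dir, tmp_dir)`.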
"180% completed\n", + "INFO:tensorflow:Successfully downloaded t10k-labels-idx1-ubyte.gz, 4542 bytes.\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/train-images-idx3-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/train-labels-idx1-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-images-idx3-ubyte.gz\n", + "INFO:tensorflow:Not downloading, file already found: /content/t2t/tmp/t10k-labels-idx1-ubyte.gz\n", + "INFO:tensorflow:Shuffling data...\n" + ] + } + ], + "source": [ + "# Fetch the MNIST problem\n", + "mnist_problem = problems.problem(\"image_mnist\")\n", + "# The generate_data method of a problem will download data and process it into\n", + "# a standard format ready for training and evaluation.\n", + "mnist_problem.generate_data(data_dir, tmp_dir)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 381 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 471, + "status": "ok", + "timestamp": 1512371501917, + "user": { + "displayName": "Lukasz Kaiser", + "photoUrl": "//lh3.googleusercontent.com/-CbWIwcQ_VsA/AAAAAAAAAAI/AAAAAAAAAB8/jloHVR1qOhg/s50-c-k-no/photo.jpg", + "userId": "109750154298538986950" + }, + "user_tz": 480 + }, + "id": "VW6HCRANFPYV", + "outputId": "3b33057c-5082-4377-ec83-79f67e5a8e84" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Reading data files from /content/t2t/data/image_mnist-train*\n", + "Label: 7\n" + ] + }, + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAUsAAAFKCAYAAACU6307AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvpW3flQAAEhNJREFUeJzt3V1IlPn7x/HP/J2VGir85arQYrtL\nGCtpBwuFGj1YEriwlNHDJiULHRRLkVmESA8LQZa5Rm4HqT0crCzMNkcdBErEQrQ6sR6EemJ1UCKt\naUkl2W7J/A9+/GTbHfVympn7nun9Ag+85+s918V3+nQ/zHfGEwqFQgIATOn/nC4AABIBYQkABoQl\nABgQlgBgQFgCgAFhCQAWoTiQFPanu7t70scS9ScZe0rWvugpcX7i1ddUPPF4n6XH4wm7PRQKTfpY\nokrGnqTk7IueEke8+poqDr2R7vTkyZO6e/euPB6PampqtHTp0kh3BQCuF1FY3rlzRw8fPpTf79eD\nBw9UU1Mjv98f7doAwDUiusHT0dGhkpISSdKiRYv0/PlzjY6ORrUwAHCTiI4sh4eHtWTJkonf58+f\nr6GhIc2ZMyfs+O7ubuXl5YV9LA6XTOMuGXuSkrMvekocTvcV8TXLv5uuifz8/En/LtkuRidjT1Jy\n9kVPicMNN3giOg3PzMzU8PDwxO9PnjxRRkZGJLsCgIQQUViuWLFCbW1tkqTe3l5lZmZOegoOAMkg\notPwL7/8UkuWLNE333wjj8ej48ePR7suAHAV3pQeZcnYk5ScfdFT4kjYa5YA8KEhLAHAgLAEAAPC\nEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADAgLAHAgLAEAAPCEgAMCEsA\nMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCA\nsAQAA8ISAAwISwAwICwBwICwBAADwhIADAhLADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8IS\nAAy8kfxRMBjU/v37lZOTI0lavHixjh49GtXCAMBNIgpLSVq+fLkaGxujWQsAuBan4QBgEHFY3r9/\nX3v27NH27dt1+/btaNYEAK7jCYVCoZn+0eDgoLq6ulRaWqr+/n5VVFSovb1dqampYcf39PQoLy/v\nvYsFAKdEFJb/tHnzZp09e1bZ2dnhn8TjCbs9FApN+liiSsaepOTsi54SR7z6mioOIzoNv3btmi5d\nuiRJGhoa0tOnT5WVlRVZdQCQACI6shwdHdWhQ4f04sULvXnzRnv37tXq1asnfxKOLBNeMvZFT4nD\nDUeWUTkNnw5hmfiSsS96ShxuCMuI32cJJIrJrqWHe8z6zo6p9vlPDQ0N5rEHDx40j0V88T5LADAg\nLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwIDljkh6Uy1h/OdjM1nGaBUMBqO+\nT8QfR5YAYEBYAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGBCWAGDAtztGWTL2JMWvL+sKGr/f\nb95nYWFhpOVMqqOjwzy2qKgo6s8/GV5/7/88k+HIEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwICwB\nwICwBAADwhIADAhLADDgC8vwjq1bt5oe++STT8z73LJli3lsLJYmzkR/f79pXDyXMMIdOLIEAAPC\nEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwICwBAADwhIADFju6DK//fabaZwTywJn8o2Kierq\n1atOlwCXMh1Z
9vX1qaSkRK2trZKkx48fa+fOnSovL9f+/fv1119/xbRIAHDatGH56tUrnThx4p0j\nmcbGRpWXl+vnn3/Wp59+qkAgENMiAcBp04ZlamqqWlpalJmZObEtGAxq3bp1kqTi4uIZfeE8ACSi\naa9Zer1eeb3vDhsbG1NqaqokKT09XUNDQ7GpDgBc4r1v8IRCoWnHdHd3Ky8vL+K/TzTJ2NOHoqqq\nKqrjnJCsrz+n+4ooLH0+n16/fq1Zs2ZpcHDwnVP0cPLz88NuD4VC8ng8kZTgWu/bk5vvhn8IGhoa\nTOMOHjwY40oik4z/pqT49TVVIEf0PsuioiK1tbVJktrb27Vy5crIKgOABDHtkWVPT49Onz6tgYEB\neb1etbW1qb6+XtXV1fL7/VqwYIE2btwYj1oBwDHThmVeXp5++umnf22/cuVKTAoCADdiBY/LOH0t\ncrIv7MrOzn7nsbNnz5r3OTAwYB77yy+/mMbF6mJ/MBiMyX6R+FgbDgAGhCUAGBCWAGBAWAKAAWEJ\nAAaEJQAYEJYAYEBYAoABYQkABoQlABh4QnH4kLjJPlopGT9O6n17+uGHH0zjZrIsz7qEcCpum6tY\nvWwXLlxoGjfZslCnuW2eoiVhP6INAD40hCUAGBCWAGBAWAKAAWEJAAaEJQAYEJYAYEBYAoABYQkA\nBoQlABiw3DHKkrEnyX19xepl66YeI+G2eYoWljsCQIIgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQA\nA8ISAAwISwAwICwBwICwBAADwhIADAhLADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAy8\nThcAJKKCggLz2IGBAfPY/v7+SMpBHHBkCQAGprDs6+tTSUmJWltbJUnV1dX6+uuvtXPnTu3cuVO/\n/vprLGsEAMdNexr+6tUrnThxQoWFhe9sr6qqUnFxccwKAwA3mfbIMjU1VS0tLcrMzIxHPQDgStMe\nWXq9Xnm9/x7W2tqqK1euKD09XUePHtX8+fMn3Ud3d7fy8vLCPhYKhWZQbmJIxp6k5O3r75Khx2To\nIRyn+4robviGDRuUlpam3NxcNTc36/z58zp27Nik4/Pz88NuD4VC8ng8kZTgWsnYk+S+vmL1D8fa\no1vvhrttnqIlXn1N9bqK6G54YWGhcnNzJUlr165VX19fZJUBQIKIKCz37ds38T9gMBhUTk5OVIsC\nALeZ9jS8p6dHp0+f1sDAgLxer9ra2rRjxw5VVlZq9uzZ8vl8qq2tjUetAOAYTygOV00nu9aQjNdX\nkrEnyX19cc0yPLfNU7S44Zolyx0RkUePHpnHZmdnm8devXo1knKiJhYhPJOetm7dGvXnR3Sw3BEA\nDAhLADAgLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwYLkjXGXLli1Ol2AykyWMBw8e\njGEliBeOLAHAgLAEAAPCEgAMCEsAMCAsAcCAsAQAA8ISAAwISwAwICwBwIBvd4yyZOxJ+ndfM/kS\nss2bN0e9noaGhqjvU5IWLlxoGve+38IYKx/K6y+WzzMZjiwBwICwBAADwhIADAhLADAgLAHAgLAE\nAAPCEgAMCEsAMCAsAcCAsAQAA5Y7Rlky9iS5r69YvWzd1GMk3DZP0cJyRwBIEIQlABgQlgBgQFgC\ngAFhCQAGhCUAGBCWAGBAWAKAAWEJAAaEJQAYeJ0uAHCTgoIC07jOzs4YVwK3MYVlXV2durq69Pbt\nW+3evVv5+fk6fPiwxsfHlZGRoTNnzig1NTXWtQKAY6YNy87OTt27d09+v18jIyMqKytTYWGhysvL\nVVpaqoaGBgUCAZWXl8ejXgBwxLTXLJctW6Zz585JkubNm6exsTEFg0GtW7dOklRcXKyOjo7YVgkA\nDps2LFNSUuTz+SRJgUBAq1at0tjY2MRpd3p6uoaGhmJbJQA4zHyD58aNGwoEArp8+bLWr18/sd3y\nuYLd3d3Ky8sL+1gcPk4z7pKxJyl5+/q7ZDhLStZ5crovU1jeunVLFy5c0MWLFzV37lz5fD69fv1a\ns2bN0uDgoDIzM6f8+/z8/LDbk/GDSpOxJ8l9fcXqH05hYaFpnFvvhrttnqIlIT789+XLl6qrq1NT\nU5PS0tIkSUVFRWpra5Mktbe3a+XKlVEqFQDcadojy+vXr2tkZESVlZUT206dOqUjR47I7/drwYIF\n2rhxY0yLBACn8R08UZaMPUnu64vT8PDcNk/R4obTcFbwICH19/ebx2ZnZ5vHDgwMRFIOPgCsDQcA\nA8ISAAwISwAwICwBwICwBAADwhIADAhLADAgLAHAgLAEAAPCEgAMWO6IhDSTtdkzWe5oXRs+k+WW\nSA4cWQKAAWEJAAaEJQAYEJYAYEBYAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKA\nAWEJAAaEJQAYEJYAYEBYAoABYQkABoQlABgQlgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKAAWEJAAaE\nJQAYEJYAYEBYAoABYQkABoQlABh4nS4AiEQgEDCPLSgoCLs9Oztb/f3972x79OjRe9WF5GUKy7q6\nOnV1dent27favXu3bt68qd7eXqWlpUmSdu3apTVr1sSyTgBw1LRh2dnZqXv37snv92tkZERlZWUq\nKChQVVWViouL41EjADhu2rBctmyZli5dKkmaN2+exsbGND4+HvPCAMBNpr3Bk5KSIp/PJ+m/14lW\nrVqllJQUtba2qqKiQgcOHNCzZ89iXigAOMkTCoVCloE3btxQU1OTLl++rJ6eHqWlpSk3N1fNzc36\n448/dOzYsUn/tqenR3l5eVErGgDizRSWt27d0rlz53Tx4sWJmzr/c//+fX3//fdqbW2d/Ek8nrDb\nQ6HQpI8lqmTsSXJfX1u3bjWPra+vD7s93N1w6347OzvNzx9PbpunaIlXX1PF4bSn4S9fvlRdXZ2a\nmpomgnLfvn0TL7JgMKicnJwolQoA7jTtDZ7r169rZGRElZWVE9s2bdqkyspKzZ49Wz6fT7W1tTEt\nEgCcNm1Ybtu2Tdu2bfvX9rKyspgUBABuxHJHADAw3w1/ryfhBk/CS8a+6ClxJMQNHgAAYQkAJoQl\nABgQlgBgQFgCgAFhCQAGhCUAGBCWAGBAWAKAAWEJAAaEJQAYEJYAYEBYAoABYQkABoQlABgQlgBg\nQFgCgAFhCQAGhCUAGBCWAGAQly8sA4BEx5ElABgQlgBgQFgCgAFhCQAGhCUAGBCWAGDgdeJJT548\nqbt378rj8aimpkZLly51ooyoCgaD2r9/v3JyciRJixcv1tGjRx2uKnJ9fX367rvv9O2332rHjh16\n/PixDh8+rPHxcWVkZOjMmTNKTU11uswZ+WdP1dXV6u3tVVpamiRp165dWrNmjbNFzlBdXZ26urr0\n9u1b7d69W/n5+Qk/T9K/+7p586bjcxX3sLxz544ePnwov9+vBw8eqKamR
n6/P95lxMTy5cvV2Njo\ndBnv7dWrVzpx4oQKCwsntjU2Nqq8vFylpaVqaGhQIBBQeXm5g1XOTLieJKmqqkrFxcUOVfV+Ojs7\nde/ePfn9fo2MjKisrEyFhYUJPU9S+L4KCgocn6u4n4Z3dHSopKREkrRo0SI9f/5co6Oj8S4DU0hN\nTVVLS4syMzMntgWDQa1bt06SVFxcrI6ODqfKi0i4nhLdsmXLdO7cOUnSvHnzNDY2lvDzJIXva3x8\n3OGqHAjL4eFh/ec//5n4ff78+RoaGop3GTFx//597dmzR9u3b9ft27edLidiXq9Xs2bNemfb2NjY\nxOlcenp6ws1ZuJ4kqbW1VRUVFTpw4ICePXvmQGWRS0lJkc/nkyQFAgGtWrUq4edJCt9XSkqK43Pl\nyDXLv0uW1ZafffaZ9u7dq9LSUvX396uiokLt7e0Jeb1oOskyZxs2bFBaWppyc3PV3Nys8+fP69ix\nY06XNWM3btxQIBDQ5cuXtX79+ontiT5Pf++rp6fH8bmK+5FlZmamhoeHJ35/8uSJMjIy4l1G1GVl\nZemrr76Sx+PRwoUL9fHHH2twcNDpsqLG5/Pp9evXkqTBwcGkOJ0tLCxUbm6uJGnt2rXq6+tzuKKZ\nu3Xrli5cuKCWlhbNnTs3aebpn325Ya7iHpYrVqxQW1ubJKm3t1eZmZmaM2dOvMuIumvXrunSpUuS\npKGhIT19+lRZWVkOVxU9RUVFE/PW3t6ulStXOlzR+9u3b5/6+/sl/fea7P/eyZAoXr58qbq6OjU1\nNU3cJU6GeQrXlxvmypFPHaqvr9fvv/8uj8ej48eP64svvoh3CVE3OjqqQ4cO6cWLF3rz5o327t2r\n1atXO11WRHp6enT69GkNDAzI6/UqKytL9fX1qq6u1p9//qkFCxaotrZWH330kdOlmoXraceOHWpu\nbtbs2bPl8/lUW1ur9PR0p0s18/v9+vHHH/X5559PbDt16pSOHDmSsPMkhe9r06ZNam1tdXSu+Ig2\nADBgBQ8AGBCWAGBAWAKAAWEJAAaEJQAYEJYAYEBYAoABYQkABv8PicrBdxpy97QAAAAASUVORK5C\nYII=\n", + "text/plain": [ + "\u003cmatplotlib.figure.Figure at 0x7f9a730a8210\u003e" + ] + }, + "metadata": { + "tags": [] + }, + "output_type": "display_data" + } + ], + "source": [ + "# Now let's see the training MNIST data as Tensors.\n", + "mnist_example = tfe.Iterator(mnist_problem.dataset(Modes.TRAIN, data_dir)).next()\n", + "image = mnist_example[\"inputs\"]\n", + "label = mnist_example[\"targets\"]\n", + "\n", + "plt.imshow(image.numpy()[:, :, 0].astype(np.float32), cmap=plt.get_cmap('gray'))\n", + "print(\"Label: %d\" % label.numpy())" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "gXL7_bVH49Kl" + }, + "source": [ + "# Translate from English to German with a pre-trained model" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 170 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 2843, + "status": "ok", + "timestamp": 1512371509946, + "user": { + "displayName": "Lukasz Kaiser", + "photoUrl": "//lh3.googleusercontent.com/-CbWIwcQ_VsA/AAAAAAAAAAI/AAAAAAAAAB8/jloHVR1qOhg/s50-c-k-no/photo.jpg", + "userId": "109750154298538986950" + }, + "user_tz": 480 + }, + "id": "EB4MP7_y_SuQ", + "outputId": "8fbdcd05-a8b6-45e5-88b2-ce6fdfec0351" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\r\n", + "\r\n", + "Updates are available for some Cloud SDK components. To install them,\r\n", + "please run:\r\n", + " $ gcloud components update\r\n", + "\n", + "Copying gs://tensor2tensor-data/vocab.translate_ende_wmt32k.32768.subwords...\n", + "/ [1 files][316.4 KiB/316.4 KiB] \n", + "Operation completed over 1 objects/316.4 KiB. 
\n" + ] + } + ], + "source": [ + "# Fetch the problem\n", + "ende_problem = problems.problem(\"translate_ende_wmt32k\")\n", + "\n", + "# Copy the vocab file locally so we can encode inputs and decode model outputs\n", + "# All vocabs are stored on GCS\n", + "vocab_name = \"vocab.translate_ende_wmt32k.32768.subwords\"\n", + "vocab_file = os.path.join(gs_data_dir, vocab_name)\n", + "!gsutil cp {vocab_file} {data_dir}\n", + "\n", + "# Get the encoders from the problem\n", + "encoders = ende_problem.feature_encoders(data_dir)\n", + "\n", + "# Setup helper functions for encoding and decoding\n", + "def encode(input_str, output_str=None):\n", + " \"\"\"Input str to features dict, ready for inference\"\"\"\n", + " inputs = encoders[\"inputs\"].encode(input_str) + [1] # add EOS id\n", + " batch_inputs = tf.reshape(inputs, [1, -1, 1]) # Make it 3D.\n", + " return {\"inputs\": batch_inputs}\n", + "\n", + "def decode(integers):\n", + " \"\"\"List of ints to str\"\"\"\n", + " integers = list(np.squeeze(integers))\n", + " if 1 in integers:\n", + " integers = integers[:integers.index(1)]\n", + " return encoders[\"inputs\"].decode(np.squeeze(integers))" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "g2aQW7Z6TOEu" + }, + "outputs": [], + "source": [ + "# # Generate and view the data\n", + "# # This cell is commented out because WMT data generation can take hours\n", + "\n", + "# ende_problem.generate_data(data_dir, tmp_dir)\n", + "# example = tfe.Iterator(ende_problem.dataset(Modes.TRAIN, data_dir)).next()\n", + "# inputs = [int(x) for x in example[\"inputs\"].numpy()] # Cast to ints.\n", + "# targets = [int(x) for x in example[\"targets\"].numpy()] # Cast to ints.\n", + "\n", + "\n", + "\n", + "# # Example inputs as int-tensor.\n", + "# print(\"Inputs, encoded:\")\n", + "# print(inputs)\n", + "# print(\"Inputs, decoded:\")\n", + "# # Example inputs as a sentence.\n", + "# print(decode(inputs))\n", + "# # Example targets as int-tensor.\n", + "# print(\"Targets, encoded:\")\n", + "# print(targets)\n", + "# # Example targets as a sentence.\n", + "# print(\"Targets, decoded:\")\n", + "# print(decode(targets))" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 408 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 496, + "status": "ok", + "timestamp": 1512371515918, + "user": { + "displayName": "Lukasz Kaiser", + "photoUrl": "//lh3.googleusercontent.com/-CbWIwcQ_VsA/AAAAAAAAAAI/AAAAAAAAAB8/jloHVR1qOhg/s50-c-k-no/photo.jpg", + "userId": "109750154298538986950" + }, + "user_tz": 480 + }, + "id": "WkFUEs7ZOA79", + "outputId": "f8be52a4-e85c-4daf-9f77-24d75eea3ab0" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "['resnet50',\n", + " 'lstm_seq2seq',\n", + " 'transformer_encoder',\n", + " 'attention_lm',\n", + " 'vanilla_gan',\n", + " 'transformer',\n", + " 'gene_expression_conv',\n", + " 'transformer_moe',\n", + " 'attention_lm_moe',\n", + " 'transformer_revnet',\n", + " 'lstm_seq2seq_attention',\n", + " 'shake_shake',\n", + " 'transformer_ae',\n", + " 'diagonal_neural_gpu',\n", + " 'xception',\n", + " 'aligned',\n", + " 'multi_model',\n", + " 'neural_gpu',\n", + " 'slice_net',\n", + " 'byte_net',\n", + " 'cycle_gan',\n", + " 'transformer_sketch',\n", + " 'blue_net']" + ] + }, + "execution_count": 9, + "metadata": { + "tags": [] + }, + "output_type": "execute_result" + } + ], + "source": [ + "# There are many models 
available in Tensor2Tensor\n", + "registry.list_models()" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "9l6hDQbrRUYV" + }, + "outputs": [], + "source": [ + "# Create hparams and the model\n", + "model_name = \"transformer\"\n", + "hparams_set = \"transformer_base\"\n", + "\n", + "hparams = trainer_lib.create_hparams(hparams_set, data_dir=data_dir, problem_name=\"translate_ende_wmt32k\")\n", + "\n", + "# NOTE: Only create the model once when restoring from a checkpoint; it's a\n", + "# Layer and so subsequent instantiations will have different variable scopes\n", + "# that will not match the checkpoint.\n", + "translate_model = registry.model(model_name)(hparams, Modes.EVAL)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 13020, + "status": "ok", + "timestamp": 1512371536282, + "user": { + "displayName": "Lukasz Kaiser", + "photoUrl": "//lh3.googleusercontent.com/-CbWIwcQ_VsA/AAAAAAAAAAI/AAAAAAAAAB8/jloHVR1qOhg/s50-c-k-no/photo.jpg", + "userId": "109750154298538986950" + }, + "user_tz": 480 + }, + "id": "FEwNUVlMYOJi", + "outputId": "86747a09-e83d-4a5f-d938-2fef25e4ce2f" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "u'/content/t2t/checkpoints/transformer_ende_test/model.ckpt-350855'" + ] + }, + "execution_count": 11, + "metadata": { + "tags": [] + }, + "output_type": "execute_result" + } + ], + "source": [ + "# Copy the pretrained checkpoint locally\n", + "ckpt_name = \"transformer_ende_test\"\n", + "gs_ckpt = os.path.join(gs_ckpt_dir, ckpt_name)\n", + "!gsutil -q cp -R {gs_ckpt} {checkpoint_dir}\n", + "ckpt_path = tf.train.latest_checkpoint(os.path.join(checkpoint_dir, ckpt_name))\n", + "ckpt_path" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 68 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 11397, + "status": "ok", + "timestamp": 1512371578480, + "user": { + "displayName": "Lukasz Kaiser", + "photoUrl": "//lh3.googleusercontent.com/-CbWIwcQ_VsA/AAAAAAAAAAI/AAAAAAAAAB8/jloHVR1qOhg/s50-c-k-no/photo.jpg", + "userId": "109750154298538986950" + }, + "user_tz": 480 + }, + "id": "3O-8E9d6TtuJ", + "outputId": "cee729b7-8237-45bb-ac6f-dfadce9916b4" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Greedy Decoding\n", + "Inputs: The animal didn't cross the street because it was too tired\n", + "Outputs: Das Tier überquerte die Straße nicht, weil es zu müde war, weil es zu müde war.\n" + ] + } + ], + "source": [ + "# Restore and translate!\n", + "def translate(inputs):\n", + " encoded_inputs = encode(inputs)\n", + " with tfe.restore_variables_on_create(ckpt_path):\n", + " model_output = translate_model.infer(encoded_inputs)[\"outputs\"]\n", + " return decode(model_output)\n", + "\n", + "inputs = \"The animal didn't cross the street because it was too tired\"\n", + "outputs = translate(inputs)\n", + "\n", + "print(\"Inputs: %s\" % inputs)\n", + "print(\"Outputs: %s\" % outputs)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "X3mkIEcbfiTP" + }, + "source": [ + "## Attention Viz Utils" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "r6GPPFy1fL2N" + }, + 
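The `translate()` helper above decodes greedily (note the "Greedy Decoding" log line in its output). The same restored model can also decode with beam search by passing a beam size and length-penalty alpha to `infer()`. This is a hedged sketch rather than part of the original notebook, and the exact `infer()` argument defaults vary between T2T versions; the `beam_size` and `alpha` values are illustrative only.

```python
# Sketch: beam-search decoding with the same restored Transformer, reusing the
# encode()/decode() helpers and ckpt_path defined in the cells above.
def translate_beam(inputs, beam_size=4, alpha=0.6):
  encoded_inputs = encode(inputs)
  with tfe.restore_variables_on_create(ckpt_path):
    model_output = translate_model.infer(
        encoded_inputs, beam_size=beam_size, alpha=alpha)["outputs"]
  return decode(model_output)

print(translate_beam("The animal didn't cross the street because it was too tired"))
```

Beam search usually reduces the kind of repetition visible in the greedy output above, at the cost of slower decoding.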
"outputs": [], + "source": [ + "from tensor2tensor.visualization import attention\n", + "from tensor2tensor.data_generators import text_encoder\n", + "\n", + "SIZE = 35\n", + "\n", + "def encode_eval(input_str, output_str):\n", + " inputs = tf.reshape(encoders[\"inputs\"].encode(input_str) + [1], [1, -1, 1, 1]) # Make it 3D.\n", + " outputs = tf.reshape(encoders[\"inputs\"].encode(output_str) + [1], [1, -1, 1, 1]) # Make it 3D.\n", + " return {\"inputs\": inputs, \"targets\": outputs}\n", + "\n", + "def get_att_mats():\n", + " enc_atts = []\n", + " dec_atts = []\n", + " encdec_atts = []\n", + "\n", + " for i in range(hparams.num_hidden_layers):\n", + " enc_att = translate_model.attention_weights[\n", + " \"transformer/body/encoder/layer_%i/self_attention/multihead_attention/dot_product_attention\" % i][0]\n", + " dec_att = translate_model.attention_weights[\n", + " \"transformer/body/decoder/layer_%i/self_attention/multihead_attention/dot_product_attention\" % i][0]\n", + " encdec_att = translate_model.attention_weights[\n", + " \"transformer/body/decoder/layer_%i/encdec_attention/multihead_attention/dot_product_attention\" % i][0]\n", + " enc_atts.append(resize(enc_att))\n", + " dec_atts.append(resize(dec_att))\n", + " encdec_atts.append(resize(encdec_att))\n", + " return enc_atts, dec_atts, encdec_atts\n", + "\n", + "def resize(np_mat):\n", + " # Sum across heads\n", + " np_mat = np_mat[:, :SIZE, :SIZE]\n", + " row_sums = np.sum(np_mat, axis=0)\n", + " # Normalize\n", + " layer_mat = np_mat / row_sums[np.newaxis, :]\n", + " lsh = layer_mat.shape\n", + " # Add extra dim for viz code to work.\n", + " layer_mat = np.reshape(layer_mat, (1, lsh[0], lsh[1], lsh[2]))\n", + " return layer_mat\n", + "\n", + "def to_tokens(ids):\n", + " ids = np.squeeze(ids)\n", + " subtokenizer = hparams.problem_hparams.vocabulary['targets']\n", + " tokens = []\n", + " for _id in ids:\n", + " if _id == 0:\n", + " tokens.append('\u003cPAD\u003e')\n", + " elif _id == 1:\n", + " tokens.append('\u003cEOS\u003e')\n", + " elif _id == -1:\n", + " tokens.append('\u003cNULL\u003e')\n", + " else:\n", + " tokens.append(subtokenizer._subtoken_id_to_subtoken_string(_id))\n", + " return tokens" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "wfF8_cW-OXPN" + }, + "outputs": [], + "source": [ + "def call_html():\n", + " import IPython\n", + " display(IPython.core.display.HTML('''\n", + " \u003cscript src=\"/static/components/requirejs/require.js\"\u003e\u003c/script\u003e\n", + " \u003cscript\u003e\n", + " requirejs.config({\n", + " paths: {\n", + " base: '/static/base',\n", + " \"d3\": \"https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.8/d3.min/",\n", + " jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min',\n", + " },\n", + " });\n", + " \u003c/script\u003e\n", + " '''))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "T7UJzFf6fmhp" + }, + "source": [ + "## Display Attention" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 2006, + "resources": { + "http://localhost:8080/static/components/requirejs/require.js": { + "data": "/** vim: et:ts=4:sw=4:sts=4
 * @license RequireJS 2.1.22 Copyright (c) 2010-2015, The Dojo Foundation All Rights Reserved.
 * Available via the MIT or new BSD license.
 * see: http://github.com/jrburke/requirejs for details
 */
//Not using strict: uneven strict support in browsers, #392, and causes
//problems with requirejs.exec()/transpiler plugins that may not be strict.
/*jslint regexp: true, nomen: true, sloppy: true */
/*global window, navigator, document, importScripts, setTimeout, opera */

var requirejs, require, define;
(function (global) {
    var req, s, head, baseElement, dataMain, src,
        interactiveScript, currentlyAddingScript, mainScript, subPath,
        version = '2.1.22',
        commentRegExp = /(\/\*([\s\S]*?)\*\/|([^:]|^)\/\/(.*)$)/mg,
        cjsRequireRegExp = /[^.]\s*require\s*\(\s*["']([^'"\s]+)["']\s*\)/g,
        jsSuffixRegExp = /\.js$/,
        currDirRegExp = /^\.\//,
        op = Object.prototype,
        ostring = op.toString,
        hasOwn = op.hasOwnProperty,
        ap = Array.prototype,
        isBrowser = !!(typeof window !== 'undefined' && typeof navigator !== 'undefined' && window.document),
        isWebWorker = !isBrowser && typeof importScripts !== 'undefined',
        //PS3 indicates loaded and complete, but need to wait for complete
        //specifically. Sequence is 'loading', 'loaded', execution,
        // then 'complete'. The UA check is unfortunate, but not sure how
        //to feature test w/o causing perf issues.
        readyRegExp = isBrowser && navigator.platform === 'PLAYSTATION 3' ?
                      /^complete$/ : /^(complete|loaded)$/,
        defContextName = '_',
        //Oh the tragedy, detecting opera. See the usage of isOpera for reason.
        isOpera = typeof opera !== 'undefined' && opera.toString() === '[object Opera]',
        contexts = {},
        cfg = {},
        globalDefQueue = [],
        useInteractive = false;

    function isFunction(it) {
        return ostring.call(it) === '[object Function]';
    }

    function isArray(it) {
        return ostring.call(it) === '[object Array]';
    }

    /**
     * Helper function for iterating over an array. If the func returns
     * a true value, it will break out of the loop.
     */
    function each(ary, func) {
        if (ary) {
            var i;
            for (i = 0; i < ary.length; i += 1) {
                if (ary[i] && func(ary[i], i, ary)) {
                    break;
                }
            }
        }
    }

    /**
     * Helper function for iterating over an array backwards. If the func
     * returns a true value, it will break out of the loop.
     */
    function eachReverse(ary, func) {
        if (ary) {
            var i;
            for (i = ary.length - 1; i > -1; i -= 1) {
                if (ary[i] && func(ary[i], i, ary)) {
                    break;
                }
            }
        }
    }

    function hasProp(obj, prop) {
        return hasOwn.call(obj, prop);
    }

    function getOwn(obj, prop) {
        return hasProp(obj, prop) && obj[prop];
    }

    /**
     * Cycles over properties in an object and calls a function for each
     * property value. If the function returns a truthy value, then the
     * iteration is stopped.
     */
    function eachProp(obj, func) {
        var prop;
        for (prop in obj) {
            if (hasProp(obj, prop)) {
                if (func(obj[prop], prop)) {
                    break;
                }
            }
        }
    }

    /**
     * Simple function to mix in properties from source into target,
     * but only if target does not already have a property of the same name.
     */
    function mixin(target, source, force, deepStringMixin) {
        if (source) {
            eachProp(source, function (value, prop) {
                if (force || !hasProp(target, prop)) {
                    if (deepStringMixin && typeof value === 'object' && value &&
                        !isArray(value) && !isFunction(value) &&
                        !(value instanceof RegExp)) {

                        if (!target[prop]) {
                            target[prop] = {};
                        }
                        mixin(target[prop], value, force, deepStringMixin);
                    } else {
                        target[prop] = value;
                    }
                }
            });
        }
        return target;
    }

    //Similar to Function.prototype.bind, but the 'this' object is specified
    //first, since it is easier to read/figure out what 'this' will be.
    function bind(obj, fn) {
        return function () {
            return fn.apply(obj, arguments);
        };
    }

    function scripts() {
        return document.getElementsByTagName('script');
    }

    function defaultOnError(err) {
        throw err;
    }

    //Allow getting a global that is expressed in
    //dot notation, like 'a.b.c'.
    function getGlobal(value) {
        if (!value) {
            return value;
        }
        var g = global;
        each(value.split('.'), function (part) {
            g = g[part];
        });
        return g;
    }

    /**
     * Constructs an error with a pointer to an URL with more information.
     * @param {String} id the error ID that maps to an ID on a web page.
     * @param {String} message human readable error.
     * @param {Error} [err] the original error, if there is one.
     *
     * @returns {Error}
     */
    function makeError(id, msg, err, requireModules) {
        var e = new Error(msg + '\nhttp://requirejs.org/docs/errors.html#' + id);
        e.requireType = id;
        e.requireModules = requireModules;
        if (err) {
            e.originalError = err;
        }
        return e;
    }

    if (typeof define !== 'undefined') {
        //If a define is already in play via another AMD loader,
        //do not overwrite.
        return;
    }

    if (typeof requirejs !== 'undefined') {
        if (isFunction(requirejs)) {
            //Do not overwrite an existing requirejs instance.
            return;
        }
        cfg = requirejs;
        requirejs = undefined;
    }

    //Allow for a require config object
    if (typeof require !== 'undefined' && !isFunction(require)) {
        //assume it is a config object.
        cfg = require;
        require = undefined;
    }

    function newContext(contextName) {
        var inCheckLoaded, Module, context, handlers,
            checkLoadedTimeoutId,
            config = {
                //Defaults. Do not set a default for map
                //config to speed up normalize(), which
                //will run faster if there is no default.
                waitSeconds: 7,
                baseUrl: './',
                paths: {},
                bundles: {},
                pkgs: {},
                shim: {},
                config: {}
            },
            registry = {},
            //registry of just enabled modules, to speed
            //cycle breaking code when lots of modules
            //are registered, but not activated.
            enabledRegistry = {},
            undefEvents = {},
            defQueue = [],
            defined = {},
            urlFetched = {},
            bundlesMap = {},
            requireCounter = 1,
            unnormalizedCounter = 1;

        /**
         * Trims the . and .. from an array of path segments.
         * It will keep a leading path segment if a .. will become
         * the first path segment, to help with module name lookups,
         * which act like paths, but can be remapped. But the end result,
         * all paths that use this function should look normalized.
         * NOTE: this method MODIFIES the input array.
         * @param {Array} ary the array of path segments.
         */
        function trimDots(ary) {
            var i, part;
            for (i = 0; i < ary.length; i++) {
                part = ary[i];
                if (part === '.') {
                    ary.splice(i, 1);
                    i -= 1;
                } else if (part === '..') {
                    // If at the start, or previous value is still ..,
                    // keep them so that when converted to a path it may
                    // still work when converted to a path, even though
                    // as an ID it is less than ideal. In larger point
                    // releases, may be better to just kick out an error.
                    if (i === 0 || (i === 1 && ary[2] === '..') || ary[i - 1] === '..') {
                        continue;
                    } else if (i > 0) {
                        ary.splice(i - 1, 2);
                        i -= 2;
                    }
                }
            }
        }

        /**
         * Given a relative module name, like ./something, normalize it to
         * a real name that can be mapped to a path.
         * @param {String} name the relative name
         * @param {String} baseName a real name that the name arg is relative
         * to.
         * @param {Boolean} applyMap apply the map config to the value. Should
         * only be done if this normalization is for a dependency ID.
         * @returns {String} normalized name
         */
        function normalize(name, baseName, applyMap) {
            var pkgMain, mapValue, nameParts, i, j, nameSegment, lastIndex,
                foundMap, foundI, foundStarMap, starI, normalizedBaseParts,
                baseParts = (baseName && baseName.split('/')),
                map = config.map,
                starMap = map && map['*'];

            //Adjust any relative paths.
            if (name) {
                name = name.split('/');
                lastIndex = name.length - 1;

                // If wanting node ID compatibility, strip .js from end
                // of IDs. Have to do this here, and not in nameToUrl
                // because node allows either .js or non .js to map
                // to same file.
                if (config.nodeIdCompat && jsSuffixRegExp.test(name[lastIndex])) {
                    name[lastIndex] = name[lastIndex].replace(jsSuffixRegExp, '');
                }

                // Starts with a '.' so need the baseName
                if (name[0].charAt(0) === '.' && baseParts) {
                    //Convert baseName to array, and lop off the last part,
                    //so that . matches that 'directory' and not name of the baseName's
                    //module. For instance, baseName of 'one/two/three', maps to
                    //'one/two/three.js', but we want the directory, 'one/two' for
                    //this normalization.
                    normalizedBaseParts = baseParts.slice(0, baseParts.length - 1);
                    name = normalizedBaseParts.concat(name);
                }

                trimDots(name);
                name = name.join('/');
            }

            //Apply map config if available.
            if (applyMap && map && (baseParts || starMap)) {
                nameParts = name.split('/');

                outerLoop: for (i = nameParts.length; i > 0; i -= 1) {
                    nameSegment = nameParts.slice(0, i).join('/');

                    if (baseParts) {
                        //Find the longest baseName segment match in the config.
                        //So, do joins on the biggest to smallest lengths of baseParts.
                        for (j = baseParts.length; j > 0; j -= 1) {
                            mapValue = getOwn(map, baseParts.slice(0, j).join('/'));

                            //baseName segment has config, find if it has one for
                            //this name.
                            if (mapValue) {
                                mapValue = getOwn(mapValue, nameSegment);
                                if (mapValue) {
                                    //Match, update name to the new value.
                                    foundMap = mapValue;
                                    foundI = i;
                                    break outerLoop;
                                }
                            }
                        }
                    }

                    //Check for a star map match, but just hold on to it,
                    //if there is a shorter segment match later in a matching
                    //config, then favor over this star map.
                    if (!foundStarMap && starMap && getOwn(starMap, nameSegment)) {
                        foundStarMap = getOwn(starMap, nameSegment);
                        starI = i;
                    }
                }

                if (!foundMap && foundStarMap) {
                    foundMap = foundStarMap;
                    foundI = starI;
                }

                if (foundMap) {
                    nameParts.splice(0, foundI, foundMap);
                    name = nameParts.join('/');
                }
            }

            // If the name points to a package's name, use
            // the package main instead.
            pkgMain = getOwn(config.pkgs, name);

            return pkgMain ? pkgMain : name;
        }

        function removeScript(name) {
            if (isBrowser) {
                each(scripts(), function (scriptNode) {
                    if (scriptNode.getAttribute('data-requiremodule') === name &&
                            scriptNode.getAttribute('data-requirecontext') === context.contextName) {
                        scriptNode.parentNode.removeChild(scriptNode);
                        return true;
                    }
                });
            }
        }

        function hasPathFallback(id) {
            var pathConfig = getOwn(config.paths, id);
            if (pathConfig && isArray(pathConfig) && pathConfig.length > 1) {
                //Pop off the first array value, since it failed, and
                //retry
                pathConfig.shift();
                context.require.undef(id);

                //Custom require that does not do map translation, since
                //ID is "absolute", already mapped/resolved.
                context.makeRequire(null, {
                    skipMap: true
                })([id]);

                return true;
            }
        }

        //Turns a plugin!resource to [plugin, resource]
        //with the plugin being undefined if the name
        //did not have a plugin prefix.
        function splitPrefix(name) {
            var prefix,
                index = name ? name.indexOf('!') : -1;
            if (index > -1) {
                prefix = name.substring(0, index);
                name = name.substring(index + 1, name.length);
            }
            return [prefix, name];
        }

        /**
         * Creates a module mapping that includes plugin prefix, module
         * name, and path. If parentModuleMap is provided it will
         * also normalize the name via require.normalize()
         *
         * @param {String} name the module name
         * @param {String} [parentModuleMap] parent module map
         * for the module name, used to resolve relative names.
         * @param {Boolean} isNormalized: is the ID already normalized.
         * This is true if this call is done for a define() module ID.
         * @param {Boolean} applyMap: apply the map config to the ID.
         * Should only be true if this map is for a dependency.
         *
         * @returns {Object}
         */
        function makeModuleMap(name, parentModuleMap, isNormalized, applyMap) {
            var url, pluginModule, suffix, nameParts,
                prefix = null,
                parentName = parentModuleMap ? parentModuleMap.name : null,
                originalName = name,
                isDefine = true,
                normalizedName = '';

            //If no name, then it means it is a require call, generate an
            //internal name.
            if (!name) {
                isDefine = false;
                name = '_@r' + (requireCounter += 1);
            }

            nameParts = splitPrefix(name);
            prefix = nameParts[0];
            name = nameParts[1];

            if (prefix) {
                prefix = normalize(prefix, parentName, applyMap);
                pluginModule = getOwn(defined, prefix);
            }

            //Account for relative paths if there is a base name.
            if (name) {
                if (prefix) {
                    if (pluginModule && pluginModule.normalize) {
                        //Plugin is loaded, use its normalize method.
                        normalizedName = pluginModule.normalize(name, function (name) {
                            return normalize(name, parentName, applyMap);
                        });
                    } else {
                        // If nested plugin references, then do not try to
                        // normalize, as it will not normalize correctly. This
                        // places a restriction on resourceIds, and the longer
                        // term solution is not to normalize until plugins are
                        // loaded and all normalizations to allow for async
                        // loading of a loader plugin. But for now, fixes the
                        // common uses. Details in #1131
                        normalizedName = name.indexOf('!') === -1 ?
                                         normalize(name, parentName, applyMap) :
                                         name;
                    }
                } else {
                    //A regular module.
                    normalizedName = normalize(name, parentName, applyMap);

                    //Normalized name may be a plugin ID due to map config
                    //application in normalize. The map config values must
                    //already be normalized, so do not need to redo that part.
                    nameParts = splitPrefix(normalizedName);
                    prefix = nameParts[0];
                    normalizedName = nameParts[1];
                    isNormalized = true;

                    url = context.nameToUrl(normalizedName);
                }
            }

            //If the id is a plugin id that cannot be determined if it needs
            //normalization, stamp it with a unique ID so two matching relative
            //ids that may conflict can be separate.
            suffix = prefix && !pluginModule && !isNormalized ?
                     '_unnormalized' + (unnormalizedCounter += 1) :
                     '';

            return {
                prefix: prefix,
                name: normalizedName,
                parentMap: parentModuleMap,
                unnormalized: !!suffix,
                url: url,
                originalName: originalName,
                isDefine: isDefine,
                id: (prefix ?
                        prefix + '!' + normalizedName :
                        normalizedName) + suffix
            };
        }

        function getModule(depMap) {
            var id = depMap.id,
                mod = getOwn(registry, id);

            if (!mod) {
                mod = registry[id] = new context.Module(depMap);
            }

            return mod;
        }

        function on(depMap, name, fn) {
            var id = depMap.id,
                mod = getOwn(registry, id);

            if (hasProp(defined, id) &&
                    (!mod || mod.defineEmitComplete)) {
                if (name === 'defined') {
                    fn(defined[id]);
                }
            } else {
                mod = getModule(depMap);
                if (mod.error && name === 'error') {
                    fn(mod.error);
                } else {
                    mod.on(name, fn);
                }
            }
        }

        function onError(err, errback) {
            var ids = err.requireModules,
                notified = false;

            if (errback) {
                errback(err);
            } else {
                each(ids, function (id) {
                    var mod = getOwn(registry, id);
                    if (mod) {
                        //Set error on module, so it skips timeout checks.
                        mod.error = err;
                        if (mod.events.error) {
                            notified = true;
                            mod.emit('error', err);
                        }
                    }
                });

                if (!notified) {
                    req.onError(err);
                }
            }
        }

        /**
         * Internal method to transfer globalQueue items to this context's
         * defQueue.
         */
        function takeGlobalQueue() {
            //Push all the globalDefQueue items into the context's defQueue
            if (globalDefQueue.length) {
                each(globalDefQueue, function(queueItem) {
                    var id = queueItem[0];
                    if (typeof id === 'string') {
                        context.defQueueMap[id] = true;
                    }
                    defQueue.push(queueItem);
                });
                globalDefQueue = [];
            }
        }

        handlers = {
            'require': function (mod) {
                if (mod.require) {
                    return mod.require;
                } else {
                    return (mod.require = context.makeRequire(mod.map));
                }
            },
            'exports': function (mod) {
                mod.usingExports = true;
                if (mod.map.isDefine) {
                    if (mod.exports) {
                        return (defined[mod.map.id] = mod.exports);
                    } else {
                        return (mod.exports = defined[mod.map.id] = {});
                    }
                }
            },
            'module': function (mod) {
                if (mod.module) {
                    return mod.module;
                } else {
                    return (mod.module = {
                        id: mod.map.id,
                        uri: mod.map.url,
                        config: function () {
                            return getOwn(config.config, mod.map.id) || {};
                        },
                        exports: mod.exports || (mod.exports = {})
                    });
                }
            }
        };
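
        // Usage sketch (illustrative only): these handlers back the special
        // dependency names in a define() call, e.g.:
        //   define(['require', 'exports', 'module'], function (require, exports, module) {
        //       exports.answer = 42;        // served by the 'exports' handler
        //       var cfg = module.config();  // served by the 'module' handler
        //   });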

        function cleanRegistry(id) {
            //Clean up machinery used for waiting modules.
            delete registry[id];
            delete enabledRegistry[id];
        }

        function breakCycle(mod, traced, processed) {
            var id = mod.map.id;

            if (mod.error) {
                mod.emit('error', mod.error);
            } else {
                traced[id] = true;
                each(mod.depMaps, function (depMap, i) {
                    var depId = depMap.id,
                        dep = getOwn(registry, depId);

                    //Only force things that have not completed
                    //being defined, so still in the registry,
                    //and only if it has not been matched up
                    //in the module already.
                    if (dep && !mod.depMatched[i] && !processed[depId]) {
                        if (getOwn(traced, depId)) {
                            mod.defineDep(i, defined[depId]);
                            mod.check(); //pass false?
                        } else {
                            breakCycle(dep, traced, processed);
                        }
                    }
                });
                processed[id] = true;
            }
        }

        function checkLoaded() {
            var err, usingPathFallback,
                waitInterval = config.waitSeconds * 1000,
                //It is possible to disable the wait interval by using waitSeconds of 0.
                expired = waitInterval && (context.startTime + waitInterval) < new Date().getTime(),
                noLoads = [],
                reqCalls = [],
                stillLoading = false,
                needCycleCheck = true;

            //Do not bother if this call was a result of a cycle break.
            if (inCheckLoaded) {
                return;
            }

            inCheckLoaded = true;

            //Figure out the state of all the modules.
            eachProp(enabledRegistry, function (mod) {
                var map = mod.map,
                    modId = map.id;

                //Skip things that are not enabled or in error state.
                if (!mod.enabled) {
                    return;
                }

                if (!map.isDefine) {
                    reqCalls.push(mod);
                }

                if (!mod.error) {
                    //If the module should be executed, and it has not
                    //been inited and time is up, remember it.
                    if (!mod.inited && expired) {
                        if (hasPathFallback(modId)) {
                            usingPathFallback = true;
                            stillLoading = true;
                        } else {
                            noLoads.push(modId);
                            removeScript(modId);
                        }
                    } else if (!mod.inited && mod.fetched && map.isDefine) {
                        stillLoading = true;
                        if (!map.prefix) {
                            //No reason to keep looking for unfinished
                            //loading. If the only stillLoading is a
                            //plugin resource though, keep going,
                            //because it may be that a plugin resource
                            //is waiting on a non-plugin cycle.
                            return (needCycleCheck = false);
                        }
                    }
                }
            });

            if (expired && noLoads.length) {
                //If wait time expired, throw error of unloaded modules.
                err = makeError('timeout', 'Load timeout for modules: ' + noLoads, null, noLoads);
                err.contextName = context.contextName;
                return onError(err);
            }

            //Not expired, check for a cycle.
            if (needCycleCheck) {
                each(reqCalls, function (mod) {
                    breakCycle(mod, {}, {});
                });
            }

            //If still waiting on loads, and the waiting load is something
            //other than a plugin resource, or there are still outstanding
            //scripts, then just try back later.
            if ((!expired || usingPathFallback) && stillLoading) {
                //Something is still waiting to load. Wait for it, but only
                //if a timeout is not already in effect.
                if ((isBrowser || isWebWorker) && !checkLoadedTimeoutId) {
                    checkLoadedTimeoutId = setTimeout(function () {
                        checkLoadedTimeoutId = 0;
                        checkLoaded();
                    }, 50);
                }
            }

            inCheckLoaded = false;
        }
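
        // Config sketch (assuming the standard waitSeconds semantics): a call like
        //   requirejs.config({ waitSeconds: 0 })
        // disables the expiration check above, while the default (7 seconds)
        // reports modules that never loaded via a 'timeout' error through onError().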

        Module = function (map) {
            this.events = getOwn(undefEvents, map.id) || {};
            this.map = map;
            this.shim = getOwn(config.shim, map.id);
            this.depExports = [];
            this.depMaps = [];
            this.depMatched = [];
            this.pluginMaps = {};
            this.depCount = 0;

            /* this.exports this.factory
               this.depMaps = [],
               this.enabled, this.fetched
            */
        };

        Module.prototype = {
            init: function (depMaps, factory, errback, options) {
                options = options || {};

                //Do not do more inits if already done. Can happen if there
                //are multiple define calls for the same module. That is not
                //a normal, common case, but it is also not unexpected.
                if (this.inited) {
                    return;
                }

                this.factory = factory;

                if (errback) {
                    //Register for errors on this module.
                    this.on('error', errback);
                } else if (this.events.error) {
                    //If no errback already, but there are error listeners
                    //on this module, set up an errback to pass to the deps.
                    errback = bind(this, function (err) {
                        this.emit('error', err);
                    });
                }

                //Do a copy of the dependency array, so that
                //source inputs are not modified. For example
                //"shim" deps are passed in here directly, and
                //doing a direct modification of the depMaps array
                //would affect that config.
                this.depMaps = depMaps && depMaps.slice(0);

                this.errback = errback;

                //Indicate this module has been initialized
                this.inited = true;

                this.ignore = options.ignore;

                //Could have option to init this module in enabled mode,
                //or could have been previously marked as enabled. However,
                //the dependencies are not known until init is called. So
                //if enabled previously, now trigger dependencies as enabled.
                if (options.enabled || this.enabled) {
                    //Enable this module and dependencies.
                    //Will call this.check()
                    this.enable();
                } else {
                    this.check();
                }
            },

            defineDep: function (i, depExports) {
                //Because of cycles, defined callback for a given
                //export can be called more than once.
                if (!this.depMatched[i]) {
                    this.depMatched[i] = true;
                    this.depCount -= 1;
                    this.depExports[i] = depExports;
                }
            },

            fetch: function () {
                if (this.fetched) {
                    return;
                }
                this.fetched = true;

                context.startTime = (new Date()).getTime();

                var map = this.map;

                //If this module has a shim config, load the shim's
                //dependencies first, then load the module itself (via
                //its plugin if it has a prefix).
                if (this.shim) {
                    context.makeRequire(this.map, {
                        enableBuildCallback: true
                    })(this.shim.deps || [], bind(this, function () {
                        return map.prefix ? this.callPlugin() : this.load();
                    }));
                } else {
                    //Regular dependency.
                    return map.prefix ? this.callPlugin() : this.load();
                }
            },

            load: function () {
                var url = this.map.url;

                //Regular dependency.
                if (!urlFetched[url]) {
                    urlFetched[url] = true;
                    context.load(this.map.id, url);
                }
            },

            /**
             * Checks if the module is ready to define itself, and if so,
             * define it.
             */
            check: function () {
                if (!this.enabled || this.enabling) {
                    return;
                }

                var err, cjsModule,
                    id = this.map.id,
                    depExports = this.depExports,
                    exports = this.exports,
                    factory = this.factory;

                if (!this.inited) {
                    // Only fetch if not already in the defQueue.
                    if (!hasProp(context.defQueueMap, id)) {
                        this.fetch();
                    }
                } else if (this.error) {
                    this.emit('error', this.error);
                } else if (!this.defining) {
                    //The factory could trigger another require call
                    //that would result in checking this module to
                    //define itself again. If already in the process
                    //of doing that, skip this work.
                    this.defining = true;

                    if (this.depCount < 1 && !this.defined) {
                        if (isFunction(factory)) {
                            try {
                                exports = context.execCb(id, factory, depExports, exports);
                            } catch (e) {
                                err = e;
                            }

                            // Favor return value over exports. If node/cjs in play,
                            // then will not have a return value anyway. Favor
                            // module.exports assignment over exports object.
                            if (this.map.isDefine && exports === undefined) {
                                cjsModule = this.module;
                                if (cjsModule) {
                                    exports = cjsModule.exports;
                                } else if (this.usingExports) {
                                    //exports already set the defined value.
                                    exports = this.exports;
                                }
                            }

                            if (err) {
                                // If there is an error listener, favor passing
                                // to that instead of throwing an error. However,
                                // only do it for define()'d modules. require
                                // errbacks should not be called for failures in
                                // their callbacks (#699). However if a global
                                // onError is set, use that.
                                if ((this.events.error && this.map.isDefine) ||
                                    req.onError !== defaultOnError) {
                                    err.requireMap = this.map;
                                    err.requireModules = this.map.isDefine ? [this.map.id] : null;
                                    err.requireType = this.map.isDefine ? 'define' : 'require';
                                    return onError((this.error = err));
                                } else if (typeof console !== 'undefined' &&
                                           console.error) {
                                    // Log the error for debugging. If promises could be
                                    // used, this would be different, but making do.
                                    console.error(err);
                                } else {
                                    // Do not want to completely lose the error. While this
                                    // will mess up processing and lead to similar results
                                    // as bug 1440, it at least surfaces the error.
                                    req.onError(err);
                                }
                            }
                        } else {
                            //Just a literal value
                            exports = factory;
                        }

                        this.exports = exports;

                        if (this.map.isDefine && !this.ignore) {
                            defined[id] = exports;

                            if (req.onResourceLoad) {
                                var resLoadMaps = [];
                                each(this.depMaps, function (depMap) {
                                    resLoadMaps.push(depMap.normalizedMap || depMap);
                                });
                                req.onResourceLoad(context, this.map, resLoadMaps);
                            }
                        }

                        //Clean up
                        cleanRegistry(id);

                        this.defined = true;
                    }

                    //Finished the define stage. Allow calling check again
                    //to allow define notifications below in the case of a
                    //cycle.
                    this.defining = false;

                    if (this.defined && !this.defineEmitted) {
                        this.defineEmitted = true;
                        this.emit('defined', this.exports);
                        this.defineEmitComplete = true;
                    }

                }
            },
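
            // Sketch (illustrative): the precedence above means that for
            //   define(function (require, exports, module) { module.exports = { a: 1 }; });
            // the module value is module.exports, while a factory that returns a value,
            //   define([], function () { return { a: 1 }; });
            // wins over any exports/module.exports assignments.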

            callPlugin: function () {
                var map = this.map,
                    id = map.id,
                    //Map already normalized the prefix.
                    pluginMap = makeModuleMap(map.prefix);

                //Mark this as a dependency for this plugin, so it
                //can be traced for cycles.
                this.depMaps.push(pluginMap);

                on(pluginMap, 'defined', bind(this, function (plugin) {
                    var load, normalizedMap, normalizedMod,
                        bundleId = getOwn(bundlesMap, this.map.id),
                        name = this.map.name,
                        parentName = this.map.parentMap ? this.map.parentMap.name : null,
                        localRequire = context.makeRequire(map.parentMap, {
                            enableBuildCallback: true
                        });

                    //If current map is not normalized, wait for that
                    //normalized name to load instead of continuing.
                    if (this.map.unnormalized) {
                        //Normalize the ID if the plugin allows it.
                        if (plugin.normalize) {
                            name = plugin.normalize(name, function (name) {
                                return normalize(name, parentName, true);
                            }) || '';
                        }

                        //prefix and name should already be normalized, no need
                        //for applying map config again either.
                        normalizedMap = makeModuleMap(map.prefix + '!' + name,
                                                      this.map.parentMap);
                        on(normalizedMap,
                            'defined', bind(this, function (value) {
                                this.map.normalizedMap = normalizedMap;
                                this.init([], function () { return value; }, null, {
                                    enabled: true,
                                    ignore: true
                                });
                            }));

                        normalizedMod = getOwn(registry, normalizedMap.id);
                        if (normalizedMod) {
                            //Mark this as a dependency for this plugin, so it
                            //can be traced for cycles.
                            this.depMaps.push(normalizedMap);

                            if (this.events.error) {
                                normalizedMod.on('error', bind(this, function (err) {
                                    this.emit('error', err);
                                }));
                            }
                            normalizedMod.enable();
                        }

                        return;
                    }

                    //If this resource is part of a bundles config, just load
                    //that bundle file instead to resolve the plugin resource,
                    //as it is built into that bundle layer.
                    if (bundleId) {
                        this.map.url = context.nameToUrl(bundleId);
                        this.load();
                        return;
                    }

                    load = bind(this, function (value) {
                        this.init([], function () { return value; }, null, {
                            enabled: true
                        });
                    });

                    load.error = bind(this, function (err) {
                        this.inited = true;
                        this.error = err;
                        err.requireModules = [id];

                        //Remove temp unnormalized modules for this module,
                        //since they will never be resolved otherwise now.
                        eachProp(registry, function (mod) {
                            if (mod.map.id.indexOf(id + '_unnormalized') === 0) {
                                cleanRegistry(mod.map.id);
                            }
                        });

                        onError(err);
                    });

                    //Allow plugins to load other code without having to know the
                    //context or how to 'complete' the load.
                    load.fromText = bind(this, function (text, textAlt) {
                        /*jslint evil: true */
                        var moduleName = map.name,
                            moduleMap = makeModuleMap(moduleName),
                            hasInteractive = useInteractive;

                        //As of 2.1.0, support just passing the text, to reinforce
                        //fromText only being called once per resource. Still
                        //support old style of passing moduleName but discard
                        //that moduleName in favor of the internal ref.
                        if (textAlt) {
                            text = textAlt;
                        }

                        //Turn off interactive script matching for IE for any define
                        //calls in the text, then turn it back on at the end.
                        if (hasInteractive) {
                            useInteractive = false;
                        }

                        //Prime the system by creating a module instance for
                        //it.
                        getModule(moduleMap);

                        //Transfer any config to this other module.
                        if (hasProp(config.config, id)) {
                            config.config[moduleName] = config.config[id];
                        }

                        try {
                            req.exec(text);
                        } catch (e) {
                            return onError(makeError('fromtexteval',
                                             'fromText eval for ' + id +
                                            ' failed: ' + e,
                                             e,
                                             [id]));
                        }

                        if (hasInteractive) {
                            useInteractive = true;
                        }

                        //Mark this as a dependency for the plugin
                        //resource
                        this.depMaps.push(moduleMap);

                        //Support anonymous modules.
                        context.completeLoad(moduleName);

                        //Bind the value of that module to the value for this
                        //resource ID.
                        localRequire([moduleName], load);
                    });

                    //Use parentName here since the plugin's name is not reliable,
                    //could be some weird string with no path that actually wants to
                    //reference the parentName's path.
                    plugin.load(map.name, localRequire, load, config);
                }));

                context.enable(pluginMap, this);
                this.pluginMaps[pluginMap.id] = pluginMap;
            },
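
            // Plugin sketch (illustrative, not part of the loader): the localRequire
            // and load callbacks wired above are what a loader plugin receives, e.g.:
            //   define({
            //       load: function (name, parentRequire, onload, config) {
            //           fetchSomehow(name, function (text) {  // fetchSomehow is hypothetical
            //               onload.fromText(text);            // evaluate text as a module
            //           }, onload.error);
            //       }
            //   });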

            enable: function () {
                enabledRegistry[this.map.id] = this;
                this.enabled = true;

                //Set a flag indicating that the module is enabling,
                //so that immediate calls to the defined callbacks
                //for dependencies do not trigger inadvertent load
                //with the depCount still being zero.
                this.enabling = true;

                //Enable each dependency
                each(this.depMaps, bind(this, function (depMap, i) {
                    var id, mod, handler;

                    if (typeof depMap === 'string') {
                        //Dependency needs to be converted to a depMap
                        //and wired up to this module.
                        depMap = makeModuleMap(depMap,
                                               (this.map.isDefine ? this.map : this.map.parentMap),
                                               false,
                                               !this.skipMap);
                        this.depMaps[i] = depMap;

                        handler = getOwn(handlers, depMap.id);

                        if (handler) {
                            this.depExports[i] = handler(this);
                            return;
                        }

                        this.depCount += 1;

                        on(depMap, 'defined', bind(this, function (depExports) {
                            if (this.undefed) {
                                return;
                            }
                            this.defineDep(i, depExports);
                            this.check();
                        }));

                        if (this.errback) {
                            on(depMap, 'error', bind(this, this.errback));
                        } else if (this.events.error) {
                            // No direct errback on this module, but something
                            // else is listening for errors, so be sure to
                            // propagate the error correctly.
                            on(depMap, 'error', bind(this, function(err) {
                                this.emit('error', err);
                            }));
                        }
                    }

                    id = depMap.id;
                    mod = registry[id];

                    //Skip special modules like 'require', 'exports', 'module'
                    //Also, don't call enable if it is already enabled,
                    //important in circular dependency cases.
                    if (!hasProp(handlers, id) && mod && !mod.enabled) {
                        context.enable(depMap, this);
                    }
                }));

                //Enable each plugin that is used in
                //a dependency
                eachProp(this.pluginMaps, bind(this, function (pluginMap) {
                    var mod = getOwn(registry, pluginMap.id);
                    if (mod && !mod.enabled) {
                        context.enable(pluginMap, this);
                    }
                }));

                this.enabling = false;

                this.check();
            },

            on: function (name, cb) {
                var cbs = this.events[name];
                if (!cbs) {
                    cbs = this.events[name] = [];
                }
                cbs.push(cb);
            },

            emit: function (name, evt) {
                each(this.events[name], function (cb) {
                    cb(evt);
                });
                if (name === 'error') {
                    //Now that the error handler was triggered, remove
                    //the listeners, since this broken Module instance
                    //can stay around for a while in the registry.
                    delete this.events[name];
                }
            }
        };

        function callGetModule(args) {
            //Skip modules already defined.
            if (!hasProp(defined, args[0])) {
                getModule(makeModuleMap(args[0], null, true)).init(args[1], args[2]);
            }
        }

        function removeListener(node, func, name, ieName) {
            //Favor detachEvent because of IE9
            //issue, see attachEvent/addEventListener comment elsewhere
            //in this file.
            if (node.detachEvent && !isOpera) {
                //Probably IE. If not it will throw an error, which will be
                //useful to know.
                if (ieName) {
                    node.detachEvent(ieName, func);
                }
            } else {
                node.removeEventListener(name, func, false);
            }
        }

        /**
         * Given an event from a script node, gets the requirejs info from it,
         * and then removes the event listeners on the node.
         * @param {Event} evt
         * @returns {Object}
         */
        function getScriptData(evt) {
            //Using currentTarget instead of target for Firefox 2.0's sake. Not
            //all old browsers will be supported, but this one was easy enough
            //to support and still makes sense.
            var node = evt.currentTarget || evt.srcElement;

            //Remove the listeners once here.
            removeListener(node, context.onScriptLoad, 'load', 'onreadystatechange');
            removeListener(node, context.onScriptError, 'error');

            return {
                node: node,
                id: node && node.getAttribute('data-requiremodule')
            };
        }

        function intakeDefines() {
            var args;

            //Any defined modules in the global queue, intake them now.
            takeGlobalQueue();

            //Make sure any remaining defQueue items get properly processed.
            while (defQueue.length) {
                args = defQueue.shift();
                if (args[0] === null) {
                    return onError(makeError('mismatch', 'Mismatched anonymous define() module: ' +
                        args[args.length - 1]));
                } else {
                    //args are id, deps, factory. Should be normalized by the
                    //define() function.
                    callGetModule(args);
                }
            }
            context.defQueueMap = {};
        }

        context = {
            config: config,
            contextName: contextName,
            registry: registry,
            defined: defined,
            urlFetched: urlFetched,
            defQueue: defQueue,
            defQueueMap: {},
            Module: Module,
            makeModuleMap: makeModuleMap,
            nextTick: req.nextTick,
            onError: onError,

            /**
             * Set a configuration for the context.
             * @param {Object} cfg config object to integrate.
             */
            configure: function (cfg) {
                //Make sure the baseUrl ends in a slash.
                if (cfg.baseUrl) {
                    if (cfg.baseUrl.charAt(cfg.baseUrl.length - 1) !== '/') {
                        cfg.baseUrl += '/';
                    }
                }

                //Save off the shim config since it requires special processing;
                //the sections listed in objs are additive and merged below.
                var shim = config.shim,
                    objs = {
                        paths: true,
                        bundles: true,
                        config: true,
                        map: true
                    };

                eachProp(cfg, function (value, prop) {
                    if (objs[prop]) {
                        if (!config[prop]) {
                            config[prop] = {};
                        }
                        mixin(config[prop], value, true, true);
                    } else {
                        config[prop] = value;
                    }
                });

                //Reverse map the bundles
                if (cfg.bundles) {
                    eachProp(cfg.bundles, function (value, prop) {
                        each(value, function (v) {
                            if (v !== prop) {
                                bundlesMap[v] = prop;
                            }
                        });
                    });
                }

                //Merge shim
                if (cfg.shim) {
                    eachProp(cfg.shim, function (value, id) {
                        //Normalize the structure
                        if (isArray(value)) {
                            value = {
                                deps: value
                            };
                        }
                        if ((value.exports || value.init) && !value.exportsFn) {
                            value.exportsFn = context.makeShimExports(value);
                        }
                        shim[id] = value;
                    });
                    config.shim = shim;
                }

                //Adjust packages if necessary.
                if (cfg.packages) {
                    each(cfg.packages, function (pkgObj) {
                        var location, name;

                        pkgObj = typeof pkgObj === 'string' ? {name: pkgObj} : pkgObj;

                        name = pkgObj.name;
                        location = pkgObj.location;
                        if (location) {
                            config.paths[name] = pkgObj.location;
                        }

                        //Save pointer to main module ID for pkg name.
                        //Remove leading dot in main, so main paths are normalized,
                        //and remove any trailing .js, since different package
                        //envs have different conventions: some use a module name,
                        //some use a file name.
                        config.pkgs[name] = pkgObj.name + '/' + (pkgObj.main || 'main')
                                     .replace(currDirRegExp, '')
                                     .replace(jsSuffixRegExp, '');
                    });
                }

                //If there are any "waiting to execute" modules in the registry,
                //update the maps for them, since their info, like URLs to load,
                //may have changed.
                eachProp(registry, function (mod, id) {
                    //Skip modules that have already had init called, since it
                    //is too late to modify them, and ignore unnormalized ones
                    //since they are transient.
                    if (!mod.inited && !mod.map.unnormalized) {
                        mod.map = makeModuleMap(id, null, true);
                    }
                });

                //If a deps array or a config callback is specified, then call
                //require with those args. This is useful when require is defined as a
                //config object before require.js is loaded.
                if (cfg.deps || cfg.callback) {
                    context.require(cfg.deps || [], cfg.callback);
                }
            },
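
            // Usage sketch (illustrative only): because the object-valued sections are
            // merged rather than replaced, configure() can be called repeatedly, e.g.:
            //   requirejs.config({
            //       baseUrl: 'js',                    // trailing slash added above
            //       paths: { jquery: 'lib/jquery' },  // additive with earlier paths
            //       shim: { backbone: { deps: ['underscore'], exports: 'Backbone' } },
            //       bundles: { core: ['utils', 'widgets'] },
            //       packages: [{ name: 'app', main: 'index' }]
            //   });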

            makeShimExports: function (value) {
                function fn() {
                    var ret;
                    if (value.init) {
                        ret = value.init.apply(global, arguments);
                    }
                    return ret || (value.exports && getGlobal(value.exports));
                }
                return fn;
            },
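
            // Sketch (illustrative): for a shim entry such as
            //   { deps: ['jquery'], exports: 'Legacy', init: function ($) { return $.legacy; } }
            // the exportsFn built here prefers init()'s return value and falls back to
            // reading the global named by 'exports'.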

            makeRequire: function (relMap, options) {
                options = options || {};

                function localRequire(deps, callback, errback) {
                    var id, map, requireMod;

                    if (options.enableBuildCallback && callback && isFunction(callback)) {
                        callback.__requireJsBuild = true;
                    }

                    if (typeof deps === 'string') {
                        if (isFunction(callback)) {
                            //Invalid call
                            return onError(makeError('requireargs', 'Invalid require call'), errback);
                        }

                        //If require|exports|module are requested, get the
                        //value for them from the special handlers. Caveat:
                        //this only works while module is being defined.
                        if (relMap && hasProp(handlers, deps)) {
                            return handlers[deps](registry[relMap.id]);
                        }

                        //Synchronous access to one module. If require.get is
                        //available (as in the Node adapter), prefer that.
                        if (req.get) {
                            return req.get(context, deps, relMap, localRequire);
                        }

                        //Normalize module name, if it contains . or ..
                        map = makeModuleMap(deps, relMap, false, true);
                        id = map.id;

                        if (!hasProp(defined, id)) {
                            return onError(makeError('notloaded', 'Module name "' +
                                        id +
                                        '" has not been loaded yet for context: ' +
                                        contextName +
                                        (relMap ? '' : '. Use require([])')));
                        }
                        return defined[id];
                    }

                    //Grab defines waiting in the global queue.
                    intakeDefines();

                    //Mark all the dependencies as needing to be loaded.
                    context.nextTick(function () {
                        //Some defines could have been added since the
                        //require call, collect them.
                        intakeDefines();

                        requireMod = getModule(makeModuleMap(null, relMap));

                        //Store if map config should be applied to this require
                        //call for dependencies.
                        requireMod.skipMap = options.skipMap;

                        requireMod.init(deps, callback, errback, {
                            enabled: true
                        });

                        checkLoaded();
                    });

                    return localRequire;
                }

                mixin(localRequire, {
                    isBrowser: isBrowser,

                    /**
                     * Converts a module name + .extension into an URL path.
                     * *Requires* the use of a module name. It does not support using
                     * plain URLs like nameToUrl.
                     */
                    toUrl: function (moduleNamePlusExt) {
                        var ext,
                            index = moduleNamePlusExt.lastIndexOf('.'),
                            segment = moduleNamePlusExt.split('/')[0],
                            isRelative = segment === '.' || segment === '..';

                        //Have a file extension alias, and it is not the
                        //dots from a relative path.
                        if (index !== -1 && (!isRelative || index > 1)) {
                            ext = moduleNamePlusExt.substring(index, moduleNamePlusExt.length);
                            moduleNamePlusExt = moduleNamePlusExt.substring(0, index);
                        }

                        return context.nameToUrl(normalize(moduleNamePlusExt,
                                                relMap && relMap.id, true), ext,  true);
                    },

                    defined: function (id) {
                        return hasProp(defined, makeModuleMap(id, relMap, false, true).id);
                    },

                    specified: function (id) {
                        id = makeModuleMap(id, relMap, false, true).id;
                        return hasProp(defined, id) || hasProp(registry, id);
                    }
                });

                //Only allow undef on top level require calls
                if (!relMap) {
                    localRequire.undef = function (id) {
                        //Bind any waiting define() calls to this context,
                        //fix for #408
                        takeGlobalQueue();

                        var map = makeModuleMap(id, relMap, true),
                            mod = getOwn(registry, id);

                        mod.undefed = true;
                        removeScript(id);

                        delete defined[id];
                        delete urlFetched[map.url];
                        delete undefEvents[id];

                        //Clean queued defines too. Go backwards
                        //in array so that the splices do not
                        //mess up the iteration.
                        eachReverse(defQueue, function(args, i) {
                            if (args[0] === id) {
                                defQueue.splice(i, 1);
                            }
                        });
                        delete context.defQueueMap[id];

                        if (mod) {
                            //Hold on to listeners in case the
                            //module will be attempted to be reloaded
                            //using a different config.
                            if (mod.events.defined) {
                                undefEvents[id] = mod.events;
                            }

                            cleanRegistry(id);
                        }
                    };
                }

                return localRequire;
            },
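
            // Usage sketch (illustrative only): the localRequire built above supports
            //   require(['a', 'b'], function (a, b) { /* ... */ }, function (err) { /* ... */ });
            //   var a = require('a');          // sync form, only for already-defined modules
            //   require.toUrl('./data.json');  // module-relative URL resolution
            //   requirejs.undef('a');          // top-level only: drops 'a' from the registry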

            /**
             * Called to enable a module if it is still in the registry
             * awaiting enablement. A second arg, parent, the parent module,
             * is passed in for context, when this method is overridden by
             * the optimizer. Not shown here to keep code compact.
             */
            enable: function (depMap) {
                var mod = getOwn(registry, depMap.id);
                if (mod) {
                    getModule(depMap).enable();
                }
            },

            /**
             * Internal method used by environment adapters to complete a load event.
             * A load event could be a script load or just a load pass from a synchronous
             * load call.
             * @param {String} moduleName the name of the module to potentially complete.
             */
            completeLoad: function (moduleName) {
                var found, args, mod,
                    shim = getOwn(config.shim, moduleName) || {},
                    shExports = shim.exports;

                takeGlobalQueue();

                while (defQueue.length) {
                    args = defQueue.shift();
                    if (args[0] === null) {
                        args[0] = moduleName;
                        //If already found an anonymous module and bound it
                        //to this name, then this is some other anon module
                        //waiting for its completeLoad to fire.
                        if (found) {
                            break;
                        }
                        found = true;
                    } else if (args[0] === moduleName) {
                        //Found matching define call for this script!
                        found = true;
                    }

                    callGetModule(args);
                }
                context.defQueueMap = {};

                //Do this after the cycle of callGetModule in case the result
                //of those calls/init calls changes the registry.
                mod = getOwn(registry, moduleName);

                if (!found && !hasProp(defined, moduleName) && mod && !mod.inited) {
                    if (config.enforceDefine && (!shExports || !getGlobal(shExports))) {
                        if (hasPathFallback(moduleName)) {
                            return;
                        } else {
                            return onError(makeError('nodefine',
                                             'No define call for ' + moduleName,
                                             null,
                                             [moduleName]));
                        }
                    } else {
                        //A script that does not call define(), so just simulate
                        //the call for it.
                        callGetModule([moduleName, (shim.deps || []), shim.exportsFn]);
                    }
                }

                checkLoaded();
            },

            /**
             * Converts a module name to a file path. Supports cases where
             * moduleName may actually be just an URL.
             * Note that it **does not** call normalize on the moduleName,
             * it is assumed to have already been normalized. This is an
             * internal API, not a public one. Use toUrl for the public API.
             */
            nameToUrl: function (moduleName, ext, skipExt) {
                var paths, syms, i, parentModule, url,
                    parentPath, bundleId,
                    pkgMain = getOwn(config.pkgs, moduleName);

                if (pkgMain) {
                    moduleName = pkgMain;
                }

                bundleId = getOwn(bundlesMap, moduleName);

                if (bundleId) {
                    return context.nameToUrl(bundleId, ext, skipExt);
                }

                //If a colon is in the name, it indicates a protocol is used and it is just
                //a URL to a file. Likewise, if it starts with a slash, contains a query arg
                //(i.e. ?) or ends with .js, assume the user meant a URL and not a module id.
                //The slash is important for protocol-less URLs as well as full paths.
                if (req.jsExtRegExp.test(moduleName)) {
                    //Just a plain path, not a module name lookup, so just return it.
                    //Add the extension if one was passed in. This is a bit wonky: only
                    //non-.js things pass an extension, so this method probably needs to
                    //be reworked.
                    url = moduleName + (ext || '');
                } else {
                    //A module that needs to be converted to a path.
                    paths = config.paths;

                    syms = moduleName.split('/');
                    //For each module name segment, see if there is a path
                    //registered for it. Start with most specific name
                    //and work up from it.
                    for (i = syms.length; i > 0; i -= 1) {
                        parentModule = syms.slice(0, i).join('/');

                        parentPath = getOwn(paths, parentModule);
                        if (parentPath) {
                            //If an array, it means there are path fallbacks;
                            //start with the first choice.
                            if (isArray(parentPath)) {
                                parentPath = parentPath[0];
                            }
                            syms.splice(0, i, parentPath);
                            break;
                        }
                    }

                    //Join the path parts together, then figure out if baseUrl is needed.
                    url = syms.join('/');
                    url += (ext || (/^data\:|\?/.test(url) || skipExt ? '' : '.js'));
                    url = (url.charAt(0) === '/' || url.match(/^[\w\+\.\-]+:/) ? '' : config.baseUrl) + url;
                }

                return config.urlArgs ? url +
                                        ((url.indexOf('?') === -1 ? '?' : '&') +
                                         config.urlArgs) : url;
            },
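
            // Resolution sketch (assuming baseUrl 'js/' and paths { app: 'app/src' }):
            //   nameToUrl('app/main')      -> 'js/app/src/main.js'
            //   nameToUrl('lib/util')      -> 'js/lib/util.js'
            //   nameToUrl('/abs/thing.js') -> '/abs/thing.js'  (matches jsExtRegExp, left as-is)
            // Any configured urlArgs value is appended as a query string.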

            //Delegates to req.load. Broken out as a separate function to
            //allow overriding in the optimizer.
            load: function (id, url) {
                req.load(context, id, url);
            },

            /**
             * Executes a module callback function. Broken out as a separate function
             * solely to allow the build system to sequence the files in the built
             * layer in the right sequence.
             *
             * @private
             */
            execCb: function (name, callback, args, exports) {
                return callback.apply(exports, args);
            },

            /**
             * Callback for script loads, used to check status of loading.
             *
             * @param {Event} evt the event from the browser for the script
             * that was loaded.
             */
            onScriptLoad: function (evt) {
                //Using currentTarget instead of target for Firefox 2.0's sake. Not
                //all old browsers will be supported, but this one was easy enough
                //to support and still makes sense.
                if (evt.type === 'load' ||
                        (readyRegExp.test((evt.currentTarget || evt.srcElement).readyState))) {
                    //Reset interactive script so a script node is not held onto for
                    //too long.
                    interactiveScript = null;

                    //Pull out the name of the module and the context.
                    var data = getScriptData(evt);
                    context.completeLoad(data.id);
                }
            },

            /**
             * Callback for script errors.
             */
            onScriptError: function (evt) {
                var data = getScriptData(evt);
                if (!hasPathFallback(data.id)) {
                    var parents = [];
                    eachProp(registry, function(value, key) {
                        if (key.indexOf('_@r') !== 0) {
                            each(value.depMaps, function(depMap) {
                                if (depMap.id === data.id) {
                                    parents.push(key);
                                }
                                return true;
                            });
                        }
                    });
                    return onError(makeError('scripterror', 'Script error for "' + data.id +
                                             (parents.length ?
                                             '", needed by: ' + parents.join(', ') :
                                             '"'), evt, [data.id]));
                }
            }
        };

        context.require = context.makeRequire();
        return context;
    }

    /**
     * Main entry point.
     *
     * If the only argument to require is a string, then the module that
     * is represented by that string is fetched for the appropriate context.
     *
     * If the first argument is an array, then it will be treated as an array
     * of dependency string names to fetch. An optional function callback can
     * be specified to execute when all of those dependencies are available.
     *
     * Make a local req variable to help Caja compliance (it assumes things
     * on a require that are not standardized), and to give a short
     * name for minification/local scope use.
     */
    req = requirejs = function (deps, callback, errback, optional) {

        //Find the right context, use default
        var context, config,
            contextName = defContextName;

        // Determine if there is a config object in the call.
        if (!isArray(deps) && typeof deps !== 'string') {
            // deps is a config object
            config = deps;
            if (isArray(callback)) {
                // Adjust args if there are dependencies
                deps = callback;
                callback = errback;
                errback = optional;
            } else {
                deps = [];
            }
        }

        if (config && config.context) {
            contextName = config.context;
        }

        context = getOwn(contexts, contextName);
        if (!context) {
            context = contexts[contextName] = req.s.newContext(contextName);
        }

        if (config) {
            context.configure(config);
        }

        return context.require(deps, callback, errback);
    };
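
    // Usage sketch (illustrative only): the argument shuffling above allows
    //   require({ context: 'admin', baseUrl: 'admin/js' }, ['main'], function (main) { /* ... */ });
    // i.e. an optional config object first, then a dependency array and callback.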

    /**
     * Support require.config() to make it easier to cooperate with other
     * AMD loaders on globally agreed names.
     */
    req.config = function (config) {
        return req(config);
    };
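
    // Usage sketch (illustrative only): require.config(cfg) is the same as calling
    // require(cfg) with a config object and no dependencies, e.g.:
    //   require.config({ baseUrl: 'js', paths: { d3: 'vendor/d3' } });
    //   require(['d3'], function (d3) { /* ... */ });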

    /**
     * Execute something after the current tick
     * of the event loop. Override for other envs
     * that have a better solution than setTimeout.
     * @param  {Function} fn function to execute later.
     */
    req.nextTick = typeof setTimeout !== 'undefined' ? function (fn) {
        setTimeout(fn, 4);
    } : function (fn) { fn(); };
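
    // Override sketch (illustrative): an environment with microtask support could
    // replace this, e.g.:
    //   req.nextTick = function (fn) { Promise.resolve().then(fn); };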

    /**
     * Export require as a global, but only if it does not already exist.
     */
    if (!require) {
        require = req;
    }

    req.version = version;

    //Used to filter out dependencies that are already paths.
    req.jsExtRegExp = /^\/|:|\?|\.js$/;
    req.isBrowser = isBrowser;
    s = req.s = {
        contexts: contexts,
        newContext: newContext
    };

    //Create default context.
    req({});

    //Exports some context-sensitive methods on global require.
    each([
        'toUrl',
        'undef',
        'defined',
        'specified'
    ], function (prop) {
        //Reference from contexts instead of early binding to default context,
        //so that during builds, the latest instance of the default context
        //with its config gets used.
        req[prop] = function () {
            var ctx = contexts[defContextName];
            return ctx.require[prop].apply(ctx, arguments);
        };
    });

    if (isBrowser) {
        head = s.head = document.getElementsByTagName('head')[0];
        //If BASE tag is in play, using appendChild is a problem for IE6.
        //When that browser dies, this can be removed. Details in this jQuery bug:
        //http://dev.jquery.com/ticket/2709
        baseElement = document.getElementsByTagName('base')[0];
        if (baseElement) {
            head = s.head = baseElement.parentNode;
        }
    }

    /**
     * Any errors that require explicitly generates will be passed to this
     * function. Intercept/override it if you want custom error handling.
     * @param {Error} err the error object.
     */
    req.onError = defaultOnError;

    /**
     * Creates the node for the load command. Only used in browser envs.
     */
    req.createNode = function (config, moduleName, url) {
        var node = config.xhtml ?
                document.createElementNS('http://www.w3.org/1999/xhtml', 'html:script') :
                document.createElement('script');
        node.type = config.scriptType || 'text/javascript';
        node.charset = 'utf-8';
        node.async = true;
        return node;
    };
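
    // Override sketch (illustrative; 'cspNonce' is an assumed, not built-in, config key):
    //   var origCreateNode = req.createNode;
    //   req.createNode = function (config, moduleName, url) {
    //       var node = origCreateNode(config, moduleName, url);
    //       node.setAttribute('nonce', config.cspNonce);  // e.g. to satisfy a CSP policy
    //       return node;
    //   };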

    /**
     * Does the request to load a module for the browser case.
     * Make this a separate function to allow other environments
     * to override it.
     *
     * @param {Object} context the require context to find state.
     * @param {String} moduleName the name of the module.
     * @param {Object} url the URL to the module.
     */
    req.load = function (context, moduleName, url) {
        var config = (context && context.config) || {},
            node;
        if (isBrowser) {
            //In the browser so use a script tag
            node = req.createNode(config, moduleName, url);
            if (config.onNodeCreated) {
                config.onNodeCreated(node, config, moduleName, url);
            }

            node.setAttribute('data-requirecontext', context.contextName);
            node.setAttribute('data-requiremodule', moduleName);

            //Set up load listener. Test attachEvent first because IE9 has
            //a subtle issue in its addEventListener and script onload firings
            //that do not match the behavior of all other browsers with
            //addEventListener support, which fire the onload event for a
            //script right after the script execution. See:
            //https://connect.microsoft.com/IE/feedback/details/648057/script-onload-event-is-not-fired-immediately-after-script-execution
            //UNFORTUNATELY, Opera implements attachEvent but does not follow the
            //script execution mode.
            if (node.attachEvent &&
                    //Check if node.attachEvent is artificially added by custom script or
                    //natively supported by browser
                    //read https://github.com/jrburke/requirejs/issues/187
                    //if we can NOT find [native code] then it must NOT be natively supported.
                    //in IE8, node.attachEvent does not have toString()
                    //Note the test for "[native code" with no closing brace, see:
                    //https://github.com/jrburke/requirejs/issues/273
                    !(node.attachEvent.toString && node.attachEvent.toString().indexOf('[native code') < 0) &&
                    !isOpera) {
                //Probably IE. IE (at least 6-8) do not fire
                //script onload right after executing the script, so
                //we cannot tie the anonymous define call to a name.
                //However, IE reports the script as being in 'interactive'
                //readyState at the time of the define call.
                useInteractive = true;

                node.attachEvent('onreadystatechange', context.onScriptLoad);
                //It would be great to add an error handler here to catch
                //404s in IE9+. However, onreadystatechange will fire before
                //the error handler, so that does not help. If addEventListener
                //is used, then IE will fire error before load, but we cannot
                //use that pathway given the connect.microsoft.com issue
                //mentioned above about not doing the 'script execute,
                //then fire the script load event listener before executing the
                //next script' behavior that other browsers do.
                //Best hope: IE10 fixes the issues,
                //and then destroys all installs of IE 6-9.
                //node.attachEvent('onerror', context.onScriptError);
            } else {
                node.addEventListener('load', context.onScriptLoad, false);
                node.addEventListener('error', context.onScriptError, false);
            }
            node.src = url;

            //For some cache cases in IE 6-8, the script executes before the end
            //of the appendChild execution, so to tie an anonymous define
            //call to the module name (which is stored on the node), hold on
            //to a reference to this node, but clear after the DOM insertion.
            currentlyAddingScript = node;
            if (baseElement) {
                head.insertBefore(node, baseElement);
            } else {
                head.appendChild(node);
            }
            currentlyAddingScript = null;

            return node;
        } else if (isWebWorker) {
            try {
                //In a web worker, use importScripts. This is not a very
                //efficient use of importScripts; it will block until
                //its script is downloaded and evaluated. However, if web workers
                //are in play, the expectation is that a build has been done so
                //that only one script needs to be loaded anyway. This may need
                //to be reevaluated if other use cases become common.
                importScripts(url);

                //Account for anonymous modules
                context.completeLoad(moduleName);
            } catch (e) {
                context.onError(makeError('importscripts',
                                'importScripts failed for ' +
                                    moduleName + ' at ' + url,
                                e,
                                [moduleName]));
            }
        }
    };
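    //A minimal sketch (not part of this file) of how a non-browser, non-worker
    //host could override req.load, assuming a hypothetical synchronous
    //readFile helper provided by that host:
    //
    //    req.load = function (context, moduleName, url) {
    //        req.exec(readFile(url));          //evaluate the module source
    //        context.completeLoad(moduleName); //resolve any anonymous define()
    //    };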

    function getInteractiveScript() {
        if (interactiveScript && interactiveScript.readyState === 'interactive') {
            return interactiveScript;
        }

        eachReverse(scripts(), function (script) {
            if (script.readyState === 'interactive') {
                return (interactiveScript = script);
            }
        });
        return interactiveScript;
    }

    //Look for a data-main script attribute, which could also adjust the baseUrl.
    if (isBrowser && !cfg.skipDataMain) {
        //Figure out baseUrl. Get it from the script tag with require.js in it.
        eachReverse(scripts(), function (script) {
            //Set the 'head' where we can append children by
            //using the script's parent.
            if (!head) {
                head = script.parentNode;
            }

            //Look for a data-main attribute to set main script for the page
            //to load. If it is there, the path to data main becomes the
            //baseUrl, if it is not already set.
            dataMain = script.getAttribute('data-main');
            if (dataMain) {
                //Preserve dataMain in case it is a path (i.e. contains '?')
                mainScript = dataMain;

                //Set final baseUrl if there is not already an explicit one.
                if (!cfg.baseUrl) {
                    //Pull off the directory of data-main for use as the
                    //baseUrl.
                    src = mainScript.split('/');
                    mainScript = src.pop();
                    subPath = src.length ? src.join('/')  + '/' : './';

                    cfg.baseUrl = subPath;
                }

                //Strip off any trailing .js since mainScript is now
                //like a module name.
                mainScript = mainScript.replace(jsSuffixRegExp, '');

                //If mainScript is still a path, fall back to dataMain
                if (req.jsExtRegExp.test(mainScript)) {
                    mainScript = dataMain;
                }

                //Put the data-main script in the files to load.
                cfg.deps = cfg.deps ? cfg.deps.concat(mainScript) : [mainScript];

                return true;
            }
        });
    }
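    //Illustrative example (not executed): for a page containing
    //
    //    <script data-main="scripts/main" src="require.js"></script>
    //
    //the loop above sets cfg.baseUrl to 'scripts/' (unless one was already
    //configured) and queues 'main' as the first dependency to load.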

    /**
     * The function that handles definitions of modules. Differs from
     * require() in that a string for the module should be the first argument,
     * and the function to execute after dependencies are loaded should
     * return a value to define the module corresponding to the first argument's
     * name.
     */
    define = function (name, deps, callback) {
        var node, context;

        //Allow for anonymous modules
        if (typeof name !== 'string') {
            //Adjust args appropriately
            callback = deps;
            deps = name;
            name = null;
        }

        //This module may not have dependencies
        if (!isArray(deps)) {
            callback = deps;
            deps = null;
        }

        //If no name, and callback is a function, then figure out if it is a
        //CommonJS thing with dependencies.
        if (!deps && isFunction(callback)) {
            deps = [];
            //Remove comments from the callback string,
            //look for require calls, and pull them into the dependencies,
            //but only if there are function args.
            if (callback.length) {
                callback
                    .toString()
                    .replace(commentRegExp, '')
                    .replace(cjsRequireRegExp, function (match, dep) {
                        deps.push(dep);
                    });

                //May be a CommonJS thing even without require calls; it could still
                //use exports and module. Avoid doing exports and module
                //work though if it just needs require.
                //REQUIRES the function to expect the CommonJS variables in the
                //order listed below.
                deps = (callback.length === 1 ? ['require'] : ['require', 'exports', 'module']).concat(deps);
            }
        }

        //If in IE 6-8 and hit an anonymous define() call, do the interactive
        //work.
        if (useInteractive) {
            node = currentlyAddingScript || getInteractiveScript();
            if (node) {
                if (!name) {
                    name = node.getAttribute('data-requiremodule');
                }
                context = contexts[node.getAttribute('data-requirecontext')];
            }
        }

        //Always save off evaluating the def call until the script onload handler.
        //This allows multiple modules to be in a file without prematurely
        //tracing dependencies, and allows for anonymous module support,
        //where the module name is not known until the script onload event
        //occurs. If no context, use the global queue, and get it processed
        //in the onscript load callback.
        if (context) {
            context.defQueue.push([name, deps, callback]);
            context.defQueueMap[name] = true;
        } else {
            globalDefQueue.push([name, deps, callback]);
        }
    };
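    //Illustrative example (not executed) of the sugared CommonJS form handled
    //above; 'some/dep' and compute() are made-up names:
    //
    //    define(function (require, exports, module) {
    //        var dep = require('some/dep');
    //        exports.answer = dep.compute();
    //    });
    //
    //The dependency scan turns this into deps of
    //['require', 'exports', 'module', 'some/dep'] before the define is queued.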

    define.amd = {
        jQuery: true
    };

    /**
     * Executes the text. Normally just uses eval, but can be modified
     * to use a better, environment-specific call. Only used for transpiling
     * loader plugins, not for plain JS modules.
     * @param {String} text the text to execute/evaluate.
     */
    req.exec = function (text) {
        /*jslint evil: true */
        return eval(text);
    };

    //Set up with config info.
    req(cfg);
}(this));
", + "headers": [ + [ + "content-type", + "text/javascript" + ] + ], + "ok": true, + "status": 200, + "status_text": "" + } + } + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 4242, + "status": "ok", + "timestamp": 1512371597785, + "user": { + "displayName": "Lukasz Kaiser", + "photoUrl": "//lh3.googleusercontent.com/-CbWIwcQ_VsA/AAAAAAAAAAI/AAAAAAAAAB8/jloHVR1qOhg/s50-c-k-no/photo.jpg", + "userId": "109750154298538986950" + }, + "user_tz": 480 + }, + "id": "OJKU36QAfqOC", + "outputId": "0b3f497f-040f-41ef-8a32-70b4adf7d7d0" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WARNING:tensorflow:From /usr/local/lib/python2.7/dist-packages/tensor2tensor/layers/common_layers.py:1671: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "\n", + "Future major versions of TensorFlow will allow gradients to flow\n", + "into the labels input on backprop by default.\n", + "\n", + "See tf.nn.softmax_cross_entropy_with_logits_v2.\n", + "\n" + ] + }, + { + "data": { + "text/html": [ + "\n", + " \u003cscript src=\"/static/components/requirejs/require.js\"\u003e\u003c/script\u003e\n", + " \u003cscript\u003e\n", + " requirejs.config({\n", + " paths: {\n", + " base: '/static/base',\n", + " \"d3\": \"https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.8/d3.min/",\n", + " jquery: '//ajax.googleapis.com/ajax/libs/jquery/2.0.0/jquery.min',\n", + " },\n", + " });\n", + " \u003c/script\u003e\n", + " " + ], + "text/plain": [ + "\u003cIPython.core.display.HTML object\u003e" + ] + }, + "metadata": { + "tags": [] + }, + "output_type": "display_data" + }, + { + "data": { + "text/html": [ + "\n", + " \u003cspan style=\"user-select:none\"\u003e\n", + " Layer: \u003cselect id=\"layer\"\u003e\u003c/select\u003e\n", + " Attention: \u003cselect id=\"att_type\"\u003e\n", + " \u003coption value=\"all\"\u003eAll\u003c/option\u003e\n", + " \u003coption value=\"inp_inp\"\u003eInput - Input\u003c/option\u003e\n", + " \u003coption value=\"inp_out\"\u003eInput - Output\u003c/option\u003e\n", + " \u003coption value=\"out_out\"\u003eOutput - Output\u003c/option\u003e\n", + " \u003c/select\u003e\n", + " \u003c/span\u003e\n", + " \u003cdiv id='vis'\u003e\u003c/div\u003e\n" + ], + "text/plain": [ + "\u003cIPython.core.display.HTML object\u003e" + ] + }, + "metadata": { + "tags": [] + }, + "output_type": "display_data" + }, + { + "data": { + "application/javascript": [ + "window.attention = {\"inp_out\": {\"top_text\": [\"The_\", \"animal_\", \"didn_\", \"'_\", \"t_\", \"cross_\", \"the_\", \"street_\", \"because_\", \"it_\", \"was_\", \"too_\", \"tire\", \"d_\"], \"att\": [[[[0.01107952743768692, 0.002038179198279977, 0.02572617679834366, 0.043437324464321136, 0.026865433901548386, 0.008821134455502033, 0.05896050110459328, 0.006038360297679901, 0.05802087485790253, 0.05262080207467079, 0.021981995552778244, 0.01655607670545578, 0.007265332620590925, 0.017941446974873543, 0.19668635725975037], [0.4201550781726837, 0.0003083523770328611, 0.003427971852943301, 0.027074502781033516, 0.0025770263746380806, 0.0006525526405312121, 0.0672224909067154, 0.0006329934694804251, 0.002376251621171832, 0.007315297145396471, 0.0018543159822002053, 0.0002170451043639332, 5.486799182108371e-06, 8.465739665552974e-05, 0.018722370266914368], [6.826388562330976e-05, 0.41254693269729614, 8.318798791151494e-05, 0.00021303755056578666, 2.6623651137924753e-05, 1.3030116861045826e-06, 
3.3524677292007254e-06, 9.95700816019962e-07, 0.00025696202646940947, 0.00021154701244086027, 4.0387480112258345e-05, 7.382633339148015e-05, 0.0001871670683613047, 0.0001393109851051122, 0.00044668230111710727], [0.0012913167010992765, 0.46178945899009705, 0.0011929792817682028, 0.0014885100536048412, 0.001382660586386919, 0.00010778238356579095, 4.841455302084796e-05, 4.8626650823280215e-05, 0.0007912410655990243, 0.0019299217965453863, 0.0002972490037791431, 0.0004315593687351793, 0.013707359321415424, 0.0025058358442038298, 0.00208207662217319], [0.0008573953527957201, 5.803010481031379e-06, 0.0034995940513908863, 0.007113253697752953, 4.1040249925572425e-05, 0.48505696654319763, 0.0009781911503523588, 2.57480514846975e-05, 0.0006811833591200411, 0.011991027742624283, 0.013829604722559452, 0.02649468183517456, 0.018967876210808754, 0.008940043859183788, 0.0023627132177352905], [3.2793446735013276e-05, 4.91645641886862e-06, 0.0003670089063234627, 0.0005689052632078528, 0.0004337447171565145, 0.6979628205299377, 0.00025133590679615736, 1.3211038094596006e-05, 0.001040837960317731, 0.0008422345272265375, 0.00011131400242447853, 0.0007033413276076317, 0.00044049491407349706, 0.0004404923238325864, 0.00032976132933981717], [0.002877118531614542, 0.0015123215271160007, 0.21683953702449799, 0.042356427758932114, 0.09360139071941376, 0.7325531840324402, 0.007687804754823446, 0.0004983373219147325, 0.0008397439960390329, 0.018263472244143486, 0.01633409783244133, 0.06572946161031723, 0.029279880225658417, 0.13710656762123108, 0.013406738638877869], [0.09384340792894363, 0.002295592101290822, 0.05245966836810112, 0.10398446023464203, 0.13232196867465973, 0.2621823251247406, 0.7299563884735107, 0.01621837355196476, 0.008298774249851704, 0.019108427688479424, 0.013038183562457561, 0.008606976829469204, 0.0014156820252537727, 0.008462491445243359, 0.08448491245508194], [7.994164479896426e-05, 9.660106115916278e-06, 1.3390360436460469e-05, 0.0009496311540715396, 7.498388185922522e-06, 0.0023292596451938152, 0.0033705621026456356, 0.45610299706459045, 0.00048403104301542044, 0.0003956609289161861, 6.013430538587272e-05, 1.5610943592037074e-05, 4.899038231087616e-06, 1.0044974260381423e-05, 0.0011326958192512393], [0.0021254755556583405, 0.025354469195008278, 0.0505821667611599, 0.04718977212905884, 0.3544465899467468, 0.27984359860420227, 0.10468283295631409, 0.03827415779232979, 0.0065247067250311375, 0.003615353489294648, 0.001024437602609396, 0.02404061146080494, 0.00031744904117658734, 0.011979974806308746, 0.06911104917526245], [0.06793052703142166, 0.04423084855079651, 0.009074175730347633, 0.010606715455651283, 0.023761747404932976, 0.06765440851449966, 0.048715878278017044, 0.13498826324939728, 0.15846557915210724, 0.01835249364376068, 0.0033974519465118647, 0.011923078447580338, 0.0035463334061205387, 0.036997705698013306, 0.15195232629776], [0.00013637961819767952, 0.00010623007256072015, 0.00015417735266964883, 0.00014589299098588526, 0.0007127521676011384, 0.0008950252668000758, 0.00038585966103710234, 0.002901369472965598, 0.34460243582725525, 0.00040915730642154813, 0.00017379666678607464, 9.334777860203758e-05, 0.0002283527428517118, 0.0001650981866987422, 0.0021401161793619394], [0.03951041400432587, 0.015644539147615433, 0.002765331417322159, 0.020979223772883415, 0.001914863707497716, 0.049360573291778564, 0.010446744039654732, 0.06006397679448128, 0.18512527644634247, 0.5769777894020081, 0.07455664873123169, 0.016840822994709015, 0.21517987549304962, 0.030672460794448853, 
0.04319411888718605], [0.0012064727488905191, 0.0013226938899606466, 0.002064700936898589, 0.008003294467926025, 0.002116014016792178, 0.0028530799318104982, 0.006337625440210104, 0.0002913604548666626, 0.0004794643900822848, 0.0026383439544588327, 0.0038926906418055296, 0.3737375736236572, 0.002772320294752717, 0.007620541378855705, 0.003997606225311756], [1.0432314411445986e-05, 4.745730166177964e-06, 1.672162215982098e-05, 2.360623693675734e-05, 4.496370820561424e-06, 1.767691173881758e-06, 4.21794857174973e-06, 1.7029789205480483e-06, 2.8430429665604606e-05, 7.409282261505723e-05, 0.00010478614422027022, 0.00017224416660610586, 0.480630487203598, 0.017292670905590057, 3.8113743357826024e-05], [0.00031966043752618134, 7.799067680025473e-05, 0.0005293181748129427, 0.0002383182873018086, 6.09634407737758e-05, 1.622732997930143e-05, 0.0001254813396371901, 4.548055585473776e-05, 0.0002202334435423836, 0.0014038329245522618, 0.008373874239623547, 0.0005300238262861967, 0.8584288358688354, 0.0721927285194397, 0.0012385909212753177], [0.008336205966770649, 0.000929497298784554, 0.060522519052028656, 0.02858084999024868, 0.004865946713835001, 0.19429318606853485, 0.006222299765795469, 0.00020022530225105584, 0.03241097182035446, 0.2199898362159729, 0.40489089488983154, 0.12284909188747406, 0.04783688485622406, 0.16652296483516693, 0.03165041282773018], [0.06735408306121826, 0.02395833097398281, 0.022876637056469917, 0.059418935328722, 0.020556019619107246, 0.006657767109572887, 0.01686989888548851, 0.03750348463654518, 0.0929105281829834, 0.11066772043704987, 0.07383746653795242, 0.04306775704026222, 0.1764260083436966, 0.2488536387681961, 0.14264866709709167], [0.00023218609567265958, 9.724824485601857e-05, 0.00017837552877608687, 0.000249945733230561, 0.00043016509152948856, 0.0002728255931288004, 0.0002596308768261224, 0.0021448382176458836, 0.33870813250541687, 0.0012523159384727478, 0.0004828754754271358, 7.525486580561846e-05, 0.001232807757332921, 0.00022845527564641088, 0.0029908884316682816], [0.044313203543424606, 0.014693659730255604, 0.001713237608782947, 0.01787775754928589, 0.001054717693477869, 0.03111616149544716, 0.005932849366217852, 0.035437386482954025, 0.10908837616443634, 0.6214090585708618, 0.11623460799455643, 0.018710769712924957, 0.26884767413139343, 0.036007944494485855, 0.04555344209074974], [0.0014647350180894136, 0.0016486160457134247, 0.001705971430055797, 0.008203698322176933, 0.0011827786220237613, 0.001036314177326858, 0.004107706248760223, 0.00018337460642214864, 0.0005908485618419945, 0.004427316598594189, 0.0075510423630476, 0.37528446316719055, 0.0045065670274198055, 0.01084148045629263, 0.0047609396278858185], [1.1546462701517157e-05, 6.3197094277711585e-06, 1.3665205187862739e-05, 2.3049220544635318e-05, 3.1024922009237343e-06, 9.712728115118807e-07, 4.2468768697290216e-06, 1.4032799526830786e-06, 2.1501631636056118e-05, 0.00011254433775320649, 0.00014821428339928389, 0.00021640797785948962, 0.4815296530723572, 0.022970588877797127, 4.596232975018211e-05], [0.0004618540406227112, 0.00011890243331436068, 0.0008028792799450457, 0.0003817373653873801, 7.645944424439222e-05, 2.0059787857462652e-05, 0.00017321997438557446, 3.885024489136413e-05, 0.00016429855895694345, 0.0017073642229661345, 0.011983372271060944, 0.0008083870052359998, 0.8495219349861145, 0.07573292404413223, 0.0017974229995161295], [0.00848880223929882, 0.0010204557329416275, 0.06384890526533127, 0.030244439840316772, 0.004545390605926514, 0.2111765593290329, 0.007047791499644518, 
0.00020413362653926015, 0.03285042569041252, 0.2096482813358307, 0.40160003304481506, 0.12425301223993301, 0.05433715134859085, 0.2013336718082428, 0.03489448130130768], [0.018106432631611824, 0.01663283444941044, 0.006966447923332453, 0.06288447231054306, 0.008926548063755035, 0.0005806194385513663, 0.004527462646365166, 0.00047311693197116256, 0.010450053960084915, 0.008817908354103565, 0.02498125471174717, 0.02475220151245594, 0.006219316273927689, 0.034688226878643036, 0.15510374307632446]], [[0.011485431343317032, 0.057214245200157166, 0.11445975303649902, 0.035292237997055054, 0.17235025763511658, 0.21079879999160767, 0.08683252334594727, 0.33144259452819824, 0.2781406342983246, 0.07864350080490112, 0.10017280280590057, 0.0828540250658989, 0.17722147703170776, 0.21101748943328857, 0.15805292129516602], [0.041519034653902054, 0.11474552005529404, 0.04909001290798187, 0.1299373209476471, 0.06295691430568695, 0.0239214189350605, 0.22038953006267548, 0.6809458136558533, 0.03295678645372391, 0.34942832589149475, 0.1847512274980545, 0.22206875681877136, 0.13646042346954346, 0.277276873588562, 0.1334262192249298], [0.0764331966638565, 0.004937899298965931, 0.049346037209033966, 0.05165911093354225, 0.051789041608572006, 0.11632981896400452, 0.3382570743560791, 0.21805666387081146, 0.5269062519073486, 0.05627245828509331, 0.1284114420413971, 0.3053610324859619, 0.058564696460962296, 0.14431920647621155, 0.19175130128860474], [0.08274618536233902, 0.009897814132273197, 0.07511309534311295, 0.03663979470729828, 0.16369661688804626, 0.04579350724816322, 0.04420214146375656, 0.06866282969713211, 0.17000554502010345, 0.09549596160650253, 0.07313749194145203, 0.06223462149500847, 0.11603321135044098, 0.07143211364746094, 0.2059532254934311], [0.41769060492515564, 0.07210511714220047, 0.40716952085494995, 0.22363832592964172, 0.48781970143318176, 0.015007800422608852, 0.4504202902317047, 0.4675638973712921, 0.24936619400978088, 0.5447031855583191, 0.4296078681945801, 0.07025930285453796, 0.1902965009212494, 0.3567025065422058, 0.12464861571788788], [0.3858333230018616, 0.06937354803085327, 0.5601253509521484, 0.30969470739364624, 0.36272186040878296, 0.005774383433163166, 0.16290897130966187, 0.16338182985782623, 0.1734752655029297, 0.10127251595258713, 0.6812319159507751, 0.35078492760658264, 0.26554787158966064, 0.3089393675327301, 0.12310608476400375], [0.047016799449920654, 0.04388514533638954, 0.010725832544267178, 0.029561294242739677, 0.04913409426808357, 0.007112162187695503, 0.045616600662469864, 0.09563170373439789, 0.021758677437901497, 0.05606407672166824, 0.023780539631843567, 0.2586848735809326, 0.1317795366048813, 0.13214319944381714, 0.18490085005760193], [0.024271933361887932, 0.10952932387590408, 0.01092300284653902, 0.005798409227281809, 0.03478696197271347, 0.015390553511679173, 0.005925341974943876, 0.04537563398480415, 0.00714160455390811, 0.005484140943735838, 0.00704369880259037, 0.04858299717307091, 0.06617175042629242, 0.13874217867851257, 0.17208275198936462], [0.1448126882314682, 0.16020630300045013, 0.02696153335273266, 0.06902630627155304, 0.03837759047746658, 0.07682601362466812, 0.15773272514343262, 0.005734406877309084, 0.16041570901870728, 0.10849703103303909, 0.08964504301548004, 0.4313186705112457, 0.12084108591079712, 0.20548132061958313, 0.1913137137889862], [0.03147122263908386, 0.06498080492019653, 0.03835386037826538, 0.021906379610300064, 0.004580754786729813, 0.08777225762605667, 0.06548282504081726, 0.0501156747341156, 0.09960248321294785, 
0.05812418833374977, 0.04425663501024246, 0.12932318449020386, 0.040425609797239304, 0.10523593425750732, 0.20731014013290405], [0.03185653313994408, 0.014990762807428837, 0.012671640142798424, 0.014554454945027828, 0.005096337758004665, 0.025306345894932747, 0.015522593632340431, 0.012109486386179924, 0.014945329166948795, 0.0111803337931633, 0.010501275770366192, 0.010505528189241886, 0.013426732271909714, 0.01895906589925289, 0.16498495638370514], [0.05249502509832382, 0.3800218403339386, 0.048091597855091095, 0.01820666529238224, 0.10161028057336807, 0.18240275979042053, 0.03954629600048065, 0.08666953444480896, 0.00239415536634624, 0.05545663461089134, 0.11899324506521225, 0.03552442044019699, 0.037884730845689774, 0.08727249503135681, 0.23120805621147156], [0.06818026304244995, 0.06384387612342834, 0.013627037405967712, 0.017488455399870872, 0.04112459346652031, 0.37204819917678833, 0.2269488275051117, 0.050778258591890335, 0.07564288377761841, 0.002337054116651416, 0.03256889060139656, 0.017944803461432457, 0.02268233709037304, 0.05458826571702957, 0.17415940761566162], [0.3350563049316406, 0.14807114005088806, 0.16856855154037476, 0.0634150505065918, 0.6115131974220276, 0.8617944717407227, 0.4784194529056549, 0.271447092294693, 0.44727417826652527, 0.03638387843966484, 0.0791390910744667, 0.0010650564217939973, 0.10882135480642319, 0.07249648869037628, 0.16217634081840515], [0.6229478120803833, 0.11473710834980011, 0.9313594102859497, 0.6977004408836365, 0.7760463953018188, 0.5547962784767151, 0.2850213646888733, 0.12024195492267609, 0.6867435574531555, 0.3715392053127289, 0.5383524894714355, 0.04410971701145172, 0.001209885231219232, 0.03505939990282059, 0.07057712972164154], [0.12039526551961899, 0.15183398127555847, 0.23466746509075165, 0.07534174621105194, 0.09489727020263672, 0.12723755836486816, 0.06088049337267876, 0.06659132242202759, 0.24534910917282104, 0.08624531328678131, 0.05703657865524292, 0.031156441196799278, 0.0026320687029510736, 0.016870809718966484, 0.16136524081230164], [0.024926312267780304, 0.055538877844810486, 0.0035579875111579895, 0.006728078704327345, 0.10179352015256882, 0.12386216968297958, 0.08368373662233353, 0.17138876020908356, 0.13290183246135712, 0.025975322350859642, 0.0007942751399241388, 0.08679928630590439, 0.006940893363207579, 0.006668384652584791, 0.2167840152978897], [0.03079223819077015, 0.008776835165917873, 0.025623725727200508, 0.02996702678501606, 0.076390340924263, 0.11722294241189957, 0.03722265735268593, 0.06894396245479584, 0.023492204025387764, 0.02721765637397766, 0.02432498149573803, 0.009946721605956554, 0.02367306686937809, 0.02709045261144638, 0.15603508055210114], [0.050754088908433914, 0.38707080483436584, 0.056088101118803024, 0.022330837324261665, 0.19594413042068481, 0.356031596660614, 0.05540256202220917, 0.17031489312648773, 0.002592364326119423, 0.0904960110783577, 0.17009596526622772, 0.02688765898346901, 0.05266827344894409, 0.09536514431238174, 0.2306852787733078], [0.052731066942214966, 0.07647765427827835, 0.009669344872236252, 0.013631273992359638, 0.037963252514600754, 0.40968915820121765, 0.1877974420785904, 0.06287717074155807, 0.06925270706415176, 0.0021469732746481895, 0.03106895461678505, 0.02147551439702511, 0.022071314975619316, 0.058794401586055756, 0.17150944471359253], [0.2993965446949005, 0.1887350082397461, 0.17583680152893066, 0.06075390800833702, 0.6836855411529541, 0.8825634121894836, 0.44942814111709595, 0.3110062777996063, 0.6245057582855225, 0.04149743914604187, 0.08928828686475754, 
0.0010537458583712578, 0.13885420560836792, 0.09175378829240799, 0.16601231694221497], [0.6222140192985535, 0.13893182575702667, 0.9335290789604187, 0.7374492883682251, 0.8253674507141113, 0.5633905529975891, 0.4091120660305023, 0.12903769314289093, 0.8090996742248535, 0.490604043006897, 0.6206711530685425, 0.06171489879488945, 0.0013746770564466715, 0.055387232452631, 0.07617512345314026], [0.1216169223189354, 0.17628714442253113, 0.21903447806835175, 0.08471400290727615, 0.12100206315517426, 0.12684285640716553, 0.060168445110321045, 0.05725802481174469, 0.204857736825943, 0.07119028270244598, 0.04997517541050911, 0.046147700399160385, 0.002665548352524638, 0.01769380457699299, 0.1595369428396225], [0.02323095127940178, 0.05151251330971718, 0.002836216241121292, 0.007343180477619171, 0.11471041291952133, 0.09745588153600693, 0.08793136477470398, 0.19987791776657104, 0.2081962525844574, 0.026029428467154503, 0.0006721516838297248, 0.15218332409858704, 0.008676346391439438, 0.009503011591732502, 0.20713838934898376], [0.07751920074224472, 0.05964339151978493, 0.026831025257706642, 0.018057459965348244, 0.1489739865064621, 0.27560925483703613, 0.15271086990833282, 0.29336896538734436, 0.2548864185810089, 0.015449506230652332, 0.02643660455942154, 0.05839552357792854, 0.06659974157810211, 0.1841144859790802, 0.1324990689754486]], [[0.006645309738814831, 0.043047573417425156, 0.04108792915940285, 0.028674451634287834, 0.10265154391527176, 0.03326163440942764, 0.05858607590198517, 0.06312219053506851, 0.013714859262108803, 0.017589740455150604, 0.02732386440038681, 0.11026919633150101, 0.028857730329036713, 0.054291173815727234, 0.19011041522026062], [0.006623337976634502, 0.06184479594230652, 0.014693422242999077, 0.03981047496199608, 0.08752858638763428, 0.01962500624358654, 0.06706372648477554, 0.011501927860081196, 0.0061228955164551735, 0.013949333690106869, 0.018435969948768616, 0.03678559139370918, 0.022487374022603035, 0.0660797506570816, 0.28934401273727417], [0.04245300590991974, 0.10349805653095245, 0.03407163918018341, 0.007511724252253771, 0.011565770022571087, 0.010817471891641617, 0.05971734598278999, 0.00459411833435297, 0.00350962788797915, 0.021488210186362267, 0.02298545651137829, 0.06376963108778, 0.036461468786001205, 0.1865386664867401, 0.16962040960788727], [0.014149562455713749, 0.03299444913864136, 0.007003516890108585, 0.004260434303432703, 0.018919609487056732, 0.008522795513272285, 0.018369171768426895, 0.015471882186830044, 0.0008095644298009574, 0.012402600608766079, 0.0075600892305374146, 0.03885417431592941, 0.05682341009378433, 0.0525624044239521, 0.22132590413093567], [0.01582285761833191, 0.013434984721243382, 0.0299182441085577, 0.03647983819246292, 0.009840411134064198, 0.06101881340146065, 0.04943924769759178, 0.3809337913990021, 0.027872184291481972, 0.07177315652370453, 0.06987256556749344, 0.014244881458580494, 0.18650749325752258, 0.16280896961688995, 0.16209137439727783], [0.018014581874012947, 0.11459828168153763, 0.013770120218396187, 0.021584663540124893, 0.02155740186572075, 0.03133949637413025, 0.03938381373882294, 0.28105995059013367, 0.02592163160443306, 0.026603924110531807, 0.010026685893535614, 0.009953479282557964, 0.004658891819417477, 0.014652709476649761, 0.16460371017456055], [0.001359884045086801, 0.029354294762015343, 0.0013457777677103877, 0.0026418184861540794, 0.008543581701815128, 0.003654624568298459, 0.0034977763425558805, 0.039957791566848755, 0.00108401442412287, 0.0005604945472441614, 0.0003877367707900703, 
0.0033066808246076107, 0.007358025759458542, 0.007617549039423466, 0.20286646485328674], [0.015068605542182922, 0.027786174789071083, 0.015096615999937057, 0.048349082469940186, 0.03296791389584541, 0.0033369800075888634, 0.004459223244339228, 0.01348987128585577, 0.0010384898632764816, 0.013556106016039848, 0.015940798446536064, 0.042712315917015076, 0.02055070362985134, 0.042082786560058594, 0.17761820554733276], [0.09032934159040451, 0.007927155122160912, 0.08835490047931671, 0.21186837553977966, 0.05379607528448105, 0.23637458682060242, 0.16646702587604523, 0.022663533687591553, 0.024165447801351547, 0.08468358218669891, 0.07286331057548523, 0.016201749444007874, 0.031014403328299522, 0.026781529188156128, 0.21159759163856506], [0.014649872668087482, 0.032003261148929596, 0.1914098560810089, 0.17710277438163757, 0.07542474567890167, 0.05287592485547066, 0.14732114970684052, 0.08320016413927078, 0.025441674515604973, 0.02800501137971878, 0.0780739113688469, 0.04154554009437561, 0.017996925860643387, 0.08907850831747055, 0.17056028544902802], [0.29397615790367126, 0.03400568664073944, 0.3242063522338867, 0.3681035339832306, 0.48163339495658875, 0.025333818048238754, 0.20042747259140015, 0.06051841378211975, 0.2913966476917267, 0.19229580461978912, 0.12739360332489014, 0.07057002186775208, 0.012750222347676754, 0.053084854036569595, 0.09877952188253403], [0.2290111482143402, 0.04351853206753731, 0.4067046046257019, 0.12047477811574936, 0.3140789866447449, 0.03630740940570831, 0.1768438071012497, 0.13207398355007172, 0.0676346942782402, 0.07621245086193085, 0.1797569841146469, 0.24804529547691345, 0.009716867469251156, 0.01671340875327587, 0.15996301174163818], [0.0448942668735981, 0.015721717849373817, 0.04864601418375969, 0.03494936227798462, 0.016112152487039566, 0.06668571382761002, 0.05302642658352852, 0.07182876765727997, 0.006946365814656019, 0.011091585271060467, 0.1120418831706047, 0.008756275288760662, 0.055249348282814026, 0.03253563493490219, 0.187040314078331], [0.3104230761528015, 0.04545353353023529, 0.3986057937145233, 0.6762936115264893, 0.03838818892836571, 0.03300129249691963, 0.27034318447113037, 0.21517230570316315, 0.008858010172843933, 0.2650390863418579, 0.2720700800418854, 0.005442188587039709, 0.06764175742864609, 0.053534120321273804, 0.18754751980304718], [0.011383982375264168, 0.11127021163702011, 0.0030386100988835096, 0.0067845494486391544, 0.013927198015153408, 0.08719860762357712, 0.03287587687373161, 0.5690041184425354, 0.03855481743812561, 0.020931608974933624, 0.01293823029845953, 0.047187648713588715, 0.021772168576717377, 0.1471272110939026, 0.18776896595954895], [0.005892250686883926, 0.03474593162536621, 0.023128867149353027, 0.002957691205665469, 0.03212961554527283, 0.015600761398673058, 0.0076070488430559635, 0.04006163775920868, 0.012522950768470764, 0.00397108681499958, 0.004476191475987434, 0.01931026391685009, 0.006290406920015812, 0.014653924852609634, 0.17843826115131378], [0.030382098630070686, 0.14396639168262482, 0.0023552696220576763, 0.003069670405238867, 0.03293609246611595, 0.010766614228487015, 0.04698408767580986, 0.0892328992486, 0.010764017701148987, 0.01645551063120365, 0.0007101192022673786, 0.14693684875965118, 0.10194381326436996, 0.06734117865562439, 0.21650707721710205], [0.11579495668411255, 0.04704239219427109, 0.08932461589574814, 0.10469675809144974, 0.3945455551147461, 0.10528933256864548, 0.15413445234298706, 0.13012593984603882, 0.37207290530204773, 0.07726370543241501, 0.08641648292541504, 0.07665102183818817, 
0.02378079853951931, 0.06452124565839767, 0.12331708520650864], [0.20921318233013153, 0.07137931883335114, 0.3537597060203552, 0.1065746620297432, 0.30610421299934387, 0.07002534717321396, 0.22329437732696533, 0.23702743649482727, 0.06014438346028328, 0.05975072830915451, 0.17522762715816498, 0.3013332188129425, 0.02163097821176052, 0.016774384304881096, 0.15580035746097565], [0.037447404116392136, 0.022215796634554863, 0.033449236303567886, 0.026462113484740257, 0.01563168875873089, 0.07434160262346268, 0.05695066228508949, 0.11209315806627274, 0.007291351445019245, 0.008904322981834412, 0.08964232355356216, 0.01435061078518629, 0.07215401530265808, 0.030404584482312202, 0.17889626324176788], [0.35028940439224243, 0.06261257082223892, 0.400876522064209, 0.6601436138153076, 0.0364767424762249, 0.0348673090338707, 0.3584212362766266, 0.3042086958885193, 0.012779565528035164, 0.3784087598323822, 0.29859334230422974, 0.00785628892481327, 0.11913719773292542, 0.06971576809883118, 0.17937220633029938], [0.014627714641392231, 0.1739588975906372, 0.0033204040955752134, 0.007496224716305733, 0.011711684986948967, 0.10170583426952362, 0.050673384219408035, 0.6495208740234375, 0.040652137249708176, 0.03492900729179382, 0.01829371228814125, 0.07074988633394241, 0.02588740922510624, 0.18312060832977295, 0.1794223189353943], [0.006626310292631388, 0.049714479595422745, 0.02355029061436653, 0.0033578642178326845, 0.02970620058476925, 0.020507775247097015, 0.008351391181349754, 0.03789898753166199, 0.008593969978392124, 0.004206442274153233, 0.004605707712471485, 0.02678176388144493, 0.006028715055435896, 0.012980426661670208, 0.1725957691669464], [0.029822910204529762, 0.18419219553470612, 0.002088941168040037, 0.00302593014203012, 0.028257815167307854, 0.012486547231674194, 0.051940228790044785, 0.10161811858415604, 0.01137576438486576, 0.02022942155599594, 0.0007436276064254344, 0.2113851010799408, 0.1359580010175705, 0.08821411430835724, 0.2053057849407196], [0.016353517770767212, 0.03170220926403999, 0.014149405062198639, 0.013441388495266438, 0.037340469658374786, 0.010170645080506802, 0.0053974115289747715, 0.025274941697716713, 0.017184404656291008, 0.0020940443500876427, 0.006704597268253565, 0.009430822916328907, 0.030376460403203964, 0.024553189054131508, 0.15533798933029175]], [[0.005564282648265362, 0.001319661969318986, 0.028383644297719002, 0.01146539393812418, 0.028919272124767303, 0.012663042172789574, 0.023019153624773026, 0.0018097365973517299, 0.0143426563590765, 0.021044740453362465, 0.015969598665833473, 0.03200899809598923, 0.013908782042562962, 0.03448842838406563, 0.20206299424171448], [0.3364894986152649, 0.00033270660787820816, 0.017299778759479523, 0.02505551464855671, 0.00914769060909748, 0.0018482855521142483, 0.040363892912864685, 0.0008854345069266856, 0.020481230691075325, 0.022734129801392555, 0.016724254935979843, 0.0011141380527988076, 5.783090819022618e-05, 0.0005799515638500452, 0.07228588312864304], [0.0004661931307055056, 0.4122284948825836, 0.0022180580999702215, 0.00018468582129571587, 0.00030452435021288693, 5.825214248034172e-05, 0.0012309255544096231, 0.0017770789563655853, 1.19774986160337e-05, 0.0001907332189148292, 0.0007099026697687805, 0.0006694658659398556, 1.216385771840578e-05, 0.00011785236711148173, 0.00036971797817386687], [0.04950903728604317, 0.2967310845851898, 0.021222729235887527, 0.01289455872029066, 0.009955117478966713, 0.008917939849197865, 0.011312013491988182, 0.01272521447390318, 0.0006359940161928535, 0.011413054540753365, 
0.006479735020548105, 0.0053005279041826725, 0.001741865067742765, 0.0027997863944619894, 0.08213357627391815], [0.020872987806797028, 3.087984805461019e-05, 0.009670623578131199, 0.0253498163074255, 0.010817835107445717, 0.4320962131023407, 0.017970044165849686, 0.0021109851077198982, 0.0003069202939514071, 0.008261006325483322, 0.006166533567011356, 0.7898750901222229, 0.11304597556591034, 0.12737329304218292, 0.011856237426400185], [0.06067817285656929, 0.005839335732161999, 0.025896329432725906, 0.03351203724741936, 0.025002295151352882, 0.25514867901802063, 0.4275963008403778, 0.0194717925041914, 0.0888834074139595, 0.04690318927168846, 0.03570560738444328, 0.0850825086236, 0.0388353131711483, 0.24394167959690094, 0.10019046813249588], [0.014415884390473366, 0.001141559099778533, 0.0678224116563797, 0.024646559730172157, 0.08796916157007217, 0.022639306262135506, 0.07784608006477356, 0.02605922892689705, 0.014093886129558086, 0.0286162830889225, 0.09674176573753357, 0.04692256450653076, 0.03519048914313316, 0.20982496440410614, 0.1800668090581894], [0.02086471952497959, 0.0008324789232574403, 0.01815967448055744, 0.002886975882574916, 0.0020961007103323936, 0.004472428001463413, 0.033020272850990295, 0.0047500282526016235, 0.012928733602166176, 0.014328529126942158, 0.015946470201015472, 0.06593997031450272, 0.00855537410825491, 0.07526978105306625, 0.1768130511045456], [0.0009654826717451215, 0.000225315525312908, 0.0006124225910753012, 0.0007836261647753417, 0.0007428302778862417, 0.003282200777903199, 0.008662715554237366, 0.45239004492759705, 4.857195381191559e-05, 0.0006357804522849619, 0.0010122592793777585, 0.0006606358801946044, 0.00025698603712953627, 0.0011707579251378775, 0.0028539940249174833], [0.0025523374788463116, 0.0009212270379066467, 0.09748471528291702, 0.057154957205057144, 0.4982932209968567, 0.000552327954210341, 0.02918482944369316, 0.0039253802970051765, 0.00450148293748498, 0.0014971394557505846, 0.009822547435760498, 0.0017059196252375841, 0.001570553402416408, 0.005804183427244425, 0.00957300141453743], [0.016401896253228188, 0.00043752315104939044, 0.0039018490351736546, 0.005885160993784666, 0.0023499932140111923, 0.0031332974322140217, 0.055512603372335434, 0.003903925186023116, 0.10197419673204422, 0.009071548469364643, 0.023729920387268066, 0.002627716166898608, 0.01914973370730877, 0.02837507426738739, 0.1623656302690506], [0.0004865071678068489, 2.4051656509982422e-05, 0.00020084556308574975, 0.0003736558719538152, 0.000646126689389348, 9.209318523062393e-05, 0.009753170423209667, 9.854567178990692e-05, 0.34485483169555664, 0.00047165394062176347, 0.0012700805673375726, 0.000479432987049222, 0.0015819557011127472, 0.0008011643076315522, 0.0017131956992670894], [0.03442303463816643, 0.014513631351292133, 0.003174385754391551, 0.00478995218873024, 0.0017101461999118328, 0.003900717245414853, 0.05713852494955063, 0.013628470711410046, 0.0976317971944809, 0.28217896819114685, 0.01894235610961914, 0.009533336386084557, 0.003816690994426608, 0.005922130309045315, 0.12864208221435547], [0.01004086248576641, 0.01997406780719757, 0.005450551863759756, 0.006583535112440586, 0.0027623113710433245, 0.002903316868469119, 0.03531726077198982, 0.008635452017188072, 0.029197845607995987, 0.02162068709731102, 0.013219092041254044, 0.2711889445781708, 0.00537630682811141, 0.006846235599368811, 0.06079954653978348], [0.00031272557680495083, 8.196506314561702e-06, 4.237617031321861e-05, 0.00043677922803908587, 0.00024717405904084444, 0.022641032934188843, 
0.002573953475803137, 0.0004433683061506599, 0.0013428670354187489, 0.00034036010038107634, 0.0007929583080112934, 0.0033021108247339725, 0.4761846959590912, 0.05593165382742882, 0.00081905338447541], [0.00267792004160583, 4.751862070406787e-05, 0.014043050818145275, 0.02037942036986351, 0.04410611465573311, 0.04370833560824394, 0.06117184832692146, 0.01571183279156685, 0.11117196083068848, 0.006906491704285145, 0.0029646854382008314, 0.15407170355319977, 0.010935205966234207, 0.03797803074121475, 0.16977860033512115], [0.011722833849489689, 0.005004812031984329, 0.007801789790391922, 0.0020204312168061733, 0.004946417640894651, 0.000467105332063511, 0.11018845438957214, 0.016256244853138924, 0.05208335816860199, 0.08122430741786957, 0.4447634816169739, 0.0032620911952108145, 0.0036480925045907497, 0.02699565887451172, 0.038189876824617386], [0.024071840569376945, 0.0004321316082496196, 0.023504342883825302, 0.020648522302508354, 0.021508874371647835, 0.012214796617627144, 0.024360070005059242, 0.0013747027842327952, 0.0815734788775444, 0.08039785921573639, 0.06951787322759628, 0.017521949484944344, 0.04566040262579918, 0.08389204740524292, 0.15396325290203094], [0.0014979105908423662, 4.0405931940767914e-05, 0.0008743218495510519, 0.001329930848442018, 0.0032007889822125435, 0.0002464030694682151, 0.015361684374511242, 0.00014017200737725943, 0.3369258642196655, 0.0015512423124164343, 0.003011554479598999, 0.0010034784208983183, 0.0037561107892543077, 0.0018123533809557557, 0.0037892721593379974], [0.03386643901467323, 0.015328249894082546, 0.002211565151810646, 0.003828595858067274, 0.0012934240512549877, 0.004837968852370977, 0.04463785141706467, 0.014559985138475895, 0.04106945917010307, 0.26340487599372864, 0.017707379534840584, 0.01015215553343296, 0.0033097255509346724, 0.0058202859945595264, 0.13427288830280304], [0.011043943464756012, 0.029788998886942863, 0.004548549186438322, 0.006417197175323963, 0.0019613932818174362, 0.0028304944280534983, 0.02768276073038578, 0.006805655546486378, 0.02553243562579155, 0.0314837321639061, 0.015709027647972107, 0.2568790316581726, 0.008081428706645966, 0.009137820452451706, 0.06746803224086761], [0.0003306480939500034, 1.1417017958592623e-05, 3.816767639364116e-05, 0.000435528316302225, 0.00020690191013272852, 0.02179853804409504, 0.002864222740754485, 0.0005160043947398663, 0.001080053043551743, 0.0004847492673434317, 0.0009861867874860764, 0.003908392507582903, 0.47703394293785095, 0.07113853842020035, 0.000873323529958725], [0.0030808241572231054, 6.38188939774409e-05, 0.011707174591720104, 0.023645061999559402, 0.038246914744377136, 0.047200631350278854, 0.04958858713507652, 0.012573646381497383, 0.04961754009127617, 0.005252092145383358, 0.002489157486706972, 0.17429526150226593, 0.008030706085264683, 0.02717452496290207, 0.1679786741733551], [0.01455691922456026, 0.008012487553060055, 0.006938801147043705, 0.00259140832349658, 0.004911262542009354, 0.0004763725446537137, 0.10579084604978561, 0.021042171865701675, 0.03971559554338455, 0.07511086016893387, 0.43185338377952576, 0.0035418386105448008, 0.004437423776835203, 0.03184036538004875, 0.04226255044341087], [0.055085837841033936, 0.014846320264041424, 0.06939522176980972, 0.036867137998342514, 0.13156765699386597, 0.04343622922897339, 0.18117153644561768, 0.04244613274931908, 0.04596249759197235, 0.13158053159713745, 0.047130946069955826, 0.549620509147644, 0.24813801050186157, 0.3232562243938446, 0.11823604255914688]], [[0.7448275089263916, 0.00023065913410391659, 
0.0003700565139297396, 0.0002745355886872858, 0.0005768057890236378, 1.0151054993912112e-05, 1.3715341992792673e-05, 7.643950084457174e-06, 0.0004341531603131443, 5.2913601393811405e-05, 5.353476808522828e-05, 8.812115265754983e-05, 1.1566834245968494e-06, 5.744800546381157e-06, 5.576572584686801e-05], [8.114575030049309e-05, 0.06691394746303558, 0.04036417603492737, 0.022258125245571136, 0.055233534425497055, 0.050445422530174255, 0.048324622213840485, 0.00889397319406271, 0.1270352452993393, 0.04156908392906189, 0.20929713547229767, 0.21122632920742035, 0.414194792509079, 0.12628954648971558, 0.25567519664764404], [0.0012628535041585565, 0.0008597301202826202, 0.036364536732435226, 0.0971999391913414, 0.04217860475182533, 0.10421664267778397, 0.16082510352134705, 0.03283625468611717, 0.09032318741083145, 0.09653837233781815, 0.21890851855278015, 0.06589526683092117, 0.47985169291496277, 0.21388037502765656, 0.21010825037956238], [0.0002990703214891255, 0.001862871926277876, 0.010526847094297409, 0.01025421917438507, 0.05592086538672447, 0.02697981521487236, 0.01570008136332035, 0.02568165771663189, 0.010194454342126846, 0.048093631863594055, 0.04421652480959892, 0.02353351190686226, 0.21245922148227692, 0.0448865108191967, 0.23352482914924622], [0.00015855174569878727, 0.013162538409233093, 0.006567019037902355, 0.004201928153634071, 0.006268346216529608, 0.00024757537175901234, 0.012954139150679111, 0.003747382666915655, 0.03740423545241356, 0.007960616610944271, 0.013323514722287655, 0.06273993849754333, 0.048431456089019775, 0.13987915217876434, 0.20342004299163818], [0.013553211465477943, 0.03824196010828018, 0.02278091199696064, 0.09299258887767792, 0.0559159517288208, 0.00022306715254671872, 0.031003709882497787, 0.010444254614412785, 0.16168788075447083, 0.03666102886199951, 0.00852662418037653, 0.4432809352874756, 0.009321487508714199, 0.024379035457968712, 0.17351986467838287], [0.00026768012321554124, 0.015254812315106392, 0.007090381346642971, 0.006173381581902504, 0.006773150525987148, 0.0008773274021223187, 0.00638232659548521, 0.016591282561421394, 0.004996343981474638, 0.009327422827482224, 0.008862738497555256, 0.05876166746020317, 0.009527520276606083, 0.00578573253005743, 0.20356230437755585], [0.0008312691352330148, 0.012717761099338531, 0.013986560516059399, 0.007093494758009911, 0.004876464139670134, 0.0027259632479399443, 0.0033886858727782965, 0.01589561626315117, 0.00876854918897152, 0.005017295014113188, 0.023178039118647575, 0.05755693465471268, 0.05451130494475365, 0.06928746402263641, 0.1796484887599945], [0.00016753048112150282, 0.011822681874036789, 0.005686081480234861, 0.011659285984933376, 0.004307762254029512, 0.0031254058703780174, 0.009316416457295418, 0.0016170619055628777, 0.012603488750755787, 0.0245236624032259, 0.01756892167031765, 0.011099276132881641, 0.11892349272966385, 0.02075323462486267, 0.2549600899219513], [0.00017647366621531546, 0.053185176104307175, 0.007304554805159569, 0.004834755789488554, 0.000954066461417824, 0.025718921795487404, 0.02985404059290886, 0.09960591793060303, 0.010695043951272964, 0.016483109444379807, 0.018774237483739853, 0.05090473219752312, 0.01008983701467514, 0.028674444183707237, 0.22871088981628418], [0.0008755451999604702, 0.020039640367031097, 0.003969491925090551, 0.007670485880225897, 0.006173306610435247, 0.012295764870941639, 0.0076020946726202965, 0.012137084268033504, 0.010956642217934132, 0.010541083291172981, 0.018125493079423904, 0.03226908668875694, 0.02587633579969406, 0.016216130927205086, 
0.1660052388906479], [5.4335410823114216e-05, 0.03367479890584946, 0.004507457371801138, 0.004544241353869438, 0.00623831432312727, 0.002192543353885412, 0.004128816071897745, 0.021106822416186333, 0.0003909784718416631, 0.00830051489174366, 0.018183842301368713, 0.009683135896921158, 0.0325237475335598, 0.00792472343891859, 0.25227075815200806], [0.0006012204103171825, 0.01188816037029028, 0.023532994091510773, 0.00770517997443676, 0.007410787045955658, 0.007087987381964922, 0.021027186885476112, 0.013456426560878754, 0.03266710042953491, 0.001251929672434926, 0.09021235257387161, 0.024440091103315353, 0.024299103766679764, 0.02338516153395176, 0.1967199146747589], [0.0009616355528123677, 0.059039004147052765, 0.04997482895851135, 0.013552234508097172, 0.03981975466012955, 0.020335622131824493, 0.014380398206412792, 0.07606764137744904, 0.07161007821559906, 0.024130970239639282, 0.06891870498657227, 0.0008635766571387649, 0.023193923756480217, 0.02981526218354702, 0.21020111441612244], [0.0013424595817923546, 0.0746709555387497, 0.011544802226126194, 0.027912717312574387, 0.0729047879576683, 0.10483764857053757, 0.07119728624820709, 0.010606798343360424, 0.044552259147167206, 0.05723145231604576, 0.034647323191165924, 0.38214871287345886, 0.003923356998711824, 0.08778946846723557, 0.19581711292266846], [0.0016638260567560792, 0.01581355184316635, 0.08943041414022446, 0.02092832513153553, 0.021133122965693474, 0.012408973649144173, 0.01347691286355257, 0.00275444146245718, 0.027862150222063065, 0.01225491613149643, 0.018322426825761795, 0.008929668925702572, 0.00015579524915665388, 0.0014782899525016546, 0.18181975185871124], [0.0008640239248052239, 0.06174946948885918, 0.004653214477002621, 0.002717669354751706, 0.015129820443689823, 0.00935456808656454, 0.016078660264611244, 0.08089328557252884, 0.017857585102319717, 0.0025031790137290955, 0.00012101473839720711, 0.013123439624905586, 0.005499868653714657, 0.001559562049806118, 0.22764776647090912], [0.0008687095833010972, 0.025285501033067703, 0.01658034697175026, 0.02363765239715576, 0.02393241412937641, 0.0657346174120903, 0.015298763290047646, 0.01792113669216633, 0.021707117557525635, 0.018967296928167343, 0.037634264677762985, 0.013209421187639236, 0.02256513573229313, 0.007774183992296457, 0.15961462259292603], [0.0001073219973477535, 0.04253393039107323, 0.010077103972434998, 0.007349912542849779, 0.00879223458468914, 0.004757148679345846, 0.008167163468897343, 0.03753674402832985, 0.00042728587868623435, 0.014237778261303902, 0.029898250475525856, 0.006872681900858879, 0.045794516801834106, 0.007500257343053818, 0.2562271058559418], [0.0005320480559021235, 0.010701313614845276, 0.020972738042473793, 0.007364482618868351, 0.006165153346955776, 0.00950621161609888, 0.022682208567857742, 0.018515970557928085, 0.03319491446018219, 0.00125269521959126, 0.07773777842521667, 0.022826068103313446, 0.02051766775548458, 0.020874740555882454, 0.1872510462999344], [0.0008804904646240175, 0.05573932081460953, 0.06578188389539719, 0.01897181011736393, 0.043492771685123444, 0.026308609172701836, 0.016426166519522667, 0.09104844927787781, 0.12495335191488266, 0.04637341946363449, 0.0944451242685318, 0.0008321930072270334, 0.03243781998753548, 0.03530845418572426, 0.2013196051120758], [0.001610875129699707, 0.08435038477182388, 0.014167247340083122, 0.03493078798055649, 0.07050123810768127, 0.10772886872291565, 0.09850788861513138, 0.013066386803984642, 0.05027954652905464, 0.10465669631958008, 0.04533415287733078, 0.47037968039512634, 
[... raw nested arrays of floating-point values continue here (apparently per-layer, per-head attention-weight/probability matrices emitted as notebook visualization output); the numeric data carries no prose or recoverable structure and is omitted from this span ...]
0.013304274529218674, 0.0006802850402891636, 0.09522412717342377, 0.0060977875255048275, 0.002369458321481943, 0.017453324049711227, 0.0036190226674079895, 2.9809654733981006e-05, 0.0002128492487827316, 0.0002820969675667584, 0.18610867857933044], [0.6536933779716492, 0.3485175371170044, 0.2007695585489273, 0.8106443881988525, 0.12433423846960068, 0.008092332631349564, 0.6807736158370972, 0.40895989537239075, 0.04516575112938881, 0.1387551873922348, 0.004862201400101185, 0.0003120531910099089, 0.00022667655139230192, 0.00031860917806625366, 0.07640787214040756], [0.08564082533121109, 0.05155009403824806, 0.10021068900823593, 0.5880905985832214, 0.0823356956243515, 0.0626063123345375, 0.7381499409675598, 0.566346287727356, 0.04188016802072525, 0.02469027414917946, 0.004355741199105978, 0.00042968738125637174, 2.4299803044414148e-05, 2.7212277927901596e-05, 0.001896930974908173], [0.03975995257496834, 0.012421448715031147, 0.08890707790851593, 0.605818510055542, 0.05048904940485954, 0.017510779201984406, 0.24702893197536469, 0.39587050676345825, 0.06098005548119545, 0.052625395357608795, 0.013424866832792759, 0.0005194320692680776, 0.000250102486461401, 0.0003063087642658502, 0.0010793216060847044], [0.11902385950088501, 0.011114073917269707, 0.22151720523834229, 0.2006509006023407, 0.03878694027662277, 0.01363028772175312, 0.3268369734287262, 0.04311302676796913, 0.8067907094955444, 0.34777864813804626, 0.25920552015304565, 0.09021251648664474, 0.035271789878606796, 0.0031717135570943356, 0.004271878860890865], [0.006270309444516897, 0.0001492560259066522, 0.00045137249981053174, 0.0007612273329868913, 7.476524478988722e-05, 0.013270817697048187, 0.04344405606389046, 0.014117085374891758, 0.6041488647460938, 0.07304701954126358, 0.010559855960309505, 0.0026350386906415224, 0.02638809196650982, 0.002994539914652705, 0.00020572090579662472], [0.002078789984807372, 0.000502656155731529, 0.00018232718866784126, 0.0008548289188183844, 0.0009249084978364408, 0.02029070071876049, 0.012032798491418362, 0.024348178878426552, 0.2300865352153778, 0.10343841463327408, 0.007660495117306709, 0.0012821657583117485, 0.0114271380007267, 0.0009412667131982744, 7.524124521296471e-05], [0.022463228553533554, 0.0013134862529113889, 0.00013891702110413462, 0.002816978842020035, 0.0011811865260824561, 0.0014538302784785628, 0.0005458829691633582, 0.0004073161107953638, 0.000992793939076364, 0.626685380935669, 0.1310541182756424, 0.1785772740840912, 0.1327074021100998, 0.014590581879019737, 3.459410072537139e-05], [0.004299411084502935, 0.00014757749158889055, 0.0013493087608367205, 0.003552102018147707, 0.004041418433189392, 0.004232631530612707, 0.00022051982523407787, 5.3625211876351386e-05, 0.008671559393405914, 0.2003454566001892, 0.2010745257139206, 0.20048564672470093, 0.327506959438324, 0.12215141952037811, 7.573522452730685e-05], [0.011497906409204006, 0.0014132088981568813, 0.002270179335027933, 0.006387166678905487, 5.5530636018374935e-05, 0.0020248510409146547, 0.0021348590962588787, 0.001147052156738937, 0.0024277162738144398, 0.3687064051628113, 0.5298402905464172, 0.006611559074372053, 0.3372868299484253, 0.2915361225605011, 0.0002606022753752768], [0.043351031839847565, 0.015730101615190506, 0.006545424461364746, 0.11301398277282715, 0.001535893650725484, 0.0002994980022776872, 0.002417969051748514, 0.0027875620871782303, 0.007663458585739136, 0.4366588592529297, 0.29866132140159607, 0.03879629448056221, 0.0005757116014137864, 0.10755223035812378, 0.15693426132202148], [0.05824243649840355, 
0.00918568018823862, 0.004823020659387112, 0.12202360481023788, 0.001364732626825571, 0.009540650062263012, 0.017077280208468437, 0.02250218391418457, 0.031557418406009674, 0.39489659667015076, 0.4118596911430359, 0.4739699363708496, 0.04330656677484512, 0.22410848736763, 0.009354491718113422], [0.10114194452762604, 0.055991608649492264, 0.0056193675845861435, 0.044799599796533585, 0.005612906999886036, 0.0018076150445267558, 0.0035521595273166895, 0.003050913568586111, 0.014126029796898365, 0.18568304181098938, 0.044660091400146484, 0.8178999423980713, 0.12312521040439606, 0.22830259799957275, 0.0015339198289439082], [0.17329555749893188, 0.022842630743980408, 0.03050464764237404, 0.3040459156036377, 0.023058682680130005, 0.05675753578543663, 0.012084487825632095, 0.018060212954878807, 0.012510768137872219, 0.4205268621444702, 0.403047114610672, 0.5196431279182434, 0.14466160535812378, 0.15726853907108307, 0.003281315555796027], [0.21814380586147308, 0.013853680342435837, 0.0011839027283713222, 0.02006133459508419, 0.0059941732324659824, 0.004335244186222553, 0.0006587213138118386, 0.0008069095201790333, 6.766151636838913e-05, 0.4439576268196106, 0.16648612916469574, 0.7347545623779297, 0.19459886848926544, 0.05657987296581268, 0.0006026092451065779], [0.034262340515851974, 0.0017182001611217856, 0.005656392779201269, 0.017169898375868797, 0.0156857930123806, 0.01468763966113329, 0.0007699507405050099, 0.00017933807976078242, 0.002019587904214859, 0.09474337100982666, 0.21286551654338837, 0.39837440848350525, 0.44769343733787537, 0.30061447620391846, 0.0009720441303215921], [0.1974877417087555, 0.05350746586918831, 0.02080627717077732, 0.07140190154314041, 0.0007820951868779957, 0.021851971745491028, 0.023295408114790916, 0.011020028032362461, 0.0015720969531685114, 0.3204348385334015, 0.5890824198722839, 0.011122598312795162, 0.40923523902893066, 0.5521805882453918, 0.009284045547246933], [0.04384012520313263, 0.020103074610233307, 0.00601673498749733, 0.10121199488639832, 0.0015372235793620348, 0.00047879578778520226, 0.0028034253045916557, 0.0035304632037878036, 0.0019347126362845302, 0.15543726086616516, 0.10060140490531921, 0.012154079042375088, 0.00020098914683330804, 0.049742307513952255, 0.15931616723537445], [0.33183732628822327, 0.07794758677482605, 0.02364480309188366, 0.3878714144229889, 0.007764760870486498, 0.055411770939826965, 0.07855504751205444, 0.09397301822900772, 0.02721172571182251, 0.38145557045936584, 0.42047446966171265, 0.5078706741333008, 0.03859835863113403, 0.25985077023506165, 0.0625251829624176], [0.4473247230052948, 0.3730325996875763, 0.029895052313804626, 0.15908104181289673, 0.02762797847390175, 0.008889964781701565, 0.016516737639904022, 0.012883803807199001, 0.01523641124367714, 0.22003965079784393, 0.05771813541650772, 0.8456536531448364, 0.1770154982805252, 0.31127816438674927, 0.007925343699753284], [0.2188224196434021, 0.06026163697242737, 0.01674255169928074, 0.1205059364438057, 0.017392028123140335, 0.033714599907398224, 0.013199009001255035, 0.035441260784864426, 0.006878681946545839, 0.5097362399101257, 0.5390803217887878, 0.7098195552825928, 0.20610427856445312, 0.34404870867729187, 0.06464894115924835]], [[0.24012988805770874, 0.6692726612091064, 0.08029869198799133, 0.41845017671585083, 0.08128808438777924, 0.09738753736019135, 0.15100885927677155, 0.2691691815853119, 0.013517879880964756, 0.21848294138908386, 0.16758716106414795, 0.12734578549861908, 0.32224464416503906, 0.12471552193164825, 0.07385692000389099], [0.13747748732566833, 
0.012865100987255573, 0.3056560158729553, 0.3759651184082031, 0.20075583457946777, 0.056869279593229294, 0.27502477169036865, 0.09038521349430084, 0.09535539150238037, 0.27579623460769653, 0.15189220011234283, 0.6071571111679077, 0.0820951759815216, 0.09481122344732285, 0.09779953956604004], [0.007538634352385998, 0.02957071363925934, 0.011847163550555706, 0.055522944778203964, 0.04100131243467331, 0.031534671783447266, 0.06567902117967606, 0.09044305235147476, 0.007193693891167641, 0.06334451586008072, 0.07378207892179489, 0.07786792516708374, 0.28214019536972046, 0.08070375770330429, 0.20607011020183563], [0.005881547927856445, 0.008371960371732712, 0.010823756456375122, 0.024797217920422554, 0.024142105132341385, 0.01083815935999155, 0.008304014801979065, 0.006388344801962376, 0.009114595130085945, 0.022048065438866615, 0.1306026130914688, 0.23451638221740723, 0.3918500244617462, 0.08784151822328568, 0.2650633752346039], [0.20629070699214935, 0.2529377341270447, 0.028870999813079834, 0.049127642065286636, 0.04690879210829735, 0.11594393104314804, 0.15515393018722534, 0.06585636734962463, 0.0420556403696537, 0.1996643990278244, 0.028717953711748123, 0.7190893292427063, 0.30376943945884705, 0.22654840350151062, 0.12926629185676575], [0.01586613617837429, 0.15566423535346985, 0.015082520432770252, 0.009204044006764889, 0.002680863719433546, 0.07106906920671463, 0.08370621502399445, 0.05749649554491043, 0.03059268370270729, 0.012942377477884293, 0.0011753733269870281, 0.00916373822838068, 0.0020018015056848526, 0.049308281391859055, 0.19197486340999603], [0.03849078342318535, 0.08146823942661285, 0.03517843410372734, 0.025976145640015602, 0.02364599145948887, 0.1389763057231903, 0.02619975060224533, 0.034312427043914795, 0.02985706366598606, 0.029806064441800117, 0.00684476038441062, 0.03280223533511162, 0.030126189813017845, 0.10321015119552612, 0.23163792490959167], [0.2772977352142334, 0.05161405727267265, 0.04358568787574768, 0.047931231558322906, 0.04583681374788284, 0.08128579705953598, 0.15782645344734192, 0.0856042429804802, 0.10767779499292374, 0.11355230212211609, 0.041377030313014984, 0.252811074256897, 0.05780917406082153, 0.19973745942115784, 0.22427907586097717], [0.023119861260056496, 0.02037731558084488, 0.0453791618347168, 0.1060030460357666, 0.006244942545890808, 0.0085020512342453, 0.012060720473527908, 0.014560479670763016, 0.00689319521188736, 0.011241135187447071, 0.023835573345422745, 0.02693312056362629, 0.011436404660344124, 0.019489392638206482, 0.30997538566589355], [0.045414164662361145, 0.005229660775512457, 0.011418518610298634, 0.009312640875577927, 0.0002147085906472057, 0.12653864920139313, 0.05854451283812523, 0.11896014213562012, 0.0156405046582222, 0.010270207189023495, 0.0032450463622808456, 0.015787174925208092, 0.011106730438768864, 0.007675709668546915, 0.3779195249080658], [0.007367350626736879, 0.012884993106126785, 0.01019106525927782, 0.011957473121583462, 0.054886650294065475, 0.09750530868768692, 0.029414953663945198, 0.08492925018072128, 0.17440666258335114, 0.003643231000751257, 0.00105402956251055, 0.02280060388147831, 0.0010922637302428484, 0.005130939185619354, 0.09500079602003098], [0.02996714971959591, 0.028387926518917084, 0.16122521460056305, 0.0898616760969162, 0.06381779164075851, 0.20551051199436188, 0.13175098598003387, 0.562389075756073, 0.04834860563278198, 0.013581722043454647, 0.03991095721721649, 0.10736902058124542, 0.03830268979072571, 0.05736052244901657, 0.27213579416275024], [0.03571658954024315, 0.012061648070812225, 
0.08574458211660385, 0.022463832050561905, 0.12578466534614563, 0.07826194912195206, 0.06577891856431961, 0.13274507224559784, 0.06591502577066422, 0.05002211779356003, 0.03129255399107933, 0.27911075949668884, 0.31601372361183167, 0.10930214822292328, 0.30993908643722534], [0.04630875587463379, 0.03141915798187256, 0.03061339072883129, 0.007028677500784397, 0.008451082743704319, 0.02540888637304306, 0.012118873186409473, 0.09331455826759338, 0.0033372503239661455, 0.01357665192335844, 0.0069510783068835735, 0.017483821138739586, 0.033454760909080505, 0.014270796440541744, 0.44127020239830017], [0.1722828894853592, 0.15122008323669434, 0.056102070957422256, 0.09136570990085602, 0.02421834133565426, 0.045343294739723206, 0.034619707614183426, 0.030837759375572205, 0.019798463210463524, 0.04411705583333969, 0.05331422761082649, 0.09423463046550751, 0.1436629444360733, 0.13433872163295746, 0.1229754090309143], [0.022473091259598732, 0.0489150770008564, 0.010993139818310738, 0.03897916153073311, 0.003662768052890897, 0.002051829593256116, 0.0037445707712322474, 0.016557298600673676, 0.014907213859260082, 0.004300208762288094, 0.004852794576436281, 0.0027131394017487764, 0.016001524403691292, 0.008091894909739494, 0.25544992089271545], [0.08012817800045013, 0.2898695766925812, 0.022246699780225754, 0.06057273969054222, 0.025327028706669807, 0.02957070618867874, 0.04002644121646881, 0.019245512783527374, 0.01995179057121277, 0.020330116152763367, 0.006697094067931175, 0.015452835708856583, 0.014569609425961971, 0.04013357311487198, 0.2585589587688446], [0.01832924410700798, 0.023918962106108665, 0.024782713502645493, 0.033514510840177536, 0.050549402832984924, 0.013098560273647308, 0.023091215640306473, 0.030541786924004555, 0.1064886748790741, 0.006106832530349493, 0.0024854408111423254, 0.018918434157967567, 0.0075035663321614265, 0.009370497427880764, 0.21452490985393524], [0.027254067361354828, 0.020437292754650116, 0.14233240485191345, 0.08538791537284851, 0.03242940828204155, 0.0897425189614296, 0.08476056158542633, 0.2620556950569153, 0.02126460149884224, 0.023079702630639076, 0.03143052011728287, 0.04489685967564583, 0.046720463782548904, 0.03604652360081673, 0.23038896918296814], [0.042377930134534836, 0.017293933779001236, 0.08730384707450867, 0.030179454013705254, 0.12187745422124863, 0.05139933153986931, 0.047754548490047455, 0.066692054271698, 0.06521614640951157, 0.05196157470345497, 0.028108397498726845, 0.17703385651111603, 0.22747749090194702, 0.06955988705158234, 0.28824013471603394], [0.03372317552566528, 0.030876630917191505, 0.025082340463995934, 0.008588657714426517, 0.007454049773514271, 0.009771045297384262, 0.010381288826465607, 0.041183773428201675, 0.004549690056592226, 0.01619204692542553, 0.0060179769061505795, 0.009672058746218681, 0.022905999794602394, 0.009750566445291042, 0.30946746468544006], [0.18900562822818756, 0.14908763766288757, 0.05840699374675751, 0.10216160118579865, 0.03072887472808361, 0.04109037667512894, 0.03799780085682869, 0.02909342385828495, 0.03500371053814888, 0.0757574513554573, 0.061073921620845795, 0.09956928342580795, 0.10441071540117264, 0.14136889576911926, 0.13095542788505554], [0.014150185510516167, 0.03789284825325012, 0.007744992151856422, 0.02556411363184452, 0.0037681234534829855, 0.001123085618019104, 0.002939486177638173, 0.010072565637528896, 0.019109029322862625, 0.003645692951977253, 0.0027771664317697287, 0.002490789396688342, 0.007166225463151932, 0.005180294159799814, 0.2058444321155548], [0.0469474196434021, 
0.1743137687444687, 0.021908296272158623, 0.046387769281864166, 0.02985612489283085, 0.019742406904697418, 0.040140021592378616, 0.01437240932136774, 0.02856219932436943, 0.018488112837076187, 0.004136314615607262, 0.01038376335054636, 0.009851893410086632, 0.026245350018143654, 0.22488054633140564], [0.00832295510917902, 0.021339448168873787, 0.00394090311601758, 0.002333499025553465, 0.05547437444329262, 0.007243151310831308, 0.011641105636954308, 0.0331541933119297, 0.010278979316353798, 0.011881710961461067, 0.001766148954629898, 0.04899042472243309, 0.01878243498504162, 0.01244808267802, 0.15685127675533295]]], [[[0.04773104563355446, 0.01963546872138977, 0.16452182829380035, 0.04063690826296806, 0.1849776655435562, 0.08088860660791397, 0.11659693717956543, 0.038044340908527374, 0.2744975686073303, 0.003083554795011878, 0.019721103832125664, 0.08137688785791397, 0.0169991385191679, 0.03939461708068848, 0.14168404042720795], [0.09676018357276917, 0.018249453976750374, 0.657112717628479, 0.5890088677406311, 0.5712416768074036, 0.2744671702384949, 0.48642322421073914, 0.26345524191856384, 0.23708243668079376, 0.03475205600261688, 0.15204745531082153, 0.0676480308175087, 0.050043635070323944, 0.0665324404835701, 0.036993421614170074], [0.04065309092402458, 0.0025235058274120092, 0.11838234961032867, 0.27863210439682007, 0.37560757994651794, 0.7046668529510498, 0.12516380846500397, 0.1912177950143814, 0.14992743730545044, 0.05949303135275841, 0.056387268006801605, 0.04353337734937668, 0.17471297085285187, 0.07017815858125687, 0.12025584280490875], [0.015422305092215538, 0.000844803755171597, 0.015767300501465797, 0.11098357290029526, 0.273564875125885, 0.3235251009464264, 0.14805495738983154, 0.17132841050624847, 0.25568780303001404, 0.034506767988204956, 0.046862825751304626, 0.03818853572010994, 0.025031423196196556, 0.027911247685551643, 0.009120252914726734], [0.01866327039897442, 0.11290711164474487, 0.007440958172082901, 0.031009642407298088, 0.059622399508953094, 0.035299621522426605, 0.012064317241311073, 0.17540854215621948, 0.06399405747652054, 0.010346408933401108, 0.023967623710632324, 0.006549614481627941, 0.015476463362574577, 0.017944032326340675, 0.15624091029167175], [0.115133136510849, 0.5564319491386414, 0.0024013265501707792, 0.014839398674666882, 0.027623601257801056, 0.003712957026436925, 0.11139625310897827, 0.4320802688598633, 0.18111301958560944, 0.025198934599757195, 0.05914938822388649, 0.029404014348983765, 0.1131783202290535, 0.1630096137523651, 0.14384765923023224], [0.047323077917099, 0.01987922191619873, 0.021367410197854042, 0.0816798061132431, 0.11104802042245865, 0.01310664601624012, 0.37855657935142517, 0.16697411239147186, 0.31461480259895325, 0.04616151005029678, 0.27547621726989746, 0.04939346760511398, 0.02232075110077858, 0.15515512228012085, 0.01579722762107849], [0.13229456543922424, 0.031869739294052124, 0.26943540573120117, 0.2586674690246582, 0.3796730637550354, 0.127562016248703, 0.20277942717075348, 0.05910756066441536, 0.14354895055294037, 0.08293455094099045, 0.2214740365743637, 0.23150987923145294, 0.18035069108009338, 0.2860051393508911, 0.07895194739103317], [0.09224988520145416, 0.07457923144102097, 0.05282874405384064, 0.09438028931617737, 0.06849074363708496, 0.012997711077332497, 0.007214613724499941, 0.004257954657077789, 0.2309093326330185, 0.38276976346969604, 0.5917518734931946, 0.7830951809883118, 0.8438952565193176, 0.7586230039596558, 0.04145537316799164], [0.014161140657961369, 0.027171263471245766, 0.0029068312142044306, 
0.020549731329083443, 0.0005743438960053027, 0.00417140731588006, 0.003657599212601781, 0.00956815481185913, 0.34446486830711365, 0.5171273946762085, 0.39057764410972595, 0.2845093309879303, 0.1669711321592331, 0.5306525230407715, 0.015455210581421852], [0.02566671371459961, 0.00907080341130495, 0.0006065603229217231, 0.03001752682030201, 0.00023783017240930349, 0.0005533608491532505, 0.013808660209178925, 0.003767948364838958, 0.06461481004953384, 0.1359771490097046, 0.08153439313173294, 0.572087287902832, 0.36045318841934204, 0.44234389066696167, 0.0030113777611404657], [0.03087739646434784, 0.012099061161279678, 0.004942088853567839, 0.038267359137535095, 0.0023591304197907448, 0.0037323227152228355, 0.04966888204216957, 0.012427400797605515, 0.16158415377140045, 0.020882699638605118, 0.05600592866539955, 0.367767333984375, 0.24262923002243042, 0.38281354308128357, 0.00973587203770876], [0.04249054566025734, 0.0069285486824810505, 0.006088858004659414, 0.044397544115781784, 0.05390672758221626, 0.006144464481621981, 0.018320903182029724, 0.01545354351401329, 0.05193139612674713, 0.03221629932522774, 0.02379259280860424, 0.27246853709220886, 0.22103002667427063, 0.23179520666599274, 0.005589436274021864], [0.04184036701917648, 0.03700190782546997, 0.008264865726232529, 0.02439146116375923, 0.00799429602921009, 0.12502151727676392, 0.05032283812761307, 0.18101848661899567, 0.07329469919204712, 0.08409427851438522, 0.10790428519248962, 0.011960207484662533, 0.20496119558811188, 0.19276422262191772, 0.0069670299999415874], [0.06364590674638748, 0.06483624875545502, 0.015260975807905197, 0.1278582364320755, 0.006228389218449593, 0.02756887674331665, 0.020600903779268265, 0.015440343879163265, 0.018087223172187805, 0.017098410055041313, 0.025406692177057266, 0.0007098353235051036, 0.00014885497512295842, 0.0013503700029104948, 0.15608660876750946], [0.6220619678497314, 0.6306124329566956, 0.6737340092658997, 0.49940165877342224, 0.1517823040485382, 0.8503586649894714, 0.705633282661438, 0.6629571914672852, 0.11157920956611633, 0.39899003505706787, 0.3173867464065552, 0.027327625080943108, 0.014980590902268887, 0.009274562820792198, 0.08523338288068771], [0.15005189180374146, 0.04609784111380577, 0.17501141130924225, 0.21113994717597961, 0.26919078826904297, 0.6422000527381897, 0.7493206858634949, 0.2162598967552185, 0.010351919569075108, 0.09728528559207916, 0.09688232094049454, 0.028558582067489624, 0.10305432975292206, 0.05914681404829025, 0.11260810494422913], [0.09041088819503784, 0.052050016820430756, 0.08856991678476334, 0.2977358102798462, 0.04025371000170708, 0.3506464660167694, 0.6434463858604431, 0.25059518218040466, 0.01933867670595646, 0.04819375276565552, 0.07508239895105362, 0.04970608279109001, 0.02890131063759327, 0.02355407178401947, 0.12558245658874512], [0.18765486776828766, 0.021713200956583023, 0.21844394505023956, 0.3042432367801666, 0.17823228240013123, 0.1673380434513092, 0.8088975548744202, 0.46762967109680176, 0.05706785246729851, 0.009645337238907814, 0.0322297103703022, 0.09777479618787766, 0.08048812299966812, 0.10106904059648514, 0.17228879034519196], [0.4792143702507019, 0.09839366376399994, 0.1882246881723404, 0.4093988239765167, 0.7147246599197388, 0.24897223711013794, 0.4705742597579956, 0.4205995500087738, 0.01958448253571987, 0.026842152699828148, 0.02239188365638256, 0.15106931328773499, 0.08969185501337051, 0.10003618896007538, 0.1635625958442688], [0.40625429153442383, 0.3796224594116211, 0.2515096962451935, 0.36165565252304077, 0.24774380028247833, 
0.8824228644371033, 0.8048573136329651, 0.857955813407898, 0.058371078222990036, 0.07109472155570984, 0.11402199417352676, 0.0021524245385080576, 0.019929109141230583, 0.030590593814849854, 0.11712031066417694], [0.04390633478760719, 0.032843075692653656, 0.010515165515244007, 0.11869800090789795, 0.005461697466671467, 0.023131608963012695, 0.01705162413418293, 0.008547519333660603, 0.003713170997798443, 0.008410640992224216, 0.009457322768867016, 0.00015943740436341614, 3.361727431183681e-05, 0.0002994383394252509, 0.1532706469297409], [0.6348351836204529, 0.5127235651016235, 0.5931673645973206, 0.5543242692947388, 0.12377271056175232, 0.8264753222465515, 0.6941898465156555, 0.5687963962554932, 0.03150533139705658, 0.12843358516693115, 0.11884576827287674, 0.005231617949903011, 0.0018767286092042923, 0.0011644444894045591, 0.11210005730390549], [0.10790421068668365, 0.016916295513510704, 0.09771728515625, 0.22749783098697662, 0.26325535774230957, 0.49138790369033813, 0.6275916695594788, 0.08931886404752731, 0.0033968419302254915, 0.024402111768722534, 0.018104346469044685, 0.003288157982751727, 0.010537534020841122, 0.006979967001825571, 0.12102893739938736], [0.028179557994008064, 0.011468129232525826, 0.016789404675364494, 0.00803140178322792, 0.00952040497213602, 0.02960360422730446, 0.24957160651683807, 0.03544437885284424, 0.005487674381583929, 0.0028927521780133247, 0.005656986031681299, 0.0040698484517633915, 0.04730471968650818, 0.0667993351817131, 0.1372966766357422]], [[0.11859580129384995, 0.07486707717180252, 0.21083025634288788, 0.32276296615600586, 0.08426652103662491, 0.03581860288977623, 0.24113436043262482, 0.608397364616394, 0.13584911823272705, 0.45509204268455505, 0.594833254814148, 0.30372148752212524, 0.8448506593704224, 0.7470672726631165, 0.09252076596021652], [0.04140070080757141, 0.00858838576823473, 0.11639615148305893, 0.1280786097049713, 0.2722368836402893, 0.21025919914245605, 0.4195333421230316, 0.631318211555481, 0.6560773253440857, 0.29341432452201843, 0.6862512230873108, 0.7675639986991882, 0.8915717005729675, 0.8601328730583191, 0.23356862366199493], [0.23441848158836365, 0.1666196584701538, 0.16664288938045502, 0.25857093930244446, 0.13334479928016663, 0.17917701601982117, 0.8257887363433838, 0.7395779490470886, 0.6802234053611755, 0.8125103712081909, 0.671615719795227, 0.8831866383552551, 0.6773648858070374, 0.7102506160736084, 0.08689045161008835], [0.24967892467975616, 0.48421844840049744, 0.036505091935396194, 0.17128480970859528, 0.01777578890323639, 0.09479225426912308, 0.36135032773017883, 0.0868472084403038, 0.16740600764751434, 0.523710310459137, 0.24439233541488647, 0.42307958006858826, 0.6259368062019348, 0.3662186563014984, 0.20058651268482208], [0.28931790590286255, 0.4439229369163513, 0.24370647966861725, 0.6020305752754211, 0.17363131046295166, 0.338454008102417, 0.5701692700386047, 0.33999428153038025, 0.68463534116745, 0.8701388239860535, 0.7831944823265076, 0.9611375331878662, 0.9679895043373108, 0.9072677493095398, 0.0468842089176178], [0.1225743219256401, 0.062406159937381744, 0.03387807682156563, 0.02868799865245819, 0.01787530817091465, 0.04143121838569641, 0.5920179486274719, 0.08798510581254959, 0.2968905568122864, 0.7129084467887878, 0.4609105885028839, 0.29060137271881104, 0.7909923791885376, 0.5701599717140198, 0.13614380359649658], [0.0705394446849823, 0.02209068462252617, 0.0211530439555645, 0.008882923051714897, 0.0033682750072330236, 0.08319123089313507, 0.11070933192968369, 0.0025125632528215647, 0.10380591452121735, 
0.17744502425193787, 0.10391969978809357, 0.12427430599927902, 0.5562515258789062, 0.49710196256637573, 0.3223192095756531], [0.15847322344779968, 0.015464702621102333, 0.13866224884986877, 0.053395166993141174, 0.03494010120630264, 0.13738934695720673, 0.02684560976922512, 0.03214175999164581, 0.5759801864624023, 0.1755424290895462, 0.13409779965877533, 0.035038210451602936, 0.6489107012748718, 0.4460716247558594, 0.4074119031429291], [0.00857736449688673, 0.012718217447400093, 0.01174219325184822, 0.012934550642967224, 0.006551709491759539, 0.24597492814064026, 0.030029013752937317, 0.05923602730035782, 0.04650798439979553, 0.02447274886071682, 0.019859377294778824, 0.003505804343149066, 0.04937520623207092, 0.05625420808792114, 0.28037816286087036], [0.0015372766647487879, 0.015295127406716347, 0.018696704879403114, 0.004789609462022781, 0.19481690227985382, 0.04769033566117287, 0.01355075929313898, 0.02196505106985569, 0.08700259774923325, 0.020393503829836845, 0.02400771528482437, 0.18789233267307281, 0.15418098866939545, 0.08713112771511078, 0.19334079325199127], [0.04759770259261131, 0.04375501722097397, 0.02714523859322071, 0.05194481834769249, 0.05246514454483986, 0.14355513453483582, 0.17152011394500732, 0.14246520400047302, 0.1098044142127037, 0.013531663455069065, 0.008927365764975548, 0.03807468339800835, 0.10050502419471741, 0.02236531302332878, 0.3381733298301697], [0.10647730529308319, 0.04246760904788971, 0.08123224973678589, 0.13003453612327576, 0.07854175567626953, 0.24148082733154297, 0.6790831685066223, 0.7492273449897766, 0.28685522079467773, 0.03681188449263573, 0.15954196453094482, 0.2672117054462433, 0.11099980026483536, 0.04468434303998947, 0.4826459586620331], [0.2962004542350769, 0.47284576296806335, 0.11245852708816528, 0.23689918220043182, 0.10807513445615768, 0.8532499074935913, 0.5788733959197998, 0.6375027894973755, 0.33168625831604004, 0.06381742656230927, 0.004373080097138882, 0.015940984711050987, 0.3371734917163849, 0.06828418374061584, 0.21185840666294098], [0.3828115463256836, 0.12613584101200104, 0.47516295313835144, 0.4473835527896881, 0.17031393945217133, 0.6938255429267883, 0.7945614457130432, 0.34594833850860596, 0.5323623418807983, 0.34808266162872314, 0.11382761597633362, 0.1349307745695114, 0.013382190838456154, 0.0600610226392746, 0.30783677101135254], [0.7362364530563354, 0.8323087096214294, 0.9336822032928467, 0.7739728689193726, 0.8897883296012878, 0.9609381556510925, 0.9334329962730408, 0.9553548693656921, 0.7747710943222046, 0.4005538523197174, 0.5586770176887512, 0.25099167227745056, 0.4200068712234497, 0.1631680577993393, 0.06528117507696152], [0.07449624687433243, 0.061402805149555206, 0.09389828145503998, 0.048646457493305206, 0.024208296090364456, 0.10819891840219498, 0.10563155263662338, 0.1243496686220169, 0.048523951321840286, 0.14693649113178253, 0.06614942103624344, 0.0066792843863368034, 0.2858017086982727, 0.04383772611618042, 0.15409637987613678], [0.02467108517885208, 0.049052223563194275, 0.08135215938091278, 0.013768618926405907, 0.01176412496715784, 0.15210841596126556, 0.004693970084190369, 0.0041237217374145985, 0.018837640061974525, 0.03490369766950607, 0.036496780812740326, 0.0011750683188438416, 0.018557026982307434, 0.02382473833858967, 0.22122804820537567], [0.012043171562254429, 0.03080524504184723, 0.02248452790081501, 0.008785543963313103, 0.00550604984164238, 0.05614035204052925, 0.015958979725837708, 0.01727765053510666, 0.03423915058374405, 0.017799094319343567, 0.029912255704402924, 0.01144923735409975, 
0.09533664584159851, 0.02436906285583973, 0.20283196866512299], [0.01959865354001522, 0.003073114436119795, 0.06498773396015167, 0.027286570519208908, 0.019540993496775627, 0.052237618714571, 0.08713454008102417, 0.28957968950271606, 0.3906492590904236, 0.044482238590717316, 0.17143161594867706, 0.1301742047071457, 0.10445850342512131, 0.03699616342782974, 0.2442801147699356], [0.11208802461624146, 0.11668127030134201, 0.09828943759202957, 0.10754654556512833, 0.015885351225733757, 0.38998937606811523, 0.183034285902977, 0.3230077624320984, 0.20506803691387177, 0.08733018487691879, 0.007069121580570936, 0.010435528121888638, 0.30221423506736755, 0.047303054481744766, 0.19994190335273743], [0.1682588905096054, 0.051582805812358856, 0.4415716230869293, 0.2735750675201416, 0.07878735661506653, 0.06776249408721924, 0.15038572251796722, 0.03211068734526634, 0.6709542274475098, 0.37688353657722473, 0.1879340261220932, 0.04096703231334686, 0.011627858504652977, 0.03471425548195839, 0.19384095072746277], [0.8205305933952332, 0.9214023947715759, 0.9559677839279175, 0.7988566160202026, 0.9105063080787659, 0.9672437906265259, 0.9506043195724487, 0.9735420346260071, 0.9064961075782776, 0.6156813502311707, 0.6370130777359009, 0.18943972885608673, 0.3681671619415283, 0.1194160059094429, 0.08283783495426178], [0.10534824430942535, 0.08027994632720947, 0.1381307989358902, 0.07063161581754684, 0.01806548424065113, 0.10409632325172424, 0.12885765731334686, 0.2072904407978058, 0.09267445653676987, 0.23836983740329742, 0.11645739525556564, 0.006059943698346615, 0.1595546454191208, 0.017974214628338814, 0.14464683830738068], [0.026579611003398895, 0.02949470281600952, 0.04954056441783905, 0.017031243070960045, 0.008355016820132732, 0.09075918793678284, 0.0036468924954533577, 0.0022332987282425165, 0.050134338438510895, 0.049380820244550705, 0.028885982930660248, 0.0007559077348560095, 0.015549316070973873, 0.013319555670022964, 0.1734825074672699], [0.05047497898340225, 0.027197130024433136, 0.11470095813274384, 0.007973222993314266, 0.12679167091846466, 0.4866730570793152, 0.17132264375686646, 0.15032453835010529, 0.14889459311962128, 0.01696154847741127, 0.0735161080956459, 0.0034290377516299486, 0.05194668471813202, 0.06144191324710846, 0.13309471309185028]], [[0.005987181328237057, 0.0011158415582031012, 0.0026756690349429846, 0.0011391430161893368, 0.0021053741220384836, 0.0005449134623631835, 0.0017384873935952783, 0.000736464629881084, 0.00014482461847364902, 0.0008784460369497538, 0.0008941806154325604, 0.0009559267782606184, 0.00015614555741194636, 0.00044419756159186363, 0.16329224407672882], [0.3448674976825714, 0.07203025370836258, 0.011963781900703907, 0.012941744178533554, 0.011539866216480732, 0.003333584638312459, 0.005511423572897911, 0.0016478801844641566, 0.003020848147571087, 0.006189296022057533, 0.0020935258362442255, 0.00048376841004937887, 8.994764357339591e-05, 0.00040787423495203257, 0.2113737165927887], [0.44219815731048584, 0.8124432563781738, 0.1900549679994583, 0.3808274269104004, 0.045300956815481186, 0.024617541581392288, 0.0172295980155468, 0.03488133102655411, 0.004235385917127132, 0.05999733507633209, 0.03787413239479065, 0.0011567235924303532, 0.0017442036187276244, 0.008845857344567776, 0.004224383272230625], [0.07874103635549545, 0.02866651676595211, 0.3287397623062134, 0.27984437346458435, 0.10563887655735016, 0.003691220423206687, 0.005916049238294363, 0.0007406381191685796, 0.0005066083394922316, 0.0481056272983551, 0.029072491452097893, 0.000652547983918339, 
0.0003529583918862045, 0.0009863339364528656, 0.002192106796428561], [0.030638281255960464, 0.02597089111804962, 0.6577842831611633, 0.16596756875514984, 0.48041173815727234, 0.6114144921302795, 0.028207998722791672, 0.053615398705005646, 0.1417267620563507, 0.03454216569662094, 0.023575417697429657, 0.004873087164014578, 0.0009616028983145952, 0.00223313900642097, 0.0011337294708937407], [0.29477018117904663, 0.14754106104373932, 0.8534399271011353, 0.9182198643684387, 0.6083860993385315, 0.9389832019805908, 0.12579986453056335, 0.03590020909905434, 0.012173496186733246, 0.16479530930519104, 0.15366923809051514, 0.0035958383232355118, 0.002988115418702364, 0.026292480528354645, 0.0003885648038703948], [0.2897806465625763, 0.01695333980023861, 0.6714832782745361, 0.4471692144870758, 0.24303969740867615, 0.15563154220581055, 0.008645682595670223, 0.0004950988804921508, 0.0001695932005532086, 0.13566477596759796, 0.030448369681835175, 0.00021736785129178315, 9.297585347667336e-05, 0.0014399208594113588, 5.083655923954211e-05], [0.1102917492389679, 0.0027466323226690292, 0.13646264374256134, 0.07094646990299225, 0.17040857672691345, 0.6033481955528259, 0.41631338000297546, 0.013031017035245895, 0.00012492973473854363, 0.005976412910968065, 0.0002816450723912567, 4.682707003667019e-05, 0.00021861463028471917, 0.00019605428678914905, 0.001022772048600018], [0.7042187452316284, 0.49455204606056213, 0.43194010853767395, 0.7080989480018616, 0.382207989692688, 0.06800723820924759, 0.48792970180511475, 0.12651333212852478, 0.0012585417134687304, 0.07895761728286743, 0.01729964278638363, 0.0006471746601164341, 0.00013743228919338435, 0.00039039706462062895, 0.00010207234299741685], [0.5233215093612671, 0.7835124135017395, 0.3596530258655548, 0.5502080917358398, 0.589034378528595, 0.24138878285884857, 0.4714515507221222, 0.13250088691711426, 0.08884716778993607, 0.06473898142576218, 0.12478159368038177, 0.001717525301501155, 0.01358798798173666, 0.004862584639340639, 0.0004225081647746265], [0.0975094586610794, 0.14095744490623474, 0.009511731564998627, 0.03128954395651817, 0.01951521448791027, 0.0017430862644687295, 0.033708807080984116, 0.009512575343251228, 0.3042309582233429, 0.0025639990344643593, 0.0006334132049232721, 2.5987004846683703e-05, 0.0001574041525600478, 1.1997842193522956e-05, 1.5690195141360164e-05], [0.536220133304596, 0.12877297401428223, 0.013534938916563988, 0.13534405827522278, 0.015604051761329174, 0.0035537974908947945, 0.02344023622572422, 0.008398037403821945, 0.2580391466617584, 0.2587551474571228, 0.014949243515729904, 0.0010696486569941044, 0.00046315763029269874, 0.0013398011215031147, 8.422375685768202e-05], [0.028944578021764755, 0.013114584609866142, 0.0438210591673851, 0.05079193785786629, 0.03694206848740578, 0.0008442872785963118, 0.0030779552180320024, 0.002579997293651104, 0.01023491844534874, 0.21445545554161072, 0.2806929349899292, 0.00855539832264185, 0.03333647921681404, 0.06091907247900963, 1.9560096916393377e-05], [0.0058769844472408295, 0.06350620836019516, 0.003568005282431841, 0.0076079596765339375, 0.0037217612843960524, 0.004286385141313076, 0.03584115207195282, 0.14617407321929932, 0.0030082303564995527, 0.12143123894929886, 0.0793885663151741, 0.1555183082818985, 0.14442139863967896, 0.29275521636009216, 7.129996811272576e-05], [0.034930020570755005, 0.09419079124927521, 0.0127689428627491, 0.008763227611780167, 0.0065171802416443825, 0.008632887154817581, 0.02612082101404667, 0.02043459191918373, 0.0836663544178009, 0.5329904556274414, 
0.3228733241558075, 0.7184357047080994, 0.5793755650520325, 0.783859133720398, 0.0001531920424895361], [0.0009532110998407006, 0.0024861039128154516, 7.189704774646088e-05, 0.00014637503772974014, 2.8552024105010787e-06, 3.0342853278853e-05, 0.0007709002820774913, 0.0005337693146429956, 6.919851330167148e-06, 0.02619163505733013, 0.02381032705307007, 0.008668542839586735, 0.39639002084732056, 0.7824769616127014, 1.1539431170604075e-06], [0.02785377763211727, 0.15845024585723877, 0.19323119521141052, 0.06543393433094025, 0.014044036157429218, 0.040286585688591, 0.07583826035261154, 0.6567350029945374, 0.004159754142165184, 0.35265031456947327, 0.6287637948989868, 0.12951745092868805, 0.32439297437667847, 0.653313934803009, 0.0008144593448378146], [0.02927210181951523, 0.04805546626448631, 0.295967698097229, 0.060625556856393814, 0.014990724623203278, 0.10397231578826904, 0.12186732143163681, 0.5237559080123901, 0.0203724168241024, 0.43874940276145935, 0.4409005343914032, 0.09095493704080582, 0.5531511306762695, 0.5263633728027344, 0.0002321143983863294], [0.5664732456207275, 0.02422192506492138, 0.3148367702960968, 0.37531769275665283, 0.06290365755558014, 0.02708868682384491, 0.03764869272708893, 0.06476183980703354, 0.09221415221691132, 0.3172641098499298, 0.088014617562294, 0.02202794700860977, 0.004314645659178495, 0.0619816817343235, 0.0017959593096747994], [0.04828598350286484, 0.01127469539642334, 0.1758044958114624, 0.0725238099694252, 0.01880812831223011, 0.003422890789806843, 0.0039800796657800674, 0.008112750947475433, 0.0007020575576461852, 0.0960424467921257, 0.3098883628845215, 0.03193678706884384, 0.03351299837231636, 0.2577627897262573, 0.0005041947006247938], [0.008833246305584908, 0.03231082111597061, 0.009648996405303478, 0.01135926228016615, 0.004257569555193186, 0.002696139505133033, 0.026390861719846725, 0.07894735038280487, 0.0002903220884036273, 0.05877671018242836, 0.0971919596195221, 0.32856324315071106, 0.08294347673654556, 0.6861463785171509, 0.00047716210247017443], [0.020260397344827652, 0.03928471356630325, 0.012783887796103954, 0.0091601787135005, 0.005565040744841099, 0.007968534715473652, 0.020862603560090065, 0.012279938906431198, 0.01832268387079239, 0.3204420506954193, 0.28696081042289734, 0.7937509417533875, 0.6314787864685059, 0.8277974724769592, 0.00014348741387948394], [0.00497927563264966, 0.011739314533770084, 0.0009416648535989225, 0.0009133343119174242, 2.0598932678694837e-05, 0.00024278588534798473, 0.00463896244764328, 0.0027787971775978804, 1.9694551156135276e-05, 0.026842234656214714, 0.05824153125286102, 0.023767979815602303, 0.7019069194793701, 0.8979114294052124, 1.5536308637820184e-05], [0.06832221150398254, 0.18812543153762817, 0.5426309108734131, 0.237625390291214, 0.041615329682826996, 0.11611851304769516, 0.16301436722278595, 0.827357828617096, 0.011619587428867817, 0.35340800881385803, 0.8248108625411987, 0.22083298861980438, 0.4978465139865875, 0.8379470109939575, 0.008811386302113533], [0.7676634788513184, 0.8615484237670898, 0.768317461013794, 0.9594964981079102, 0.36958935856819153, 0.4649639129638672, 0.5634418725967407, 0.8043064475059509, 0.6601962447166443, 0.9397303462028503, 0.8348119258880615, 0.9867405295372009, 0.7646960020065308, 0.8154686689376831, 0.03640103340148926]], [[0.5194346308708191, 0.08715501427650452, 0.09860441088676453, 0.08100719004869461, 0.11848669499158859, 0.14280925691127777, 0.19592297077178955, 0.1196337640285492, 0.2793996334075928, 0.0691760703921318, 0.09539081901311874, 0.05545644089579582, 
0.02620256133377552, 0.03735822066664696, 0.09928011149168015], [0.002687783446162939, 0.2585922181606293, 0.004556892905384302, 0.0005560630816034973, 0.0013625096762552857, 0.000865808455273509, 2.095674426527694e-05, 0.013363445177674294, 1.4331720194604713e-05, 0.00023233501997310668, 0.013212678954005241, 0.00027388104354031384, 2.99917119264137e-05, 5.10126119479537e-05, 0.0653858631849289], [0.010489544831216335, 0.001751396106556058, 0.2775154411792755, 0.0030420231632888317, 0.08156438916921616, 0.0006471106316894293, 1.7804295566747896e-05, 0.00014657371502835304, 0.00035265504266135395, 0.00129506376106292, 0.018553601577878, 0.0019669390749186277, 0.009056665003299713, 0.05091148242354393, 0.1541917622089386], [0.0025869093369692564, 0.008571458049118519, 0.38431695103645325, 0.030530055984854698, 0.03365315869450569, 0.005854337941855192, 0.00010941662185359746, 4.1041937947738916e-05, 0.000364075880497694, 0.0011989381164312363, 0.014197473414242268, 0.0010815636487677693, 0.0004893331206403673, 0.0013785242335870862, 0.011478900909423828], [0.20589935779571533, 0.03613102436065674, 0.009011336602270603, 0.09399610757827759, 0.042497485876083374, 0.000576009857468307, 0.0040712482295930386, 0.00162220629863441, 0.00015305644774343818, 0.0034409475047141314, 0.025435233488678932, 2.175084773625713e-05, 1.0188268788624555e-05, 5.634217450278811e-05, 0.160919189453125], [0.00994176883250475, 0.015379102900624275, 0.000435269670560956, 0.004355194512754679, 0.002023787936195731, 4.86412636746536e-06, 0.0007220985717140138, 0.0004895065212622285, 0.0005591813242062926, 0.009127096273005009, 0.023014724254608154, 0.0003639610658865422, 3.1703839340480044e-05, 0.00036040451959706843, 0.1469942033290863], [0.31647789478302, 0.5689504742622375, 0.010991040617227554, 0.29046669602394104, 0.008814695291221142, 0.008600234054028988, 0.094898521900177, 0.02089405618607998, 0.005384301766753197, 0.1224634200334549, 0.2525540888309479, 0.011421876028180122, 9.89354812190868e-05, 0.00020726426737383008, 0.3419104218482971], [0.006757077760994434, 0.1354868859052658, 0.002759847091510892, 0.009205225855112076, 0.0038083188701421022, 0.0014255000278353691, 0.0007299972930923104, 0.2051592320203781, 0.00020230394147802144, 0.001623967313207686, 0.006681961473077536, 0.0021689198911190033, 5.557909025810659e-05, 0.000162289768923074, 0.20840437710285187], [0.010027364827692509, 0.02789497748017311, 0.0041139991953969, 0.012661347165703773, 0.0013435317669063807, 0.0034407242201268673, 0.0064836894161999226, 0.007366063538938761, 0.29601985216140747, 0.053567804396152496, 0.040060218423604965, 0.004607491660863161, 0.00018677859043236822, 3.186250978615135e-05, 0.10952453315258026], [0.19971387088298798, 0.012958711944520473, 0.001638519112020731, 0.17775660753250122, 0.0022716999519616365, 0.03685721755027771, 0.06948257982730865, 0.005452410783618689, 0.037147630006074905, 0.19678887724876404, 0.21911752223968506, 0.02466990426182747, 0.0004891769494861364, 6.33890085737221e-05, 0.21250228583812714], [0.05692211166024208, 0.036700569093227386, 0.0015533106634393334, 0.01848980039358139, 0.002404581755399704, 0.008354752324521542, 0.023693444207310677, 0.02836945652961731, 0.29948922991752625, 0.005321406293660402, 0.0022319734562188387, 0.0005214664852246642, 0.00019869217067025602, 5.8369230828247964e-05, 0.008838840760290623], [0.011123275384306908, 0.003955129534006119, 0.0015235289465636015, 0.011223106645047665, 0.002481319010257721, 0.000903434120118618, 0.0006720115779899061, 
0.00024289102293550968, 0.010115177370607853, 0.26232361793518066, 0.014199022203683853, 0.0005582758458331227, 0.0001542939426144585, 5.357913687475957e-05, 0.050008371472358704], [0.025191567838191986, 0.009952094405889511, 0.015023785643279552, 0.0893990620970726, 0.006299919448792934, 0.0077370950020849705, 0.0004422276106197387, 0.00010742250742623582, 0.001807618304155767, 0.052116382867097855, 0.33116668462753296, 0.0029348258394747972, 0.004942082799971104, 0.0017646296182647347, 0.009777115657925606], [0.12133541703224182, 0.0033125760965049267, 0.008441481739282608, 0.0257105715572834, 0.005432062782347202, 0.020603680983185768, 0.0008238950395025313, 0.00019463927310425788, 0.0001117472565965727, 0.011082900688052177, 0.4118730425834656, 0.0024717452470213175, 0.21560189127922058, 0.015253315679728985, 0.03452993184328079], [0.00568122835829854, 0.003583817044273019, 0.0009402501164004207, 0.0034319525584578514, 0.014700439758598804, 0.00014027200813870877, 5.928567406954244e-05, 0.0005310353590175509, 0.001004774123430252, 0.00433507701382041, 0.003991644363850355, 0.0015378128737211227, 6.231402221601456e-05, 0.02625701017677784, 0.15481357276439667], [0.00503728911280632, 0.004739185329526663, 0.021364033222198486, 0.04603096470236778, 0.004565324168652296, 0.021244995296001434, 0.07592181116342545, 0.027910754084587097, 0.008603491820394993, 0.004941265098750591, 0.03103908710181713, 0.035909827798604965, 0.01818632334470749, 0.04406380280852318, 0.17931725084781647], [0.21416018903255463, 0.005411786492913961, 0.02111194096505642, 0.07001130282878876, 0.04736214876174927, 0.09187527745962143, 0.1399366855621338, 0.030981194227933884, 0.02342112548649311, 0.07424263656139374, 0.02716991677880287, 0.5710572600364685, 0.007255392149090767, 0.005560784600675106, 0.054831843823194504], [0.3339015245437622, 0.03176174685359001, 0.25991618633270264, 0.31748515367507935, 0.17923809587955475, 0.2977932095527649, 0.14185847342014313, 0.09826549887657166, 0.4168005883693695, 0.09961694478988647, 0.1390676498413086, 0.191667839884758, 0.0443519689142704, 0.10075851529836655, 0.08045557886362076], [0.018510108813643456, 0.0015040059806779027, 0.011199833825230598, 0.021222928538918495, 0.02421635016798973, 0.004175371024757624, 0.0007807075162418187, 0.0005349562270566821, 0.0038052168674767017, 0.3727143108844757, 0.022828511893749237, 0.01009275484830141, 0.0012628438416868448, 0.0009096930734813213, 0.10904579609632492], [0.05896773934364319, 0.023542853072285652, 0.0776505172252655, 0.15385140478610992, 0.011508575640618801, 0.0939982458949089, 0.0018089915392920375, 0.0003290986060164869, 0.0005636389250867069, 0.029514340683817863, 0.35146546363830566, 0.007090898230671883, 0.012099701911211014, 0.006742698606103659, 0.052738532423973083], [0.18205131590366364, 0.00472951028496027, 0.03192766383290291, 0.059333182871341705, 0.028221452608704567, 0.033883631229400635, 0.00131422549020499, 0.0001085989861167036, 5.632251122733578e-05, 0.004554648417979479, 0.2950275242328644, 0.0014449548907577991, 0.2329740822315216, 0.0520821250975132, 0.1361607313156128], [0.0063572716899216175, 0.002779513830319047, 0.0009721479145810008, 0.0035897656343877316, 0.019835324957966805, 0.00021187934908084571, 8.435463678324595e-05, 0.00043589723645709455, 0.0004945950931869447, 0.004414541646838188, 0.0027602717746049166, 0.0008482423145323992, 5.171148222871125e-05, 0.021799515932798386, 0.15211130678653717], [0.005286877974867821, 0.008391096256673336, 0.025823507457971573, 0.030178312212228775, 
0.00857502967119217, 0.042816706001758575, 0.07608389109373093, 0.03679429367184639, 0.0067360359244048595, 0.0038807345554232597, 0.03710461035370827, 0.037315309047698975, 0.018847206607460976, 0.0415174663066864, 0.15352587401866913], [0.2992006242275238, 0.008802352473139763, 0.027079692110419273, 0.08564624935388565, 0.11560814827680588, 0.22971339523792267, 0.1826445311307907, 0.033842965960502625, 0.06175734102725983, 0.11205370724201202, 0.04016120731830597, 0.5851526856422424, 0.016921253874897957, 0.011652404442429543, 0.08951538056135178], [0.12446854263544083, 0.0009617851465009153, 0.004788657650351524, 0.0008746102685108781, 0.16037316620349884, 0.003065474098548293, 0.0056405095383524895, 0.005250739399343729, 0.05696318671107292, 0.013819074258208275, 0.028642717748880386, 0.0011808956041932106, 0.08446037769317627, 0.03008313849568367, 0.13710428774356842]], [[0.005261753685772419, 0.005328452680259943, 0.1075906753540039, 0.007504252251237631, 0.18196941912174225, 0.2677178680896759, 0.18533208966255188, 0.041308093816041946, 0.04052837938070297, 0.0018225060775876045, 0.004738607443869114, 0.028365809470415115, 0.07867489755153656, 0.032602421939373016, 0.14697469770908356], [0.024903474375605583, 0.2637169063091278, 0.01148936152458191, 0.01806865818798542, 0.010384032502770424, 0.05497525632381439, 0.01011874619871378, 6.159161421237513e-05, 0.03404803201556206, 0.01315199863165617, 0.004086918197572231, 0.033981483429670334, 0.0007253359071910381, 0.0010365481721237302, 0.023150891065597534], [0.03176039457321167, 0.002004105830565095, 0.011469452641904354, 0.003235333366319537, 0.011606591753661633, 0.01332010142505169, 0.007885226979851723, 0.0010319099528715014, 0.0026684575714170933, 0.003885145066305995, 0.002207087352871895, 0.010414022952318192, 0.015553043223917484, 0.01973811537027359, 0.1639232188463211], [0.24842531979084015, 0.031220050528645515, 0.028132880106568336, 0.029530569911003113, 0.01766534335911274, 0.36354437470436096, 0.06892471760511398, 0.02528339996933937, 0.01102821622043848, 0.15825842320919037, 0.13755246996879578, 0.07390110194683075, 0.19022952020168304, 0.1824880689382553, 0.1432848572731018], [0.0013664831640198827, 0.001714985934086144, 0.0013615208445116878, 0.0015855998499318957, 0.0011547008762136102, 0.007221538573503494, 0.01537399459630251, 0.020302001386880875, 0.0011185031617060304, 0.001242821803316474, 0.0004577837826218456, 0.0013307477347552776, 6.100967220845632e-05, 3.943840420106426e-05, 0.16435295343399048], [0.0006725311395712197, 0.000846685899887234, 0.001614874112419784, 0.000348375499015674, 0.0019150535808876157, 0.01370947528630495, 0.026421356946229935, 0.08118636161088943, 0.0008913385099731386, 0.0004401778569445014, 0.0003709472657646984, 0.0007744845934212208, 0.002328733913600445, 0.0003664834948722273, 0.14579549431800842], [0.011207095347344875, 0.029191432520747185, 0.015348215587437153, 0.012354064732789993, 0.002485303906723857, 0.7150441408157349, 0.0764552503824234, 0.14450958371162415, 0.0016117440536618233, 0.008765846490859985, 0.011787951923906803, 0.002862851833924651, 0.022502094507217407, 0.007210019044578075, 0.007054056040942669], [0.006926322355866432, 0.0050496323965489864, 0.010020078159868717, 0.021360181272029877, 0.0027102867607027292, 0.028520535677671432, 0.05918040871620178, 0.23060235381126404, 0.019199691712856293, 0.09477535635232925, 0.013206732459366322, 0.0014817069750279188, 0.0153219448402524, 0.01803957298398018, 0.07950127124786377], [0.009242678992450237, 
0.05580667033791542, 0.014326682314276695, 0.04630666971206665, 0.010674487799406052, 0.5850453972816467, 0.4108324944972992, 0.4116209149360657, 0.007144990377128124, 0.20661039650440216, 0.037308260798454285, 0.054067905992269516, 0.037599414587020874, 0.03113422356545925, 0.22261686623096466], [0.0023711349349468946, 0.019731320440769196, 0.027566438540816307, 0.03758935630321503, 0.022646954283118248, 0.06538618355989456, 0.01152126956731081, 0.014797273091971874, 0.003413880243897438, 0.024214325472712517, 0.019466044381260872, 0.007235943805426359, 0.0008611958473920822, 0.0011126803001388907, 0.268255352973938], [0.08772679418325424, 0.02003292553126812, 0.09465871006250381, 0.41126132011413574, 0.07995565980672836, 0.5143890976905823, 0.1155472919344902, 0.01320470031350851, 0.02149844542145729, 0.06702866405248642, 0.6884661316871643, 0.09638151526451111, 0.35587188601493835, 0.2170087993144989, 0.019593046978116035], [0.01343127153813839, 0.0019279895350337029, 0.01925632171332836, 0.04226915165781975, 0.005290344823151827, 0.5555825233459473, 0.06846548616886139, 0.006453313864767551, 0.019162334501743317, 0.0017575293313711882, 0.2967261075973511, 0.11721283942461014, 0.4438721835613251, 0.1899448037147522, 0.007863422855734825], [0.12789316475391388, 0.004323228262364864, 0.03538274019956589, 0.05581461265683174, 0.020947236567735672, 0.09860846400260925, 0.11394336074590683, 0.010361305437982082, 0.011101406998932362, 0.33580121397972107, 0.13689599931240082, 0.038663506507873535, 0.19725953042507172, 0.10533706098794937, 0.008538279682397842], [0.007053391542285681, 0.012331487610936165, 0.008611395955085754, 0.031008008867502213, 0.004283395130187273, 0.0029549654573202133, 0.00849387887865305, 0.008564120158553123, 0.02629040740430355, 0.009985123760998249, 0.00761940935626626, 0.003499145619571209, 0.0015691317385062575, 0.005600257311016321, 0.5214234590530396], [0.0007030746201053262, 0.0001308645587414503, 0.0001913319865707308, 0.00016671058256179094, 0.000299752748105675, 0.0001608166057849303, 0.004501530434936285, 0.0010771069210022688, 0.003937124740332365, 0.001599485520273447, 0.0007339937728829682, 0.0030779645312577486, 3.4502605558373034e-05, 9.700484952190891e-05, 0.15641583502292633], [0.027913473546504974, 0.10055015236139297, 0.005828284192830324, 0.007361504249274731, 0.0010143647668883204, 0.000654859293717891, 0.0101061025634408, 0.029607031494379044, 0.04485415667295456, 0.09235014766454697, 0.05163425952196121, 0.03075464628636837, 0.027050884440541267, 0.021472401916980743, 0.18064866960048676], [0.0011193754617124796, 0.03864011913537979, 0.0033454783260822296, 0.0006957795703783631, 0.001480268081650138, 0.0012079592561349273, 0.00020605533791240305, 0.0011212154058739543, 0.0015670693246647716, 0.0014121911954134703, 0.0012700740480795503, 0.0019415348069742322, 0.001359732006676495, 0.0011440571397542953, 0.23876120150089264], [0.012943120673298836, 0.020876264199614525, 0.04825761169195175, 0.03707631304860115, 0.015636419877409935, 0.11923719942569733, 0.021652603521943092, 0.026653259992599487, 0.020431919023394585, 0.03287035599350929, 0.10921605676412582, 0.11103712767362595, 0.08490956574678421, 0.05352960154414177, 0.1791488379240036], [0.010143280029296875, 0.0011783033842220902, 0.07699523866176605, 0.04151652753353119, 0.013031265698373318, 0.6595657467842102, 0.04001229628920555, 0.015414847061038017, 0.05828738585114479, 0.00582495890557766, 0.39538952708244324, 0.3540988564491272, 0.5535411834716797, 0.14920510351657867, 
0.05510678142309189], [0.10365689545869827, 0.011393263004720211, 0.09083462506532669, 0.05552159622311592, 0.021694108843803406, 0.23093751072883606, 0.12655670940876007, 0.02638416364789009, 0.016898566856980324, 0.4334920644760132, 0.1302367001771927, 0.07987051457166672, 0.26015403866767883, 0.07882147282361984, 0.06412448734045029], [0.0009046280756592751, 0.006186267826706171, 0.001710598124191165, 0.0040000369772315025, 0.0010556421475484967, 0.00010012275743065402, 0.000467440317152068, 0.00034073027200065553, 0.012450831942260265, 0.001776019111275673, 0.0016348852077499032, 0.0004490323772188276, 0.00023723821504972875, 0.0005369102582335472, 0.2610536217689514], [0.00040706052095629275, 5.995776882627979e-05, 0.00011266738147241995, 0.00010974665929097682, 0.00022393744438886642, 7.468188414350152e-05, 0.00239625689573586, 0.0004222780407872051, 0.002755024004727602, 0.0011263962369412184, 0.0004159261588938534, 0.0013214137870818377, 1.3015362128498964e-05, 3.146446033497341e-05, 0.15343648195266724], [0.02487853355705738, 0.06922142952680588, 0.005931189749389887, 0.005149703938513994, 0.0007503133383579552, 0.00046759017277508974, 0.004864065907895565, 0.010271446779370308, 0.03885169327259064, 0.0494176521897316, 0.032662954181432724, 0.015474021434783936, 0.005468437913805246, 0.0031831569503992796, 0.16160887479782104], [0.0006016235565766692, 0.010655699297785759, 0.0012552555417641997, 0.0004406629304867238, 0.0006771506741642952, 0.0004804672207683325, 8.584682655055076e-05, 0.00018533790716901422, 0.0020008538849651814, 0.0008522755815647542, 0.0005471827462315559, 0.0006654397584497929, 0.0003326669684611261, 0.00020969027536921203, 0.18202657997608185], [0.0006660889484919608, 0.0011989487102255225, 0.006168409250676632, 0.0007392434636130929, 0.002072105184197426, 0.0013732375809922814, 0.001215140800923109, 8.942947169998661e-05, 0.0032219376880675554, 0.00034276655060239136, 0.0006051870877854526, 0.0004003554640803486, 0.0006330502219498158, 9.228585986420512e-05, 0.13989190757274628]], [[0.17597882449626923, 0.03865775838494301, 0.04927876219153404, 0.19269852340221405, 0.07631995528936386, 0.03202155977487564, 0.04315444082021713, 0.0381813645362854, 0.14437337219715118, 0.14268529415130615, 0.12548406422138214, 0.22065725922584534, 0.007455701474100351, 0.012540786527097225, 0.13194040954113007], [0.12168548256158829, 0.12690430879592896, 0.03319493681192398, 0.044549524784088135, 0.022643521428108215, 0.12293753027915955, 0.012858373112976551, 0.056580886244773865, 0.0409478023648262, 0.5390252470970154, 0.04499629884958267, 0.010665545240044594, 0.0012580851325765252, 0.0006077282596379519, 0.16003872454166412], [0.004976227879524231, 0.0016218257369473577, 0.10218203067779541, 0.005807417444884777, 0.025330372154712677, 0.00805770605802536, 0.0010953968157991767, 0.007808555383235216, 0.03332183510065079, 0.01014297641813755, 0.0378553569316864, 0.0012688467977568507, 0.0070253219455480576, 0.006525768432766199, 0.1611432433128357], [0.018298039212822914, 0.043392445892095566, 0.026758581399917603, 0.06685060262680054, 0.007846164517104626, 0.0070086256600916386, 0.0011090404586866498, 0.0016357558779418468, 0.015295942313969135, 0.022091375663876534, 0.08676162362098694, 0.0013220091350376606, 0.0007799563463777304, 0.0005145008908584714, 0.5814905166625977], [0.16791731119155884, 0.01838838867843151, 0.03170344606041908, 0.04746389389038086, 0.024931352585554123, 0.002624210435897112, 0.3320338726043701, 0.32248422503471375, 0.021048149093985558, 
0.02857070416212082, 0.11922428011894226, 4.079664358869195e-05, 0.0002566495386417955, 0.0005197013379074633, 0.1538068950176239], [0.03376027196645737, 0.001082546659745276, 0.003266592975705862, 0.006257645785808563, 0.023632841184735298, 0.00021245618700049818, 0.033721838146448135, 0.15340450406074524, 0.009442711248993874, 0.006162047851830721, 0.09923229366540909, 0.0001386175281368196, 0.0008165750186890364, 0.0010916005121544003, 0.14602994918823242], [0.04221357777714729, 0.03857824206352234, 0.004161412362009287, 0.06419923156499863, 0.010648604482412338, 0.008165394887328148, 0.04070910066366196, 0.34736329317092896, 0.0012154168216511607, 0.1630050241947174, 0.07001504302024841, 0.0033116117119789124, 0.00023883172252681106, 0.00045473958016373217, 0.2740376889705658], [0.007271567825227976, 0.0015110730892047286, 0.0014769553672522306, 0.0053740208968520164, 0.0038654205854982138, 0.0024983601178973913, 0.049697574228048325, 0.27208074927330017, 0.0006182760698720813, 0.014045008458197117, 0.00131281279027462, 0.00040628391434438527, 0.00037906834040768445, 0.0001199298130813986, 0.006693295668810606], [0.08829134702682495, 0.11286511272192001, 0.004967967513948679, 0.006996258161962032, 0.0014454894699156284, 0.006397548597306013, 0.01389994379132986, 0.27431485056877136, 0.0018983082845807076, 0.09154568612575531, 0.022492842748761177, 0.0017391144065186381, 0.000634143827483058, 4.5783879613736644e-05, 0.318096399307251], [0.02142007276415825, 0.007001234218478203, 0.00761477230116725, 0.018849696964025497, 0.010492328554391861, 0.01844215951859951, 0.008208145387470722, 0.01109394058585167, 0.006335548125207424, 0.01884968765079975, 0.01652243174612522, 0.016355833038687706, 0.0014795949682593346, 0.0011322565842419863, 0.27169719338417053], [0.17013461887836456, 0.14343884587287903, 0.017679741606116295, 0.10850679129362106, 0.01231957133859396, 0.010847942903637886, 0.04900640249252319, 0.023357992991805077, 0.014735743403434753, 0.014097570441663265, 0.012582896277308464, 0.0010529988212510943, 0.00046457236749120057, 0.0006211225991137326, 0.5663455724716187], [0.1586649864912033, 0.08337923884391785, 0.0181503314524889, 0.22676831483840942, 0.016727542504668236, 0.015186772681772709, 0.0050455182790756226, 0.00688449339941144, 0.025511443614959717, 0.20239992439746857, 0.024231791496276855, 0.0023393011651933193, 0.0011192933889105916, 0.0005647524958476424, 0.390881210565567], [0.3443087935447693, 0.28029316663742065, 0.23536846041679382, 0.34415915608406067, 0.11761639267206192, 0.006012732163071632, 0.008058828301727772, 0.005314267706125975, 0.013309409841895103, 0.09906232357025146, 0.10091385245323181, 0.018941059708595276, 0.025248508900403976, 0.014945760369300842, 0.7436007857322693], [0.0022638223599642515, 0.004991845227777958, 0.004655482713133097, 0.0007185174035839736, 0.0013901105849072337, 0.011776956729590893, 0.0005479936371557415, 0.00022604972764384001, 0.00024645475787110627, 0.009541304782032967, 0.011744895949959755, 0.0007132806931622326, 0.27867355942726135, 0.02834550105035305, 0.007979176938533783], [0.024570701643824577, 0.00167787482496351, 0.004072254989296198, 0.00223688711412251, 0.007143567781895399, 0.00014352552534546703, 0.0004634522774722427, 0.0016921478090807796, 0.003620122792199254, 0.007754941936582327, 0.011850811541080475, 0.0027722271624952555, 9.3724018370267e-05, 0.02145184949040413, 0.15506701171398163], [0.01723022572696209, 0.08018677681684494, 0.007713299244642258, 0.004271229729056358, 0.0005464836140163243, 
0.00456921337172389, 0.0031762931030243635, 0.009469777345657349, 0.000385247083613649, 0.01870143786072731, 0.033109456300735474, 0.004042719956487417, 0.004976211115717888, 0.005646048113703728, 0.19230251014232635], [0.016216034069657326, 0.04777013510465622, 0.01620146818459034, 0.010810854844748974, 0.16034351289272308, 0.006931359879672527, 0.0032006967812776566, 0.032106515020132065, 0.0003033989341929555, 0.015325331129133701, 0.006036583799868822, 0.12791146337985992, 0.19952742755413055, 0.023708127439022064, 0.18307197093963623], [0.014499284327030182, 0.035677529871463776, 0.009275808930397034, 0.01653297245502472, 0.006223962642252445, 0.0020693510305136442, 0.007680083625018597, 0.013822571374475956, 0.00040966575033962727, 0.0038025544490665197, 0.013774569146335125, 0.006069935858249664, 0.004488381557166576, 0.005977130029350519, 0.217429518699646], [0.03237156197428703, 0.013441890478134155, 0.0194883793592453, 0.09343220293521881, 0.05379915237426758, 0.004893247038125992, 0.0011929833563044667, 0.009432576596736908, 0.015330814756453037, 0.14898745715618134, 0.018398255109786987, 0.01228779274970293, 0.00492482166737318, 0.0038985873106867075, 0.2601524889469147], [0.08357361704111099, 0.18220724165439606, 0.10462122410535812, 0.08245989680290222, 0.03124452568590641, 0.002170282183215022, 0.0020384257659316063, 0.004550496581941843, 0.003485089400783181, 0.036062099039554596, 0.0278666652739048, 0.011443988420069218, 0.01760544627904892, 0.013599698431789875, 0.3874043822288513], [0.001995340920984745, 0.011527596041560173, 0.005334027577191591, 0.0006887424970045686, 0.0023407095577567816, 0.00276917009614408, 0.00029977987287566066, 0.00012230046559125185, 0.00026578022516332567, 0.008239910937845707, 0.009819538332521915, 0.000393931899452582, 0.605858564376831, 0.08989311754703522, 0.011135715991258621], [0.021298440173268318, 0.001658836961723864, 0.004600299056619406, 0.0025729055050760508, 0.015332063660025597, 0.00017298871534876525, 0.0005721640191040933, 0.00186175387352705, 0.0037871075328439474, 0.009124312549829483, 0.01116581168025732, 0.0031747270841151476, 0.00012207991676405072, 0.029056062921881676, 0.15163807570934296], [0.020229021087288857, 0.11621151119470596, 0.015550180338323116, 0.006284819450229406, 0.0013723199954256415, 0.013658476993441582, 0.005685316864401102, 0.02063058130443096, 0.001440295367501676, 0.022225895896553993, 0.07092871516942978, 0.007373427972197533, 0.00771017000079155, 0.006927240639925003, 0.16024509072303772], [0.014029471203684807, 0.02389930933713913, 0.011611595749855042, 0.012217668816447258, 0.2477317750453949, 0.006976675242185593, 0.0035841658245772123, 0.022232146933674812, 0.0018886715406551957, 0.01750483363866806, 0.005654812324792147, 0.10889071226119995, 0.19916927814483643, 0.022882532328367233, 0.16074435412883759], [0.0032621105201542377, 0.006088452413678169, 0.012619324028491974, 0.008848619647324085, 0.17461968958377838, 8.660123421577737e-05, 0.0006109846872277558, 0.0007747155614197254, 0.003163054818287492, 0.017787659540772438, 0.029563669115304947, 0.0032195982057601213, 0.013336165808141232, 0.013171130791306496, 0.1387031376361847]], [[0.09661699831485748, 0.7619754076004028, 0.05676787346601486, 0.020180072635412216, 0.10883769392967224, 0.42711278796195984, 0.09064477682113647, 0.10612691193819046, 0.04782179743051529, 0.06935178488492966, 0.027948519214987755, 0.00755169615149498, 0.007339869160205126, 0.025803416967391968, 0.09292053431272507], [0.042798254638910294, 0.23223945498466492, 
0.062359996140003204, 0.01933804154396057, 0.04838808253407478, 0.30189236998558044, 0.0354127362370491, 0.019764740020036697, 0.00920741818845272, 0.0097093116492033, 0.0160877276211977, 0.0032758424058556557, 0.005296806804835796, 0.011010169051587582, 0.02110680378973484], [0.02002989500761032, 0.001048662350513041, 0.03834937512874603, 0.030392715707421303, 0.09750902652740479, 0.056120067834854126, 0.008173296228051186, 0.006944228895008564, 0.004440560005605221, 0.005061029922217131, 0.007118762470781803, 0.008411978371441364, 0.023608768358826637, 0.04182775691151619, 0.16016238927841187], [0.041295986622571945, 0.19780276715755463, 0.03777160495519638, 0.1712082475423813, 0.20935285091400146, 0.158755823969841, 0.3937656581401825, 0.684601902961731, 0.2584594190120697, 0.11237194389104843, 0.1112959012389183, 0.09882687777280807, 0.05429066717624664, 0.24210131168365479, 0.016339490190148354], [0.26312491297721863, 0.2720799446105957, 0.005703570321202278, 0.0481516495347023, 0.027902500703930855, 0.0034437666181474924, 0.03425572067499161, 0.03555849939584732, 0.028000997379422188, 0.0429554246366024, 0.002753790933638811, 0.0017769382102414966, 0.002218457870185375, 0.003535473719239235, 0.1597488671541214], [0.22248251736164093, 0.03185709938406944, 0.000688861298840493, 0.005810217931866646, 0.007679672911763191, 0.0008787074475549161, 0.07858764380216599, 0.14273476600646973, 0.07306984066963196, 0.02433006465435028, 0.011720307171344757, 0.013396549038589, 0.017704129219055176, 0.034836068749427795, 0.1453055441379547], [0.1531120240688324, 0.15391655266284943, 0.006810865830630064, 0.07720811665058136, 0.008951452560722828, 0.01149735413491726, 0.2822602391242981, 0.30408379435539246, 0.48283058404922485, 0.33028021454811096, 0.16095426678657532, 0.031167738139629364, 0.03355513513088226, 0.13962571322917938, 0.012790725566446781], [0.03593587130308151, 0.03233448788523674, 0.22662676870822906, 0.405829519033432, 0.014032814651727676, 0.02822977490723133, 0.09231841564178467, 0.1225365549325943, 0.20093639194965363, 0.2508411109447479, 0.5826555490493774, 0.037383783608675, 0.07952429354190826, 0.10720134526491165, 0.15212680399417877], [0.037364520132541656, 0.04119153320789337, 0.0012645104434341192, 0.021537767723202705, 0.000536995125003159, 0.0011436643544584513, 0.019049961119890213, 0.06139632686972618, 0.385105162858963, 0.13276730477809906, 0.24771228432655334, 0.04952799528837204, 0.04911990836262703, 0.11973114311695099, 0.021608887240290642], [0.004867227748036385, 0.009626063518226147, 0.0003137234307359904, 0.0026314754504710436, 0.00027048110496252775, 0.000934475683607161, 0.007251756265759468, 0.03575620427727699, 0.40781450271606445, 0.05584407597780228, 0.040446195751428604, 0.005334825720638037, 0.007708138320595026, 0.06401336193084717, 0.010240204632282257], [0.19358457624912262, 0.2328234314918518, 0.0017398587660863996, 0.10100623220205307, 0.0019695234950631857, 0.1674531251192093, 0.4513051509857178, 0.6547151803970337, 0.030009860172867775, 0.7025956511497498, 0.1685936599969864, 0.03178222477436066, 0.13270388543605804, 0.23426049947738647, 0.010277668945491314], [0.09463346004486084, 0.5257620811462402, 0.0045187450014054775, 0.07222570478916168, 0.0025188177824020386, 0.1410406231880188, 0.06597349792718887, 0.0719805508852005, 0.09957849979400635, 0.17567123472690582, 0.18618373572826385, 0.02195402979850769, 0.042485080659389496, 0.12470933794975281, 0.00617468124255538], [0.027796348556876183, 0.06599752604961395, 0.002643989399075508, 
0.029425768181681633, 0.008861851878464222, 0.013279970735311508, 0.25377023220062256, 0.2656356692314148, 0.055540941655635834, 0.027583830058574677, 0.004816746339201927, 0.3890189528465271, 0.12020140886306763, 0.33882811665534973, 0.0040408894419670105], [0.4147956669330597, 0.5514373779296875, 0.09636387228965759, 0.29775112867355347, 0.03436855599284172, 0.08799602836370468, 0.07023341208696365, 0.10276275128126144, 0.25543972849845886, 0.10302554070949554, 0.05857125297188759, 0.029829595237970352, 0.114840567111969, 0.33078575134277344, 0.07371985912322998], [0.07031518220901489, 0.001305539975874126, 0.0025430582463741302, 0.010662226937711239, 0.0007357596186921, 0.000663888524286449, 0.0014398572966456413, 0.0005107407923787832, 0.005960140842944384, 0.0030986208003014326, 0.0017578504048287868, 0.00018377922242507339, 1.743367283779662e-05, 4.847845411859453e-05, 0.15638960897922516], [0.24421003460884094, 0.03331591188907623, 0.07573812454938889, 0.33240795135498047, 0.006838400848209858, 0.008697851561009884, 0.06428743898868561, 0.06466686725616455, 0.006176145281642675, 0.06394235789775848, 0.09260299056768417, 0.19959890842437744, 0.02154124155640602, 0.021672323346138, 0.15025706589221954], [0.5462155342102051, 0.545982301235199, 0.3341628611087799, 0.5788259506225586, 0.08809857815504074, 0.06356553733348846, 0.022417092695832253, 0.0164126455783844, 0.00386660173535347, 0.10154324769973755, 0.14015790820121765, 0.0864240974187851, 0.34186482429504395, 0.22899740934371948, 0.05407746881246567], [0.48888036608695984, 0.6578190326690674, 0.030819885432720184, 0.2205304652452469, 0.004883326590061188, 0.0656682699918747, 0.04461565986275673, 0.05094402655959129, 0.0005314986919984221, 0.15455113351345062, 0.10763049870729446, 0.1186080202460289, 0.14419804513454437, 0.1328149437904358, 0.09490374475717545], [0.15812784433364868, 0.9118645191192627, 0.022590545937418938, 0.05952226370573044, 0.00360964541323483, 0.07875056564807892, 0.013187792152166367, 0.02020449750125408, 0.0020393244922161102, 0.033818699419498444, 0.0449705570936203, 0.02132066898047924, 0.0717315599322319, 0.12101268768310547, 0.06353376060724258], [0.07771441340446472, 0.4748976230621338, 0.012594498693943024, 0.043653786182403564, 0.006564431358128786, 0.024485116824507713, 0.20463299751281738, 0.1550481915473938, 0.0016144687542691827, 0.005543926265090704, 0.0017496985383331776, 0.3491710126399994, 0.23835937678813934, 0.3316482901573181, 0.08539295196533203], [0.22228576242923737, 0.3581831455230713, 0.10504736006259918, 0.2062736451625824, 0.015430409461259842, 0.007369442842900753, 0.009848481975495815, 0.0027359407395124435, 0.003257193835452199, 0.004766176920384169, 0.0058546122163534164, 0.0040231142193078995, 0.032162997871637344, 0.05548902228474617, 0.22239458560943604], [0.040305208414793015, 0.0008039010572247207, 0.001399470493197441, 0.006614126265048981, 0.0003286598657723516, 0.0002559607964940369, 0.0005696980515494943, 0.00010972175368806347, 0.0006102611077949405, 0.0009710662416182458, 0.0004746906051877886, 5.0628168537514284e-05, 6.201828455232317e-06, 1.1841932064271532e-05, 0.15342259407043457], [0.18667390942573547, 0.05485990643501282, 0.06146723031997681, 0.2094709873199463, 0.003188095986843109, 0.005957009736448526, 0.04363764822483063, 0.02604665607213974, 0.0011390803847461939, 0.022857926785945892, 0.035827361047267914, 0.07732249796390533, 0.00673074834048748, 0.004807854071259499, 0.15350142121315002], [0.46625471115112305, 0.6644052863121033, 0.19963930547237396, 
0.36004284024238586, 0.06144074350595474, 0.06362717598676682, 0.016601700335741043, 0.006137203890830278, 0.0020489897578954697, 0.041981395334005356, 0.042364589869976044, 0.04546959325671196, 0.25786423683166504, 0.1048446074128151, 0.10812478512525558], [0.01868601329624653, 0.08739857375621796, 0.016145089641213417, 0.000850466953124851, 0.0035631621722131968, 0.013478883542120457, 0.0006747889565303922, 0.0010685214074328542, 0.013735192827880383, 0.0029910006560385227, 0.017663421109318733, 0.0005569100612774491, 0.0335303470492363, 0.010939561761915684, 0.13854636251926422]], [[0.03039383515715599, 0.011264979839324951, 0.30973049998283386, 0.33407092094421387, 0.24303670227527618, 0.013086382299661636, 0.12547586858272552, 0.047571711242198944, 0.07738520950078964, 0.2579103410243988, 0.13098950684070587, 0.3019145727157593, 0.018321001902222633, 0.10478901118040085, 0.1313871294260025], [0.32489657402038574, 0.01967906951904297, 0.10292623937129974, 0.18745845556259155, 0.06220339238643646, 0.03126899152994156, 0.030121171846985817, 0.013807957991957664, 0.01960192248225212, 0.10352540761232376, 0.08122410625219345, 0.11610747873783112, 0.05098450556397438, 0.06022121384739876, 0.24838198721408844], [0.21547414362430573, 0.011987588368356228, 0.09540344774723053, 0.03949207067489624, 0.22973625361919403, 0.013393656350672245, 0.014646085910499096, 0.018391601741313934, 0.12483032047748566, 0.04761500656604767, 0.16838808357715607, 0.0500614158809185, 0.09093409031629562, 0.09172232449054718, 0.14920873939990997], [0.3455514907836914, 0.20528344810009003, 0.14200778305530548, 0.1397678107023239, 0.3345029056072235, 0.04282815381884575, 0.020769812166690826, 0.02952164225280285, 0.29125186800956726, 0.09975660592317581, 0.3298649489879608, 0.36294782161712646, 0.10288939625024796, 0.1784013956785202, 0.03550736606121063], [0.023072484880685806, 0.08888474851846695, 0.04328835755586624, 0.009794876910746098, 0.18984860181808472, 0.0009663040982559323, 0.0038235578685998917, 0.05101485177874565, 0.059323158115148544, 0.00876270979642868, 0.021391507238149643, 0.02426949329674244, 0.013026251457631588, 0.06840420514345169, 0.15691325068473816], [0.20066522061824799, 0.18445545434951782, 0.10427504032850266, 0.02148139849305153, 0.3108636438846588, 0.0010669901967048645, 0.031332992017269135, 0.06621930748224258, 0.42585986852645874, 0.05703788995742798, 0.1919325739145279, 0.6617251038551331, 0.07196007668972015, 0.2038833349943161, 0.13549473881721497], [0.06934618204832077, 0.15043997764587402, 0.24868465960025787, 0.0180400051176548, 0.61164391040802, 0.0047634197399020195, 0.0077652581967413425, 0.01316747348755598, 0.09036756306886673, 0.016214115545153618, 0.09484434872865677, 0.7773507833480835, 0.3649398386478424, 0.19880527257919312, 0.026039909571409225], [0.5420496463775635, 0.775536835193634, 0.21455605328083038, 0.17522192001342773, 0.3905614912509918, 0.07102629542350769, 0.15213513374328613, 0.06534071266651154, 0.05938922241330147, 0.3742612600326538, 0.040289394557476044, 0.6919643878936768, 0.07523911446332932, 0.14220400154590607, 0.06588775664567947], [0.05002814158797264, 0.18039211630821228, 0.4788157641887665, 0.0970841720700264, 0.5287489891052246, 0.07699278742074966, 0.024560611695051193, 0.055294524878263474, 0.031155720353126526, 0.029308732599020004, 0.023515479639172554, 0.10280930250883102, 0.01905171573162079, 0.033789344131946564, 0.006217750255018473], [0.2326076328754425, 0.12470381706953049, 0.5816100239753723, 0.187625452876091, 
0.17989297211170197, 0.58512943983078, 0.4148763120174408, 0.7688660621643066, 0.02497384324669838, 0.10204316675662994, 0.16508084535598755, 0.4722842574119568, 0.654721736907959, 0.31103214621543884, 0.02808636985719204], [0.32085803151130676, 0.3732209801673889, 0.8471049070358276, 0.2474840134382248, 0.8311324715614319, 0.1531035155057907, 0.14141014218330383, 0.12460694462060928, 0.15561653673648834, 0.05888388305902481, 0.03703024983406067, 0.2600737512111664, 0.049645353108644485, 0.08333000540733337, 0.053744472563266754], [0.048572178930044174, 0.20163586735725403, 0.8568418025970459, 0.3438677489757538, 0.8764770030975342, 0.038519736379384995, 0.10765119642019272, 0.14438603818416595, 0.13915397226810455, 0.04139794409275055, 0.24816225469112396, 0.22188685834407806, 0.1582770049571991, 0.255889892578125, 0.05260627716779709], [0.10717450082302094, 0.14654512703418732, 0.5492125749588013, 0.149112731218338, 0.6473506689071655, 0.014123019762337208, 0.023513145744800568, 0.06304500997066498, 0.5243880152702332, 0.17494699358940125, 0.11734810471534729, 0.2534768283367157, 0.06080847606062889, 0.1781260073184967, 0.01657547615468502], [0.024022793397307396, 0.20128284394741058, 0.39493197202682495, 0.16542883217334747, 0.7724959254264832, 0.05353498458862305, 0.039175428450107574, 0.21511156857013702, 0.10924636572599411, 0.3127569556236267, 0.20907098054885864, 0.6610769033432007, 0.026550091803073883, 0.07443477213382721, 0.04747246578335762], [0.0639173686504364, 0.0019661476835608482, 0.03054100275039673, 0.07290788739919662, 0.07458660751581192, 0.0017515828367322683, 0.01338117104023695, 0.0049591753631830215, 0.10895326733589172, 0.03256915882229805, 0.07470867037773132, 0.022291045635938644, 0.00026081688702106476, 0.003768018214032054, 0.15579301118850708], [0.00809751357883215, 0.08670660853385925, 0.12165205925703049, 0.06173386052250862, 0.8110419511795044, 0.006245153024792671, 0.03447260707616806, 0.08050490915775299, 0.779870867729187, 0.2479465901851654, 0.38426774740219116, 0.6870184540748596, 0.2310730367898941, 0.07155610620975494, 0.05814361199736595], [0.01971210353076458, 0.10859540849924088, 0.17558348178863525, 0.04931360110640526, 0.4077165424823761, 0.001824796199798584, 0.004386546555906534, 0.0422598272562027, 0.9374924302101135, 0.3226373493671417, 0.06322266161441803, 0.05341457948088646, 0.0039883931167423725, 0.004304073750972748, 0.13460686802864075], [0.018049566075205803, 0.12295468151569366, 0.24470828473567963, 0.04122815281152725, 0.7332677245140076, 0.004472800530493259, 0.0029204280581325293, 0.018685931339859962, 0.4878760874271393, 0.20441682636737823, 0.08441592752933502, 0.4205068051815033, 0.04466289281845093, 0.13263334333896637, 0.0994158536195755], [0.007120466325432062, 0.02300306409597397, 0.2714575231075287, 0.07745856046676636, 0.6446666717529297, 0.0059507740661501884, 0.011145476251840591, 0.13244189321994781, 0.38060593605041504, 0.06726288050413132, 0.22673718631267548, 0.3522229492664337, 0.17927831411361694, 0.524927020072937, 0.09379637986421585], [0.03649899363517761, 0.08160936087369919, 0.2519805133342743, 0.07504414021968842, 0.1795702874660492, 0.006024391856044531, 0.0073743402026593685, 0.061968039721250534, 0.7520835995674133, 0.28517279028892517, 0.1493321657180786, 0.3589819371700287, 0.04636238142848015, 0.16408585011959076, 0.046330999583005905], [0.009416425600647926, 0.1558573991060257, 0.15325002372264862, 0.08311447501182556, 0.6221630573272705, 0.0029961667023599148, 0.006436231546103954, 
0.027678541839122772, 0.2543543577194214, 0.47390833497047424, 0.28851544857025146, 0.6220062375068665, 0.014266690239310265, 0.05054754391312599, 0.0578170008957386], [0.04693470522761345, 0.0011674511479213834, 0.01364858541637659, 0.06039872020483017, 0.0427468940615654, 0.0009404723532497883, 0.007858873344957829, 0.0028007859364151955, 0.06382106244564056, 0.03982963413000107, 0.05175205320119858, 0.011254650540649891, 0.0001272865483770147, 0.001588277518749237, 0.15313954651355743], [0.017768997699022293, 0.1465732455253601, 0.15898801386356354, 0.12304693460464478, 0.8442554473876953, 0.006285809446126223, 0.04204265773296356, 0.12739135324954987, 0.8276333808898926, 0.5079721808433533, 0.5299316644668579, 0.8274551630020142, 0.09790517389774323, 0.02651425078511238, 0.11435628682374954], [0.017107579857110977, 0.05770094692707062, 0.07052541524171829, 0.059498131275177, 0.2613165080547333, 0.0009367912425659597, 0.0028308003675192595, 0.01869240775704384, 0.8671534061431885, 0.40041688084602356, 0.03947103023529053, 0.0349445715546608, 0.00177917187102139, 0.002164072822779417, 0.1562660187482834], [0.006599111016839743, 0.004138579126447439, 0.06047067046165466, 0.013185898773372173, 0.15347044169902802, 0.000755132467020303, 0.007522573694586754, 0.002741254400461912, 0.10833818465471268, 0.005474736914038658, 0.009540018625557423, 0.00040286476723849773, 0.004092549905180931, 0.002003892557695508, 0.13896189630031586]]], [[[0.010830877348780632, 0.011870973743498325, 0.10922139137983322, 0.013140714727342129, 0.060979437083005905, 0.24213501811027527, 0.056873127818107605, 0.0565403513610363, 0.1606917381286621, 0.004471848253160715, 0.04391508549451828, 0.16444265842437744, 0.14521700143814087, 0.12183647602796555, 0.18165212869644165], [0.1442122757434845, 0.026047294959425926, 0.4262431859970093, 0.3211715519428253, 0.7946609258651733, 0.48857852816581726, 0.31943926215171814, 0.3322535455226898, 0.8442224860191345, 0.37700119614601135, 0.4491288661956787, 0.725179135799408, 0.5425247550010681, 0.7077597379684448, 0.47353750467300415], [0.004308484960347414, 0.0038143862038850784, 0.01376394834369421, 0.007213444449007511, 0.0352218858897686, 0.009065943770110607, 0.00796457938849926, 0.009648038074374199, 0.012818497605621815, 0.005304576829075813, 0.00578665267676115, 0.025514552369713783, 0.003588201943784952, 0.005116589833050966, 0.1385156214237213], [0.37350767850875854, 0.33144617080688477, 0.1264321357011795, 0.21400198340415955, 0.32627996802330017, 0.09132378548383713, 0.05067773535847664, 0.05911920592188835, 0.47554144263267517, 0.5285797715187073, 0.055136121809482574, 0.07909779250621796, 0.0048016151413321495, 0.023815851658582687, 0.05086187273263931], [0.026979738846421242, 0.17144815623760223, 0.016802728176116943, 0.011190843768417835, 0.05719228833913803, 0.006600439548492432, 0.02541169337928295, 0.056367360055446625, 0.2566111385822296, 0.13847731053829193, 0.02390860766172409, 0.10821771621704102, 0.004193281754851341, 0.024024199694395065, 0.1485961675643921], [0.010539665818214417, 0.02736317366361618, 0.020729688927531242, 0.012272891588509083, 0.037458207458257675, 0.020133765414357185, 0.006475721951574087, 0.0135318823158741, 0.14018985629081726, 0.043190933763980865, 0.014518915675580502, 0.06027117371559143, 0.013409063220024109, 0.008036705665290356, 0.12864065170288086], [0.06693296134471893, 0.05517994612455368, 0.31718623638153076, 0.09396946430206299, 0.13595829904079437, 0.09244473278522491, 0.0043823812156915665, 0.004134675953537226, 
0.9252469539642334, 0.10048755258321762, 0.12945091724395752, 0.21572811901569366, 0.034586720168590546, 0.0726432204246521, 0.04207848384976387], [0.07686225324869156, 0.019675375893712044, 0.2417416274547577, 0.08641211688518524, 0.27890217304229736, 0.038729339838027954, 0.01047417800873518, 0.015033761039376259, 0.4832261800765991, 0.05870191380381584, 0.2969569265842438, 0.6193534731864929, 0.12871475517749786, 0.22289764881134033, 0.5152896642684937], [0.27357029914855957, 0.46676310896873474, 0.3964380621910095, 0.19407758116722107, 0.11257106065750122, 0.014855606481432915, 0.047355495393276215, 0.03237777575850487, 0.3466991186141968, 0.3347361087799072, 0.40522828698158264, 0.5460160970687866, 0.16927282512187958, 0.30020883679389954, 0.04839835315942764], [0.03550037741661072, 0.12907657027244568, 0.07532694190740585, 0.016156595200300217, 0.003630127990618348, 0.01967703178524971, 0.04095811769366264, 0.0179570484906435, 0.39472800493240356, 0.07661326229572296, 0.4370958209037781, 0.4819755256175995, 0.022724222391843796, 0.033822834491729736, 0.04362141340970993], [0.021909046918153763, 0.030848275870084763, 0.046106528490781784, 0.06202828511595726, 0.0325893796980381, 0.03412875533103943, 0.03159455209970474, 0.053456224501132965, 0.16627800464630127, 0.058593228459358215, 0.13071225583553314, 0.20816291868686676, 0.06561117619276047, 0.04416830837726593, 0.03868245705962181], [0.012810717336833477, 0.0013835412682965398, 0.03224228695034981, 0.08643268793821335, 0.03331959247589111, 0.030278367921710014, 0.07819522172212601, 0.03789946064352989, 0.1521843820810318, 0.04584735259413719, 0.022775838151574135, 0.3594759702682495, 0.37505412101745605, 0.4203481376171112, 0.0833948627114296], [0.12084313482046127, 0.009313090704381466, 0.17649081349372864, 0.125856414437294, 0.03634244203567505, 0.028733352199196815, 0.006864639464765787, 0.002353896852582693, 0.16829386353492737, 0.1124483197927475, 0.061692144721746445, 0.19240431487560272, 0.09329058974981308, 0.18641597032546997, 0.018957242369651794], [0.026597192510962486, 0.005893908906728029, 0.12369649112224579, 0.06400194019079208, 0.07115989178419113, 0.0058293454349040985, 0.008344992063939571, 0.00957680307328701, 0.04244829714298248, 0.036994293332099915, 0.07189996540546417, 0.04466360807418823, 0.12661096453666687, 0.2742233872413635, 0.042464204132556915], [0.0012156351003795862, 0.0009695529006421566, 0.021633058786392212, 0.003243132960051298, 0.017804604023694992, 0.006560572423040867, 0.00960883591324091, 0.043045539408922195, 0.008467147126793861, 0.0006170565611682832, 0.0028031598776578903, 0.004630656447261572, 1.7895566998049617e-05, 0.00023196694382932037, 0.14134538173675537], [0.3736850321292877, 0.29077818989753723, 0.43184730410575867, 0.4823248088359833, 0.7379603385925293, 0.5093098282814026, 0.5006043910980225, 0.3135696351528168, 0.5183887481689453, 0.13794882595539093, 0.04961319640278816, 0.12779268622398376, 0.1589212864637375, 0.22346213459968567, 0.1422436237335205], [0.15325459837913513, 0.1614270806312561, 0.4186149537563324, 0.16462315618991852, 0.44647181034088135, 0.7114150524139404, 0.12785741686820984, 0.04132780805230141, 0.047578196972608566, 0.12349404394626617, 0.3133608400821686, 0.35326144099235535, 0.30924320220947266, 0.31196898221969604, 0.028064150363206863], [0.06399086862802505, 0.06306004524230957, 0.1948489397764206, 0.12845031917095184, 0.26295408606529236, 0.38098499178886414, 0.0839061513543129, 0.02110268920660019, 0.07144157588481903, 0.01679118163883686, 
0.14834797382354736, 0.479995995759964, 0.24741992354393005, 0.2288939356803894, 0.04729384183883667], [0.041305530816316605, 0.00217662681825459, 0.29091107845306396, 0.12698692083358765, 0.3031243085861206, 0.1103614866733551, 0.14891935884952545, 0.018863126635551453, 0.033797744661569595, 0.008303376846015453, 0.009713392704725266, 0.31765925884246826, 0.4755025804042816, 0.4005468487739563, 0.10761724412441254], [0.4954506754875183, 0.04642331227660179, 0.603453516960144, 0.26468321681022644, 0.3210473358631134, 0.15078485012054443, 0.027168329805135727, 0.004181328695267439, 0.10826757550239563, 0.10845811665058136, 0.053085505962371826, 0.20335085690021515, 0.12072784453630447, 0.17107200622558594, 0.059424202889204025], [0.21408557891845703, 0.03960772231221199, 0.43507251143455505, 0.10961537808179855, 0.42240580916404724, 0.06637464463710785, 0.08428787440061569, 0.03856734186410904, 0.0027873425278812647, 0.012926235795021057, 0.019708000123500824, 0.017574653029441833, 0.10679914057254791, 0.20499441027641296, 0.14648839831352234], [0.002137779025360942, 0.0005492505733855069, 0.03787382319569588, 0.004300523083657026, 0.03090864233672619, 0.003432363970205188, 0.010591491125524044, 0.028211969882249832, 0.003533262060955167, 0.0003883022291120142, 0.0014010752784088254, 0.0010855919681489468, 8.133743904181756e-06, 7.628504681633785e-05, 0.13786831498146057], [0.39364972710609436, 0.15414100885391235, 0.5289453864097595, 0.2158767729997635, 0.8369554877281189, 0.5879349708557129, 0.29191306233406067, 0.1240038275718689, 0.0375535674393177, 0.006134674418717623, 0.003127586329355836, 0.02892274223268032, 0.023530103266239166, 0.026029296219348907, 0.16074688732624054], [0.2684386968612671, 0.29252222180366516, 0.6921796798706055, 0.1771971732378006, 0.6445736885070801, 0.7333542704582214, 0.14767038822174072, 0.04686985909938812, 0.030383678153157234, 0.06000908464193344, 0.1879548877477646, 0.5258318781852722, 0.3533342778682709, 0.3370157778263092, 0.05586722865700722], [0.0015460141003131866, 0.010688474401831627, 0.09971211850643158, 0.017146917060017586, 0.1899741291999817, 0.03437719866633415, 0.022833971306681633, 0.015900788828730583, 0.05731913447380066, 0.0008445536368526518, 0.0073861475102603436, 0.06343144923448563, 0.11084617674350739, 0.11975067108869553, 0.13715405762195587]], [[0.021257108077406883, 0.04756314679980278, 0.05559564009308815, 0.030912479385733604, 0.2625647187232971, 0.138688862323761, 0.027820995077490807, 0.05787678435444832, 0.3002224862575531, 0.018701573833823204, 0.027547171339392662, 0.19844435155391693, 0.1917300671339035, 0.07151354849338531, 0.16648255288600922], [0.4235764741897583, 0.10086580365896225, 0.07221788167953491, 0.13654322922229767, 0.04923773929476738, 0.06516944617033005, 0.07642015814781189, 0.147566020488739, 0.013325832784175873, 0.07923475652933121, 0.03588176146149635, 0.02368854358792305, 0.12847480177879333, 0.04384613409638405, 0.18713882565498352], [0.8895729184150696, 0.7431688904762268, 0.3041851818561554, 0.5492796897888184, 0.7013789415359497, 0.2035668045282364, 0.4541507959365845, 0.17740322649478912, 0.37418368458747864, 0.7257221937179565, 0.3302299678325653, 0.32646968960762024, 0.4535413682460785, 0.2710181474685669, 0.06444819271564484], [0.18918083608150482, 0.07354198396205902, 0.03709281235933304, 0.039312511682510376, 0.2119109183549881, 0.32255253195762634, 0.06547961384057999, 0.022612132132053375, 0.0069438498467206955, 0.04682554677128792, 0.04775600507855415, 0.10260774195194244, 
0.060122229158878326, 0.07651683688163757, 0.11037445813417435], [0.05778415873646736, 0.1888784021139145, 0.12087801843881607, 0.08340981602668762, 0.2725185453891754, 0.956253707408905, 0.6455949544906616, 0.6532288789749146, 0.3585406243801117, 0.18532338738441467, 0.18782632052898407, 0.09142936766147614, 0.8097347617149353, 0.3558001220226288, 0.037162330001592636], [0.04896414652466774, 0.25620371103286743, 0.11985385417938232, 0.0157163105905056, 0.14219185709953308, 0.22957918047904968, 0.36173656582832336, 0.07001917064189911, 0.3676673173904419, 0.12105175852775574, 0.22853095829486847, 0.07480601221323013, 0.5630075335502625, 0.8219463229179382, 0.12425509095191956], [0.04714362695813179, 0.01630709134042263, 0.04501143842935562, 0.03696214035153389, 0.036871057003736496, 0.14248797297477722, 0.08399422466754913, 0.03027486614882946, 0.0030259382911026478, 0.019033554941415787, 0.2224818617105484, 0.033125121146440506, 0.02079186774790287, 0.04913722351193428, 0.46250322461128235], [0.033912286162376404, 0.0072718155570328236, 0.013269636780023575, 0.010754123330116272, 0.003932052757591009, 0.022333307191729546, 0.05135813727974892, 0.17082874476909637, 0.004249163903295994, 0.009168761782348156, 0.00692910747602582, 0.00042953240335918963, 0.008801857940852642, 0.008872170932590961, 0.02866899035871029], [0.026226887479424477, 0.006219716742634773, 0.016528652980923653, 0.019500089809298515, 0.009756595827639103, 0.01771577261388302, 0.10877248644828796, 0.07924166321754456, 0.026382839307188988, 0.007807224057614803, 0.018975039944052696, 0.009491248056292534, 0.042680755257606506, 0.025040525943040848, 0.31068748235702515], [0.0181743074208498, 0.0022439020685851574, 0.027739310637116432, 0.07926302403211594, 0.007397042121738195, 0.01831221394240856, 0.057637136429548264, 0.025927647948265076, 0.03431807458400726, 0.03189869597554207, 0.20874466001987457, 0.006929311901330948, 0.08810199052095413, 0.09789149463176727, 0.25120988488197327], [0.0006848929915577173, 0.00015734595945104957, 0.0022563491947948933, 0.00281638465821743, 0.00390908308327198, 0.012311742641031742, 0.006667551584541798, 0.010898235253989697, 0.18826207518577576, 0.0010989188449457288, 0.003811799455434084, 0.0007082286756485701, 0.0025871950201690197, 0.0005297476891428232, 0.004719105549156666], [0.008918036706745625, 0.01932302489876747, 0.1743663251399994, 0.04276113957166672, 0.17357498407363892, 0.05217360332608223, 0.01903947815299034, 0.006896412931382656, 0.02532179281115532, 0.019349897280335426, 0.14434273540973663, 0.2454780638217926, 0.06247624009847641, 0.03444024175405502, 0.2827233076095581], [0.014348846860229969, 0.006216275505721569, 0.06011093780398369, 0.05047134682536125, 0.013856974430382252, 0.08402124047279358, 0.0029483914840966463, 0.0018935499247163534, 0.004232283215969801, 0.022591279819607735, 0.34387707710266113, 0.06330335885286331, 0.20501238107681274, 0.1859048306941986, 0.0244001317769289], [0.016000788658857346, 0.003648907644674182, 0.07618206739425659, 0.26581478118896484, 0.00828572828322649, 0.01491115428507328, 0.006984202191233635, 0.00572665361687541, 0.007784067187458277, 0.03336494415998459, 0.19996345043182373, 0.0026567107997834682, 0.14645317196846008, 0.1677580624818802, 0.0739188864827156], [0.033913157880306244, 0.5720782279968262, 0.09820353239774704, 0.06329890340566635, 0.10058190673589706, 0.8026418685913086, 0.08380495011806488, 0.37448471784591675, 0.04885341227054596, 0.01422097533941269, 0.32552391290664673, 0.701602578163147, 
0.9988673329353333, 0.9602208137512207, 0.015194611623883247], [0.01701497472822666, 0.004510161932557821, 0.04222021996974945, 0.131240576505661, 0.007172171492129564, 0.0009335885988548398, 0.0025300730485469103, 0.0012859954731538892, 0.013300590217113495, 0.05520036071538925, 0.2908037602901459, 0.0021335158962756395, 0.11976832151412964, 0.046004947274923325, 0.029495948925614357], [0.0007848403765819967, 0.002563882153481245, 0.003471110016107559, 0.009534057229757309, 0.012083875946700573, 0.006908607203513384, 0.0028729254845529795, 0.0018324146512895823, 0.009593485854566097, 0.008395246230065823, 0.009609236381947994, 0.05064208433032036, 0.00595981115475297, 0.002902570180594921, 0.2071433663368225], [0.008253121748566628, 0.01393465232104063, 0.03316362947225571, 0.045629892498254776, 0.015712177380919456, 0.15894818305969238, 0.02510240487754345, 0.013996893540024757, 0.6886083483695984, 0.014645315706729889, 0.04062162712216377, 0.02812274731695652, 0.10265076905488968, 0.10770027339458466, 0.07716524600982666], [0.0017006727866828442, 0.008613905869424343, 0.08540165424346924, 0.014788517728447914, 0.11802737414836884, 0.058780014514923096, 0.008085138164460659, 0.003584004705771804, 0.06396479159593582, 0.006658769678324461, 0.02042919024825096, 0.3806440234184265, 0.01375669613480568, 0.01512871216982603, 0.1676391214132309], [0.017164628952741623, 0.028738657012581825, 0.06823595613241196, 0.08604145050048828, 0.04855107143521309, 0.24198594689369202, 0.008688676171004772, 0.003311790293082595, 0.059665460139513016, 0.08214288204908371, 0.34741461277008057, 0.15404720604419708, 0.18822570145130157, 0.19501997530460358, 0.062469229102134705], [0.04490135982632637, 0.02318926900625229, 0.15967297554016113, 0.36984479427337646, 0.027114713564515114, 0.1867561787366867, 0.04668368771672249, 0.02171866036951542, 0.05653616786003113, 0.08818016946315765, 0.14142879843711853, 0.002535451203584671, 0.06232175603508949, 0.12099058926105499, 0.16113655269145966], [0.07898441702127457, 0.817236065864563, 0.29267793893814087, 0.16063392162322998, 0.31295838952064514, 0.9265751838684082, 0.1967003047466278, 0.5436303615570068, 0.2332589328289032, 0.04864489659667015, 0.5440958142280579, 0.8931991457939148, 0.9993566870689392, 0.9798612594604492, 0.03687797114253044], [0.051174335181713104, 0.009388554841279984, 0.15813162922859192, 0.3707107603549957, 0.02142486348748207, 0.01361497025936842, 0.01679075136780739, 0.00489152641966939, 0.08238242566585541, 0.07653495669364929, 0.14888693392276764, 0.003932347521185875, 0.1416105329990387, 0.05760091543197632, 0.13266737759113312], [0.00042274355655536056, 0.0019217034569010139, 0.0013128711143508554, 0.004135955590754747, 0.004101510625332594, 0.004091422073543072, 0.0013299065176397562, 0.0007323773461394012, 0.006002569571137428, 0.003528070170432329, 0.004258603788912296, 0.04385730251669884, 0.006557406857609749, 0.0025679266545921564, 0.1728060394525528], [0.0034927180968225002, 0.014745223335921764, 0.025302981957793236, 0.04650698974728584, 0.0658985823392868, 0.10278132557868958, 0.009682145901024342, 0.010841106064617634, 0.1757735013961792, 0.03157021477818489, 0.006062814965844154, 0.2611170709133148, 0.3153221011161804, 0.08490109443664551, 0.13624651730060577]], [[0.01888529770076275, 0.5547894835472107, 0.0062187607400119305, 0.02304725907742977, 0.007431741803884506, 0.05333258956670761, 0.13557927310466766, 0.09608769416809082, 0.011193820275366306, 0.006900292821228504, 0.007560353726148605, 0.018807610496878624, 
0.018169475719332695, 0.07717052102088928, 0.1439915895462036], [0.045791856944561005, 0.14471176266670227, 0.057932548224925995, 0.15441685914993286, 0.011981116607785225, 0.030152589082717896, 0.13976308703422546, 0.003811573376879096, 0.010053272359073162, 0.1557283103466034, 0.05080341920256615, 0.00967743806540966, 0.003085661679506302, 0.003445286303758621, 0.08783376961946487], [0.010936958715319633, 0.0031021125614643097, 0.009866965003311634, 0.09017129242420197, 0.02775183692574501, 0.0016267865430563688, 0.01958146132528782, 0.003049993421882391, 0.009465858340263367, 0.022049162536859512, 0.013875926844775677, 0.002902107546105981, 0.0008567434852011502, 0.0034160439390689135, 0.13799139857292175], [0.10994840413331985, 0.15032780170440674, 0.0035718681756407022, 0.1491042822599411, 0.020450405776500702, 0.013510379940271378, 0.47067153453826904, 0.6447877883911133, 0.18023402988910675, 0.1876010298728943, 0.011866661719977856, 0.006677938625216484, 0.0005242988117970526, 0.004238110035657883, 0.29615819454193115], [0.06992093473672867, 0.2791251242160797, 0.006900451611727476, 0.053067900240421295, 0.010168666951358318, 0.0023874202743172646, 0.05137968435883522, 0.06462283432483673, 0.11192043125629425, 0.10690896213054657, 0.009735661558806896, 0.04335656389594078, 0.0031411510426551104, 0.011707558296620846, 0.14929862320423126], [0.24040630459785461, 0.43853774666786194, 0.0175826046615839, 0.06282828748226166, 0.03055599145591259, 0.20223812758922577, 0.5439046025276184, 0.8139520287513733, 0.30283859372138977, 0.4911571145057678, 0.09772597998380661, 0.1337594985961914, 0.08667796850204468, 0.03606351464986801, 0.12256386131048203], [0.03999294713139534, 0.1864590346813202, 0.003897173795849085, 0.04184543341398239, 0.0012414547381922603, 0.025941016152501106, 0.05348599702119827, 0.5434274673461914, 0.012460692785680294, 0.31306707859039307, 0.06930337846279144, 0.0021947044879198074, 0.023592861369252205, 0.04260588437318802, 0.01969532109797001], [0.053744781762361526, 0.006899113766849041, 0.0563664473593235, 0.12695427238941193, 0.012777185067534447, 0.08455551415681839, 0.11441048979759216, 0.13062608242034912, 0.19371363520622253, 0.6254263520240784, 0.24294114112854004, 0.020724456757307053, 0.019838949665427208, 0.022365091368556023, 0.1131007969379425], [0.11661048978567123, 0.35882315039634705, 0.03118491731584072, 0.06881216168403625, 0.014698721468448639, 0.0038598491810262203, 0.1485612690448761, 0.39066970348358154, 0.07792866975069046, 0.22571811079978943, 0.040231697261333466, 0.265895277261734, 0.2000368982553482, 0.1125464141368866, 0.24931347370147705], [0.03291217237710953, 0.23853188753128052, 0.04644821211695671, 0.031600918620824814, 0.045192934572696686, 0.0019951597787439823, 0.11113008856773376, 0.36339887976646423, 0.010439107194542885, 0.20188210904598236, 0.027288423851132393, 0.21054767072200775, 0.04143378138542175, 0.0853629931807518, 0.2336580902338028], [0.07334253191947937, 0.14656193554401398, 0.004660916980355978, 0.03353964164853096, 0.00998624786734581, 0.00235390174202621, 0.04832129552960396, 0.031250230967998505, 0.0017524310387670994, 0.10710166394710541, 0.04863408952951431, 0.11276239901781082, 0.00949337612837553, 0.024303043261170387, 0.5020502805709839], [0.15921767055988312, 0.18694822490215302, 0.011401425115764141, 0.15920288860797882, 0.0017978762043640018, 0.00600996520370245, 0.1401643455028534, 0.08585444837808609, 0.05989503860473633, 0.2726706564426422, 0.041456613689661026, 0.0019109381828457117, 
0.0026012342423200607, 0.00675933575257659, 0.05683350935578346], [0.6248686909675598, 0.8166397213935852, 0.05456394702196121, 0.3034517765045166, 0.0032548136077821255, 0.03656908869743347, 0.3933179974555969, 0.635881781578064, 0.4090532660484314, 0.6309216618537903, 0.09238837659358978, 0.01225167978554964, 0.0038302247412502766, 0.05015851929783821, 0.4316881597042084], [0.6506885886192322, 0.26984432339668274, 0.19192098081111908, 0.45030322670936584, 0.018604522570967674, 0.06438936293125153, 0.16284945607185364, 0.46218666434288025, 0.2198290228843689, 0.6063108444213867, 0.13934792578220367, 0.19822801649570465, 0.009406321682035923, 0.07906869053840637, 0.39550670981407166], [0.6516265273094177, 0.3494286835193634, 0.13445304334163666, 0.40472084283828735, 0.05377691984176636, 0.043724507093429565, 0.6220480799674988, 0.09338771551847458, 0.1620686650276184, 0.8232020139694214, 0.17699383199214935, 0.03535428270697594, 4.775904380949214e-05, 0.000580178399104625, 0.13870029151439667], [0.40970566868782043, 0.3527304232120514, 0.004458754323422909, 0.09938450157642365, 0.006175781134516001, 0.014084810391068459, 0.22543573379516602, 0.4835565686225891, 0.025563040748238564, 0.39703506231307983, 0.00602720445021987, 0.0051488312892615795, 0.0008810341823846102, 0.0033910071942955256, 0.2277533859014511], [0.19487805664539337, 0.1991150975227356, 0.010765495710074902, 0.08231080323457718, 0.014791524969041348, 0.005413876846432686, 0.2905171811580658, 0.06453394889831543, 0.003980779554694891, 0.08378233760595322, 0.012941073626279831, 0.009292078204452991, 0.0008543379371985793, 0.002103410428389907, 0.1794004589319229], [0.12092277407646179, 0.17967110872268677, 0.0018819703254848719, 0.04615653306245804, 0.002711376640945673, 0.0007180452230386436, 0.10793514549732208, 0.09669310599565506, 0.0005949889309704304, 0.15432700514793396, 0.015202132984995842, 0.003636009059846401, 0.00047353014815598726, 0.0022874167189002037, 0.22825637459754944], [0.14498451352119446, 0.2535317540168762, 0.027076847851276398, 0.14632807672023773, 0.0057570356875658035, 0.011071202345192432, 0.31473973393440247, 0.2956455647945404, 0.07720959931612015, 0.1944134682416916, 0.008117430843412876, 0.0006636073812842369, 0.0008167477208189666, 0.0018315445631742477, 0.15913215279579163], [0.22215187549591064, 0.47823596000671387, 0.018273456022143364, 0.13293205201625824, 0.0049734353087842464, 0.0265207476913929, 0.27213141322135925, 0.33180302381515503, 0.1344960778951645, 0.335622638463974, 0.010143149644136429, 0.0012862810399383307, 0.00035499766818247736, 0.0037611438892781734, 0.27220219373703003], [0.3673586845397949, 0.057844266295433044, 0.06040150299668312, 0.09888742864131927, 0.023171812295913696, 0.05270017683506012, 0.11794743686914444, 0.1507657766342163, 0.008498218841850758, 0.09498187899589539, 0.003615680383518338, 0.010834122076630592, 0.00024780313833616674, 0.0017297717276960611, 0.20351538062095642], [0.6060628294944763, 0.1373525857925415, 0.13755829632282257, 0.4113396406173706, 0.07285188883543015, 0.014519162476062775, 0.5372579097747803, 0.0630655512213707, 0.14564833045005798, 0.695697009563446, 0.06662726402282715, 0.006644518580287695, 1.2849791346525308e-05, 0.00011718441965058446, 0.13694217801094055], [0.16518473625183105, 0.10184229910373688, 0.002064367523416877, 0.05309450253844261, 0.004080682527273893, 0.012669779360294342, 0.18988992273807526, 0.5354599356651306, 0.004024976398795843, 0.07357845455408096, 0.00022774768876843154, 0.00034433722612448037, 
4.428778629517183e-05, 0.00011935137445107102, 0.17481543123722076], [0.060375016182661057, 0.09738604724407196, 0.004719918128103018, 0.05357348173856735, 0.007510221563279629, 0.002087255474179983, 0.1777726411819458, 0.04658319056034088, 0.0022654803469777107, 0.02657914347946644, 0.002838509390130639, 0.0023206211626529694, 0.00029234393150545657, 0.0006460589938797057, 0.15720529854297638], [0.006292517296969891, 0.056422796100378036, 0.003871192689985037, 0.016857203096151352, 0.0060961381532251835, 0.01021772250533104, 0.02558758109807968, 0.004345982801169157, 0.003136568469926715, 0.011386821046471596, 0.0007550015579909086, 0.014218548312783241, 0.002899263286963105, 0.00665974011644721, 0.1386014223098755]], [[0.19101674854755402, 0.0880991518497467, 0.25550922751426697, 0.3376496732234955, 0.25425824522972107, 0.2177356481552124, 0.35922226309776306, 0.13405567407608032, 0.2859460711479187, 0.47983312606811523, 0.235154390335083, 0.26708394289016724, 0.2646999657154083, 0.4890832304954529, 0.0349225178360939], [0.12788966298103333, 0.14897412061691284, 0.18708589673042297, 0.1539590060710907, 0.06750026345252991, 0.06459501385688782, 0.24742794036865234, 0.0008040289394557476, 0.08417094498872757, 0.08338519930839539, 0.09756942838430405, 0.05163748189806938, 0.06044981628656387, 0.1204136312007904, 0.005185095127671957], [0.00823432207107544, 0.006774595472961664, 0.011488616466522217, 0.031759701669216156, 0.014620696194469929, 0.015192853286862373, 0.015498323366045952, 0.001623230637051165, 0.04214249551296234, 0.022796856239438057, 0.0813785269856453, 0.058821164071559906, 0.018185952678322792, 0.030505431815981865, 0.13797427713871002], [0.07304069399833679, 0.17316529154777527, 0.0638275146484375, 0.06216027960181236, 0.10879980027675629, 0.2286580353975296, 0.12489848583936691, 0.06798849999904633, 0.12340370565652847, 0.11364749073982239, 0.33209869265556335, 0.7156579494476318, 0.917570948600769, 0.8780012726783752, 0.004697424825280905], [0.04041377454996109, 0.06032548099756241, 0.013153426349163055, 0.12010756880044937, 0.032379359006881714, 0.02533758245408535, 0.03651244193315506, 0.05168384686112404, 0.05184069648385048, 0.20407944917678833, 0.10554968565702438, 0.5571502447128296, 0.039276935160160065, 0.10380254685878754, 0.1458612084388733], [0.025283029302954674, 0.14580176770687103, 0.0262577123939991, 0.01834816485643387, 0.02426275424659252, 0.5010125637054443, 0.025797395035624504, 0.08120379596948624, 0.10846563428640366, 0.05807282403111458, 0.047331083565950394, 0.01890925131738186, 0.041984543204307556, 0.021773895248770714, 0.12734822928905487], [0.11099886894226074, 0.272359162569046, 0.07267793267965317, 0.02685651369392872, 0.04662291333079338, 0.6599292755126953, 0.15850403904914856, 0.1944371908903122, 0.02196124941110611, 0.18415939807891846, 0.2094753533601761, 0.11699666827917099, 0.8625363111495972, 0.6611498594284058, 0.034588079899549484], [0.10045554488897324, 0.003808635985478759, 0.012772331945598125, 0.008206314407289028, 0.016907531768083572, 0.2308196723461151, 0.04502535238862038, 0.16794730722904205, 0.14683513343334198, 0.07804886251688004, 0.12962646782398224, 0.03242946416139603, 0.45433515310287476, 0.3931583762168884, 0.023861808702349663], [0.020261207595467567, 0.011864200234413147, 0.013516101986169815, 0.00783876795321703, 0.006360001862049103, 0.5825139880180359, 0.27136117219924927, 0.28645893931388855, 0.002775657456368208, 0.05587191879749298, 0.01021821890026331, 0.03437367081642151, 0.37942126393318176, 
0.11788230389356613, 0.047214996069669724], [0.3444993495941162, 0.4299255907535553, 0.3897337317466736, 0.11608962714672089, 0.07001375406980515, 0.1826992928981781, 0.3195875883102417, 0.1513850837945938, 0.014436168596148491, 0.25265297293663025, 0.18822813034057617, 0.20145024359226227, 0.648497998714447, 0.6856710314750671, 0.13566814363002777], [0.37375974655151367, 0.2605052888393402, 0.636468231678009, 0.14340142905712128, 0.5107957124710083, 0.683059811592102, 0.3617965579032898, 0.3775153160095215, 0.0734284520149231, 0.5245854258537292, 0.5329803228378296, 0.541839063167572, 0.8546188473701477, 0.8892531991004944, 0.08003345131874084], [0.1478864699602127, 0.26107946038246155, 0.2706110179424286, 0.022070137783885002, 0.08394861966371536, 0.7104908227920532, 0.22173403203487396, 0.18465854227542877, 0.3481738865375519, 0.02706378884613514, 0.14399166405200958, 0.24452990293502808, 0.3432118594646454, 0.3138853907585144, 0.0603480227291584], [0.03315366804599762, 0.109662726521492, 0.165960431098938, 0.03089676797389984, 0.00589095801115036, 0.7119044065475464, 0.04612211138010025, 0.03627030551433563, 0.019800378009676933, 0.02169116772711277, 0.07954178750514984, 0.014483828097581863, 0.3210127055644989, 0.25073835253715515, 0.021559905260801315], [0.1801593005657196, 0.7095129489898682, 0.41699883341789246, 0.14223065972328186, 0.03218872845172882, 0.8857168555259705, 0.325775682926178, 0.46090880036354065, 0.31827157735824585, 0.19596631824970245, 0.36584827303886414, 0.568932831287384, 0.05918605625629425, 0.12899020314216614, 0.03239220380783081], [0.15587098896503448, 0.007851594127714634, 0.38951343297958374, 0.26023998856544495, 0.2678505480289459, 0.04164084047079086, 0.060063086450099945, 0.06729273498058319, 0.019880756735801697, 0.0442759171128273, 0.10040930658578873, 0.1083277016878128, 0.0003995952138211578, 0.001039322349242866, 0.14095477759838104], [0.08899319916963577, 0.2356371134519577, 0.40766164660453796, 0.08200893551111221, 0.14033742249011993, 0.12043434381484985, 0.050508081912994385, 0.04391980916261673, 0.2084629088640213, 0.07807423919439316, 0.06514080613851547, 0.6571899652481079, 0.6522034406661987, 0.4899447560310364, 0.0237458273768425], [0.3269592225551605, 0.23715397715568542, 0.21103474497795105, 0.29856637120246887, 0.031984660774469376, 0.019636303186416626, 0.2648169696331024, 0.0041971527971327305, 0.6909844875335693, 0.5414000153541565, 0.4092715382575989, 0.02185220457613468, 0.006548420060425997, 0.013211028650403023, 0.06752441078424454], [0.40959432721138, 0.2696213126182556, 0.4055677354335785, 0.265968382358551, 0.12281941622495651, 0.10883577167987823, 0.16766701638698578, 0.053767129778862, 0.028326192870736122, 0.5353591442108154, 0.3247348368167877, 0.03339260071516037, 0.1199125200510025, 0.14055927097797394, 0.07849014550447464], [0.0703776553273201, 0.17115768790245056, 0.14820680022239685, 0.014450321905314922, 0.036940984427928925, 0.4336852431297302, 0.18269671499729156, 0.1382565200328827, 0.5314536690711975, 0.05019254609942436, 0.11642822623252869, 0.17526941001415253, 0.3684784173965454, 0.3591882586479187, 0.09016428142786026], [0.020959746092557907, 0.2473447471857071, 0.04995026811957359, 0.032434724271297455, 0.004538285546004772, 0.38885483145713806, 0.04268676042556763, 0.035024866461753845, 0.14864443242549896, 0.14174208045005798, 0.13687251508235931, 0.021197974681854248, 0.4566997289657593, 0.37854352593421936, 0.051512595266103745], [0.11558277904987335, 0.8023946285247803, 0.11340320110321045, 
0.07801315933465958, 0.012690390460193157, 0.363363116979599, 0.22989940643310547, 0.28700947761535645, 0.3164795935153961, 0.28987860679626465, 0.20186272263526917, 0.5113669037818909, 0.04614659398794174, 0.13675883412361145, 0.05756649002432823], [0.13439694046974182, 0.004173143766820431, 0.22800596058368683, 0.19857077300548553, 0.1396344006061554, 0.007145485375076532, 0.03306930512189865, 0.026599518954753876, 0.02599666267633438, 0.04890456795692444, 0.0713912844657898, 0.040079280734062195, 0.00020046728604938835, 0.0004629320465028286, 0.13767622411251068], [0.21178027987480164, 0.5613860487937927, 0.18598653376102448, 0.13814353942871094, 0.06437420845031738, 0.1469835489988327, 0.09205848723649979, 0.07043211162090302, 0.3314816355705261, 0.1618121713399887, 0.0553976409137249, 0.7871544361114502, 0.7398563027381897, 0.533365786075592, 0.06109875440597534], [0.308572918176651, 0.1810312271118164, 0.10904403775930405, 0.38784971833229065, 0.013434378430247307, 0.011286276392638683, 0.26633715629577637, 0.0027595413848757744, 0.7609409689903259, 0.7608016729354858, 0.6143397688865662, 0.036307673901319504, 0.013564765453338623, 0.02826162986457348, 0.07738469541072845], [0.1500416249036789, 0.027276279404759407, 0.32022449374198914, 0.45847558975219727, 0.23693141341209412, 0.1596660166978836, 0.2821829915046692, 0.005833256058394909, 0.32143598794937134, 0.14477354288101196, 0.029714325442910194, 0.15291856229305267, 0.007731991354376078, 0.029727784916758537, 0.12283544987440109]], [[0.2602275013923645, 0.0514441579580307, 0.4731021821498871, 0.5077798962593079, 0.22717851400375366, 0.04740440100431442, 0.27564913034439087, 0.24302659928798676, 0.05887439846992493, 0.3509802222251892, 0.6124410033226013, 0.11394976824522018, 0.0489780493080616, 0.04593530669808388, 0.01042554248124361], [0.032066281884908676, 0.1349876970052719, 0.04647025838494301, 0.02243492752313614, 0.02574889175593853, 0.03298051655292511, 0.026965852826833725, 0.3248708248138428, 0.005728535819798708, 0.08351098001003265, 0.1499667763710022, 0.16844461858272552, 0.05473209172487259, 0.05656114220619202, 0.10718395560979843], [0.005181984044611454, 0.0008690498070791364, 0.00864254217594862, 0.00306740403175354, 0.10709173232316971, 0.0007182863773778081, 0.004329775460064411, 0.010956686921417713, 0.06760676205158234, 0.010445973835885525, 0.012115269899368286, 0.06696799397468567, 0.0054829977452754974, 0.025371035560965538, 0.13854098320007324], [0.03556624799966812, 0.11754146218299866, 0.010577056556940079, 0.008073115721344948, 0.06965696066617966, 0.0032990325707942247, 0.011276635341346264, 0.09485359489917755, 0.10517128556966782, 0.0125450249761343, 0.007751243654638529, 0.0650070384144783, 0.0006160335033200681, 0.002038064645603299, 0.4774436056613922], [0.13858208060264587, 0.06875398755073547, 0.01532802265137434, 0.10744626820087433, 0.18273182213306427, 0.002165634883567691, 0.069672591984272, 0.11672408878803253, 0.005795653443783522, 0.0880894884467125, 0.05771886929869652, 0.025581423193216324, 0.03904194384813309, 0.07354751974344254, 0.14365413784980774], [0.16291819512844086, 0.050931405276060104, 0.14806726574897766, 0.2683573365211487, 0.2810481786727905, 0.002092417562380433, 0.012745368294417858, 0.01212888304144144, 0.014305775985121727, 0.17753903567790985, 0.1299620419740677, 0.10299177467823029, 0.21836693584918976, 0.06576120108366013, 0.12406044453382492], [0.12156791239976883, 0.39120492339134216, 0.1209033653140068, 0.08395244181156158, 0.29989197850227356, 
0.044024936854839325, 0.023133939132094383, 0.05934688448905945, 0.02561376802623272, 0.024757277220487595, 0.04535222053527832, 0.11912120133638382, 0.02126661129295826, 0.03811139240860939, 0.248785600066185], [0.106705442070961, 0.8169862627983093, 0.1967339813709259, 0.01375850010663271, 0.13418887555599213, 0.16134029626846313, 0.005958847235888243, 0.09247319400310516, 0.04806499928236008, 0.025876127183437347, 0.08311128616333008, 0.22926460206508636, 0.05653654783964157, 0.04726153612136841, 0.20836575329303741], [0.04722486063838005, 0.04722658172249794, 0.05176655203104019, 0.00462702801451087, 0.20528024435043335, 0.0011717488523572683, 0.004415996838361025, 0.014451048336923122, 0.028127426281571388, 0.007240481209009886, 0.004411954898387194, 0.10081291943788528, 0.07703132927417755, 0.033158108592033386, 0.21852079033851624], [0.032722555100917816, 0.027063244953751564, 0.014943713322281837, 0.0013555125333368778, 0.016471203416585922, 0.005467826500535011, 0.02999643050134182, 0.014794600196182728, 0.03837134689092636, 0.004397213459014893, 0.01024235412478447, 0.04855721816420555, 0.05723624676465988, 0.051476139575242996, 0.2643129825592041], [0.052069392055273056, 0.003948261961340904, 0.01313212513923645, 0.010319330729544163, 0.04011767730116844, 0.00066552241332829, 0.01502715889364481, 0.007099903654307127, 0.16779832541942596, 0.03226454555988312, 0.052614975720644, 0.014822165481746197, 0.002071568975225091, 0.001763610984198749, 0.05304422974586487], [0.022045070305466652, 0.036587294191122055, 0.06798984855413437, 0.040110163390636444, 0.5405737161636353, 0.015278805047273636, 0.02948732301592827, 0.034845639020204544, 0.27487096190452576, 0.008005083538591862, 0.012681123800575733, 0.10707750916481018, 0.02124345488846302, 0.00868641585111618, 0.4183328449726105], [0.07479816675186157, 0.018890362232923508, 0.2873721718788147, 0.028116360306739807, 0.7967413067817688, 0.008446138352155685, 0.020726248621940613, 0.018564706668257713, 0.33813604712486267, 0.003492887830361724, 0.010393181815743446, 0.18903475999832153, 0.00443642633035779, 0.0231452826410532, 0.42231008410453796], [0.07108656316995621, 0.0021144712809473276, 0.0671088695526123, 0.03148089721798897, 0.7113023400306702, 0.006737539079040289, 0.2500847280025482, 0.023258471861481667, 0.23158760368824005, 0.011219021864235401, 0.04227704927325249, 0.03650788217782974, 0.15078191459178925, 0.09633734077215195, 0.15066072344779968], [0.04487757384777069, 0.009540342725813389, 0.2420971691608429, 0.01275626104325056, 0.3918483257293701, 0.0218670591711998, 0.022137846797704697, 0.08132637292146683, 0.11900310963392258, 0.000993919325992465, 0.03630243241786957, 0.087126724421978, 0.0003738462692126632, 0.02454514056444168, 0.14072805643081665], [0.0048965876922011375, 0.019337626174092293, 0.002879639156162739, 0.0027576948050409555, 0.04260760545730591, 0.003218113211914897, 0.003307115286588669, 0.026640478521585464, 0.011750566773116589, 0.0005104524316266179, 9.575913281878456e-05, 0.057879798114299774, 0.004244217649102211, 0.00609983503818512, 0.28528884053230286], [0.0335795059800148, 0.030716734007000923, 0.023829646408557892, 0.03415534272789955, 0.08875380456447601, 0.0019310596399009228, 0.017619425430893898, 0.012105603702366352, 0.002468202030286193, 0.010380377061665058, 0.01267782598733902, 0.10606792569160461, 0.0014069904573261738, 0.0004161447286605835, 0.19442977011203766], [0.17404082417488098, 0.05758971348404884, 0.12847737967967987, 0.07598815858364105, 0.49957963824272156, 
0.003085564589127898, 0.05114232748746872, 0.011464038863778114, 0.06926580518484116, 0.06844814121723175, 0.06813240051269531, 0.08604259043931961, 0.004740274045616388, 0.009239559061825275, 0.19994765520095825], [0.011875619180500507, 0.026503771543502808, 0.054018229246139526, 0.01668175496160984, 0.3499281406402588, 0.01803278550505638, 0.01878167688846588, 0.01221490278840065, 0.15005004405975342, 0.0046301730908453465, 0.005843435879796743, 0.032064031809568405, 0.010490885935723782, 0.00555034726858139, 0.27147379517555237], [0.0646943747997284, 0.047236885875463486, 0.11903148144483566, 0.02203843556344509, 0.4764179587364197, 0.008550588972866535, 0.013687309809029102, 0.008890991099178791, 0.32491248846054077, 0.011557912454009056, 0.009869826957583427, 0.0921611338853836, 0.0031256151851266623, 0.016340140253305435, 0.3438139855861664], [0.17560914158821106, 0.007353567518293858, 0.056802812963724136, 0.032415200024843216, 0.4015137553215027, 0.02137722261250019, 0.35710790753364563, 0.018633568659424782, 0.05862341821193695, 0.02506905421614647, 0.018169963732361794, 0.009134531952440739, 0.07779684662818909, 0.07867905497550964, 0.1750962883234024], [0.05210466682910919, 0.006375414319336414, 0.22638031840324402, 0.012961659580469131, 0.3225522041320801, 0.012402641586959362, 0.024030247703194618, 0.056293144822120667, 0.11919546872377396, 0.0012290689628571272, 0.027758106589317322, 0.025181178003549576, 0.00022994892788119614, 0.012616506777703762, 0.1375768631696701], [0.005459210369735956, 0.03143180534243584, 0.0014205367770045996, 0.0012642937945201993, 0.01687682792544365, 0.007108580321073532, 0.004234722815454006, 0.017920657992362976, 0.003724986221641302, 0.0002761750074569136, 2.4563792976550758e-05, 0.011889445595443249, 0.0013067404506728053, 0.002636768389493227, 0.19040453433990479], [0.031027475371956825, 0.05656901001930237, 0.0113890515640378, 0.024300340563058853, 0.03550150617957115, 0.0024159413296729326, 0.02035972848534584, 0.01581081561744213, 0.002032301388680935, 0.009238713420927525, 0.01651322841644287, 0.11367840319871902, 0.003108791308477521, 0.00086622079834342, 0.16520220041275024], [0.7154905796051025, 0.15825338661670685, 0.49722805619239807, 0.38231807947158813, 0.39668020606040955, 0.051081933081150055, 0.4188354015350342, 0.3623049259185791, 0.3077245056629181, 0.4494604766368866, 0.7933229804039001, 0.20231026411056519, 0.27286192774772644, 0.2623305022716522, 0.06808917224407196]], [[0.437301367521286, 0.15179137885570526, 0.09085877984762192, 0.06997784972190857, 0.17732757329940796, 0.23180970549583435, 0.11514479666948318, 0.32073739171028137, 0.15501314401626587, 0.1294255405664444, 0.06762269139289856, 0.21488851308822632, 0.2614101469516754, 0.12734454870224, 0.049641113728284836], [0.028495818376541138, 0.1544514149427414, 0.06366834789514542, 0.016971074044704437, 0.02302762120962143, 0.054101087152957916, 0.012630121782422066, 0.018889501690864563, 0.004939573351293802, 0.01251249760389328, 0.1164683923125267, 0.009905983693897724, 0.01818472519516945, 0.01017050538212061, 0.04256897792220116], [0.007633751258254051, 0.002589557319879532, 0.02251260355114937, 0.05040144920349121, 0.032673582434654236, 0.0022981506772339344, 0.00627527991309762, 0.0006094649434089661, 0.01362280547618866, 0.006205975078046322, 0.006417383905500174, 0.0010467394022271037, 0.0010408272501081228, 0.007578521966934204, 0.13823428750038147], [0.0074798669666051865, 0.011802621185779572, 0.3115181624889374, 0.22458955645561218, 0.10706131160259247, 
0.016402821987867355, 0.046956516802310944, 0.004200803115963936, 0.01468481682240963, 0.014471452683210373, 0.27619558572769165, 0.0038709931541234255, 0.00034889893140643835, 0.0020716534927487373, 0.01783183217048645], [0.015254770405590534, 0.01172303594648838, 0.002065492793917656, 0.005149758420884609, 0.013159574940800667, 0.001197350095026195, 0.018971139565110207, 0.004385960288345814, 0.06813318282365799, 0.021520443260669708, 0.005575989838689566, 0.001505104242824018, 0.0019181625684723258, 0.005167691968381405, 0.15193934738636017], [0.026872141286730766, 0.003412047168239951, 0.03895608335733414, 0.03612855076789856, 0.02536499686539173, 0.03102046251296997, 0.004315483849495649, 0.0027427596505731344, 0.03512648865580559, 0.022632958367466927, 0.05171700567007065, 0.0026941397227346897, 0.0031264815479516983, 0.024213580414652824, 0.12838274240493774], [0.0600903183221817, 0.002928798785433173, 0.0064612883143126965, 0.05414368212223053, 0.029363246634602547, 0.006244697142392397, 0.397325724363327, 0.040878646075725555, 0.005305922590196133, 0.27715954184532166, 0.04618077725172043, 0.008418801240622997, 0.01155431941151619, 0.05281350389122963, 0.025860372930765152], [0.0013151391176506877, 0.002262294292449951, 0.0012738551013171673, 0.0034272209741175175, 0.0030726443510502577, 0.04279911145567894, 0.008567760698497295, 0.17885291576385498, 0.00929640606045723, 0.001624501310288906, 0.02533317357301712, 0.005113683640956879, 0.027247918769717216, 0.07258909195661545, 0.014188846573233604], [0.3408622145652771, 0.07445694506168365, 0.03113507851958275, 0.0754152163863182, 0.014415460638701916, 0.002693483140319586, 0.09953030943870544, 0.11086118221282959, 0.5124953985214233, 0.329039990901947, 0.5092117786407471, 0.027396254241466522, 0.055544231086969376, 0.4057520925998688, 0.09588415175676346], [0.09238530695438385, 0.007053247652947903, 0.0017291916301473975, 0.005093103274703026, 0.0007437380263581872, 0.0014228186337277293, 0.02520381473004818, 0.019087698310613632, 0.47848576307296753, 0.29748132824897766, 0.057576071470975876, 0.01139640249311924, 0.004621520172804594, 0.02937469258904457, 0.015335291624069214], [0.0720675140619278, 0.012255199253559113, 0.04221949726343155, 0.09128241240978241, 0.009349699132144451, 0.008273615501821041, 0.014371694065630436, 0.01100369542837143, 0.1737149953842163, 0.16746114194393158, 0.1696900725364685, 0.014558696188032627, 0.01365632750093937, 0.0269284937530756, 0.016150163486599922], [0.052127860486507416, 0.0038822691421955824, 0.01307338010519743, 0.12611117959022522, 0.013002983294427395, 0.054914653301239014, 0.022843925282359123, 0.0017219025176018476, 0.025739489123225212, 0.3090609014034271, 0.10414470732212067, 0.006550551857799292, 0.006861968897283077, 0.010005415417253971, 0.011784915812313557], [0.074305959045887, 0.010457544587552547, 0.07050318270921707, 0.4022633135318756, 0.04945780336856842, 0.04771194979548454, 0.4660364091396332, 0.07594453543424606, 0.018491366878151894, 0.1513216346502304, 0.09796185791492462, 0.23858080804347992, 0.011272062547504902, 0.09385059028863907, 0.06640274822711945], [0.025815313681960106, 0.0033349080476909876, 0.00924734864383936, 0.012487816624343395, 0.03726305067539215, 0.016575457528233528, 0.23753590881824493, 0.025156090036034584, 0.11919926106929779, 0.04390435293316841, 0.0095932362601161, 0.04137176275253296, 0.08216788619756699, 0.1757660061120987, 0.30195334553718567], [0.05659867450594902, 0.020075146108865738, 0.01205957867205143, 0.004331792704761028, 
0.052221644669771194, 0.0230423454195261, 0.0683140978217125, 0.09752152115106583, 0.2100839763879776, 0.0003861601871903986, 0.0032946986611932516, 0.0004593236662913114, 5.027504084864631e-05, 0.0022022551856935024, 0.14128009974956512], [0.08638240396976471, 0.0710444375872612, 0.06771891564130783, 0.17398057878017426, 0.05179189518094063, 0.34193578362464905, 0.2095513492822647, 0.09331211447715759, 0.052257001399993896, 0.006232596468180418, 0.002646914916113019, 0.06318453699350357, 0.019070196896791458, 0.02972061187028885, 0.2659039795398712], [0.26895081996917725, 0.1478959172964096, 0.3258365988731384, 0.404258131980896, 0.3733697533607483, 0.19055484235286713, 0.19857566058635712, 0.01781378500163555, 0.07512970268726349, 0.11693259328603745, 0.1175057590007782, 0.24425068497657776, 0.20241285860538483, 0.2411348670721054, 0.06638508290052414], [0.17850612103939056, 0.12822727859020233, 0.17801056802272797, 0.28459492325782776, 0.058830633759498596, 0.03884930908679962, 0.3513718843460083, 0.061017971485853195, 0.06718380004167557, 0.071348175406456, 0.23821549117565155, 0.03658399358391762, 0.03897847980260849, 0.20709341764450073, 0.13892877101898193], [0.4637373983860016, 0.04377487301826477, 0.15646661818027496, 0.36986854672431946, 0.09056738018989563, 0.23626187443733215, 0.11398540437221527, 0.0026716177817434072, 0.006399102043360472, 0.2626173198223114, 0.20860937237739563, 0.01349638868123293, 0.014208723790943623, 0.042171213775873184, 0.08208009600639343], [0.13806220889091492, 0.04062362387776375, 0.09515099227428436, 0.37904345989227295, 0.10653041303157806, 0.052835192531347275, 0.5728973150253296, 0.03487204387784004, 0.0029783223289996386, 0.07966885715723038, 0.03475099802017212, 0.13843636214733124, 0.006917618680745363, 0.06183210015296936, 0.1688811033964157], [0.02612869068980217, 0.003477374091744423, 0.007765303365886211, 0.0023155075032263994, 0.018893033266067505, 0.022398637607693672, 0.09549611806869507, 0.004012360703200102, 0.0013466936070472002, 0.0021441734861582518, 0.0004924506065435708, 0.006835760548710823, 0.011635211296379566, 0.023846328258514404, 0.22376547753810883], [0.08347997069358826, 0.014491320587694645, 0.015744350850582123, 0.0043899440206587315, 0.05038629099726677, 0.008546282537281513, 0.06458569318056107, 0.03869106248021126, 0.0615551732480526, 0.0002168803766835481, 0.0014501431724056602, 0.00013847390073351562, 1.5032101146061905e-05, 0.0007368824444711208, 0.13783538341522217], [0.072405144572258, 0.036094967275857925, 0.060353852808475494, 0.1382489949464798, 0.03810955956578255, 0.1803218573331833, 0.3716851472854614, 0.04992733895778656, 0.002898369450122118, 0.0008571037324145436, 0.00035707451752386987, 0.02692999318242073, 0.003073085332289338, 0.009645520709455013, 0.17640869319438934], [0.30767515301704407, 0.17313888669013977, 0.17682777345180511, 0.3453424274921417, 0.2732711434364319, 0.18888972699642181, 0.2821650207042694, 0.011036374606192112, 0.013345124199986458, 0.030917862430214882, 0.037141598761081696, 0.14430613815784454, 0.09504004567861557, 0.16429893672466278, 0.0962204858660698], [0.038221023976802826, 0.4632723033428192, 0.022520000115036964, 0.005303966347128153, 0.07163825631141663, 0.030774233862757683, 0.006099082063883543, 0.008936556056141853, 0.02098681591451168, 0.004558844491839409, 0.0029896388296037912, 0.018592750653624535, 0.20478543639183044, 0.08578886091709137, 0.1358346790075302]], [[0.04784957319498062, 0.004609245341271162, 0.006819143425673246, 0.0166594497859478, 
0.006965316366404295, 0.000989345251582563, 0.006434451788663864, 0.005414100829511881, 0.027048002928495407, 0.008730669505894184, 0.003844247665256262, 0.0032386775128543377, 0.00916406698524952, 0.02474893629550934, 0.20862001180648804], [0.07474544644355774, 0.14463284611701965, 0.06348620355129242, 0.11649901419878006, 0.010943777859210968, 0.05790672451257706, 0.023460205644369125, 0.09132371097803116, 0.013804412446916103, 0.11923354864120483, 0.04609918221831322, 0.0031168698333203793, 0.02482042834162712, 0.018085025250911713, 0.06715727597475052], [0.07159372419118881, 0.23599489033222198, 0.6269188523292542, 0.2670744061470032, 0.07840307801961899, 0.7659233808517456, 0.4897821247577667, 0.7919513583183289, 0.47275444865226746, 0.20698092877864838, 0.5493778586387634, 0.516223669052124, 0.5164197683334351, 0.6560667753219604, 0.10535097867250443], [0.030506769195199013, 0.030577607452869415, 0.37364113330841064, 0.17907775938510895, 0.011576596647500992, 0.0018289608415216208, 0.0013806972419843078, 0.0006740305689163506, 0.006688407156616449, 0.02554805763065815, 0.1984224021434784, 0.0020999175030738115, 0.0001219362675328739, 0.0009508132934570312, 0.00851912796497345], [0.6425503492355347, 0.21330313384532928, 0.8213226199150085, 0.6104346513748169, 0.4307103455066681, 0.005470798350870609, 0.1284545361995697, 0.017213305458426476, 0.14068865776062012, 0.2507726550102234, 0.6069697737693787, 0.17266355454921722, 0.10257546603679657, 0.4255537688732147, 0.07138645648956299], [0.4833258390426636, 0.07765677571296692, 0.6261626482009888, 0.5845412611961365, 0.457427054643631, 0.012895571999251842, 0.037013884633779526, 0.0045295762829482555, 0.030468540266156197, 0.08583686500787735, 0.4300892949104309, 0.6064226627349854, 0.07339996099472046, 0.02218388393521309, 0.11548874527215958], [0.47047996520996094, 0.06838852912187576, 0.42273014783859253, 0.6319702863693237, 0.4177776277065277, 0.0021309976000338793, 0.00800495408475399, 0.0009326375438831747, 0.00536699453368783, 0.07440605759620667, 0.2710660994052887, 0.5013447999954224, 0.021646764129400253, 0.07749785482883453, 0.039263706654310226], [0.5323148965835571, 0.13256511092185974, 0.352451890707016, 0.6556484699249268, 0.4897412359714508, 0.22345507144927979, 0.17913641035556793, 0.12689323723316193, 0.025374194607138634, 0.169284388422966, 0.17072416841983795, 0.08815333992242813, 0.10821512341499329, 0.18704712390899658, 0.05398408696055412], [0.14081209897994995, 0.02785991132259369, 0.37397870421409607, 0.3742114305496216, 0.4757237732410431, 0.0011322007048875093, 0.0019287536852061749, 0.00011125820310553536, 0.00032575102522969246, 0.0042410544119775295, 0.007025705184787512, 0.007957610301673412, 0.0022035131696611643, 0.0008391661685891449, 0.0013405061326920986], [0.17781563103199005, 0.10205524414777756, 0.04494810104370117, 0.011432765983045101, 0.0031803075689822435, 0.6873405575752258, 0.1935015618801117, 0.2538544535636902, 0.0006125010550022125, 0.0012519293231889606, 0.0009674279135651886, 0.0007319907890632749, 0.006560447160154581, 0.0005926102166995406, 0.045413821935653687], [0.24551935493946075, 0.010881111957132816, 0.16116493940353394, 0.28567203879356384, 0.017490731552243233, 0.03198051080107689, 0.25225502252578735, 0.04009091481566429, 0.1379493623971939, 0.030329206958413124, 0.00725751556456089, 0.0005535308737307787, 0.0001769027003319934, 0.0002177381538785994, 0.11288075149059296], [0.2663186192512512, 0.0841110497713089, 0.39283427596092224, 0.3631373345851898, 0.12446267902851105, 
0.0023146900348365307, 0.05166012421250343, 0.025394057855010033, 0.09723125398159027, 0.2633029520511627, 0.09458169341087341, 0.0066002910025417805, 0.0024958536960184574, 0.0033851033076643944, 0.0521465502679348], [0.032533496618270874, 0.005542360246181488, 0.14801643788814545, 0.028237437829375267, 0.09192534536123276, 0.002004631096497178, 0.0014868990983814, 0.0018816014053300023, 0.026168106123805046, 0.03666744753718376, 0.2621643543243408, 0.27366670966148376, 0.011460919864475727, 0.012693443335592747, 0.006134080700576305], [0.028670914471149445, 0.004855436272919178, 0.1069486141204834, 0.02764085866510868, 0.11977140605449677, 0.002686614403501153, 0.007388734724372625, 0.00704799173399806, 0.05677136406302452, 0.0688808336853981, 0.16234178841114044, 0.10548661649227142, 0.1935848444700241, 0.06036479026079178, 0.0025575226172804832], [0.04708265885710716, 0.030478408560156822, 0.0932990089058876, 0.24881142377853394, 0.1139858141541481, 0.03301549330353737, 0.12353643029928207, 0.18121947348117828, 0.3742617964744568, 0.11242274194955826, 0.2673158049583435, 0.05749531090259552, 0.00021243211813271046, 0.005648713558912277, 0.14063234627246857], [0.0034641579259186983, 0.015587975271046162, 0.04098831117153168, 0.025328122079372406, 0.012870541773736477, 0.002695741830393672, 0.0012444279855117202, 0.005834754556417465, 0.005115050356835127, 0.10742342472076416, 0.29450723528862, 0.004624508786946535, 0.028462348505854607, 0.09151851385831833, 0.02349407598376274], [0.00187075010035187, 0.017386021092534065, 0.0033179710153490305, 0.00216178921982646, 0.0006196821923367679, 0.0036519868299365044, 0.020315727218985558, 0.0735914558172226, 0.011879049241542816, 0.05418893322348595, 0.04255518689751625, 0.006776698864996433, 0.007105604745447636, 0.005562894977629185, 0.20312508940696716], [0.018124327063560486, 0.011053304187953472, 0.041496749967336655, 0.08067373931407928, 0.008039752952754498, 0.27361106872558594, 0.12004023045301437, 0.14489491283893585, 0.05115145817399025, 0.09850911796092987, 0.102595254778862, 0.03553636744618416, 0.03690872713923454, 0.062350839376449585, 0.18180564045906067], [0.12148405611515045, 0.0812632218003273, 0.2165963500738144, 0.1931358426809311, 0.08697410672903061, 0.006551810074597597, 0.06685828417539597, 0.03445844352245331, 0.0957593098282814, 0.40685340762138367, 0.14669549465179443, 0.05295614153146744, 0.013317806646227837, 0.016840115189552307, 0.07654187083244324], [0.00987213384360075, 0.006524993572384119, 0.026135168969631195, 0.011839349754154682, 0.033334147185087204, 0.0041054473258554935, 0.0015945311170071363, 0.0032734640408307314, 0.04142798110842705, 0.08157128095626831, 0.26105597615242004, 0.34578391909599304, 0.018666768446564674, 0.02866668626666069, 0.00917118415236473], [0.024172252044081688, 0.01827125810086727, 0.0764245018362999, 0.024589890614151955, 0.045055974274873734, 0.08366040140390396, 0.049236495047807693, 0.16330885887145996, 0.05235174670815468, 0.18916647136211395, 0.2596777379512787, 0.12284716963768005, 0.3776375353336334, 0.3416304290294647, 0.00993264652788639], [0.03498423844575882, 0.015507807955145836, 0.05400218814611435, 0.2035217136144638, 0.06879755109548569, 0.01839861460030079, 0.1265679895877838, 0.19229170680046082, 0.28682830929756165, 0.19846217334270477, 0.19391797482967377, 0.03128731623291969, 0.00016305393364746124, 0.003939830232411623, 0.1374405473470688], [0.013754391111433506, 0.07632532715797424, 0.05588589236140251, 0.060033075511455536, 0.015113652683794498, 
0.024528013542294502, 0.0056539555080235004, 0.025407979264855385, 0.0030256062746047974, 0.3076882064342499, 0.2846599221229553, 0.01613902486860752, 0.07589408755302429, 0.25697121024131775, 0.08533195406198502], [0.0015476603293791413, 0.017548631876707077, 0.0017550711054354906, 0.0017123925499618053, 0.0004861274501308799, 0.0013240363914519548, 0.007671059109270573, 0.03281305357813835, 0.0013763409806415439, 0.060824256390333176, 0.04298469424247742, 0.011416267603635788, 0.012759965844452381, 0.012971585616469383, 0.16966485977172852], [0.005211545154452324, 0.0055291797034442425, 0.0040288688614964485, 0.011110500432550907, 0.002710954286158085, 0.0645279660820961, 0.01716793328523636, 0.025083528831601143, 0.010282285511493683, 0.009002536535263062, 0.0011292833369225264, 0.0045064822770655155, 0.007478337734937668, 0.004868943244218826, 0.13875910639762878]], [[0.01263146661221981, 0.08983241021633148, 0.002674827352166176, 0.0008326905663125217, 0.0032944290433079004, 0.06790440529584885, 0.02327594719827175, 0.08626140654087067, 0.0010102109517902136, 0.0009567838278599083, 0.001915089669637382, 0.019144434481859207, 0.060631223022937775, 0.04236740246415138, 0.2042645514011383], [0.12322216480970383, 0.14532910287380219, 0.08289580047130585, 0.07800436019897461, 0.016899574548006058, 0.20651613175868988, 0.15389330685138702, 0.08048079907894135, 0.023754820227622986, 0.08939354121685028, 0.05408218502998352, 0.0083498889580369, 0.16772767901420593, 0.03971855714917183, 0.029394451528787613], [0.002537816995754838, 0.0036866364534944296, 0.0026212686207145452, 0.0010326605988666415, 0.0028582154773175716, 0.0016078348271548748, 0.0024177017621695995, 0.004757970105856657, 0.007405414246022701, 0.0004943490494042635, 0.0008183143800124526, 0.0020540759433060884, 0.0008841927628964186, 0.0009274804615415633, 0.13894422352313995], [0.18076959252357483, 0.11159703880548477, 0.07333940267562866, 0.12368053197860718, 0.1442640721797943, 0.3224244713783264, 0.2286587655544281, 0.10576390475034714, 0.0873323604464531, 0.0707816481590271, 0.07077325880527496, 0.024980774149298668, 0.015894055366516113, 0.01236753724515438, 0.034113459289073944], [0.008514223620295525, 0.006442691199481487, 0.003549255197867751, 0.00919315591454506, 0.0011393448803573847, 0.0005870977183803916, 0.02400296926498413, 0.03577389195561409, 0.006469632964581251, 0.004828252829611301, 0.0027150637470185757, 9.597353346180171e-05, 0.00011822552187368274, 0.000396552961319685, 0.1521017998456955], [0.0016907083336263895, 9.336868970422074e-05, 0.0023900996893644333, 0.0018071996746584773, 0.001690928009338677, 0.0010278637055307627, 0.008010926656425, 0.0018918663263320923, 0.0009378245449624956, 0.0005185406771488488, 0.00012474792310968041, 0.00014544214354828, 2.7525844416231848e-05, 2.095987474604044e-05, 0.12926018238067627], [0.08279342949390411, 0.00717265997081995, 0.01113244891166687, 0.030300047248601913, 0.03227340802550316, 0.02679654024541378, 0.2711687386035919, 0.12656770646572113, 0.0010184150887653232, 0.0069296094588935375, 0.006689318455755711, 0.00307065830565989, 0.004024384077638388, 0.006041096989065409, 0.12722525000572205], [0.09468965977430344, 0.010531323030591011, 0.1253902167081833, 0.09483902901411057, 0.060478318482637405, 0.1959676593542099, 0.5850688219070435, 0.11734473705291748, 0.08924026787281036, 0.031869061291217804, 0.04437774419784546, 0.004531644284725189, 0.19630968570709229, 0.04580901935696602, 0.04253998026251793], [0.03443194553256035, 0.006786322686821222, 
0.08545193076133728, 0.2555176913738251, 0.16119416058063507, 0.3760574460029602, 0.3180745542049408, 0.0858285129070282, 0.0052651395089924335, 0.035345133394002914, 0.0046972003765404224, 0.00805696938186884, 0.0738091915845871, 0.004572577308863401, 0.028640231117606163], [0.26599034667015076, 0.06405031681060791, 0.39913085103034973, 0.7390084862709045, 0.8533709049224854, 0.0830850899219513, 0.22198519110679626, 0.15359464287757874, 0.0286090150475502, 0.1338224709033966, 0.06985709816217422, 0.03841168060898781, 0.1308237761259079, 0.01580808497965336, 0.010780439712107182], [0.16064751148223877, 0.5348425507545471, 0.09399141371250153, 0.3709404170513153, 0.3757614493370056, 0.2272261530160904, 0.2699662148952484, 0.46868544816970825, 0.09081633388996124, 0.07856583595275879, 0.054298948496580124, 0.10659310221672058, 0.05178465321660042, 0.012835889123380184, 0.19243957102298737], [0.33067551255226135, 0.40668511390686035, 0.03748138248920441, 0.16017457842826843, 0.02931954525411129, 0.1285390406847, 0.43687552213668823, 0.6227295398712158, 0.016583241522312164, 0.054699335247278214, 0.43602558970451355, 0.028376825153827667, 0.1860552728176117, 0.202489972114563, 0.03443598374724388], [0.025147954002022743, 0.023277895525097847, 0.036982107907533646, 0.030706623569130898, 0.00253032217733562, 0.08060919493436813, 0.062497250735759735, 0.22720953822135925, 0.015824737027287483, 0.020865583792328835, 0.051981136202812195, 0.016274577006697655, 0.3496847152709961, 0.19709302484989166, 0.00854758732020855], [0.0009813109645619988, 0.0007951235747896135, 0.007896890863776207, 0.006039812229573727, 0.001424357295036316, 0.003153599100187421, 0.0010362794855609536, 0.006138501223176718, 0.00410880520939827, 0.003359388094395399, 0.008728301152586937, 0.0021525975316762924, 0.2318088710308075, 0.017491629347205162, 0.0005464124260470271], [0.008814784698188305, 0.009578033350408077, 0.008741176687180996, 0.002597709419205785, 0.0019302073633298278, 0.02750723622739315, 0.010486552491784096, 0.061721935868263245, 0.05738110467791557, 0.0038812088314443827, 0.08735688030719757, 0.00500333309173584, 3.085857315454632e-05, 0.005531619768589735, 0.14116442203521729], [0.015857994556427002, 0.010374038480222225, 0.002225207630544901, 0.002974742790684104, 0.0010843537747859955, 0.007387869525700808, 0.006818806286901236, 0.0318806953728199, 0.1651621013879776, 0.21757511794567108, 0.2911650240421295, 0.08204617351293564, 0.016449127346277237, 0.10985822230577469, 0.0020742996130138636], [0.01972219906747341, 0.20374125242233276, 0.0031293979845941067, 0.004390338435769081, 0.031924858689308167, 0.06048818305134773, 0.0774247944355011, 0.7845978140830994, 0.15838612616062164, 0.06142642721533775, 0.0820784792304039, 0.20785683393478394, 0.46646884083747864, 0.42270010709762573, 0.053927596658468246], [0.026567673310637474, 0.2768426239490509, 0.016553064808249474, 0.07253812253475189, 0.029352964833378792, 0.034967049956321716, 0.09283487498760223, 0.5970632433891296, 0.02342795394361019, 0.04057195410132408, 0.06215028092265129, 0.2966896891593933, 0.4489157795906067, 0.24187524616718292, 0.048112284392118454], [0.14453455805778503, 0.4129781723022461, 0.021322425454854965, 0.11776001751422882, 0.008680691011250019, 0.12525556981563568, 0.1459336131811142, 0.4943058490753174, 0.041365865617990494, 0.06633096933364868, 0.48416346311569214, 0.027247071266174316, 0.10342812538146973, 0.15874288976192474, 0.04535134881734848], [0.03164434805512428, 0.10487183183431625, 0.019769076257944107, 
0.0709872916340828, 0.0046073514968156815, 0.12636253237724304, 0.06114564463496208, 0.5786424875259399, 0.17960773408412933, 0.15923625230789185, 0.14680741727352142, 0.04373620077967644, 0.20528176426887512, 0.14476445317268372, 0.03252548724412918], [0.03216148540377617, 0.04786192253232002, 0.0904572606086731, 0.284318745136261, 0.04915444552898407, 0.20336958765983582, 0.019341057166457176, 0.31598398089408875, 0.503376841545105, 0.2976534068584442, 0.3550446927547455, 0.318871408700943, 0.31741514801979065, 0.09137054532766342, 0.022498751059174538], [0.00784912146627903, 0.004314524121582508, 0.007757026236504316, 0.004281783476471901, 0.001910648075863719, 0.00898022297769785, 0.007197065278887749, 0.05121663585305214, 0.12398385256528854, 0.006457128562033176, 0.09335841238498688, 0.0023844544775784016, 1.3785818737233058e-05, 0.0021891386713832617, 0.13778245449066162], [0.0865921899676323, 0.029389984905719757, 0.007211814168840647, 0.022628001868724823, 0.003064699238166213, 0.026838112622499466, 0.02777392417192459, 0.17195671796798706, 0.5349084734916687, 0.37311822175979614, 0.5073185563087463, 0.12468769401311874, 0.014684900641441345, 0.11363118886947632, 0.01852630451321602], [0.021940317004919052, 0.17988227307796478, 0.0027716639451682568, 0.0058884406462311745, 0.02112143486738205, 0.056551095098257065, 0.09669405966997147, 0.8433947563171387, 0.1836535632610321, 0.048101164400577545, 0.0939687192440033, 0.12228170782327652, 0.5153423547744751, 0.4533718526363373, 0.10564926266670227], [0.07970402389764786, 0.263812392950058, 0.027112353593111038, 0.06228066235780716, 0.03007029928267002, 0.5465735197067261, 0.2176109254360199, 0.5667538046836853, 0.10334119945764542, 0.3484029769897461, 0.1586397886276245, 0.28290486335754395, 0.07807470858097076, 0.405972421169281, 0.12247955799102783]]], [[[0.02659090794622898, 0.049626123160123825, 0.04500019550323486, 0.012677792459726334, 0.33557751774787903, 0.02776678465306759, 0.02675992250442505, 0.09967876970767975, 0.04216820374131203, 0.009756066836416721, 0.0133897690102458, 0.12886802852153778, 0.03152704983949661, 0.046163998544216156, 0.21004843711853027], [0.05978302285075188, 0.18161648511886597, 0.038620203733444214, 0.022025080397725105, 0.09790226072072983, 0.04398013651371002, 0.00788698997348547, 0.04135579988360405, 0.0068543110974133015, 0.03809167072176933, 0.03150040656328201, 0.0462106354534626, 0.024762138724327087, 0.011792140081524849, 0.015839271247386932], [0.005166883580386639, 0.0005590450600720942, 0.007114546839147806, 0.0015656572068110108, 0.02179996483027935, 0.0010864944197237492, 0.0051814797334373, 0.0011148365447297692, 0.00816393457353115, 0.0019027285743504763, 0.005033016670495272, 0.010743028484284878, 0.0006906923954375088, 0.0011143455049023032, 0.16189540922641754], [0.17136499285697937, 0.002046054694801569, 0.4725193679332733, 0.24347566068172455, 0.1026763990521431, 0.00369152519851923, 0.013768541626632214, 0.003912978805601597, 0.022358577698469162, 0.06323882192373276, 0.28539538383483887, 0.009778834879398346, 0.0043070269748568535, 0.020384330302476883, 0.006856778170913458], [0.18433871865272522, 0.013500750064849854, 0.42166435718536377, 0.1935500204563141, 0.3502363860607147, 0.0009389789775013924, 0.0472395233809948, 0.015336934477090836, 0.07204270362854004, 0.07276465743780136, 0.4023721218109131, 0.016390468925237656, 0.00493515282869339, 0.01088448241353035, 0.18081046640872955], [0.01929071731865406, 3.154709338559769e-05, 0.04895680397748947, 0.04499320685863495, 
0.03726757690310478, 0.0012487026397138834, 0.06078735366463661, 0.0025376947596669197, 0.023622047156095505, 0.008605116978287697, 0.05601886287331581, 0.011475598439574242, 0.0013240767875686288, 0.009706309996545315, 0.13962702453136444], [0.032548993825912476, 0.0047013829462230206, 0.08043498545885086, 0.08197268843650818, 0.43236956000328064, 0.013080407865345478, 0.006017346400767565, 0.05529334023594856, 0.01970849372446537, 0.004050384275615215, 0.0073967562057077885, 0.005829385481774807, 0.0008975209202617407, 0.0025361862499266863, 0.011671289801597595], [0.046304989606142044, 0.026358718052506447, 0.20277923345565796, 0.3021180331707001, 0.6281617879867554, 0.19840610027313232, 0.12000668793916702, 0.21165543794631958, 0.0507807619869709, 0.10083203762769699, 0.17539183795452118, 0.08392243832349777, 0.036049142479896545, 0.06088141351938248, 0.024198466911911964], [0.016816509887576103, 0.003118144813925028, 0.035858120769262314, 0.02315649762749672, 0.2957051992416382, 0.0033856350928545, 0.008419573307037354, 0.013085800223052502, 0.0065522813238203526, 0.004261805210262537, 0.0022621729876846075, 0.0015856586396694183, 0.00012999074533581734, 0.00036330719012767076, 0.004947974346578121], [0.13966688513755798, 0.051315873861312866, 0.16794879734516144, 0.17204447090625763, 0.02530861273407936, 0.1971883773803711, 0.6035643219947815, 0.35590535402297974, 0.01904589682817459, 0.14328262209892273, 0.05827813595533371, 0.12283631414175034, 0.08582676202058792, 0.021607764065265656, 0.09174748510122299], [0.07622234523296356, 0.021088531240820885, 0.13214311003684998, 0.1876712292432785, 0.09946685284376144, 0.0739995539188385, 0.16667790710926056, 0.06527374684810638, 0.2691768705844879, 0.1298666000366211, 0.20347969233989716, 0.28972044587135315, 0.16063560545444489, 0.23408198356628418, 0.02879655919969082], [0.04186922311782837, 0.028065834194421768, 0.2365874946117401, 0.22718128561973572, 0.717268168926239, 0.0283160749822855, 0.047574929893016815, 0.22635598480701447, 0.046485841274261475, 0.11764083057641983, 0.11684223264455795, 0.600357711315155, 0.07936308532953262, 0.1614740490913391, 0.02326863817870617], [0.002160860225558281, 0.00041385856457054615, 0.0032894921023398638, 0.004175879992544651, 0.09230346977710724, 0.00037096597952768207, 0.00036027038004249334, 0.000777967507019639, 0.0010948613053187728, 0.006351495627313852, 0.00803811103105545, 0.2546491026878357, 0.005140772555023432, 0.0052158161997795105, 0.0018242541700601578], [0.01453752163797617, 0.0016249779146164656, 0.07837095856666565, 0.046283330768346786, 0.5220571756362915, 0.00571427633985877, 0.011274048127233982, 0.0005770810530520976, 0.06172677502036095, 0.028573052957654, 0.1375623345375061, 0.2926015257835388, 0.17741695046424866, 0.13592077791690826, 0.025488857179880142], [0.0018199050100520253, 1.759366932674311e-05, 0.005607981700450182, 0.029583722352981567, 0.009902501478791237, 0.00240499060600996, 0.016255119815468788, 0.008434450253844261, 0.0070381201803684235, 0.006882159970700741, 0.008103356696665287, 0.009371891617774963, 3.180988642270677e-05, 0.0005422193789854646, 0.14323127269744873], [0.04913536086678505, 0.005111359525471926, 0.3943053185939789, 0.16504207253456116, 0.1333204060792923, 0.007373967207968235, 0.00649205781519413, 0.005781218875199556, 0.0696163922548294, 0.17078818380832672, 0.43588367104530334, 0.2441176176071167, 0.044073574244976044, 0.13962700963020325, 0.0038013174198567867], [0.02972331829369068, 0.032405998557806015, 0.13676248490810394, 
0.2985995411872864, 0.6838041543960571, 0.17950911819934845, 0.02566559985280037, 0.299430251121521, 0.06906868517398834, 0.09219349920749664, 0.14271143078804016, 0.15384355187416077, 0.31184810400009155, 0.37699857354164124, 0.11869719624519348], [0.035901740193367004, 0.049252428114414215, 0.13651704788208008, 0.3431343734264374, 0.4621880352497101, 0.07741573452949524, 0.035817742347717285, 0.1879495084285736, 0.09167803823947906, 0.15167558193206787, 0.20264029502868652, 0.22310277819633484, 0.27972275018692017, 0.27912822365760803, 0.1079779863357544], [0.03869367763400078, 0.07609386742115021, 0.09811960905790329, 0.19582945108413696, 0.7770717144012451, 0.05828123167157173, 0.03398818522691727, 0.4334997236728668, 0.06648975610733032, 0.07675088942050934, 0.06197739765048027, 0.7435874938964844, 0.14106591045856476, 0.2445826381444931, 0.04634908586740494], [0.0033209763932973146, 0.0013802923494949937, 0.007923663593828678, 0.01537866611033678, 0.27329060435295105, 0.0012711664894595742, 0.000925537955481559, 0.0031033798586577177, 0.00518713379278779, 0.008014743216335773, 0.01865261048078537, 0.32840412855148315, 0.015081376768648624, 0.0187647957354784, 0.007287481799721718], [0.012120293453335762, 0.00801909901201725, 0.05887366458773613, 0.08173726499080658, 0.42918333411216736, 0.0074272770434618, 0.018144551664590836, 0.002390465000644326, 0.19959968328475952, 0.01595914363861084, 0.19477497041225433, 0.24081164598464966, 0.32190656661987305, 0.2620943486690521, 0.06223426014184952], [0.001324097509495914, 1.9873512428603135e-05, 0.0026336663868278265, 0.025088831782341003, 0.006480309646576643, 0.0015246026450768113, 0.009156930260360241, 0.006450172513723373, 0.006447002291679382, 0.003797400277107954, 0.0037222199607640505, 0.006030225194990635, 1.9453302229521796e-05, 0.0003723614208865911, 0.13770580291748047], [0.23361828923225403, 0.06709202378988266, 0.7719610333442688, 0.734594464302063, 0.7922726273536682, 0.049216482788324356, 0.04663456231355667, 0.060855433344841, 0.40224209427833557, 0.20935069024562836, 0.5060975551605225, 0.5454070568084717, 0.2919921875, 0.420108824968338, 0.08753460645675659], [0.01675574854016304, 0.0394110269844532, 0.07827049493789673, 0.20941881835460663, 0.5690934658050537, 0.13831959664821625, 0.015872817486524582, 0.2790753245353699, 0.07380014657974243, 0.05484941974282265, 0.11329877376556396, 0.046586740761995316, 0.27540746331214905, 0.3769146502017975, 0.12728242576122284], [0.13399043679237366, 0.38312259316444397, 0.21414920687675476, 0.1335369348526001, 0.883351743221283, 0.17629003524780273, 0.21391625702381134, 0.35840436816215515, 0.7405950427055359, 0.11166028678417206, 0.2222289741039276, 0.2562817633152008, 0.20710349082946777, 0.2988908290863037, 0.10401280969381332]], [[0.169734388589859, 0.018695855513215065, 0.1739528477191925, 0.1591939628124237, 0.2628772258758545, 0.10412096232175827, 0.10786166787147522, 0.024563027545809746, 0.26776236295700073, 0.15710414946079254, 0.04751116409897804, 0.10171505063772202, 0.02745870314538479, 0.022933470085263252, 0.11237789690494537], [0.04881957918405533, 0.17062845826148987, 0.0187830850481987, 0.030382977798581123, 0.08311481773853302, 0.03788991644978523, 0.005156277678906918, 0.026916639879345894, 0.06639944016933441, 0.03180782124400139, 0.02173716016113758, 0.05343012511730194, 0.01850084401667118, 0.0033381145913153887, 0.04681381955742836], [0.11046597361564636, 0.13029024004936218, 0.30802851915359497, 0.31618139147758484, 0.21513698995113373, 0.08858107775449753, 
0.07770872116088867, 0.030179373919963837, 0.2956576347351074, 0.19506438076496124, 0.06668522953987122, 0.15814362466335297, 0.07954283803701401, 0.09008871018886566, 0.11347464472055435], [0.14630576968193054, 0.10272074490785599, 0.06626180559396744, 0.39613619446754456, 0.5213132500648499, 0.09462913125753403, 0.19745559990406036, 0.14176879823207855, 0.45916420221328735, 0.2814978361129761, 0.19076579809188843, 0.7478294968605042, 0.15201923251152039, 0.4428024888038635, 0.11204658448696136], [0.17077980935573578, 0.372023344039917, 0.03066021017730236, 0.20403380692005157, 0.25160810351371765, 0.047236956655979156, 0.19034826755523682, 0.09997845441102982, 0.22249065339565277, 0.14956896007061005, 0.12211201339960098, 0.43811750411987305, 0.32559871673583984, 0.4463178217411041, 0.1688702404499054], [0.001587467617355287, 0.0028523027431219816, 0.001275891438126564, 0.007771230302751064, 0.06833823025226593, 0.016362184658646584, 0.01554875634610653, 0.0395360104739666, 0.020186755806207657, 0.02848842740058899, 0.006796931382268667, 0.08043718338012695, 0.1258731484413147, 0.048048797994852066, 0.14538481831550598], [0.19441094994544983, 0.026329312473535538, 0.03907056525349617, 0.5187185406684875, 0.06508557498455048, 0.04464683309197426, 0.23734036087989807, 0.10510969161987305, 0.23671847581863403, 0.2550508677959442, 0.2969563603401184, 0.31371036171913147, 0.023362383246421814, 0.04756302013993263, 0.09379850327968597], [0.009693926200270653, 0.06855454295873642, 0.04046608507633209, 0.021632034331560135, 0.07003092765808105, 0.1099655032157898, 0.02166297659277916, 0.14673617482185364, 0.08559776097536087, 0.021444879472255707, 0.06376301497220993, 0.07838241755962372, 0.2981177270412445, 0.05645254626870155, 0.11510419100522995], [0.1475960612297058, 0.11415769904851913, 0.09677327424287796, 0.22716772556304932, 0.05128113925457001, 0.0685737207531929, 0.17258046567440033, 0.05221087113022804, 0.2985250651836395, 0.36185649037361145, 0.6199293732643127, 0.5016448497772217, 0.08136574923992157, 0.06544326990842819, 0.09482244402170181], [0.16866622865200043, 0.03890697658061981, 0.038960762321949005, 0.045146964490413666, 0.003443084890022874, 0.025941072031855583, 0.02535194903612137, 0.01214737631380558, 0.39030662178993225, 0.11890958994626999, 0.2736153304576874, 0.3244759440422058, 0.00968784186989069, 0.014615286141633987, 0.03826850652694702], [0.08395736664533615, 0.10560688376426697, 0.29490047693252563, 0.15838190913200378, 0.20854075253009796, 0.047574300318956375, 0.025914132595062256, 0.0076736449263989925, 0.23083198070526123, 0.11239635199308395, 0.08150741457939148, 0.3915822207927704, 0.126749187707901, 0.08327525854110718, 0.07453686743974686], [0.08537011593580246, 0.01334940642118454, 0.026223814114928246, 0.09485415369272232, 0.04081009700894356, 0.021519087255001068, 0.04835912212729454, 0.008561250753700733, 0.1425430029630661, 0.15310505032539368, 0.12245412170886993, 0.15674236416816711, 0.03265313804149628, 0.020860055461525917, 0.1338454782962799], [0.009048069827258587, 0.008220783434808254, 0.0010462020291015506, 0.0073586152866482735, 0.01628630980849266, 0.0030796914361417294, 0.0014804736711084843, 0.0016866090008988976, 0.021953675895929337, 0.024090107530355453, 0.02321471832692623, 0.2417944222688675, 0.00791110284626484, 0.012413977645337582, 0.02231968566775322], [0.02412300556898117, 0.02128133550286293, 0.018482450395822525, 0.016898121684789658, 0.07439899444580078, 0.03563898429274559, 0.04473365843296051, 0.0026737016160041094, 
0.06965204328298569, 0.10727399587631226, 0.046027760952711105, 0.33166152238845825, 0.12371443957090378, 0.07036767154932022, 0.15801618993282318], [0.007644897326827049, 0.000292555516352877, 0.08444877713918686, 0.17402730882167816, 0.16615508496761322, 0.013423392549157143, 0.054235123097896576, 0.007257240824401379, 0.08712441474199295, 0.012547464109957218, 0.0328214131295681, 0.2736492455005646, 0.0037261026445776224, 0.09982366114854813, 0.13941559195518494], [0.07466596364974976, 0.11066461354494095, 0.02582395263016224, 0.1052846685051918, 0.0988694354891777, 0.13372771441936493, 0.10285167396068573, 0.04043884575366974, 0.12614820897579193, 0.00874736811965704, 0.006169801577925682, 0.3642371892929077, 0.13258321583271027, 0.14621633291244507, 0.16873647272586823], [0.23522600531578064, 0.0398484542965889, 0.3737937808036804, 0.288825660943985, 0.10485613346099854, 0.11366727948188782, 0.29695606231689453, 0.06251946091651917, 0.35146233439445496, 0.04921486973762512, 0.25325968861579895, 0.33112239837646484, 0.06967249512672424, 0.050063006579875946, 0.0896972194314003], [0.1151093989610672, 0.085483118891716, 0.1238018348813057, 0.10984596610069275, 0.07372570037841797, 0.07080911099910736, 0.04283013194799423, 0.011434272862970829, 0.6184931993484497, 0.031299810856580734, 0.1232943907380104, 0.4399086534976959, 0.16973690688610077, 0.18915507197380066, 0.06319096684455872], [0.23179487884044647, 0.03441762179136276, 0.058240070939064026, 0.17834095656871796, 0.049968671053647995, 0.038375332951545715, 0.05405527353286743, 0.00672679441049695, 0.09475977718830109, 0.0764862671494484, 0.1440851390361786, 0.11337311565876007, 0.06998162716627121, 0.031302694231271744, 0.13650138676166534], [0.037197839468717575, 0.022889001294970512, 0.00443503400310874, 0.02830665186047554, 0.056754183024168015, 0.011282439343631268, 0.008815057575702667, 0.005641489755362272, 0.03366301208734512, 0.01200089417397976, 0.022881681099534035, 0.24835483729839325, 0.020306341350078583, 0.028865927830338478, 0.09140723943710327], [0.019821494817733765, 0.0461096465587616, 0.009799499064683914, 0.008886821568012238, 0.03164605051279068, 0.03408728539943695, 0.06531291455030441, 0.004583337344229221, 0.015776870772242546, 0.0067581660114228725, 0.005247185938060284, 0.0803409293293953, 0.12878651916980743, 0.033680036664009094, 0.15540239214897156], [0.006374652031809092, 0.0003620072384364903, 0.05079201981425285, 0.10443739593029022, 0.13200052082538605, 0.007841442711651325, 0.04038690775632858, 0.005943085998296738, 0.04502689838409424, 0.005707652773708105, 0.010736361145973206, 0.17095635831356049, 0.0034604808315634727, 0.08947119116783142, 0.1356668770313263], [0.05784226581454277, 0.06101800128817558, 0.011293647810816765, 0.030310506001114845, 0.02692366950213909, 0.10355494171380997, 0.1643158346414566, 0.02146345190703869, 0.10686127096414566, 0.0006235101609490812, 0.001034505432471633, 0.12770172953605652, 0.08152752369642258, 0.06569667905569077, 0.13584844768047333], [0.24130187928676605, 0.04057329148054123, 0.37395209074020386, 0.32695549726486206, 0.18701796233654022, 0.1542418897151947, 0.4307348132133484, 0.07850468903779984, 0.24226921796798706, 0.027551302686333656, 0.17328326404094696, 0.256756991147995, 0.1007629856467247, 0.0746576264500618, 0.1026487648487091], [0.18065117299556732, 0.0850963443517685, 0.37481072545051575, 0.36960142850875854, 0.042269542813301086, 0.04689870774745941, 0.10553675144910812, 0.031215613707900047, 0.03850337490439415, 0.055640675127506256, 
0.11964564025402069, 0.20274300873279572, 0.22541530430316925, 0.07314471900463104, 0.12492100149393082]], [[0.2626786530017853, 0.0849713385105133, 0.11954734474420547, 0.09299539029598236, 0.12019845843315125, 0.1675114780664444, 0.12060416489839554, 0.1292921006679535, 0.33819568157196045, 0.3146125078201294, 0.20831438899040222, 0.39596518874168396, 0.2145393043756485, 0.2666572332382202, 0.05294949933886528], [0.1368129849433899, 0.16135744750499725, 0.15528292953968048, 0.24771884083747864, 0.1416730433702469, 0.05803852900862694, 0.07394444942474365, 0.10563277453184128, 0.033661823719739914, 0.18054474890232086, 0.1985052525997162, 0.05316935107111931, 0.05009648948907852, 0.043446026742458344, 0.03412564843893051], [0.0030849967151880264, 0.0006440586876124144, 0.016017315909266472, 0.0037563794758170843, 0.009170617908239365, 0.0008218333241529763, 0.0032779525499790907, 0.0006974118296056986, 0.12044321000576019, 0.005983977112919092, 0.011704917997121811, 0.023849062621593475, 0.0031650178134441376, 0.01169323269277811, 0.16145823895931244], [0.02798222377896309, 0.012448069639503956, 0.018199993297457695, 0.0069459048099815845, 0.042531996965408325, 0.009718443267047405, 0.013791781850159168, 0.04370715469121933, 0.21814176440238953, 0.024645699188113213, 0.0633857473731041, 0.0802498310804367, 0.006771658081561327, 0.040147896856069565, 0.4109969139099121], [0.02001010812819004, 0.02580004744231701, 0.006869276985526085, 0.007543967105448246, 0.017537932842969894, 0.00023914838675409555, 0.006739956792443991, 0.008227680809795856, 0.05446772649884224, 0.03320171311497688, 0.022232946008443832, 0.01063306163996458, 0.0007752752280794084, 0.0028256638906896114, 0.2078467756509781], [0.0034786108881235123, 0.00011826713307527825, 0.002407492371276021, 0.005452741403132677, 0.002847136929631233, 0.003419033018872142, 0.013516861945390701, 0.002940082224085927, 0.002004653448238969, 0.006652397103607655, 0.004079414997249842, 0.0028307989705353975, 0.0006369714974425733, 0.002542868722230196, 0.1463778167963028], [0.0762338638305664, 0.11778479814529419, 0.03105221875011921, 0.006415408570319414, 0.0190818402916193, 0.027191398665308952, 0.005222225561738014, 0.0170834269374609, 0.05309534817934036, 0.00936796236783266, 0.03816217556595802, 0.17940494418144226, 0.020440110936760902, 0.13513173162937164, 0.3000544309616089], [0.16228125989437103, 0.35454851388931274, 0.04026315361261368, 0.03822629526257515, 0.023396998643875122, 0.30800631642341614, 0.24136781692504883, 0.15176478028297424, 0.0788438618183136, 0.07347536832094193, 0.030298085883259773, 0.007365733850747347, 0.1061745211482048, 0.2841038405895233, 0.07787416130304337], [0.05645078793168068, 0.023840615525841713, 0.013567867688834667, 0.00750470208004117, 0.07643276453018188, 0.08809614926576614, 0.06102507561445236, 0.021034346893429756, 0.039108242839574814, 0.02081543207168579, 0.011458326131105423, 0.20520520210266113, 0.027348484843969345, 0.06299317628145218, 0.2514360249042511], [0.016126127913594246, 0.01087501272559166, 0.01213990617543459, 0.004450921434909105, 0.014690833166241646, 0.30525338649749756, 0.02716207131743431, 0.09981174021959305, 0.027048761025071144, 0.01336466334760189, 0.006663064938038588, 0.0520603246986866, 0.042623523622751236, 0.018071996048092842, 0.1948687732219696], [0.04185086488723755, 0.034399643540382385, 0.041276611387729645, 0.0584070086479187, 0.019824109971523285, 0.00856409315019846, 0.08867836743593216, 0.10337970405817032, 0.09468665719032288, 0.02033121883869171, 
0.018058426678180695, 0.059728462249040604, 0.09321711957454681, 0.20168805122375488, 0.1941128522157669], [0.01436887588351965, 0.027922889217734337, 0.046481672674417496, 0.010071231983602047, 0.026127830147743225, 0.06003356724977493, 0.022118212655186653, 0.08160483092069626, 0.07784195244312286, 0.010694753378629684, 0.017130734398961067, 0.05340806022286415, 0.041410259902477264, 0.035884104669094086, 0.2491855025291443], [0.053393200039863586, 0.04828185588121414, 0.03453819081187248, 0.013636122457683086, 0.25098806619644165, 0.12313847243785858, 0.02266266942024231, 0.017618268728256226, 0.019785437732934952, 0.005274764262139797, 0.021053072065114975, 0.20679616928100586, 0.021523641422390938, 0.03855947405099869, 0.1109846979379654], [0.12851715087890625, 0.12400124222040176, 0.2637093663215637, 0.02439347468316555, 0.07038086652755737, 0.12665364146232605, 0.04898465424776077, 0.03412041813135147, 0.0263816025108099, 0.023226425051689148, 0.11513664573431015, 0.09503531455993652, 0.1215861439704895, 0.11158601939678192, 0.14799171686172485], [0.0010214513167738914, 0.004835289902985096, 0.0042709591798484325, 0.0026378841139376163, 0.005866974592208862, 0.008331544697284698, 0.006240549497306347, 0.01365274004638195, 0.1720106601715088, 0.0005307683604769409, 0.0007543729152530432, 0.004353509750217199, 0.0002490385086275637, 0.0017186965560540557, 0.14317919313907623], [0.07205050438642502, 0.12816517055034637, 0.23753608763217926, 0.08243206143379211, 0.5041552186012268, 0.11970840394496918, 0.04837331175804138, 0.034129947423934937, 0.16484025120735168, 0.011070297099649906, 0.05054215341806412, 0.039082955569028854, 0.09205758571624756, 0.1322212517261505, 0.16203875839710236], [0.014979850500822067, 0.03769220784306526, 0.04367470741271973, 0.009415187872946262, 0.019922776147723198, 0.11522040516138077, 0.014906312339007854, 0.04722318425774574, 0.06570684164762497, 0.008925273083150387, 0.019600573927164078, 0.0472339391708374, 0.005348374601453543, 0.0017698986921459436, 0.1612817794084549], [0.023198002949357033, 0.06148262694478035, 0.046858664602041245, 0.013079512864351273, 0.08762317895889282, 0.00949429627507925, 0.0484880767762661, 0.025388503447175026, 0.04432932287454605, 0.006038118619471788, 0.010164186358451843, 0.08949221670627594, 0.06122652441263199, 0.11895263940095901, 0.16355113685131073], [0.009917332790791988, 0.01408212911337614, 0.047434139996767044, 0.005388779100030661, 0.023170381784439087, 0.034844160079956055, 0.009820640087127686, 0.03569778800010681, 0.05789060518145561, 0.0037882563192397356, 0.013808010146021843, 0.04879388585686684, 0.03114072047173977, 0.0507131889462471, 0.18661679327487946], [0.0652787834405899, 0.04612350836396217, 0.04522763565182686, 0.014745297841727734, 0.27657532691955566, 0.16156227886676788, 0.025164838880300522, 0.017732013016939163, 0.023105354979634285, 0.005499221384525299, 0.020183373242616653, 0.19132839143276215, 0.020515967160463333, 0.056384406983852386, 0.14304831624031067], [0.14539514482021332, 0.21388974785804749, 0.34906452894210815, 0.031415559351444244, 0.062017399817705154, 0.08485611528158188, 0.03913363441824913, 0.03569692373275757, 0.023448940366506577, 0.020669998601078987, 0.1622902750968933, 0.1315622329711914, 0.09182734042406082, 0.1796703040599823, 0.13702963292598724], [0.0009059146977961063, 0.004442692268639803, 0.002850044285878539, 0.0024173678830266, 0.006019651889801025, 0.004450949374586344, 0.003768310882151127, 0.009272964671254158, 0.19643637537956238, 
[nested floating-point array data elided: machine-generated visualization output (4-D list of floats, innermost dimension 15); no human-readable content]
0.16612127423286438], [0.00713347876444459, 0.11304348707199097, 0.007166451308876276, 0.017305465415120125, 0.01892760582268238, 0.004294875077903271, 0.013284130021929741, 0.05641845986247063, 0.006293897051364183, 0.008091668598353863, 0.004229044076055288, 0.03852742537856102, 0.036073870956897736, 0.030675750225782394, 0.1423715502023697], [0.112990602850914, 0.20299020409584045, 0.29141831398010254, 0.1917479783296585, 0.25626659393310547, 0.40023526549339294, 0.045914653688669205, 0.05403761938214302, 0.3577503561973572, 0.11164049804210663, 0.20054538547992706, 0.23382915556430817, 0.3541012704372406, 0.39880213141441345, 0.05442150682210922], [0.11769542098045349, 0.22490660846233368, 0.16446754336357117, 0.17726869881153107, 0.24409359693527222, 0.16966795921325684, 0.06426751613616943, 0.1868649125099182, 0.17593497037887573, 0.10732528567314148, 0.1210716962814331, 0.18835949897766113, 0.07820838689804077, 0.12172650545835495, 0.0815061554312706], [0.08801974356174469, 0.2964327037334442, 0.17140379548072815, 0.1086457222700119, 0.1790848970413208, 0.042561717331409454, 0.02568918652832508, 0.12736740708351135, 0.4644424617290497, 0.09952269494533539, 0.1403166949748993, 0.12085206061601639, 0.2499331831932068, 0.14905890822410583, 0.04691213369369507], [0.28339406847953796, 0.25363603234291077, 0.49371209740638733, 0.28714650869369507, 0.42171764373779297, 0.03586414083838463, 0.140908345580101, 0.27345338463783264, 0.06897412985563278, 0.24740128219127655, 0.5061832070350647, 0.4192107915878296, 0.43851029872894287, 0.29079654812812805, 0.10071542859077454], [0.049345988780260086, 0.1473262906074524, 0.10952533781528473, 0.16707968711853027, 0.25493475794792175, 0.03866606950759888, 0.046480532735586166, 0.16288119554519653, 0.06614720076322556, 0.0629507377743721, 0.07218940556049347, 0.3448391556739807, 0.06943795084953308, 0.058807674795389175, 0.135455921292305], [0.05557708069682121, 0.024377070367336273, 0.171014666557312, 0.1548214852809906, 0.21205416321754456, 0.29049578309059143, 0.08155391365289688, 0.2053205668926239, 0.09979691356420517, 0.11640740185976028, 0.23155182600021362, 0.4772811830043793, 0.2134055644273758, 0.3209300637245178, 0.0739695355296135], [0.046621087938547134, 0.02855776995420456, 0.11975010484457016, 0.2049850970506668, 0.16244490444660187, 0.14614170789718628, 0.03785347566008568, 0.2537410259246826, 0.3719625771045685, 0.1159287542104721, 0.23734091222286224, 0.26474830508232117, 0.04938332363963127, 0.17566856741905212, 0.034675102680921555], [0.08535599708557129, 0.01230260543525219, 0.28460273146629333, 0.3323705196380615, 0.13364574313163757, 0.14216013252735138, 0.16550986468791962, 0.36634352803230286, 0.3233327269554138, 0.13755354285240173, 0.6341029405593872, 0.1276889443397522, 0.0818048045039177, 0.2633805274963379, 0.10007897019386292], [0.014263293705880642, 0.07173046469688416, 0.01932992786169052, 0.01909404993057251, 0.16755935549736023, 0.2271488904953003, 0.1093294620513916, 0.14342457056045532, 0.0580194853246212, 0.01671113632619381, 0.03395597264170647, 0.0692841187119484, 0.07175575196743011, 0.04972841590642929, 0.12856654822826385], [0.06590985506772995, 0.1636172980070114, 0.09935098141431808, 0.20126965641975403, 0.4101002812385559, 0.21936923265457153, 0.26084569096565247, 0.3593950569629669, 0.014820259064435959, 0.05201014503836632, 0.03426084294915199, 0.38774317502975464, 0.1401163786649704, 0.3782513439655304, 0.13036324083805084], [0.05128908529877663, 0.11090300232172012, 0.24501535296440125, 
0.07115167379379272, 0.3950805068016052, 0.2010982632637024, 0.08927696198225021, 0.2923780679702759, 0.11195118725299835, 0.05971711874008179, 0.14540457725524902, 0.4000069797039032, 0.2374461144208908, 0.47139719128608704, 0.10731440782546997], [0.014083221554756165, 0.029302498325705528, 0.019839908927679062, 0.019802037626504898, 0.11310776323080063, 0.014347831718623638, 0.013065088540315628, 0.0404186025261879, 0.14103254675865173, 0.01056672353297472, 0.02028844505548477, 0.4335528016090393, 0.019943613559007645, 0.08491621166467667, 0.15365199744701385], [0.04251990094780922, 0.025738505646586418, 0.19788101315498352, 0.08900192379951477, 0.20504283905029297, 0.36725619435310364, 0.05852765589952469, 0.12635937333106995, 0.07596885412931442, 0.055006030946969986, 0.1975020170211792, 0.39253395795822144, 0.2602497935295105, 0.3791850209236145, 0.11310473829507828], [0.06150972843170166, 0.049163203686475754, 0.14174170792102814, 0.13322500884532928, 0.16170991957187653, 0.21354396641254425, 0.04667104035615921, 0.26311540603637695, 0.32218027114868164, 0.0809161439538002, 0.18361496925354004, 0.23948682844638824, 0.09133663028478622, 0.25973111391067505, 0.07212682068347931], [0.12382826954126358, 0.035204268991947174, 0.3469122052192688, 0.27821084856987, 0.12485836446285248, 0.1130678728222847, 0.12963837385177612, 0.3451126217842102, 0.16417652368545532, 0.12570835649967194, 0.5000419616699219, 0.09880878776311874, 0.042446259409189224, 0.2635292708873749, 0.16834798455238342], [0.010800065472722054, 0.04851265624165535, 0.01629789173603058, 0.013155121356248856, 0.14412836730480194, 0.10944324731826782, 0.08000180870294571, 0.10409139841794968, 0.054843056946992874, 0.011575616896152496, 0.02017728053033352, 0.044063322246074677, 0.04816943034529686, 0.03936787694692612, 0.1280953288078308], [0.03501533716917038, 0.12365423142910004, 0.058643028140068054, 0.026187611743807793, 0.2106953263282776, 0.09627192467451096, 0.1373300403356552, 0.209503173828125, 0.00544273667037487, 0.010177833028137684, 0.00795654021203518, 0.17826952040195465, 0.06280092895030975, 0.2785777747631073, 0.15446779131889343], [0.055331505835056305, 0.14680130779743195, 0.22850985825061798, 0.040600359439849854, 0.2299574315547943, 0.21366852521896362, 0.10291176289319992, 0.2649042010307312, 0.07482050359249115, 0.04207760840654373, 0.11352740973234177, 0.22353075444698334, 0.2551318407058716, 0.4900997579097748, 0.11985023319721222], [0.04223596677184105, 0.14613933861255646, 0.08112313598394394, 0.04192597419023514, 0.11981905251741409, 0.18680673837661743, 0.07695262134075165, 0.14058402180671692, 0.1875196099281311, 0.05864474177360535, 0.0581248439848423, 0.23554684221744537, 0.21983209252357483, 0.1619952768087387, 0.12595340609550476]], [[0.24939602613449097, 0.0921018123626709, 0.20195554196834564, 0.25931593775749207, 0.24976609647274017, 0.08025927096605301, 0.10602997988462448, 0.08455296605825424, 0.038250602781772614, 0.34039628505706787, 0.2528480887413025, 0.17168891429901123, 0.12038858979940414, 0.16591216623783112, 0.05973837152123451], [0.04881530627608299, 0.07757209986448288, 0.080610491335392, 0.047049663960933685, 0.2744564712047577, 0.18291208148002625, 0.11781244724988937, 0.130965456366539, 0.16412131488323212, 0.049904536455869675, 0.10192018002271652, 0.46385079622268677, 0.23078110814094543, 0.23192283511161804, 0.17445482313632965], [0.11153621971607208, 0.27696484327316284, 0.0350787453353405, 0.011731116101145744, 0.08945441246032715, 0.2750371992588043, 
0.07341955602169037, 0.12011690437793732, 0.026965567842125893, 0.023494159802794456, 0.015654105693101883, 0.05704642832279205, 0.11022293567657471, 0.0463077574968338, 0.1307818740606308], [0.06216026097536087, 0.123567596077919, 0.044055916368961334, 0.012494971975684166, 0.045035671442747116, 0.18137943744659424, 0.1501520872116089, 0.0996006652712822, 0.05310875549912453, 0.11289763450622559, 0.05045852065086365, 0.055306825786828995, 0.3424266576766968, 0.1600506752729416, 0.04121629521250725], [0.03470996022224426, 0.38486456871032715, 0.007671448867768049, 0.014272118918597698, 0.01295357197523117, 0.001353065250441432, 0.035229261964559555, 0.10929086059331894, 0.03641098737716675, 0.08741087466478348, 0.01870635710656643, 0.10011491179466248, 0.03142678365111351, 0.12343490868806839, 0.15971165895462036], [0.03053746558725834, 0.24113330245018005, 0.009466315619647503, 0.01980357989668846, 0.04114365205168724, 0.05523357167840004, 0.027042368426918983, 0.10979101061820984, 0.004461985547095537, 0.04689180105924606, 0.04529552906751633, 0.1364448219537735, 0.054305437952280045, 0.06579019129276276, 0.13895106315612793], [0.3289671242237091, 0.3443813920021057, 0.38217487931251526, 0.32642021775245667, 0.12515123188495636, 0.04144418612122536, 0.06740343570709229, 0.024584289640188217, 0.007359183859080076, 0.39375364780426025, 0.38123685121536255, 0.3035361170768738, 0.18788036704063416, 0.13260427117347717, 0.09976762533187866], [0.1711268573999405, 0.1900682896375656, 0.20778892934322357, 0.08847668021917343, 0.39589688181877136, 0.3955995440483093, 0.3348483741283417, 0.11133389919996262, 0.10861264914274216, 0.14033687114715576, 0.26926568150520325, 0.4846358299255371, 0.23405344784259796, 0.4343181252479553, 0.08998383581638336], [0.4154844284057617, 0.4073733687400818, 0.5541329383850098, 0.43809109926223755, 0.11503908038139343, 0.02849700301885605, 0.025097709149122238, 0.014711813069880009, 0.006424109451472759, 0.39197838306427, 0.4694826304912567, 0.17039237916469574, 0.16142874956130981, 0.19919125735759735, 0.054951149970293045], [0.24498042464256287, 0.277620404958725, 0.060333866626024246, 0.030503980815410614, 0.04090564325451851, 0.4659561812877655, 0.2110646367073059, 0.11101182550191879, 0.028219982981681824, 0.10508411377668381, 0.025386929512023926, 0.0648839995265007, 0.13676653802394867, 0.07622335106134415, 0.09164498746395111], [0.4220424294471741, 0.21296784281730652, 0.10483475774526596, 0.11319100856781006, 0.14396990835666656, 0.1309618502855301, 0.13656088709831238, 0.2097199261188507, 0.1397993415594101, 0.263439804315567, 0.10735370218753815, 0.27457332611083984, 0.26051631569862366, 0.18891198933124542, 0.10100831091403961], [0.12607140839099884, 0.08847615122795105, 0.09191321581602097, 0.06030821427702904, 0.21649383008480072, 0.10438336431980133, 0.07331530004739761, 0.1330888420343399, 0.04176999628543854, 0.06727378815412521, 0.06257567554712296, 0.21110908687114716, 0.09018781781196594, 0.09389244765043259, 0.13621515035629272], [0.062066610902547836, 0.07845254987478256, 0.24838510155677795, 0.16541223227977753, 0.16867581009864807, 0.019677892327308655, 0.021460779011249542, 0.018530650064349174, 0.023010587319731712, 0.10349667817354202, 0.16099916398525238, 0.3089703619480133, 0.08426959812641144, 0.16459643840789795, 0.06073381006717682], [0.11642084270715714, 0.11190053075551987, 0.12368596345186234, 0.04549993947148323, 0.3567850887775421, 0.06569506227970123, 0.07286660373210907, 0.03259556367993355, 0.09530685096979141, 
0.19273261725902557, 0.06463074684143066, 0.7640278339385986, 0.06371455639600754, 0.1593337506055832, 0.2193848341703415], [0.11034999042749405, 0.03210863843560219, 0.010996339842677116, 0.026450032368302345, 0.051475513726472855, 0.02743532694876194, 0.3610350787639618, 0.20538736879825592, 0.017281753942370415, 0.05300014466047287, 0.012052728794515133, 0.08001075685024261, 0.0069017065688967705, 0.010893179103732109, 0.13085691630840302], [0.07615644484758377, 0.1536630541086197, 0.1253354847431183, 0.048576656728982925, 0.05276811867952347, 0.1611642986536026, 0.12317243963479996, 0.32385867834091187, 0.012925365939736366, 0.0864856168627739, 0.08918802440166473, 0.23886144161224365, 0.20351386070251465, 0.20744860172271729, 0.13318131864070892], [0.051417503505945206, 0.1600690335035324, 0.08639511466026306, 0.02997625432908535, 0.08503448963165283, 0.32695260643959045, 0.06822863221168518, 0.16364485025405884, 0.06138167902827263, 0.07786902785301208, 0.04443247988820076, 0.0585777647793293, 0.1263807862997055, 0.10769001394510269, 0.13808733224868774], [0.1321558654308319, 0.24967153370380402, 0.0761917233467102, 0.044561922550201416, 0.12028387933969498, 0.19908402860164642, 0.04708404839038849, 0.10076720267534256, 0.09921064227819443, 0.18345412611961365, 0.09404058009386063, 0.21650025248527527, 0.11625839024782181, 0.1530369222164154, 0.12011245638132095], [0.10757170617580414, 0.1042957603931427, 0.13590699434280396, 0.06331591308116913, 0.24158470332622528, 0.09161848574876785, 0.0633605495095253, 0.13977625966072083, 0.03925082087516785, 0.07121878862380981, 0.1023484393954277, 0.26378345489501953, 0.10990181565284729, 0.12030858546495438, 0.1261080652475357], [0.06512168049812317, 0.13837532699108124, 0.3250073194503784, 0.16753129661083221, 0.21647527813911438, 0.04118574038147926, 0.03336784988641739, 0.029927842319011688, 0.03334499150514603, 0.08782976865768433, 0.17631417512893677, 0.3171449303627014, 0.10520178824663162, 0.15139654278755188, 0.0914224162697792], [0.06382797658443451, 0.2566763758659363, 0.11056842654943466, 0.028001734986901283, 0.2813059389591217, 0.24806144833564758, 0.07807287573814392, 0.05373501405119896, 0.21183612942695618, 0.09658068418502808, 0.05084875971078873, 0.501965343952179, 0.06208595260977745, 0.10913741588592529, 0.26912179589271545], [0.08548272401094437, 0.017544403672218323, 0.011271107010543346, 0.022962557151913643, 0.05241750180721283, 0.02648325450718403, 0.3057800531387329, 0.19772306084632874, 0.025625178590416908, 0.03652432560920715, 0.006945622619241476, 0.05576859414577484, 0.00584550853818655, 0.008180957287549973, 0.12917736172676086], [0.03209112584590912, 0.1926622986793518, 0.09989916533231735, 0.02044818177819252, 0.04127199947834015, 0.22930434346199036, 0.09912838786840439, 0.3779822289943695, 0.007566491607576609, 0.046152934432029724, 0.04734500125050545, 0.35250937938690186, 0.10047939419746399, 0.16575956344604492, 0.13635975122451782], [0.05301084369421005, 0.1661737710237503, 0.08216799795627594, 0.025789698585867882, 0.07900767773389816, 0.3054123520851135, 0.08738221228122711, 0.17720931768417358, 0.06289011240005493, 0.06967967748641968, 0.05491774156689644, 0.02886299602687359, 0.10253670811653137, 0.09415244311094284, 0.129754438996315], [0.1895110011100769, 0.09308972954750061, 0.1887637972831726, 0.14927715063095093, 0.3653167188167572, 0.1686658412218094, 0.1126369759440422, 0.17013703286647797, 0.0685301423072815, 0.15278968214988708, 0.19327588379383087, 0.18825437128543854, 0.143904447555542, 
0.143670454621315, 0.1203024610877037]], [[0.20045556128025055, 0.06346653401851654, 0.1246497705578804, 0.132145956158638, 0.18068760633468628, 0.0611145943403244, 0.3011611998081207, 0.09648064523935318, 0.3848741054534912, 0.20776434242725372, 0.09024091809988022, 0.10095226764678955, 0.05726093426346779, 0.17784324288368225, 0.06983170658349991], [0.06639314442873001, 0.03837187588214874, 0.306266725063324, 0.09758531302213669, 0.10875808447599411, 0.20901371538639069, 0.0894559919834137, 0.21620051562786102, 0.13805773854255676, 0.07912127673625946, 0.3521624505519867, 0.036526914685964584, 0.1551785171031952, 0.14622288942337036, 0.19236178696155548], [0.03379146009683609, 0.11666905134916306, 0.02791847102344036, 0.04754703491926193, 0.02039634808897972, 0.23185299336910248, 0.07985613495111465, 0.3240954875946045, 0.04561735317111015, 0.061520081013441086, 0.18156962096691132, 0.10860903561115265, 0.3409081995487213, 0.3218340575695038, 0.13103368878364563], [0.06278766691684723, 0.001863734913058579, 0.30563783645629883, 0.056017640978097916, 0.245498925447464, 0.11060530692338943, 0.09064232558012009, 0.004372697789222002, 0.007118886336684227, 0.06251134723424911, 0.17941752076148987, 0.004394095856696367, 0.11450538039207458, 0.046043287962675095, 0.021101655438542366], [0.11553236097097397, 0.0885467380285263, 0.2750205993652344, 0.21104735136032104, 0.3459762930870056, 0.07976578176021576, 0.218110129237175, 0.05760955810546875, 0.09680842608213425, 0.2662138342857361, 0.21090076863765717, 0.41520535945892334, 0.21548694372177124, 0.2248467653989792, 0.10481394827365875], [0.03112325258553028, 0.08175794035196304, 0.035110849887132645, 0.038375336676836014, 0.2468937784433365, 0.060934457927942276, 0.0843387246131897, 0.03423367813229561, 0.02026834897696972, 0.07970783859491348, 0.08959806710481644, 0.1693299561738968, 0.16057033836841583, 0.21660663187503815, 0.13329552114009857], [0.09539461880922318, 0.058681365102529526, 0.01674766093492508, 0.02866855263710022, 0.012030106969177723, 0.21465063095092773, 0.034089475870132446, 0.04479566961526871, 0.014019637368619442, 0.035355255007743835, 0.1569557934999466, 0.01038492750376463, 0.06631091982126236, 0.1547483503818512, 0.19284123182296753], [0.04954487085342407, 0.07065968960523605, 0.07275094836950302, 0.040997497737407684, 0.07946129143238068, 0.17300859093666077, 0.03222974017262459, 0.02469809167087078, 0.18557047843933105, 0.13542628288269043, 0.26776814460754395, 0.056715987622737885, 0.15973475575447083, 0.19029632210731506, 0.17610958218574524], [0.047577280551195145, 0.02606579288840294, 0.0165295097976923, 0.04137043654918671, 0.013305035419762135, 0.32835593819618225, 0.026565413922071457, 0.06772360950708389, 0.010228256694972515, 0.041277337819337845, 0.1336892545223236, 0.008326719515025616, 0.10322394222021103, 0.1976388841867447, 0.21077491343021393], [0.043893925845623016, 0.021177353337407112, 0.028366681188344955, 0.07016126066446304, 0.07573862373828888, 0.22699910402297974, 0.055615294724702835, 0.07980518788099289, 0.009269739501178265, 0.09460800141096115, 0.16427507996559143, 0.20832805335521698, 0.1427353024482727, 0.2680304944515228, 0.13907650113105774], [0.03411688283085823, 0.056632235646247864, 0.07365043461322784, 0.10934542864561081, 0.09185239672660828, 0.5077250003814697, 0.05141168087720871, 0.047258101403713226, 0.053326722234487534, 0.13365329802036285, 0.28296661376953125, 0.041020717471838, 0.08861301094293594, 0.13371184468269348, 0.11519401520490646], [0.04096442833542824, 
0.07374820858240128, 0.07300861179828644, 0.10121195018291473, 0.051522452384233475, 0.3508135676383972, 0.03948133811354637, 0.047985587269067764, 0.06340529769659042, 0.06765846908092499, 0.281475692987442, 0.05536516010761261, 0.1822110116481781, 0.22272904217243195, 0.13150985538959503], [0.07982534170150757, 0.06016559898853302, 0.03820561617612839, 0.02410227432847023, 0.006901262793689966, 0.42442968487739563, 0.02364957146346569, 0.07835549116134644, 0.027230771258473396, 0.12123586237430573, 0.15446297824382782, 0.018115278333425522, 0.21087171137332916, 0.29417684674263, 0.08362340182065964], [0.05696694925427437, 0.014171368442475796, 0.06200120970606804, 0.021368764340877533, 0.012162269093096256, 0.0841592326760292, 0.03827953711152077, 0.07895056158304214, 0.01159723848104477, 0.05937046930193901, 0.023348387330770493, 0.008824712596833706, 0.13521961867809296, 0.23698511719703674, 0.03196632117033005], [0.11678174138069153, 0.8205142617225647, 0.01038320455700159, 0.023903295397758484, 0.21764065325260162, 0.2580764889717102, 0.20165181159973145, 0.2900886535644531, 0.03504627197980881, 0.10256802290678024, 0.03713424876332283, 0.7063723206520081, 0.8779962062835693, 0.8367014527320862, 0.0919082760810852], [0.038494985550642014, 0.05109047889709473, 0.07501792907714844, 0.04001014679670334, 0.021166233345866203, 0.03079657442867756, 0.01494709774851799, 0.010983827523887157, 0.0029027159325778484, 0.0995086133480072, 0.350593626499176, 0.02021479234099388, 0.34575650095939636, 0.21952421963214874, 0.05450797453522682], [0.028108511120080948, 0.08174566179513931, 0.03328564018011093, 0.03230520337820053, 0.012646276503801346, 0.1872790902853012, 0.025206655263900757, 0.06737280637025833, 0.033121660351753235, 0.08641302585601807, 0.2848047614097595, 0.059273794293403625, 0.18425194919109344, 0.15244826674461365, 0.1352420449256897], [0.07509021461009979, 0.05027765780687332, 0.23718997836112976, 0.11438266932964325, 0.11051909625530243, 0.431958943605423, 0.046987809240818024, 0.021854011341929436, 0.15366314351558685, 0.1928708851337433, 0.2900879681110382, 0.052021902054548264, 0.11538787186145782, 0.25173547863960266, 0.10233873873949051], [0.03257948160171509, 0.08023553341627121, 0.06238585337996483, 0.06856023520231247, 0.02927098423242569, 0.2968010902404785, 0.03317389637231827, 0.04758336395025253, 0.07943073660135269, 0.053982626646757126, 0.21416282653808594, 0.05025764927268028, 0.14347779750823975, 0.19969123601913452, 0.13921964168548584], [0.07817428559064865, 0.11046875268220901, 0.040724072605371475, 0.024797527119517326, 0.004808576311916113, 0.5141928791999817, 0.024754824116826057, 0.080713652074337, 0.03179122135043144, 0.12244449555873871, 0.22665926814079285, 0.013305582106113434, 0.23485711216926575, 0.323343425989151, 0.10171245783567429], [0.03765244409441948, 0.0463164821267128, 0.06456112116575241, 0.05319739878177643, 0.010156691074371338, 0.1155625581741333, 0.02458079345524311, 0.07648347318172455, 0.019683409482240677, 0.06488858163356781, 0.09342794120311737, 0.059032924473285675, 0.15581923723220825, 0.2894386053085327, 0.04157077521085739], [0.14924734830856323, 0.8862696886062622, 0.013125438243150711, 0.033269379287958145, 0.22599543631076813, 0.33975404500961304, 0.25561264157295227, 0.36481109261512756, 0.05327271297574043, 0.09902165085077286, 0.03598061203956604, 0.754990816116333, 0.9104278087615967, 0.8631682395935059, 0.10125402361154556], [0.03672042489051819, 0.12888115644454956, 0.1578092873096466, 0.056865133345127106, 
0.03288109228014946, 0.1379515379667282, 0.021150214597582817, 0.013284055516123772, 0.003249341854825616, 0.08646353334188461, 0.5471532940864563, 0.0361909456551075, 0.5093809366226196, 0.39931434392929077, 0.07520455867052078], [0.03492635861039162, 0.09938696771860123, 0.028945090249180794, 0.03084651380777359, 0.012707062065601349, 0.15071596205234528, 0.029011720791459084, 0.05455483868718147, 0.03256314992904663, 0.07100401073694229, 0.2587825059890747, 0.05546442046761513, 0.17298617959022522, 0.15517692267894745, 0.13362783193588257], [0.050736088305711746, 0.10139954090118408, 0.08949553966522217, 0.0938185378909111, 0.06053004041314125, 0.18139560520648956, 0.0767659917473793, 0.11340610682964325, 0.19499026238918304, 0.11419404298067093, 0.23666803538799286, 0.05730360746383667, 0.07293370366096497, 0.11558260023593903, 0.12613430619239807]], [[0.1489560306072235, 0.2212677150964737, 0.055408962070941925, 0.03110104240477085, 0.02513720653951168, 0.07830048352479935, 0.05067736655473709, 0.06611648201942444, 0.02238955721259117, 0.03719142824411392, 0.025896798819303513, 0.04350690543651581, 0.11618120968341827, 0.08714473247528076, 0.15466241538524628], [0.002932992298156023, 0.307859867811203, 0.008187332190573215, 0.003677746979519725, 0.0005738585605286062, 0.0008406178676523268, 0.0005446207360364497, 0.00039283244404941797, 0.0009221792570315301, 0.000758469570428133, 0.003933709114789963, 0.0009352274937555194, 0.001059120986610651, 0.0020118390675634146, 0.010183396749198437], [0.37297555804252625, 0.09208715707063675, 0.16802547872066498, 0.11860792338848114, 0.08042033761739731, 0.18612971901893616, 0.45423436164855957, 0.07133221626281738, 0.13892753422260284, 0.3810507357120514, 0.291797935962677, 0.16154640913009644, 0.050885219126939774, 0.10468144714832306, 0.10335776954889297], [0.028274476528167725, 0.018124615773558617, 0.13954800367355347, 0.03560209274291992, 0.08428613841533661, 0.17491763830184937, 0.13035845756530762, 0.0214189775288105, 0.009060325101017952, 0.012400318868458271, 0.031279344111680984, 0.011209131218492985, 0.19533281028270721, 0.012452301569283009, 0.020085560157895088], [0.11180772632360458, 0.012462746351957321, 0.04844700172543526, 0.06198285147547722, 0.06685204058885574, 0.44600817561149597, 0.30352795124053955, 0.1519387811422348, 0.003835479263216257, 0.08384031802415848, 0.027865614742040634, 0.159846231341362, 0.46423590183258057, 0.09249147027730942, 0.09178084880113602], [0.04840230569243431, 0.026793736964464188, 0.1120820939540863, 0.09037120640277863, 0.2328549474477768, 0.1063276007771492, 0.14073747396469116, 0.19612964987754822, 0.1904316544532776, 0.10354755818843842, 0.10268037766218185, 0.13820117712020874, 0.3374333083629608, 0.15443934500217438, 0.12536528706550598], [0.36786824464797974, 0.056283749639987946, 0.03846094757318497, 0.07181648164987564, 0.03666122257709503, 0.04024837538599968, 0.5659748911857605, 0.2338860183954239, 0.11518415063619614, 0.3659259080886841, 0.04107162728905678, 0.012827688828110695, 0.0609581284224987, 0.02837788313627243, 0.060403015464544296], [0.0033490851055830717, 0.001678164815530181, 0.02563566155731678, 0.028815647587180138, 0.007257265504449606, 0.04370535537600517, 0.026118090376257896, 0.435838907957077, 0.005564961116760969, 0.014266176149249077, 0.018343305215239525, 0.0009297388605773449, 0.03809681162238121, 0.020595146343111992, 0.03566184639930725], [0.34718528389930725, 0.028826624155044556, 0.05378839746117592, 0.0680842474102974, 0.0254778191447258, 
0.1994519978761673, 0.7739751935005188, 0.28213825821876526, 0.24756361544132233, 0.3363908529281616, 0.08445209264755249, 0.0067241075448691845, 0.09118638187646866, 0.04656682163476944, 0.0331079363822937], [0.06212884560227394, 0.013463910669088364, 0.024143628776073456, 0.025745615363121033, 0.12165382504463196, 0.04105379059910774, 0.21918880939483643, 0.12444313615560532, 0.7241542935371399, 0.2624671459197998, 0.05330171436071396, 0.026902005076408386, 0.04947282373905182, 0.06268218904733658, 0.04105047509074211], [0.23139908909797668, 0.12510670721530914, 0.062008026987314224, 0.06357982009649277, 0.21447335183620453, 0.06672460585832596, 0.5059712529182434, 0.23151132464408875, 0.3211345672607422, 0.29274967312812805, 0.07394816726446152, 0.12323616445064545, 0.33240705728530884, 0.13292434811592102, 0.0974365845322609], [0.3976813554763794, 0.24336650967597961, 0.030069073662161827, 0.04866141080856323, 0.061815883964300156, 0.023062149062752724, 0.2837987542152405, 0.10572359710931778, 0.42220908403396606, 0.47088485956192017, 0.06114182993769646, 0.05295940861105919, 0.04274435341358185, 0.033208493143320084, 0.07069624215364456], [0.6213744282722473, 0.08501708507537842, 0.08457361906766891, 0.0819045826792717, 0.02008524350821972, 0.02321169711649418, 0.5481746196746826, 0.17061969637870789, 0.19314314424991608, 0.48946020007133484, 0.08799289166927338, 0.009451461024582386, 0.1643926501274109, 0.03458939492702484, 0.0487554594874382], [0.11498570442199707, 0.014700047671794891, 0.04425002261996269, 0.027370423078536987, 0.031341005116701126, 0.11119254678487778, 0.2834031581878662, 0.24822625517845154, 0.387948602437973, 0.17188440263271332, 0.026020031422376633, 0.003112945705652237, 0.1680845320224762, 0.013143973425030708, 0.05647796019911766], [0.00710845272988081, 0.009718026034533978, 0.08296849578619003, 0.05356726795434952, 0.20372402667999268, 0.20898059010505676, 0.07373131066560745, 0.07588774710893631, 0.33318811655044556, 0.09730548411607742, 0.031877510249614716, 0.04629351943731308, 0.026428943499922752, 0.05165233090519905, 0.12934288382530212], [0.092291921377182, 0.13057716190814972, 0.11971572786569595, 0.09643372148275375, 0.0971774011850357, 0.03882397338747978, 0.30341219902038574, 0.06688009947538376, 0.5493715405464172, 0.21897412836551666, 0.10454282909631729, 0.09917838126420975, 0.19730664789676666, 0.0889393612742424, 0.0462181456387043], [0.3365032970905304, 0.06134270504117012, 0.11965256929397583, 0.08703643828630447, 0.08615697175264359, 0.01610170491039753, 0.289604127407074, 0.16905160248279572, 0.690265953540802, 0.5125291347503662, 0.11020015180110931, 0.05034353584051132, 0.04973014071583748, 0.04155145213007927, 0.06180096045136452], [0.25151577591896057, 0.0737723708152771, 0.11452356725931168, 0.07270905375480652, 0.27380475401878357, 0.046423640102148056, 0.6668940782546997, 0.60158771276474, 0.286392480134964, 0.2904633581638336, 0.07359147071838379, 0.040276750922203064, 0.2706137001514435, 0.15532110631465912, 0.051646988838911057], [0.4344438314437866, 0.2159019559621811, 0.0411386713385582, 0.059745997190475464, 0.08364511281251907, 0.02960371784865856, 0.3908357322216034, 0.17347759008407593, 0.4736940562725067, 0.5831181406974792, 0.08143209666013718, 0.05496616289019585, 0.0508774034678936, 0.03704635798931122, 0.07529113441705704], [0.6010525822639465, 0.07716702669858932, 0.12942874431610107, 0.11651009321212769, 0.029510293155908585, 0.025635747238993645, 0.564699649810791, 0.20346374809741974, 0.1942133754491806, 
0.5329980254173279, 0.09726559370756149, 0.006782675161957741, 0.1884276419878006, 0.02957840822637081, 0.046941183507442474], [0.07098641246557236, 0.02088714949786663, 0.0536419078707695, 0.04874833673238754, 0.1357380896806717, 0.10192368179559708, 0.22615019977092743, 0.3848302960395813, 0.3569928705692291, 0.19976821541786194, 0.030237246304750443, 0.012232640758156776, 0.14491091668605804, 0.01217038556933403, 0.025625383481383324], [0.007031308952718973, 0.007269172929227352, 0.08423776179552078, 0.053896792232990265, 0.21268267929553986, 0.2456619292497635, 0.0817742720246315, 0.07338020205497742, 0.2872445285320282, 0.08955906331539154, 0.02503780461847782, 0.043076977133750916, 0.024157537147402763, 0.05127491056919098, 0.1281031221151352], [0.06564409285783768, 0.10634885728359222, 0.14713656902313232, 0.07514703273773193, 0.3204736113548279, 0.07143916934728622, 0.4829144775867462, 0.2612879276275635, 0.7603816986083984, 0.17889906466007233, 0.07189968973398209, 0.10938191413879395, 0.2776612341403961, 0.08681799471378326, 0.052979547530412674], [0.28806957602500916, 0.05887402966618538, 0.12616868317127228, 0.10481040924787521, 0.19247829914093018, 0.033351678401231766, 0.39873749017715454, 0.22540906071662903, 0.7029480338096619, 0.5013188719749451, 0.10523373633623123, 0.08320688456296921, 0.0816955640912056, 0.04881281033158302, 0.09282685816287994], [0.2559513747692108, 0.07615252584218979, 0.11904845386743546, 0.07934627681970596, 0.09980516135692596, 0.14371442794799805, 0.3059750497341156, 0.09035829454660416, 0.22693291306495667, 0.32864776253700256, 0.08986205607652664, 0.1614997386932373, 0.17624114453792572, 0.16325940191745758, 0.119119793176651]]]], \"bot_text\": [\"Das_\", \"Tier\", \"_\", \"\\u00fcber\", \"quer\", \"te_\", \"die_\", \"Stra\\u00dfe_\", \"nicht_\", \", _\", \"weil_\", \"es_\", \"zu_\", \"m\\u00fc\", \"de_\", \"war_\", \", _\", \"weil_\", \"es_\", \"zu_\", \"m\\u00fc\", \"de_\", \"war_\", \"._\"]}, \"all\": {\"top_text\": [\"The_\", \"animal_\", \"didn_\", \"'_\", \"t_\", \"cross_\", \"the_\", \"street_\", \"because_\", \"it_\", \"was_\", \"too_\", \"tire\", \"d_\", \"Das_\", \"Tier\", \"_\", \"\\u00fcber\", \"quer\", \"te_\", \"die_\", \"Stra\\u00dfe_\", \"nicht_\", \", _\", \"weil_\", \"es_\", \"zu_\", \"m\\u00fc\", \"de_\", \"war_\", \", _\", \"weil_\", \"es_\", \"zu_\", \"m\\u00fc\", \"de_\", \"war_\", \"._\"], \"att\": [[[[0.04540494084358215, 0.009098929353058338, 0.06841860711574554, 0.050027038902044296, 0.1867244392633438, 0.20893266797065735, 0.15536439418792725, 0.2501838803291321, 0.03253718465566635, 0.045193806290626526, 0.01405471283942461, 0.15126678347587585, 0.5554144382476807, 0.07120772451162338, 0.21479088068008423, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.010880604386329651, 0.008569094352424145, 0.3644530475139618, 0.032524824142456055, 0.15862980484962463, 0.2895345985889435, 0.007411073427647352, 0.03074379824101925, 0.23678991198539734, 0.04092710092663765, 0.21633881330490112, 0.10217994451522827, 0.5741018652915955, 0.08794906735420227, 0.15811748802661896, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1548197716474533, 0.04407857358455658, 0.04267416149377823, 0.14390510320663452, 0.39150071144104004, 0.10470721870660782, 0.21010224521160126, 0.37398451566696167, 0.24677534401416779, 0.3071460425853729, 0.12511251866817474, 0.37053829431533813, 
0.34731435775756836, 0.21468856930732727, 0.22426171600818634, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.01666487753391266, 0.070415198802948, 0.13558338582515717, 0.030082950368523598, 0.17114414274692535, 0.20995233952999115, 0.018852930516004562, 0.2688913345336914, 0.024380644783377647, 0.01614876091480255, 0.058318838477134705, 0.003357462352141738, 0.22233186662197113, 0.08606056123971939, 0.08522026240825653, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.26702794432640076, 0.10013092309236526, 0.15535299479961395, 0.01822819747030735, 0.19259323179721832, 0.1620739996433258, 0.06925511360168457, 0.14121465384960175, 0.30160874128341675, 0.138941690325737, 0.14571446180343628, 0.1845642775297165, 0.3172887861728668, 0.1378965824842453, 0.15321676433086395, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.05774107202887535, 0.08979255706071854, 0.15777261555194855, 0.0986839085817337, 0.04042482376098633, 0.02364284358918667, 0.006265458185225725, 0.20312650501728058, 0.04589210823178291, 0.2705432176589966, 0.29482388496398926, 0.25277185440063477, 0.21941334009170532, 0.09023746848106384, 0.12374064326286316, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.10808208584785461, 0.08377770334482193, 0.3031982481479645, 0.08575166761875153, 0.1659224033355713, 0.02410510927438736, 0.024052061140537262, 0.06346622854471207, 0.012278172187507153, 0.033475130796432495, 0.02865537814795971, 0.2309909611940384, 0.5272806286811829, 0.058207638561725616, 0.12589795887470245, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.2848440408706665, 0.04557379335165024, 0.07043055444955826, 0.13887976109981537, 0.25104182958602905, 0.08729252219200134, 0.03900376707315445, 0.06159999966621399, 0.07028467953205109, 0.1360185593366623, 0.12163159996271133, 0.4339398145675659, 0.18035274744033813, 0.13636742532253265, 0.35040098428726196, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03364454582333565, 0.06385143101215363, 0.4650610089302063, 0.13847006857395172, 0.12132523953914642, 0.23606915771961212, 0.02828356996178627, 0.17786316573619843, 0.0068073878064751625, 0.0032905752304941416, 0.04716186597943306, 0.060036350041627884, 0.5867005586624146, 0.23594366014003754, 0.05739189311861992, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.04961356148123741, 0.4571499228477478, 0.32633671164512634, 0.044803813099861145, 0.12193554639816284, 0.15620054304599762, 0.031114954501390457, 0.37925899028778076, 0.023853085935115814, 0.007363635115325451, 0.0625552162528038, 0.04359081760048866, 0.12771400809288025, 0.10945692658424377, 0.03218715265393257, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.054336514323949814, 0.12682472169399261, 0.28572455048561096, 0.7098703384399414, 0.04356186464428902, 0.036012813448905945, 0.12616953253746033, 0.12438997626304626, 0.06097114831209183, 0.011340769939124584, 0.00453603221103549, 0.02511424943804741, 
0.15918391942977905, 0.004009802360087633, 0.1337292641401291, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.029656492173671722, 0.11861541867256165, 0.25968441367149353, 0.6952800154685974, 0.06073199212551117, 0.3734285235404968, 0.030824951827526093, 0.09641394764184952, 0.0529148206114769, 0.01715172454714775, 0.01323915645480156, 0.055627286434173584, 0.11593649536371231, 0.04441850632429123, 0.04630020260810852, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.10554661601781845, 0.6362442970275879, 0.6959939002990723, 0.018170323222875595, 0.40134888887405396, 0.15823723375797272, 0.1629355400800705, 0.11358990520238876, 0.24731940031051636, 0.23558683693408966, 0.07505767047405243, 0.03725680336356163, 0.014009351842105389, 0.03713200241327286, 0.09585387259721756, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.4055319130420685, 0.2534714341163635, 0.44874629378318787, 0.14194901287555695, 0.3008168041706085, 0.20029903948307037, 0.07248799502849579, 0.26174047589302063, 0.1826024055480957, 0.0982341319322586, 0.09884719550609589, 0.22728654742240906, 0.04277953878045082, 0.06280668079853058, 0.09454112499952316, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.025013893842697144, 0.013348683714866638, 0.22353146970272064, 0.0037027201615273952, 0.14888618886470795, 0.22346094250679016, 0.021921563893556595, 0.6342950463294983, 0.03356323391199112, 0.06236502528190613, 0.03522828221321106, 0.17797930538654327, 0.04731723666191101, 0.06786928325891495, 0.042550042271614075, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.01107952743768692, 0.002038179198279977, 0.02572617679834366, 0.043437324464321136, 0.026865433901548386, 0.008821134455502033, 0.05896050110459328, 0.006038360297679901, 0.05802087485790253, 0.05262080207467079, 0.021981995552778244, 0.01655607670545578, 0.007265332620590925, 0.017941446974873543, 0.19668635725975037, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.4201550781726837, 0.0003083523770328611, 0.003427971852943301, 0.027074502781033516, 0.0025770263746380806, 0.0006525526405312121, 0.0672224909067154, 0.0006329934694804251, 0.002376251621171832, 0.007315297145396471, 0.0018543159822002053, 0.0002170451043639332, 5.486799182108371e-06, 8.465739665552974e-05, 0.018722370266914368, 0.33067038655281067, 0.02820705994963646, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [6.826388562330976e-05, 0.41254693269729614, 8.318798791151494e-05, 0.00021303755056578666, 2.6623651137924753e-05, 1.3030116861045826e-06, 3.3524677292007254e-06, 9.95700816019962e-07, 0.00025696202646940947, 0.00021154701244086027, 4.0387480112258345e-05, 7.382633339148015e-05, 0.0001871670683613047, 0.0001393109851051122, 0.00044668230111710727, 0.43891066312789917, 0.3106566071510315, 0.006947982590645552, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0012913167010992765, 0.46178945899009705, 0.0011929792817682028, 0.0014885100536048412, 0.001382660586386919, 
0.00010778238356579095, 4.841455302084796e-05, 4.8626650823280215e-05, 0.0007912410655990243, 0.0019299217965453863, 0.0002972490037791431, 0.0004315593687351793, 0.013707359321415424, 0.0025058358442038298, 0.00208207662217319, 0.8740342259407043, 0.6547167897224426, 0.0062981778755784035, 0.46666401624679565, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0008573953527957201, 5.803010481031379e-06, 0.0034995940513908863, 0.007113253697752953, 4.1040249925572425e-05, 0.48505696654319763, 0.0009781911503523588, 2.57480514846975e-05, 0.0006811833591200411, 0.011991027742624283, 0.013829604722559452, 0.02649468183517456, 0.018967876210808754, 0.008940043859183788, 0.0023627132177352905, 0.009682492353022099, 0.17458303272724152, 0.7120969891548157, 0.10496775060892105, 0.0038010317366570234, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [3.2793446735013276e-05, 4.91645641886862e-06, 0.0003670089063234627, 0.0005689052632078528, 0.0004337447171565145, 0.6979628205299377, 0.00025133590679615736, 1.3211038094596006e-05, 0.001040837960317731, 0.0008422345272265375, 0.00011131400242447853, 0.0007033413276076317, 0.00044049491407349706, 0.0004404923238325864, 0.00032976132933981717, 0.31054121255874634, 0.41146165132522583, 0.4573209881782532, 0.639615535736084, 0.038498248904943466, 0.06232544779777527, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.002877118531614542, 0.0015123215271160007, 0.21683953702449799, 0.042356427758932114, 0.09360139071941376, 0.7325531840324402, 0.007687804754823446, 0.0004983373219147325, 0.0008397439960390329, 0.018263472244143486, 0.01633409783244133, 0.06572946161031723, 0.029279880225658417, 0.13710656762123108, 0.013406738638877869, 0.2996446192264557, 0.18095439672470093, 0.8072441220283508, 0.6008384227752686, 0.045412980020046234, 0.09029265493154526, 0.15878555178642273, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.09384340792894363, 0.002295592101290822, 0.05245966836810112, 0.10398446023464203, 0.13232196867465973, 0.2621823251247406, 0.7299563884735107, 0.01621837355196476, 0.008298774249851704, 0.019108427688479424, 0.013038183562457561, 0.008606976829469204, 0.0014156820252537727, 0.008462491445243359, 0.08448491245508194, 0.07671086490154266, 0.13175785541534424, 0.032809216529130936, 0.06887537240982056, 0.32570284605026245, 0.22846734523773193, 0.06983717530965805, 0.07415641844272614, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [7.994164479896426e-05, 9.660106115916278e-06, 1.3390360436460469e-05, 0.0009496311540715396, 7.498388185922522e-06, 0.0023292596451938152, 0.0033705621026456356, 0.45610299706459045, 0.00048403104301542044, 0.0003956609289161861, 6.013430538587272e-05, 1.5610943592037074e-05, 4.899038231087616e-06, 1.0044974260381423e-05, 0.0011326958192512393, 0.4443431496620178, 0.2924090623855591, 0.09237049520015717, 0.07077033072710037, 0.05661908909678459, 0.1886560618877411, 0.5792031288146973, 0.23326165974140167, 0.024399278685450554, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0021254755556583405, 0.025354469195008278, 0.0505821667611599, 0.04718977212905884, 0.3544465899467468, 0.27984359860420227, 0.10468283295631409, 0.03827415779232979, 0.0065247067250311375, 0.003615353489294648, 0.001024437602609396, 0.02404061146080494, 
0.00031744904117658734, 0.011979974806308746, 0.06911104917526245, 0.0045473226346075535, 0.015263181179761887, 0.11153102666139603, 0.01091472152620554, 0.07137833535671234, 0.14599360525608063, 0.24649137258529663, 0.2676219940185547, 0.14942915737628937, 0.03359955921769142, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06793052703142166, 0.04423084855079651, 0.009074175730347633, 0.010606715455651283, 0.023761747404932976, 0.06765440851449966, 0.048715878278017044, 0.13498826324939728, 0.15846557915210724, 0.01835249364376068, 0.0033974519465118647, 0.011923078447580338, 0.0035463334061205387, 0.036997705698013306, 0.15195232629776, 0.0021246292162686586, 0.019146723672747612, 0.0190261360257864, 0.004887872841209173, 0.032842181622982025, 0.009469296783208847, 0.015122202225029469, 0.056959331035614014, 0.014146327041089535, 0.2864534854888916, 0.028167642652988434, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00013637961819767952, 0.00010623007256072015, 0.00015417735266964883, 0.00014589299098588526, 0.0007127521676011384, 0.0008950252668000758, 0.00038585966103710234, 0.002901369472965598, 0.34460243582725525, 0.00040915730642154813, 0.00017379666678607464, 9.334777860203758e-05, 0.0002283527428517118, 0.0001650981866987422, 0.0021401161793619394, 0.007321672048419714, 0.06949152052402496, 0.18409577012062073, 0.05168240889906883, 0.5332358479499817, 0.12983477115631104, 0.020923368632793427, 0.015086837112903595, 0.05491120368242264, 0.38865622878074646, 0.036598365753889084, 0.02645716816186905, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03951041400432587, 0.015644539147615433, 0.002765331417322159, 0.020979223772883415, 0.001914863707497716, 0.049360573291778564, 0.010446744039654732, 0.06006397679448128, 0.18512527644634247, 0.5769777894020081, 0.07455664873123169, 0.016840822994709015, 0.21517987549304962, 0.030672460794448853, 0.04319411888718605, 0.004608431365340948, 0.07759333401918411, 0.05611182749271393, 0.031112710013985634, 0.06043193116784096, 0.023203425109386444, 0.01299421489238739, 0.011212858371436596, 0.2615091800689697, 0.5089370608329773, 0.22289350628852844, 0.10276756435632706, 0.03959360718727112, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0012064727488905191, 0.0013226938899606466, 0.002064700936898589, 0.008003294467926025, 0.002116014016792178, 0.0028530799318104982, 0.006337625440210104, 0.0002913604548666626, 0.0004794643900822848, 0.0026383439544588327, 0.0038926906418055296, 0.3737375736236572, 0.002772320294752717, 0.007620541378855705, 0.003997606225311756, 0.012221934273838997, 0.040381401777267456, 0.0694599524140358, 0.0800129845738411, 0.023234205320477486, 0.003881127340719104, 0.03062801994383335, 0.024260450154542923, 0.012832778505980968, 0.01656900905072689, 0.2333584874868393, 0.3572527766227722, 0.0072386497631669044, 0.014752739109098911, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [1.0432314411445986e-05, 4.745730166177964e-06, 1.672162215982098e-05, 2.360623693675734e-05, 4.496370820561424e-06, 1.767691173881758e-06, 4.21794857174973e-06, 1.7029789205480483e-06, 2.8430429665604606e-05, 7.409282261505723e-05, 0.00010478614422027022, 0.00017224416660610586, 0.480630487203598, 0.017292670905590057, 3.8113743357826024e-05, 0.09144259989261627, 0.1256924569606781, 0.6557105779647827, 0.1641494482755661, 0.04417502135038376, 0.42902442812919617, 0.377028226852417, 0.1956152766942978, 0.27481555938720703, 
[... raw numeric output elided: nested matrices of floating-point values with 0.0/NaN padding (apparently serialized attention-weight data from a notebook output); no captions, labels, or other recoverable information ...]
0.000435528316302225, 0.00020690191013272852, 0.02179853804409504, 0.002864222740754485, 0.0005160043947398663, 0.001080053043551743, 0.0004847492673434317, 0.0009861867874860764, 0.003908392507582903, 0.47703394293785095, 0.07113853842020035, 0.000873323529958725, 0.27366653084754944, 0.354305237531662, 0.16368547081947327, 0.1598840057849884, 0.02900015190243721, 0.10581760108470917, 0.21902981400489807, 0.27043354511260986, 0.19813168048858643, 0.2514232099056244, 0.025616073980927467, 0.12471329420804977, 0.09682969748973846, 0.07310353219509125, 0.02883375994861126, 0.09285400807857513, 0.013515813276171684, 0.021914459764957428, 0.14159631729125977, 0.3238908648490906, 0.1783936321735382, 0.11570748686790466, NaN, NaN, NaN], [0.0030808241572231054, 6.38188939774409e-05, 0.011707174591720104, 0.023645061999559402, 0.038246914744377136, 0.047200631350278854, 0.04958858713507652, 0.012573646381497383, 0.04961754009127617, 0.005252092145383358, 0.002489157486706972, 0.17429526150226593, 0.008030706085264683, 0.02717452496290207, 0.1679786741733551, 0.0030968550126999617, 7.297070260392502e-05, 0.1371629387140274, 0.00018204482330475003, 0.04798782989382744, 0.01213640347123146, 0.0023585439193993807, 0.00011540603009052575, 0.016970379278063774, 0.0015150568215176463, 0.0003718302759807557, 0.00044133648043498397, 0.00012143531785113737, 0.021671650931239128, 0.023021340370178223, 0.00010860650218091905, 0.0005334930610843003, 0.000257489358773455, 0.0005856966599822044, 0.00045311596477404237, 0.09709983319044113, 0.18528476357460022, 0.0029071324970573187, NaN, NaN], [0.01455691922456026, 0.008012487553060055, 0.006938801147043705, 0.00259140832349658, 0.004911262542009354, 0.0004763725446537137, 0.10579084604978561, 0.021042171865701675, 0.03971559554338455, 0.07511086016893387, 0.43185338377952576, 0.0035418386105448008, 0.004437423776835203, 0.03184036538004875, 0.04226255044341087, 0.49188995361328125, 0.918917715549469, 0.2054058462381363, 0.08403602242469788, 0.6967929005622864, 0.5653088688850403, 0.03772272169589996, 0.04957969859242439, 0.18319177627563477, 0.012161915190517902, 0.07060753554105759, 0.009896048344671726, 0.1126827672123909, 0.010653471574187279, 0.1938174068927765, 0.1352803260087967, 0.0021707522682845592, 0.030638370662927628, 0.003963022027164698, 0.03303877264261246, 0.004082953091710806, 0.20578816533088684, 0.11854958534240723, 0.02041587606072426, NaN], [0.055085837841033936, 0.014846320264041424, 0.06939522176980972, 0.036867137998342514, 0.13156765699386597, 0.04343622922897339, 0.18117153644561768, 0.04244613274931908, 0.04596249759197235, 0.13158053159713745, 0.047130946069955826, 0.549620509147644, 0.24813801050186157, 0.3232562243938446, 0.11823604255914688, 0.001465475419536233, 0.00045102695003151894, 0.017218099907040596, 0.00030212500132620335, 0.11662620306015015, 0.017841650173068047, 0.00014393724268302321, 0.0003088460653088987, 0.006560556124895811, 0.0005491081974469125, 5.78465114813298e-05, 0.0019656207878142595, 0.00016285650781355798, 0.0002489366161171347, 0.011378495953977108, 0.0017521223053336143, 0.00787137821316719, 8.434856863459572e-05, 0.0012881350703537464, 7.287580228876323e-05, 0.00021561238099820912, 0.020317554473876953, 0.04195580258965492, 0.24219898879528046, 0.0017395684262737632]], [[0.2484879046678543, 0.12593188881874084, 0.11472177505493164, 0.6318025588989258, 0.009745504707098007, 0.030495919287204742, 0.054615989327430725, 0.004801109898835421, 0.23875823616981506, 0.011562658473849297, 0.02087206020951271, 
0.059635717421770096, 0.011483770795166492, 0.07716090232133865, 0.041850361973047256, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.3294946551322937, 0.17723912000656128, 0.041080135852098465, 0.30134642124176025, 0.0073102316819131374, 0.049291279166936874, 0.0495959147810936, 0.0037847748026251793, 0.014987694099545479, 0.07676513493061066, 0.039059415459632874, 0.006041571032255888, 0.011380840092897415, 0.011979957111179829, 0.02782473713159561, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.008675806224346161, 0.016726570203900337, 0.19906938076019287, 0.3167073726654053, 0.022006884217262268, 0.014510865323245525, 0.00237266905605793, 0.00938868336379528, 0.004848333541303873, 0.00305117666721344, 0.042285457253456116, 0.0026737553998827934, 0.017337674275040627, 0.0016427191440016031, 0.0027906473260372877, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.06292864680290222, 0.010060630738735199, 0.07846219092607498, 0.3009726405143738, 0.09911586344242096, 0.3769649565219879, 0.290684312582016, 0.048859626054763794, 0.015964722260832787, 0.02972962148487568, 0.25837212800979614, 0.050403933972120285, 0.052831199020147324, 0.44793814420700073, 0.12096201628446579, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0647541731595993, 0.06744952499866486, 0.010754776187241077, 0.15598785877227783, 0.08916914463043213, 0.4045051634311676, 0.5958212018013, 0.10594789683818817, 0.12025819718837738, 0.04822946712374687, 0.02913811057806015, 0.014846491627395153, 0.17111137509346008, 0.049513354897499084, 0.14188753068447113, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.07069405168294907, 0.0006015333347022533, 0.0017680496675893664, 0.0010985832195729017, 0.0012869784841313958, 0.22278346121311188, 0.4465882480144501, 0.06128238886594772, 0.02642727456986904, 0.03756114840507507, 0.002607540925964713, 0.0018699204083532095, 0.0059012919664382935, 0.020283877849578857, 0.03355809301137924, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0861939862370491, 0.03346291184425354, 0.009915103204548359, 0.35010838508605957, 0.03437130153179169, 0.18394741415977478, 0.5006390810012817, 0.0633198693394661, 0.36160194873809814, 0.07578127831220627, 0.038500167429447174, 0.08213403075933456, 0.026455186307430267, 0.12013117223978043, 0.1146865040063858, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.2484544962644577, 0.00790119543671608, 0.004407763481140137, 0.02700735628604889, 0.015422074124217033, 0.015295883640646935, 0.40846768021583557, 0.10706920176744461, 0.06367217004299164, 0.22094424068927765, 0.21221157908439636, 0.006999517325311899, 0.054566796869039536, 0.124799944460392, 0.09114839136600494, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1237153485417366, 0.029043834656476974, 0.07521974295377731, 0.04068650305271149, 0.002623512176796794, 0.008706655353307724, 0.03832445293664932, 0.14616532623767853, 0.1701044738292694, 0.20599642395973206, 
0.11677426844835281, 0.2341107875108719, 0.06235762685537338, 0.003964806441217661, 0.15731573104858398, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.034962959587574005, 0.023077068850398064, 0.034600574523210526, 0.14041800796985626, 0.0021679585333913565, 0.009290770627558231, 0.07274696230888367, 0.014187950640916824, 0.1371506154537201, 0.39440277218818665, 0.2198760211467743, 0.19940708577632904, 0.11203428357839584, 0.08552268147468567, 0.11737436801195145, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.015330069698393345, 0.007386082783341408, 0.017500948160886765, 0.01906486414372921, 0.010120063088834286, 0.05364372953772545, 0.043298348784446716, 0.12658876180648804, 0.06039673835039139, 0.02238147333264351, 0.16429400444030762, 0.06984445452690125, 0.3043651580810547, 0.055543575435876846, 0.11423089355230331, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.09644094854593277, 0.0058854687958955765, 0.03721459209918976, 0.0025620406959205866, 0.062300242483615875, 0.003563062520697713, 0.07219880819320679, 0.03924282267689705, 0.025451356545090675, 0.06598387658596039, 0.026776403188705444, 0.07250863313674927, 0.45021528005599976, 0.08199745416641235, 0.4220075309276581, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.01460834126919508, 0.0005662022740580142, 0.0013911814894527197, 0.05315173417329788, 0.008028149604797363, 0.016604119911789894, 0.011740745045244694, 0.008678588084876537, 0.0025609249714761972, 0.01638207584619522, 0.018210044130682945, 0.014119945466518402, 0.06550943106412888, 0.34254926443099976, 0.04794229939579964, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.05372002348303795, 0.14061135053634644, 0.018787089735269547, 0.0958278551697731, 0.0019092779839411378, 0.03348369151353836, 0.13957257568836212, 0.031220966950058937, 0.19735871255397797, 0.017847368493676186, 0.0589337982237339, 0.01900595612823963, 0.1276925951242447, 0.04769464209675789, 0.4384888708591461, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.08416850119829178, 0.1088641807436943, 0.0573052242398262, 0.27551695704460144, 0.030813831835985184, 0.18022866547107697, 0.10468263924121857, 0.09972096234560013, 0.31189021468162537, 0.3315774202346802, 0.2321816384792328, 0.034622836858034134, 0.14143656194210052, 0.04640315845608711, 0.09621720016002655, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.7448275089263916, 0.00023065913410391659, 0.0003700565139297396, 0.0002745355886872858, 0.0005768057890236378, 1.0151054993912112e-05, 1.3715341992792673e-05, 7.643950084457174e-06, 0.0004341531603131443, 5.2913601393811405e-05, 5.353476808522828e-05, 8.812115265754983e-05, 1.1566834245968494e-06, 5.744800546381157e-06, 5.576572584686801e-05, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [8.114575030049309e-05, 0.06691394746303558, 0.04036417603492737, 0.022258125245571136, 0.055233534425497055, 0.050445422530174255, 0.048324622213840485, 
0.00889397319406271, 0.1270352452993393, 0.04156908392906189, 0.20929713547229767, 0.21122632920742035, 0.414194792509079, 0.12628954648971558, 0.25567519664764404, 0.39058852195739746, 8.28505744721042e-06, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0012628535041585565, 0.0008597301202826202, 0.036364536732435226, 0.0971999391913414, 0.04217860475182533, 0.10421664267778397, 0.16082510352134705, 0.03283625468611717, 0.09032318741083145, 0.09653837233781815, 0.21890851855278015, 0.06589526683092117, 0.47985169291496277, 0.21388037502765656, 0.21010825037956238, 2.7811127438326366e-05, 0.4158080220222473, 0.0005852450849488378, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0002990703214891255, 0.001862871926277876, 0.010526847094297409, 0.01025421917438507, 0.05592086538672447, 0.02697981521487236, 0.01570008136332035, 0.02568165771663189, 0.010194454342126846, 0.048093631863594055, 0.04421652480959892, 0.02353351190686226, 0.21245922148227692, 0.0448865108191967, 0.23352482914924622, 9.039229868085252e-13, 4.1926887206500396e-05, 0.15358270704746246, 0.00044542484101839364, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00015855174569878727, 0.013162538409233093, 0.006567019037902355, 0.004201928153634071, 0.006268346216529608, 0.00024757537175901234, 0.012954139150679111, 0.003747382666915655, 0.03740423545241356, 0.007960616610944271, 0.013323514722287655, 0.06273993849754333, 0.048431456089019775, 0.13987915217876434, 0.20342004299163818, 1.9216391628896996e-16, 4.9363904963684035e-08, 0.0004218998074065894, 0.40449434518814087, 4.695959432865493e-06, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.013553211465477943, 0.03824196010828018, 0.02278091199696064, 0.09299258887767792, 0.0559159517288208, 0.00022306715254671872, 0.031003709882497787, 0.010444254614412785, 0.16168788075447083, 0.03666102886199951, 0.00852662418037653, 0.4432809352874756, 0.009321487508714199, 0.024379035457968712, 0.17351986467838287, 1.7349648803667746e-14, 5.141012060505545e-09, 3.7822364902240224e-06, 0.0002717413299251348, 0.22465285658836365, 2.698016260183067e-06, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00026768012321554124, 0.015254812315106392, 0.007090381346642971, 0.006173381581902504, 0.006773150525987148, 0.0008773274021223187, 0.00638232659548521, 0.016591282561421394, 0.004996343981474638, 0.009327422827482224, 0.008862738497555256, 0.05876166746020317, 0.009527520276606083, 0.00578573253005743, 0.20356230437755585, 3.6696812255598843e-09, 2.368522711293508e-09, 3.1902116006676806e-06, 9.520445587440918e-08, 9.990107355406508e-05, 0.2170185148715973, 0.019131841138005257, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0008312691352330148, 0.012717761099338531, 0.013986560516059399, 0.007093494758009911, 0.004876464139670134, 0.0027259632479399443, 0.0033886858727782965, 0.01589561626315117, 0.00876854918897152, 0.005017295014113188, 0.023178039118647575, 0.05755693465471268, 0.05451130494475365, 0.06928746402263641, 0.1796484887599945, 2.292660354896725e-07, 1.4062491449085002e-10, 1.0373556180720556e-11, 2.945570870549474e-11, 1.3987125901948616e-09, 1.1205498822164373e-06, 0.3382871150970459, 0.0008390913717448711, NaN, NaN, NaN, 
NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00016753048112150282, 0.011822681874036789, 0.005686081480234861, 0.011659285984933376, 0.004307762254029512, 0.0031254058703780174, 0.009316416457295418, 0.0016170619055628777, 0.012603488750755787, 0.0245236624032259, 0.01756892167031765, 0.011099276132881641, 0.11892349272966385, 0.02075323462486267, 0.2549600899219513, 2.3133984541345853e-06, 0.00017511146143078804, 1.441240442545677e-06, 3.064446918443764e-09, 3.097617096159411e-08, 7.23518027712089e-08, 0.0017295092111453414, 0.39626115560531616, 0.00019915253506042063, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00017647366621531546, 0.053185176104307175, 0.007304554805159569, 0.004834755789488554, 0.000954066461417824, 0.025718921795487404, 0.02985404059290886, 0.09960591793060303, 0.010695043951272964, 0.016483109444379807, 0.018774237483739853, 0.05090473219752312, 0.01008983701467514, 0.028674444183707237, 0.22871088981628418, 8.689644937311981e-15, 2.8357308110571466e-06, 5.0946681540153804e-08, 2.0269605438549831e-10, 1.289949813632063e-10, 3.375676821404383e-11, 8.602300205495794e-09, 4.5097981455910485e-06, 0.29888245463371277, 6.641173968091607e-05, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0008755451999604702, 0.020039640367031097, 0.003969491925090551, 0.007670485880225897, 0.006173306610435247, 0.012295764870941639, 0.0076020946726202965, 0.012137084268033504, 0.010956642217934132, 0.010541083291172981, 0.018125493079423904, 0.03226908668875694, 0.02587633579969406, 0.016216130927205086, 0.1660052388906479, 2.8127108337250475e-18, 1.3557467148928026e-08, 7.431774662336466e-08, 2.301476165200711e-08, 1.1707952315975767e-11, 7.274678689300762e-12, 7.034611066401852e-13, 5.257664963120856e-13, 3.4044413041556254e-05, 0.32336506247520447, 4.600838292390108e-05, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [5.4335410823114216e-05, 0.03367479890584946, 0.004507457371801138, 0.004544241353869438, 0.00623831432312727, 0.002192543353885412, 0.004128816071897745, 0.021106822416186333, 0.0003909784718416631, 0.00830051489174366, 0.018183842301368713, 0.009683135896921158, 0.0325237475335598, 0.00792472343891859, 0.25227075815200806, 6.300134025583048e-13, 5.676838910062543e-08, 1.822371018533886e-06, 2.3448223146260716e-05, 2.5415656068616954e-07, 3.417801153204891e-08, 5.353474885616549e-10, 2.141239963115993e-11, 3.762530198514469e-08, 6.24434178462252e-05, 0.33693620562553406, 3.183486114721745e-05, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0006012204103171825, 0.01188816037029028, 0.023532994091510773, 0.00770517997443676, 0.007410787045955658, 0.007087987381964922, 0.021027186885476112, 0.013456426560878754, 0.03266710042953491, 0.001251929672434926, 0.09021235257387161, 0.024440091103315353, 0.024299103766679764, 0.02338516153395176, 0.1967199146747589, 1.5877897954763576e-12, 1.2288996487086479e-09, 3.458522428445576e-07, 9.462546586291865e-06, 7.457422907464206e-05, 0.0005706463125534356, 1.4425116212635203e-08, 4.5430816769144455e-13, 2.616490357709722e-12, 3.545688542772041e-08, 0.00016559385403525084, 0.22770871222019196, 0.0009294600458815694, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0009616355528123677, 0.059039004147052765, 0.04997482895851135, 0.013552234508097172, 0.03981975466012955, 0.020335622131824493, 0.014380398206412792, 0.07606764137744904, 0.07161007821559906, 0.024130970239639282, 
0.06891870498657227, 0.0008635766571387649, 0.023193923756480217, 0.02981526218354702, 0.21020111441612244, 2.579016999959549e-10, 1.5412886245069757e-10, 5.557828156033118e-11, 1.2367832313842086e-09, 3.3751638284229557e-07, 4.776334208145272e-07, 1.75399406998622e-07, 9.608910021829953e-12, 7.499024594652057e-14, 2.8573548556528813e-14, 3.2670008191793e-12, 4.494925178732956e-06, 0.37381958961486816, 3.638648195192218e-05, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0013424595817923546, 0.0746709555387497, 0.011544802226126194, 0.027912717312574387, 0.0729047879576683, 0.10483764857053757, 0.07119728624820709, 0.010606798343360424, 0.044552259147167206, 0.05723145231604576, 0.034647323191165924, 0.38214871287345886, 0.003923356998711824, 0.08778946846723557, 0.19581711292266846, 3.090227983193472e-05, 8.430293382843956e-05, 4.32313208875712e-05, 1.6493000885020592e-06, 8.794136192591395e-06, 0.0005616153357550502, 0.0013158570509403944, 0.0005267951055429876, 3.675571861094795e-05, 2.42239195813454e-07, 8.356466074666002e-10, 2.3424906885338714e-06, 0.0012797197559848428, 0.6210904717445374, 0.0014036636566743255, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0016638260567560792, 0.01581355184316635, 0.08943041414022446, 0.02092832513153553, 0.021133122965693474, 0.012408973649144173, 0.01347691286355257, 0.00275444146245718, 0.027862150222063065, 0.01225491613149643, 0.018322426825761795, 0.008929668925702572, 0.00015579524915665388, 0.0014782899525016546, 0.18181975185871124, 7.67247776423119e-09, 2.954437938740284e-08, 8.54147774731473e-09, 2.011255162415182e-09, 5.265776792384713e-08, 1.4630668898618637e-09, 2.2913241082278546e-06, 3.266295323101076e-08, 1.6124132571349037e-06, 1.13081211061683e-11, 2.6358108895513247e-15, 7.728456763445024e-11, 2.3767283696685126e-09, 2.1271845980663784e-05, 0.19462287425994873, 6.456446044467157e-06, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0008640239248052239, 0.06174946948885918, 0.004653214477002621, 0.002717669354751706, 0.015129820443689823, 0.00935456808656454, 0.016078660264611244, 0.08089328557252884, 0.017857585102319717, 0.0025031790137290955, 0.00012101473839720711, 0.013123439624905586, 0.005499868653714657, 0.001559562049806118, 0.22764776647090912, 4.312543703220706e-13, 2.1705271535665815e-07, 1.1365986551936658e-07, 1.9739390211270802e-07, 7.690645453806155e-09, 4.219609994748907e-09, 9.716764060030414e-10, 3.915795687703394e-08, 3.0873563900968293e-06, 5.5168204227129536e-08, 1.0056843552375128e-10, 6.254387632798064e-12, 4.318517331930449e-12, 1.5618051990573534e-11, 6.033264071447775e-05, 0.4116440713405609, 1.8908482161350548e-05, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0008687095833010972, 0.025285501033067703, 0.01658034697175026, 0.02363765239715576, 0.02393241412937641, 0.0657346174120903, 0.015298763290047646, 0.01792113669216633, 0.021707117557525635, 0.018967296928167343, 0.037634264677762985, 0.013209421187639236, 0.02256513573229313, 0.007774183992296457, 0.15961462259292603, 1.797858697974407e-17, 3.5553746058347713e-10, 1.0377114723070235e-09, 5.157609006545272e-09, 5.5740526777592336e-11, 3.675403037473046e-11, 3.015720268992328e-12, 1.2632186895361434e-14, 3.2584634990229233e-09, 2.7093712162695738e-08, 2.733851353305984e-15, 2.0347772078377346e-10, 7.802066534575867e-16, 1.702402683943053e-16, 1.8298086656987067e-10, 6.30185184036236e-08, 0.2592085301876068, 3.469779585429933e-06, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0001073219973477535, 0.04253393039107323, 
0.010077103972434998, 0.007349912542849779, 0.00879223458468914, 0.004757148679345846, 0.008167163468897343, 0.03753674402832985, 0.00042728587868623435, 0.014237778261303902, 0.029898250475525856, 0.006872681900858879, 0.045794516801834106, 0.007500257343053818, 0.2562271058559418, 3.386366187463352e-10, 1.5587464474720036e-07, 5.430682108453766e-07, 1.926859113154933e-05, 2.7584928830037825e-06, 5.553058031182445e-07, 6.554741815989473e-08, 7.146391256540596e-10, 4.225638150501254e-08, 2.0539353045023745e-06, 0.00010312868107575923, 2.5505174860995794e-08, 1.3659710695890226e-08, 4.206753695390475e-11, 5.200286035123014e-11, 3.842067428649898e-07, 1.4282905794971157e-05, 0.31164512038230896, 0.00011869923037011176, NaN, NaN, NaN, NaN, NaN, NaN], [0.0005320480559021235, 0.010701313614845276, 0.020972738042473793, 0.007364482618868351, 0.006165153346955776, 0.00950621161609888, 0.022682208567857742, 0.018515970557928085, 0.03319491446018219, 0.00125269521959126, 0.07773777842521667, 0.022826068103313446, 0.02051766775548458, 0.020874740555882454, 0.1872510462999344, 3.098006018387167e-10, 3.2388165482899467e-09, 1.8609943808201024e-08, 5.099297482047405e-07, 4.603737033903599e-05, 0.00016448901442345232, 1.6998721719119203e-07, 1.7718410072475876e-11, 2.5886336477154437e-11, 9.218055652127077e-09, 1.2046231745443947e-07, 7.304957398446277e-05, 2.3164133111652774e-10, 2.8952129582648922e-09, 2.9085676575557606e-11, 8.895827650901023e-12, 8.14965606110718e-09, 8.762691868469119e-05, 0.2280847281217575, 0.0004104141262359917, NaN, NaN, NaN, NaN, NaN], [0.0008804904646240175, 0.05573932081460953, 0.06578188389539719, 0.01897181011736393, 0.043492771685123444, 0.026308609172701836, 0.016426166519522667, 0.09104844927787781, 0.12495335191488266, 0.04637341946363449, 0.0944451242685318, 0.0008321930072270334, 0.03243781998753548, 0.03530845418572426, 0.2013196051120758, 1.3149543676149733e-09, 1.080373679407387e-09, 5.5150013028582023e-11, 7.800748935693491e-10, 1.7859061074432248e-07, 2.183157299384675e-08, 2.5236221290469985e-07, 2.35878039323012e-10, 9.060349692724401e-12, 1.4339956088890715e-12, 1.7799637631876752e-12, 2.9941787715870305e-08, 6.0217857935640495e-06, 3.1683756313016787e-11, 4.5713120788715145e-11, 3.4124135808721867e-13, 3.591858459424911e-15, 1.3559961530365539e-12, 3.119595021416899e-06, 0.35679423809051514, 3.964137067669071e-05, NaN, NaN, NaN, NaN], [0.001610875129699707, 0.08435038477182388, 0.014167247340083122, 0.03493078798055649, 0.07050123810768127, 0.10772886872291565, 0.09850788861513138, 0.013066386803984642, 0.05027954652905464, 0.10465669631958008, 0.04533415287733078, 0.47037968039512634, 0.004505114629864693, 0.12196572870016098, 0.18816377222537994, 4.326914222474443e-06, 0.00023807807883713394, 0.00026310785324312747, 8.714396244613454e-06, 1.617559973965399e-05, 0.0001319001312367618, 0.0005945482989773154, 0.000823884445708245, 0.0008506007143296301, 1.7805428797146305e-05, 2.734714854568665e-08, 2.8855724849563558e-06, 4.891938442597166e-05, 0.0011682395124807954, 8.529372053089901e-07, 0.00017029111040756106, 1.0359013202787537e-07, 7.06834313302096e-10, 1.0861956525332062e-06, 0.0008713650749996305, 0.596385657787323, 0.0009257638594135642, NaN, NaN, NaN], [0.0018758929800242186, 0.019657986238598824, 0.1020394116640091, 0.033738646656274796, 0.024869924411177635, 0.012215637601912022, 0.015038376674056053, 0.002843664726242423, 0.02175789885222912, 0.01636381261050701, 0.01989913359284401, 0.01190999522805214, 0.00020280842727515846, 
0.0016855570720508695, 0.17570628225803375, 1.4773272882795396e-10, 2.3448599506536993e-08, 6.434380566133768e-07, 3.8027360460546333e-07, 2.454226432746509e-06, 5.541529457531169e-09, 3.5226184991188347e-06, 2.5443886997322807e-08, 1.7749154721968807e-05, 1.8393259137994278e-09, 4.026108439691978e-12, 6.382850692432385e-09, 1.7809153263215194e-08, 8.996512974590587e-07, 0.00010512088192626834, 1.1464897607671443e-11, 2.794342757184154e-09, 2.4549680847631107e-15, 9.933188299671158e-11, 7.3009864820505754e-09, 8.105817687464878e-05, 0.2077004611492157, 2.0097606466151774e-05, NaN, NaN], [0.0009206020040437579, 0.08179444819688797, 0.00436751963570714, 0.003652991494163871, 0.019383452832698822, 0.008280212059617043, 0.016885409131646156, 0.10377784073352814, 0.023152435198426247, 0.0037028237711638212, 0.0001251623034477234, 0.018928401172161102, 0.009926089085638523, 0.002465219935402274, 0.21539123356342316, 1.1257004341538607e-14, 1.3137036347643516e-08, 4.6611327775281097e-07, 3.0405328743654536e-06, 1.5423474053477548e-07, 2.520166120234535e-08, 3.4643394819511286e-09, 1.1558090484697914e-08, 1.417677253812144e-06, 9.112129362165433e-08, 4.2694305868451465e-09, 3.7723260626343347e-10, 4.1450526344632976e-10, 2.7357388923676673e-11, 6.112880441833113e-07, 3.9687514799879864e-05, 8.382351063263016e-11, 8.293656039715103e-11, 4.97465783844131e-12, 4.144883221368634e-12, 1.4191136113450575e-11, 2.5566061594872735e-05, 0.4056495428085327, 4.4409513066057116e-05, NaN], [0.0005496710073202848, 0.039492249488830566, 0.016358638182282448, 0.007983607240021229, 0.006420070305466652, 0.0012171968119218946, 0.003928476013243198, 0.005028040148317814, 0.010722441598773003, 0.0025004756171256304, 0.015696601942181587, 0.006085758097469807, 0.0033880609553307295, 0.0056163351982831955, 0.1572248637676239, 9.215334861117716e-19, 2.6557794852166694e-10, 5.799645919069008e-07, 1.003176621633406e-11, 7.217926736302616e-07, 4.876178394397357e-08, 8.254863459455919e-11, 1.424103456687531e-12, 1.1857503423584603e-08, 1.3074058502482444e-09, 8.580362115262474e-12, 5.829819293978744e-09, 1.8017319407259702e-12, 9.234832950427707e-14, 3.576115098491428e-11, 1.9265784523270213e-09, 1.8997316146851517e-06, 1.949248054633479e-11, 8.860704392432694e-10, 2.8198800851872777e-14, 5.674391451236226e-15, 1.0258181110112119e-10, 6.93914080329705e-06, 0.25534507632255554, 2.742740150551981e-07]], [[0.130781888961792, 0.31469303369522095, 0.10550640523433685, 0.05234318599104881, 0.073336161673069, 0.022349786013364792, 0.04807984083890915, 0.1931842416524887, 0.06399697810411453, 0.042083337903022766, 0.026750531047582626, 0.11997608095407486, 0.008983415551483631, 0.03431839123368263, 0.019280044361948967, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1582711637020111, 0.14862558245658875, 0.20016248524188995, 0.08876624703407288, 0.11006557196378708, 0.14632253348827362, 0.04025046527385712, 0.010204354301095009, 0.017868297174572945, 0.059372395277023315, 0.02111685276031494, 0.04181571304798126, 0.025184988975524902, 0.09681157767772675, 0.11611668020486832, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.23875439167022705, 0.3084685802459717, 0.14188633859157562, 0.026331612840294838, 0.0149313323199749, 0.09176106750965118, 0.03131069242954254, 0.10051372647285461, 0.03149634972214699, 0.11085867136716843, 0.014410188421607018, 0.02796255424618721, 
0.034816499799489975, 0.025807565078139305, 0.01846306212246418, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.3404518961906433, 0.24260303378105164, 0.15383434295654297, 0.17020593583583832, 0.011800014413893223, 0.014385397545993328, 0.09441643208265305, 0.12204645574092865, 0.13843503594398499, 0.045293405652046204, 0.010667533613741398, 0.19693949818611145, 0.10281307995319366, 0.01422606036067009, 0.06984427571296692, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.002873742487281561, 0.008706165477633476, 0.35573768615722656, 0.0015586970839649439, 0.015496796928346157, 0.003392455168068409, 0.01149011217057705, 0.01891980692744255, 0.016394488513469696, 0.003960000351071358, 0.0035995631478726864, 0.008501716889441013, 0.018164046108722687, 0.004727588500827551, 0.013562880456447601, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.044807154685258865, 0.02788197249174118, 0.03947468474507332, 0.1271299421787262, 0.17640650272369385, 0.25110092759132385, 0.08349309861660004, 0.02069718949496746, 0.45751577615737915, 0.039922621101140976, 0.1781769096851349, 0.002931024879217148, 0.16567888855934143, 0.1177627220749855, 0.5156693458557129, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.005990047473460436, 0.04782475531101227, 0.01399919856339693, 0.010489771142601967, 0.06132129579782486, 0.030459748581051826, 0.010153756476938725, 0.3387801945209503, 0.06446883827447891, 0.007243711035698652, 0.00693717272952199, 0.020023254677653313, 0.007285784464329481, 0.009139767847955227, 0.0044054011814296246, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.020405659452080727, 0.00729386368766427, 0.06661678105592728, 0.08295443654060364, 0.20373474061489105, 0.3448184132575989, 0.04295210912823677, 0.20947468280792236, 0.03081577830016613, 0.010805373080074787, 0.17521467804908752, 0.06567652523517609, 0.012400656938552856, 0.10652147233486176, 0.07385163754224777, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.21573591232299805, 0.13175059854984283, 0.04085814207792282, 0.04119405150413513, 0.03551999852061272, 0.023009058088064194, 0.2751774191856384, 0.047030266374349594, 0.14272502064704895, 0.20153193175792694, 0.09575672447681427, 0.11327007412910461, 0.008532780222594738, 0.053245026618242264, 0.08952803909778595, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.2778390347957611, 0.11423225700855255, 0.3034791946411133, 0.34643107652664185, 0.5395972728729248, 0.06785042583942413, 0.13029156625270844, 0.18737749755382538, 0.029348008334636688, 0.16667678952217102, 0.021040884777903557, 0.008728248998522758, 0.037633832544088364, 0.02033349499106407, 0.03947347402572632, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.4898838996887207, 0.08082167059183121, 0.07362432777881622, 0.02171795442700386, 0.1333591789007187, 0.09000474214553833, 0.13501934707164764, 0.03979193791747093, 0.19113953411579132, 0.13522492349147797, 0.16557832062244415, 
0.16255514323711395, 0.07687958329916, 0.15948235988616943, 0.09843874722719193, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.045906297862529755, 0.18602333962917328, 0.4082620143890381, 0.010370302945375443, 0.04507172852754593, 0.19693265855312347, 0.04021843150258064, 0.027866821736097336, 0.1546991914510727, 0.33766424655914307, 0.09260500222444534, 0.05066358670592308, 0.05655887722969055, 0.13157807290554047, 0.06850539147853851, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.020344020798802376, 0.0030158585868775845, 0.004445259924978018, 0.022628312930464745, 0.030150510370731354, 0.027700912207365036, 0.026311388239264488, 0.012862108647823334, 0.07009940594434738, 0.24656175076961517, 0.10596039146184921, 0.1143152266740799, 0.3679012656211853, 0.0068145813420414925, 0.04171491786837578, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.004749340936541557, 0.00182742765173316, 0.0021293568424880505, 0.00394084258005023, 0.004750867374241352, 5.3125138947507367e-05, 0.0026011874433606863, 0.000718552153557539, 0.002356230979785323, 0.00125187449157238, 0.0021339249797165394, 0.00044074622564949095, 0.2141493707895279, 0.0029175111558288336, 0.00477015832439065, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.12991508841514587, 0.06724811345338821, 0.06397818773984909, 0.15923364460468292, 0.2566852867603302, 0.07963784784078598, 0.09182894974946976, 0.040824584662914276, 0.21298912167549133, 0.2517295181751251, 0.2285410314798355, 0.11115844547748566, 0.1010512113571167, 0.3968040943145752, 0.1870165765285492, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.09555985033512115, 0.6603901982307434, 0.4109249413013458, 0.6857163310050964, 0.16377028822898865, 0.1341286301612854, 0.19969937205314636, 0.28269705176353455, 0.14764364063739777, 0.41980865597724915, 0.4319525361061096, 0.3789142668247223, 0.49345141649246216, 0.26345306634902954, 0.00909768883138895, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1460653841495514, 0.2758752405643463, 0.2826583981513977, 0.551855206489563, 0.05612415447831154, 0.19304026663303375, 0.0849798247218132, 0.038316093385219574, 0.02312053181231022, 0.46154478192329407, 0.36433619260787964, 0.35877159237861633, 0.1596277803182602, 0.0554661750793457, 6.483463948825374e-05, 0.0002614231198094785, 0.183704674243927, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [3.716628270922229e-05, 1.9402585849093157e-07, 1.0113188182003796e-05, 6.318590021692216e-05, 6.053787728887983e-07, 2.5790013751247898e-06, 0.00022986173280514777, 1.074662236533186e-06, 6.082240361138247e-06, 3.35614299729059e-06, 2.225729804194998e-05, 7.863033715693746e-06, 1.555537892272696e-06, 3.881560041918419e-05, 0.23657216131687164, 1.3331101555991154e-08, 0.003119559260085225, 0.19454506039619446, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.6150763630867004, 0.041665952652692795, 0.4174444377422333, 0.4949702024459839, 0.20794649422168732, 0.3307763934135437, 
0.8098993897438049, 0.2721010744571686, 0.7274996042251587, 0.4779607057571411, 0.6233283281326294, 0.7560765147209167, 0.3628612458705902, 0.7672091722488403, 5.392584171204362e-06, 1.1244888353800775e-09, 0.0005117341643199325, 0.15345418453216553, 0.0018621939234435558, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [5.640763447445352e-06, 2.5884469323500525e-07, 1.2724142379738623e-06, 8.170181899913587e-06, 1.2345621769327408e-07, 1.310836523771286e-07, 1.02673438959755e-05, 9.661080184741877e-07, 6.520539272969472e-07, 7.602448022225872e-07, 2.058099425994442e-06, 6.885502301656743e-08, 1.0175665465794737e-06, 1.7383708836860023e-05, 0.20754273235797882, 2.882708471929618e-08, 0.0006895777769386768, 0.008299488574266434, 0.004234161227941513, 0.26378652453422546, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [9.27566077280062e-07, 5.395870630309219e-07, 1.8455818917573197e-07, 1.2775643654094893e-06, 2.105696061960316e-08, 3.1680112755338996e-08, 6.263408067752607e-06, 4.3284012463118415e-07, 1.918825773827848e-06, 1.694104128091567e-07, 3.363936968980852e-07, 9.135120215830739e-09, 4.4058825920956224e-08, 7.840970965844463e-07, 0.18219269812107086, 6.507164653157815e-05, 0.0030905166640877724, 0.269605815410614, 0.06594818085432053, 0.07055308669805527, 0.24370616674423218, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.7144812345504761, 0.6739043593406677, 0.2952970862388611, 0.49478814005851746, 0.17151717841625214, 0.06989942491054535, 0.5132517218589783, 0.30886489152908325, 0.5621734261512756, 0.5728412866592407, 0.576314389705658, 0.34687095880508423, 0.25617536902427673, 0.29690253734588623, 7.371841547865188e-06, 5.806248736917041e-05, 0.0008924558642320335, 0.00047033390728756785, 0.003593915607780218, 0.044251326471567154, 0.18547922372817993, 0.19724349677562714, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.6291437745094299, 0.5982875823974609, 0.4885888695716858, 0.5792520046234131, 0.2514877915382385, 0.5298613905906677, 0.11972777545452118, 0.6076628565788269, 0.04243328422307968, 0.5940482020378113, 0.6775911450386047, 0.3496588468551636, 0.4937344789505005, 0.40163323283195496, 2.9517783332266845e-05, 0.03321969881653786, 0.1786998063325882, 0.0021111152600497007, 0.00015362887643277645, 0.0013223892310634255, 0.01674751006066799, 0.27181917428970337, 0.0704144611954689, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.6414378881454468, 0.20530864596366882, 0.8448930978775024, 0.5841984748840332, 0.48009997606277466, 0.48003992438316345, 0.4468145966529846, 0.036266062408685684, 0.3466547429561615, 0.521195650100708, 0.7532409429550171, 0.14529024064540863, 0.3844791650772095, 0.46825459599494934, 2.1059213395346887e-05, 0.0005316429305821657, 0.0021434861700981855, 0.0005638045258820057, 2.0347550162114203e-05, 8.372889715246856e-05, 0.0012170294066891074, 0.0006328476592898369, 0.0015302025713026524, 0.2731996476650238, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.7977450489997864, 0.5162288546562195, 0.513008177280426, 0.6203657984733582, 0.04621165990829468, 0.2237500697374344, 0.10730908066034317, 0.17203836143016815, 0.028481170535087585, 0.5342445969581604, 0.7256113290786743, 0.5827998518943787, 0.755642294883728, 0.511749804019928, 0.00015279543003998697, 
3.384976253073546e-06, 0.0032942681573331356, 0.003179847961291671, 0.0003072107210755348, 3.0923787562642246e-05, 0.0003082206822000444, 0.0026841319631785154, 0.011449099518358707, 0.2928124964237213, 0.0015787724405527115, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.5001324415206909, 0.7283154129981995, 0.6225411295890808, 0.5096700191497803, 0.4470505714416504, 0.6475648880004883, 0.4919697046279907, 0.42729777097702026, 0.22966071963310242, 0.4533919394016266, 0.5539101958274841, 0.2698501944541931, 0.3532210886478424, 0.2643750309944153, 2.9741322578047402e-05, 4.910896677756682e-05, 0.01189705915749073, 0.0036808690056204796, 0.006090851966291666, 0.0029882052913308144, 0.006760776974260807, 0.0002592294185888022, 0.0001972121826838702, 0.15788163244724274, 0.14973512291908264, 0.14614373445510864, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.42266348004341125, 0.20205438137054443, 0.42841264605522156, 0.6724829077720642, 0.29094210267066956, 0.4464052617549896, 0.24126748740673065, 0.22405968606472015, 0.21308888494968414, 0.3085091710090637, 0.4672502279281616, 0.14604215323925018, 0.09687051922082901, 0.12085973471403122, 2.7047781259170733e-05, 7.539001671830192e-05, 0.036947283893823624, 0.01112621370702982, 0.04119950905442238, 0.06979847699403763, 0.01383589580655098, 0.008948443457484245, 9.020609286380932e-05, 0.0005221512983553112, 0.34183818101882935, 0.12104173004627228, 0.027292484417557716, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.5077533721923828, 0.4866065979003906, 0.8742184638977051, 0.805268406867981, 0.8406472206115723, 0.45863693952560425, 0.3596036732196808, 0.36316972970962524, 0.38783764839172363, 0.03767421096563339, 0.43841618299484253, 0.3401361405849457, 0.3197961747646332, 0.20812755823135376, 7.5720936365542e-06, 5.4811065638205037e-05, 0.015359039418399334, 0.005874635651707649, 0.024854328483343124, 0.16572602093219757, 0.13195344805717468, 0.08553953468799591, 0.00124072446487844, 0.0008515206864103675, 0.0025517549365758896, 0.03817262500524521, 0.1957935392856598, 0.020919298753142357, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12348711490631104, 0.49926623702049255, 0.1342328041791916, 0.07936512678861618, 0.11133208125829697, 0.032334309071302414, 0.028592387214303017, 0.036310840398073196, 0.036252155900001526, 0.10585709661245346, 0.19267472624778748, 0.34429997205734253, 0.16909800469875336, 0.2464863359928131, 3.1697504709882196e-06, 3.401398498681374e-05, 0.0008079431718215346, 0.00045223115012049675, 0.00013304724416229874, 0.0006849576020613313, 0.009534466080367565, 0.010466179810464382, 0.00030334663460962474, 0.00033610902028158307, 2.1021634893259034e-05, 6.891421071486548e-05, 0.0028196852654218674, 0.3685440421104431, 0.0008976467652246356, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [4.5035082507638435e-07, 4.8253248507990065e-08, 2.1990938847693542e-08, 4.3766593194050074e-07, 1.1283042766763174e-07, 2.4235429663121977e-08, 4.6985369408503175e-06, 1.5805973418991925e-07, 1.1619090578562918e-08, 1.9516033233912822e-08, 1.8456361772223318e-07, 2.2261544074808626e-07, 2.278205402106437e-09, 7.143006541809882e-07, 0.21044957637786865, 0.0012722803512588143, 0.07485485821962357, 0.004568059463053942, 0.008557068184018135, 0.04491077736020088, 0.010689688846468925, 0.010801602154970169, 0.015439217910170555, 0.001288879313506186, 0.032191790640354156, 9.430324280401692e-05, 0.0010071481810882688, 
0.03593403846025467, 0.015365669503808022, 0.28865233063697815, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.71169513463974, 0.2780396640300751, 0.44078493118286133, 0.7963916063308716, 0.6933308839797974, 0.5056049823760986, 0.7329073548316956, 0.810703694820404, 0.551677942276001, 0.6459015607833862, 0.6943050622940063, 0.2817550301551819, 0.10247289389371872, 0.7378624677658081, 8.274764695670456e-06, 0.0003195737663190812, 0.0016381103778257966, 0.001899963477626443, 0.000450764549896121, 0.0029568641912192106, 0.0004077073244843632, 0.006739944685250521, 5.316005626809783e-05, 0.000977654941380024, 0.00033480822457931936, 1.5544836060144007e-05, 5.177688763069455e-06, 0.000280524865956977, 8.569184137741104e-05, 0.19435854256153107, 0.0009946423815563321, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.723514199256897, 0.08602748066186905, 0.6093902587890625, 0.8655006289482117, 0.42677831649780273, 0.03823491558432579, 0.30262306332588196, 0.036271825432777405, 0.12300263345241547, 0.2776595950126648, 0.07632125169038773, 0.06917709112167358, 0.14498986303806305, 0.06881040334701538, 2.5871422622003593e-06, 0.0004552309401333332, 0.00916277151554823, 0.2859989106655121, 0.028668222948908806, 0.004703177139163017, 0.013283651322126389, 0.011935138143599033, 0.00041849465924315155, 0.021506765857338905, 0.0005354905733838677, 2.3408898414345458e-05, 5.557515123655321e-06, 4.006853941973532e-06, 0.000782388960942626, 0.032734211534261703, 0.33600685000419617, 0.05645810067653656, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.7111753225326538, 0.8019941449165344, 0.7984396815299988, 0.6959745287895203, 0.34880974888801575, 0.5955101251602173, 0.6658092141151428, 0.5378626585006714, 0.35595381259918213, 0.5855972766876221, 0.5757258534431458, 0.133575439453125, 0.3884122669696808, 0.11617641150951385, 8.579120731155854e-06, 0.001615832676179707, 0.0592908076941967, 0.004439341835677624, 0.0221478920429945, 0.05761101841926575, 0.08599329739809036, 0.009327156469225883, 0.0014337823959067464, 0.22479815781116486, 0.007599419914186001, 0.00010282513540005311, 0.003995772451162338, 0.0007532926392741501, 0.0001985877170227468, 0.042725738137960434, 0.609107255935669, 0.032340146601200104, 0.2600889503955841, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.43439850211143494, 0.1714652180671692, 0.4214288294315338, 0.6560039520263672, 0.15961043536663055, 0.25604698061943054, 0.26937225461006165, 0.1702796220779419, 0.22940081357955933, 0.327440470457077, 0.3977930247783661, 0.08873222768306732, 0.13160161674022675, 0.07058954238891602, 2.3103428247850388e-05, 0.0007894318550825119, 0.08912800997495651, 0.00870462041348219, 0.062210533767938614, 0.21669252216815948, 0.04955689236521721, 0.12036743760108948, 0.001276280265301466, 0.002290783217176795, 0.4637441337108612, 0.041003014892339706, 0.007595454342663288, 0.0049859327264130116, 0.030789200216531754, 0.01441932376474142, 0.02666427381336689, 0.013092019595205784, 0.22824719548225403, 0.07290598005056381, NaN, NaN, NaN, NaN, NaN, NaN], [0.48717519640922546, 0.4504354000091553, 0.9026078581809998, 0.8262973427772522, 0.8697957992553711, 0.4322546720504761, 0.47440072894096375, 0.40584686398506165, 0.6554202437400818, 0.04447361081838608, 0.5114831924438477, 0.4020007252693176, 0.3586147725582123, 0.19603849947452545, 5.424046776170144e-06, 4.2991967347916216e-05, 0.006631283089518547, 0.0006027332856319845, 0.004053125157952309, 0.03894652798771858, 0.031787656247615814, 0.10168109834194183, 0.004267984535545111, 0.002045443281531334, 
[Several thousand floating-point values elided: nested list data (values in [0, 1], with padded positions stored as 0.0 and masked positions as NaN), apparently per-layer, per-head attention weights serialized as notebook output.]
NaN, NaN, NaN], [0.00012501348101068288, 4.870840712101199e-05, 0.0024386774748563766, 0.001847597537562251, 0.0017206922639161348, 0.0002501157287042588, 0.0009360458934679627, 0.00021343374100979418, 0.0004799730086233467, 0.00017777700850274414, 0.0013057318283244967, 0.0019216074142605066, 0.7016423344612122, 0.059743087738752365, 1.6802117897896096e-05, 0.10572486370801926, 0.04525948688387871, 0.055838145315647125, 0.050681136548519135, 0.027844024822115898, 0.014026278629899025, 0.025656970217823982, 0.0361209474503994, 0.017075760290026665, 0.01003955863416195, 0.016965145245194435, 0.04991300031542778, 0.01522271428257227, 0.007584442384541035, 0.03757705166935921, 0.03609456866979599, 0.10922907292842865, 0.19329114258289337, 0.2903786897659302, 0.29551932215690613, 0.1564989984035492, 0.3518115282058716, NaN, NaN, NaN], [1.7574552657606546e-06, 9.272354617451128e-08, 1.001089003693778e-05, 5.891482942388393e-05, 3.3656547202554066e-06, 1.2065736143540562e-07, 6.7727110035775695e-06, 6.411150366147922e-07, 1.3192883443480241e-06, 1.1707085832313169e-05, 0.00026830541901290417, 1.0283902156515978e-05, 0.6812964081764221, 0.27208930253982544, 4.838558993469633e-07, 0.017342884093523026, 0.024629754945635796, 0.0017386168474331498, 0.003977979999035597, 0.0011948446044698358, 0.0001711023651296273, 0.0019097719341516495, 0.050265345722436905, 0.048485398292541504, 0.025773482397198677, 0.011941587552428246, 0.02582539990544319, 0.014500979334115982, 0.011088544502854347, 0.0004536270862445235, 0.001346826204098761, 0.09912228584289551, 0.03899921476840973, 0.19399496912956238, 0.33165985345840454, 0.3351045250892639, 0.007158405613154173, 0.26822295784950256, NaN, NaN], [0.01900503970682621, 0.0008953948272392154, 0.09836827963590622, 0.2858547866344452, 0.0013939865166321397, 0.0011423979885876179, 0.011685764417052269, 0.00014273256238084286, 0.010754182003438473, 0.15914513170719147, 0.6438553333282471, 0.002441136632114649, 0.008362390100955963, 0.07132171094417572, 0.011131932027637959, 0.15815527737140656, 0.009173951111733913, 0.012453499250113964, 0.01756284572184086, 0.0007500716019421816, 0.0020462200045585632, 0.00166225153952837, 0.05335438624024391, 0.037105023860931396, 0.009711050428450108, 0.05516523867845535, 0.04893142729997635, 0.03887411952018738, 0.002221355913206935, 0.004346344619989395, 0.004376854281872511, 0.001785764587111771, 0.09844812005758286, 0.14674220979213715, 0.34636548161506653, 0.04763580113649368, 0.057022612541913986, 0.12166893482208252, 0.13556897640228271, NaN], [0.12417581677436829, 0.0153038389980793, 0.12986266613006592, 0.6406017541885376, 0.009386910125613213, 0.057520631700754166, 0.09723392128944397, 0.0041757188737392426, 0.030985616147518158, 0.12765046954154968, 0.052563395351171494, 0.09427980333566666, 0.010530965402722359, 0.01615813747048378, 0.110444575548172, 0.16895240545272827, 0.0006144722574390471, 0.0027162963524460793, 0.0007400937611237168, 0.0007253509247675538, 0.0007097159395925701, 0.000199983871425502, 0.0005034026107750833, 0.0002540702698752284, 0.0002154638059437275, 0.0004817947919946164, 0.0019994170870631933, 0.0003459753352217376, 6.575404404429719e-05, 0.004540599416941404, 0.00010029276745626703, 0.0005050064064562321, 0.003569946391507983, 0.008527955040335655, 0.003213587449863553, 0.0022120880894362926, 0.11142478138208389, 0.01313241571187973, 0.055687084794044495, 0.21235007047653198]], [[0.1577264666557312, 0.03251823037862778, 0.4939506947994232, 0.8334789872169495, 0.6927971243858337, 
0.3147047460079193, 0.7604361176490784, 0.11822030693292618, 0.7022377848625183, 0.6516091823577881, 0.14691989123821259, 0.2232232689857483, 0.14339210093021393, 0.3761228322982788, 0.014605461619794369, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.028655482456088066, 0.14083503186702728, 0.08485368639230728, 0.8299343585968018, 0.8304422497749329, 0.5664599537849426, 0.834579586982727, 0.7438958287239075, 0.8452481031417847, 0.8614712953567505, 0.3640905022621155, 0.805733323097229, 0.3481642007827759, 0.795884370803833, 0.05269646272063255, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.02106422185897827, 0.010846637189388275, 0.073356993496418, 0.017661061137914658, 0.8741048574447632, 0.5687165856361389, 0.5249210000038147, 0.5693489909172058, 0.5103186368942261, 0.5253384709358215, 0.6472406387329102, 0.4561024308204651, 0.1524587720632553, 0.45141565799713135, 0.034538887441158295, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.2203565090894699, 0.02154199220240116, 0.007279311306774616, 0.003464027540758252, 0.18461424112319946, 0.07773485034704208, 0.7297388315200806, 0.2260110229253769, 0.6848539113998413, 0.2328294813632965, 0.22646839916706085, 0.3173597455024719, 0.10388152301311493, 0.06158056855201721, 0.11330780386924744, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1574045568704605, 0.12516136467456818, 0.04707150533795357, 0.0032313871197402477, 0.19444315135478973, 0.046962298452854156, 0.48863229155540466, 0.8290899991989136, 0.892469584941864, 0.6836395859718323, 0.83636474609375, 0.47956424951553345, 0.034452617168426514, 0.38761135935783386, 0.055785421282052994, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.4389230012893677, 0.6133158802986145, 0.4783843159675598, 0.11230780929327011, 0.006951127201318741, 0.0644199401140213, 0.03406795859336853, 0.33251792192459106, 0.9552598595619202, 0.8827710747718811, 0.9276224970817566, 0.8325800895690918, 0.737617552280426, 0.745059609413147, 0.05149900168180466, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.3395847976207733, 0.09897124767303467, 0.16763220727443695, 0.1671983003616333, 0.049412358552217484, 0.007114487700164318, 0.3340696394443512, 0.018166696652770042, 0.7235669493675232, 0.9639523029327393, 0.851059079170227, 0.7306914925575256, 0.5801126956939697, 0.8017169237136841, 0.08099871873855591, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.44394704699516296, 0.6082286238670349, 0.37166181206703186, 0.3715074956417084, 0.35315781831741333, 0.10853563994169235, 0.013190319761633873, 0.07092351466417313, 0.03435605764389038, 0.25131845474243164, 0.921750545501709, 0.8745512366294861, 0.7473158240318298, 0.834020733833313, 0.1216883435845375, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.18251584470272064, 0.8759727478027344, 0.1439245641231537, 0.06640342622995377, 0.060579828917980194, 0.2710072100162506, 0.011089610867202282, 0.034396518021821976, 
0.1700025051832199, 0.043876904994249344, 0.14450228214263916, 0.9449294805526733, 0.9689385294914246, 0.939329981803894, 0.07954179495573044, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.32071176171302795, 0.7452729344367981, 0.11999625712633133, 0.08053360879421234, 0.3748469650745392, 0.31863275170326233, 0.028054066002368927, 0.2197551280260086, 0.01771731488406658, 0.23943577706813812, 0.01906767673790455, 0.8113164901733398, 0.9739595055580139, 0.9691897630691528, 0.21732129156589508, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.6261264085769653, 0.6649302244186401, 0.5194191336631775, 0.6324451565742493, 0.6771988272666931, 0.7814968228340149, 0.4118405878543854, 0.3728334903717041, 0.03296521306037903, 0.008678224869072437, 0.6047253012657166, 0.11251461505889893, 0.21560458838939667, 0.9244948625564575, 0.10127653181552887, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.3176693320274353, 0.5172579884529114, 0.1793123036623001, 0.37762320041656494, 0.23678036034107208, 0.5621929168701172, 0.08773050457239151, 0.24525783956050873, 0.010828782804310322, 0.025829488411545753, 0.0057976157404482365, 0.08708162605762482, 0.04166324809193611, 0.5714256167411804, 0.16898052394390106, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.6460146307945251, 0.8194199800491333, 0.48921409249305725, 0.6910595297813416, 0.5259124636650085, 0.6389046311378479, 0.3241840600967407, 0.7817367911338806, 0.17853572964668274, 0.1606196016073227, 0.06383053213357925, 0.007355134002864361, 0.02128707617521286, 0.02206379547715187, 0.23354344069957733, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.5992116332054138, 0.6358246803283691, 0.47243836522102356, 0.5617506504058838, 0.6971379518508911, 0.6431114673614502, 0.39991113543510437, 0.8182389140129089, 0.2704472243785858, 0.20400457084178925, 0.059529319405555725, 0.06732083112001419, 0.008503233082592487, 0.06121496111154556, 0.2071741670370102, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.2342938333749771, 0.5683650374412537, 0.6037701964378357, 0.7331977486610413, 0.7349027395248413, 0.6651985049247742, 0.23853524029254913, 0.2293619066476822, 0.48426058888435364, 0.7077944874763489, 0.5918195843696594, 0.8169012665748596, 0.7005065679550171, 0.4784330725669861, 0.015931207686662674, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.05668458715081215, 0.013551714830100536, 0.3300224542617798, 0.22417771816253662, 0.24923239648342133, 0.16107039153575897, 0.07639153301715851, 0.036736860871315, 0.044193096458911896, 0.14611276984214783, 0.15061600506305695, 0.035221245139837265, 0.0397845022380352, 0.06225845590233803, 0.12414046376943588, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.29422780871391296, 0.3258638381958008, 0.027477310970425606, 0.10906420648097992, 0.003920723684132099, 0.020042676478624344, 0.05157224088907242, 0.0009247793932445347, 0.005282218102365732, 0.1744423359632492, 
0.0761384516954422, 0.0033416510559618473, 0.0003361533163115382, 0.0012587645323947072, 0.013668928295373917, 0.13440807163715363, 0.048166193068027496, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.19355924427509308, 0.1259031891822815, 0.004604514688253403, 0.04003702849149704, 0.0129036083817482, 0.019794460386037827, 0.06589072942733765, 0.0014933310449123383, 0.012753497809171677, 0.06252782791852951, 0.0361945815384388, 0.011655895970761776, 0.01012047752737999, 0.02639157697558403, 0.16549569368362427, 0.14904144406318665, 0.03273539990186691, 0.03615117073059082, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.4293937385082245, 0.07181306928396225, 0.003158864099532366, 0.04697505012154579, 0.01354672759771347, 0.09221473336219788, 0.24058710038661957, 0.0037424738984555006, 0.07543525844812393, 0.0656844824552536, 0.01989266835153103, 0.06512395292520523, 0.01137665193527937, 0.029709961265325546, 0.18951866030693054, 0.17614386975765228, 0.0854690745472908, 0.038236960768699646, 0.12011754512786865, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.052543047815561295, 0.03695955500006676, 0.100065678358078, 0.07546547800302505, 0.053252771496772766, 0.11382242292165756, 0.28551623225212097, 0.14051520824432373, 0.12815484404563904, 0.15533913671970367, 0.11139650642871857, 0.09512985497713089, 0.017796501517295837, 0.04266834259033203, 0.1351824700832367, 0.14069411158561707, 0.1466522365808487, 0.07941046357154846, 0.06070372834801674, 0.045592159032821655, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.002040643012151122, 0.005490712355822325, 0.024769198149442673, 0.007002650294452906, 0.0020249236840754747, 0.03913044556975365, 0.01487613096833229, 0.09424738585948944, 0.010089649818837643, 0.05513475462794304, 0.0488949678838253, 0.007691625505685806, 0.002344577107578516, 0.012510538101196289, 0.20307941734790802, 0.15778480470180511, 0.11167039722204208, 0.20017755031585693, 0.10082826018333435, 0.013994856737554073, 0.07346371561288834, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04981796815991402, 0.13342007994651794, 0.4189896881580353, 0.06767702847719193, 0.007763800676912069, 0.11641503125429153, 0.029343493282794952, 0.11072052270174026, 0.06700066477060318, 0.1429358571767807, 0.3406253457069397, 0.00571059063076973, 0.0006326772854663432, 0.004126383922994137, 0.17491626739501953, 0.15305520594120026, 0.26692208647727966, 0.1222626119852066, 0.14178596436977386, 0.012799645774066448, 0.019025815650820732, 0.14782781898975372, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.008032058365643024, 0.009898788295686245, 0.0165096465498209, 0.015990890562534332, 0.001612947671674192, 0.07025154680013657, 0.1309722512960434, 0.45684561133384705, 0.020022952929139137, 0.014566164463758469, 0.01627122238278389, 0.001012062537483871, 0.003352430183440447, 0.006583840120583773, 0.0849505066871643, 0.050227321684360504, 0.49922510981559753, 0.2564227879047394, 0.37594476342201233, 0.05222875997424126, 0.019398091360926628, 0.07475102692842484, 0.13636687397956848, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.027854006737470627, 0.008844887837767601, 0.011581032536923885, 
0.014227867126464844, 0.0022522227372974157, 0.6803511381149292, 0.24682462215423584, 0.11913055926561356, 0.0028406307101249695, 0.006190288811922073, 0.00574448611587286, 0.0012344244169071317, 0.010572707280516624, 0.00985674187541008, 0.11121391505002975, 0.1278427243232727, 0.4489462971687317, 0.09382158517837524, 0.09914611279964447, 0.11451858282089233, 0.14035384356975555, 0.0858180820941925, 0.1395546793937683, 0.05027398467063904, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11111988872289658, 0.0035893325693905354, 0.4007861316204071, 0.2033512443304062, 0.1986382007598877, 0.15137647092342377, 0.12109687924385071, 0.007575488183647394, 0.021906785666942596, 0.03087061457335949, 0.08533017337322235, 0.07086688280105591, 0.06729871034622192, 0.045789312571287155, 0.1673528403043747, 0.06907324492931366, 0.44302117824554443, 0.21607427299022675, 0.21861647069454193, 0.14559195935726166, 0.12854896485805511, 0.21420170366764069, 0.5056769251823425, 0.05036870762705803, 0.14160890877246857, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06468851119279861, 0.006587199401110411, 0.23617494106292725, 0.19800357520580292, 0.15495024621486664, 0.06172433868050575, 0.05180465057492256, 0.01833559013903141, 0.016546709463000298, 0.05746111273765564, 0.0824536681175232, 0.007550883572548628, 0.007943101227283478, 0.011712267994880676, 0.33849596977233887, 0.08832916617393494, 0.4917650520801544, 0.16961733996868134, 0.21240676939487457, 0.17275941371917725, 0.13381528854370117, 0.1763075888156891, 0.3443826735019684, 0.022638684138655663, 0.14659351110458374, 0.05034468695521355, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.09414701163768768, 0.10295354574918747, 0.0844656303524971, 0.06548816710710526, 0.08529236167669296, 0.06227908656001091, 0.030192906036973, 0.010874724946916103, 0.025562399998307228, 0.005146168638020754, 0.014559037052094936, 0.013559900224208832, 0.06781303137540817, 0.05153109133243561, 0.33232951164245605, 0.10765255987644196, 0.1569133847951889, 0.14696621894836426, 0.12414205074310303, 0.1321374922990799, 0.32589367032051086, 0.09939466416835785, 0.15668180584907532, 0.035531532019376755, 0.18526552617549896, 0.100669264793396, 0.1766001582145691, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.314544141292572, 0.6832185983657837, 0.07794945687055588, 0.042061515152454376, 0.015504884533584118, 0.1916494369506836, 0.006379975005984306, 0.0006176759488880634, 0.0012508369982242584, 0.01929312013089657, 0.022219885140657425, 0.0019787217024713755, 0.01769268326461315, 0.008809820748865604, 0.08711312711238861, 0.0920143872499466, 0.03631591796875, 0.10338561236858368, 0.13865944743156433, 0.14365890622138977, 0.19164490699768066, 0.08302215486764908, 0.17053648829460144, 0.20418454706668854, 0.4243081212043762, 0.23730118572711945, 0.11353020370006561, 0.062482837587594986, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.027118999511003494, 0.07309459149837494, 0.04486501216888428, 0.012266037985682487, 0.024303032085299492, 0.030924739316105843, 0.021004648879170418, 0.003694491693750024, 0.01517508551478386, 0.025275954976677895, 0.0075909653678536415, 0.24021397531032562, 0.04135901853442192, 0.07603362947702408, 0.11061857640743256, 0.14247462153434753, 0.10275112092494965, 0.08782284706830978, 0.07633533328771591, 0.09427531808614731, 0.2382509559392929, 0.11237408220767975, 0.1274290829896927, 0.09234490990638733, 
0.29983192682266235, 0.19681134819984436, 0.09119200706481934, 0.1394888311624527, 0.02876400761306286, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.025165440514683723, 0.019109023734927177, 0.008520743809640408, 0.015198510140180588, 0.007751345168799162, 0.005125374533236027, 0.008160223253071308, 0.0017721926560625434, 0.08641061931848526, 0.07765892893075943, 0.017936453223228455, 0.020675569772720337, 0.0024341135285794735, 0.023971976712346077, 0.16557703912258148, 0.14126147329807281, 0.06271495670080185, 0.09029032289981842, 0.10313913226127625, 0.08530516922473907, 0.05194256827235222, 0.09853952378034592, 0.05407971888780594, 0.10021005570888519, 0.14394013583660126, 0.19472479820251465, 0.17138735949993134, 0.055624835193157196, 0.022259291261434555, 0.010825252160429955, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.22320780158042908, 0.05348529666662216, 0.01734296977519989, 0.1172923669219017, 0.004340981598943472, 0.003372892737388611, 0.033841460943222046, 0.024162178859114647, 0.05216863751411438, 0.3090120553970337, 0.2295515090227127, 0.014075365848839283, 0.020010780543088913, 0.20773397386074066, 0.12411301583051682, 0.15579406917095184, 0.5571659207344055, 0.09220181405544281, 0.09424383193254471, 0.2893342971801758, 0.14449337124824524, 0.08881417661905289, 0.09621196240186691, 0.05768556892871857, 0.34467604756355286, 0.16894927620887756, 0.32070621848106384, 0.32385867834091187, 0.08616255223751068, 0.0030245021916925907, 0.011462957598268986, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1383964717388153, 0.05579448863863945, 0.1563209742307663, 0.09128513187170029, 0.039257608354091644, 0.009886945597827435, 0.006391164381057024, 0.0007081980584189296, 0.006523598916828632, 0.16335614025592804, 0.02935076504945755, 0.023180969059467316, 0.19186609983444214, 0.2336183488368988, 0.16814255714416504, 0.06543286889791489, 0.3303832709789276, 0.1981877088546753, 0.17906354367733002, 0.08578304201364517, 0.12075137346982956, 0.09918820112943649, 0.14948950707912445, 0.0696079283952713, 0.2870473861694336, 0.2037079930305481, 0.20505982637405396, 0.415317177772522, 0.18504147231578827, 0.05944397673010826, 0.03780561313033104, 0.06350213289260864, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1625337302684784, 0.007939358241856098, 0.11928629875183105, 0.1341797411441803, 0.005670298356562853, 0.0033473502844572067, 0.022544465959072113, 0.005534132476896048, 0.007299710530787706, 0.08667418360710144, 0.07403960824012756, 0.004230144899338484, 0.002401313977316022, 0.005503634922206402, 0.20701391994953156, 0.08806300163269043, 0.5073549151420593, 0.15216797590255737, 0.1779468059539795, 0.08599209040403366, 0.038353316485881805, 0.05095306783914566, 0.13815101981163025, 0.05531492829322815, 0.3680262565612793, 0.045964885503053665, 0.5803228616714478, 0.2365681380033493, 0.10053237527608871, 0.016326427459716797, 0.011199035681784153, 0.02849578857421875, 0.09785498678684235, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08204744011163712, 0.04882703348994255, 0.048393696546554565, 0.02867632359266281, 0.012730585411190987, 0.02805456519126892, 0.014470821246504784, 0.008571655489504337, 0.011637779884040356, 0.011116313748061657, 0.015620187856256962, 0.00444953003898263, 0.038398172706365585, 0.021771300584077835, 0.25556278228759766, 0.10047968477010727, 0.17735490202903748, 0.1303417980670929, 0.1233980730175972, 0.11124629527330399, 0.27208706736564636, 0.09057758748531342, 0.20949512720108032, 0.0595981664955616, 0.32820063829421997, 
0.19304482638835907, 0.3008245825767517, 0.24370267987251282, 0.0977335274219513, 0.0604717954993248, 0.08826017379760742, 0.05976974964141846, 0.11658596247434616, 0.26095637679100037, NaN, NaN, NaN, NaN, NaN, NaN], [0.3818233609199524, 0.6690115928649902, 0.07648678869009018, 0.0345233753323555, 0.011518634855747223, 0.1436365395784378, 0.005264819134026766, 0.000502048700582236, 0.0017500953981652856, 0.03918173909187317, 0.04129163548350334, 0.0023984990548342466, 0.020183494314551353, 0.008427903987467289, 0.09516369551420212, 0.08956606686115265, 0.03296149522066116, 0.07127847522497177, 0.10275094956159592, 0.12852256000041962, 0.15250688791275024, 0.05763629823923111, 0.13953621685504913, 0.2147330343723297, 0.3297017514705658, 0.25630685687065125, 0.3529660999774933, 0.05266188457608223, 0.19866161048412323, 0.08034973591566086, 0.16050152480602264, 0.12120798975229263, 0.21796129643917084, 0.13665789365768433, 0.05867582932114601, NaN, NaN, NaN, NaN, NaN], [0.02332407608628273, 0.06938373297452927, 0.035716570913791656, 0.008126936852931976, 0.012537641450762749, 0.0137803228572011, 0.01513306051492691, 0.00204691500402987, 0.029820755124092102, 0.05474912002682686, 0.016170548275113106, 0.22342036664485931, 0.05026429146528244, 0.06863567978143692, 0.11948796361684799, 0.16931524872779846, 0.06866136193275452, 0.058377113193273544, 0.054153572767972946, 0.06997817754745483, 0.17294903099536896, 0.06504172086715698, 0.09800923615694046, 0.07601338624954224, 0.22323867678642273, 0.17471107840538025, 0.20914696156978607, 0.32561469078063965, 0.04201642796397209, 0.014874166809022427, 0.043757203966379166, 0.11901038885116577, 0.15924809873104095, 0.08216992020606995, 0.13305248320102692, 0.031323518604040146, NaN, NaN, NaN, NaN], [0.020166568458080292, 0.015762973576784134, 0.006330324336886406, 0.008625769056379795, 0.005781465210020542, 0.00451312493532896, 0.007413441780954599, 0.0018466140609234571, 0.14846709370613098, 0.1376892477273941, 0.02431248314678669, 0.03153817355632782, 0.0025850962847471237, 0.026987632736563683, 0.15984071791172028, 0.14597494900226593, 0.05063166096806526, 0.07245789468288422, 0.08537694066762924, 0.07253167033195496, 0.03945168852806091, 0.07488631457090378, 0.04114159941673279, 0.09447583556175232, 0.11984950304031372, 0.21245841681957245, 0.24130037426948547, 0.053050536662340164, 0.036372195929288864, 0.012788524851202965, 0.05413965508341789, 0.17548364400863647, 0.18113258481025696, 0.17045176029205322, 0.056165628135204315, 0.023532675579190254, 0.007599800359457731, NaN, NaN, NaN], [0.11904438585042953, 0.03637225553393364, 0.013324074447154999, 0.04586002975702286, 0.00359557312913239, 0.002297254279255867, 0.02453085221350193, 0.019205793738365173, 0.07615289092063904, 0.3510056436061859, 0.24748629331588745, 0.0179043747484684, 0.015299135819077492, 0.16336295008659363, 0.13914434611797333, 0.20880575478076935, 0.4742221236228943, 0.0684090405702591, 0.07499475032091141, 0.22897963225841522, 0.11411925405263901, 0.06380540132522583, 0.06602712720632553, 0.04886250197887421, 0.25098055601119995, 0.16695836186408997, 0.41882073879241943, 0.45364588499069214, 0.19780457019805908, 0.004864717833697796, 0.007611281704157591, 0.23698794841766357, 0.08390159159898758, 0.28844529390335083, 0.28151822090148926, 0.0680297240614891, 0.0018790157046169043, 0.008693840354681015, NaN, NaN], [0.0598345547914505, 0.028141267597675323, 0.11996681243181229, 0.04193190485239029, 0.03001757152378559, 0.006633914541453123, 0.005910022184252739, 
0.0007469199481420219, 0.010509159415960312, 0.18832749128341675, 0.032145459204912186, 0.022126449272036552, 0.16793787479400635, 0.1917877346277237, 0.16885708272457123, 0.06649312376976013, 0.2272576093673706, 0.15548978745937347, 0.13675269484519958, 0.06747769564390182, 0.09888236224651337, 0.07679145783185959, 0.09811051189899445, 0.059132058173418045, 0.16564641892910004, 0.1534833461046219, 0.21299242973327637, 0.46317315101623535, 0.18783308565616608, 0.06707606464624405, 0.07066023349761963, 0.038238298147916794, 0.13390158116817474, 0.1738123893737793, 0.3894510865211487, 0.199345201253891, 0.05267143249511719, 0.03450411930680275, 0.0674150139093399, NaN], [0.30011340975761414, 0.029496116563677788, 0.21246175467967987, 0.11388618499040604, 0.019265230745077133, 0.011386800557374954, 0.02386542037129402, 0.0049255480989813805, 0.002113579073920846, 0.2235003262758255, 0.1410367637872696, 0.022971738129854202, 0.009332037530839443, 0.01034344732761383, 0.12311729788780212, 0.13068987429141998, 0.5177554488182068, 0.21822108328342438, 0.17411521077156067, 0.11371950805187225, 0.10282127559185028, 0.14754493534564972, 0.10529720038175583, 0.04059072583913803, 0.1422514021396637, 0.16688787937164307, 0.3468432128429413, 0.07328897714614868, 0.033892080187797546, 0.005811289418488741, 0.006848806049674749, 0.033459149301052094, 0.08608346432447433, 0.29348817467689514, 0.07146795839071274, 0.05563248693943024, 0.008248405531048775, 0.00942459236830473, 0.03898181766271591, 0.13983668386936188]], [[0.04383472725749016, 0.02773081697523594, 0.016415273770689964, 0.024880478158593178, 0.005487722344696522, 0.14834517240524292, 0.010061212815344334, 0.013310510665178299, 0.03559315577149391, 0.022788431495428085, 0.016539618372917175, 0.022621937096118927, 0.3853665292263031, 0.02895752713084221, 0.21785423159599304, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.02212689444422722, 0.0360226184129715, 0.0007962794625200331, 0.005733562167733908, 0.0017349227564409375, 0.011109595187008381, 0.02015179581940174, 0.048344310373067856, 0.003794114338234067, 0.016348786652088165, 0.0018908409401774406, 0.010183308273553848, 0.04822028428316116, 0.011540568433701992, 0.21287554502487183, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.19621919095516205, 0.02568935602903366, 0.012553256005048752, 0.05958101898431778, 0.0049527534283697605, 0.009129918180406094, 0.035662900656461716, 0.006033026147633791, 0.01979534700512886, 0.016174430027604103, 0.025959551334381104, 0.017891131341457367, 0.21532145142555237, 0.010915487073361874, 0.2776879370212555, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.22681212425231934, 0.26364389061927795, 0.1368870735168457, 0.07472710311412811, 0.004966794513165951, 0.17209400236606598, 0.07595591247081757, 0.10330677032470703, 0.009879215620458126, 0.30214887857437134, 0.027453631162643433, 0.07928238064050674, 0.6068928837776184, 0.0009245484252460301, 0.41711828112602234, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03220081329345703, 0.07110226154327393, 0.19687172770500183, 0.32465922832489014, 0.06123804301023483, 0.009123058058321476, 0.008925903588533401, 0.001694322214461863, 0.009767607785761356, 0.012425252236425877, 
0.021234901621937752, 0.006749649532139301, 0.022427640855312347, 0.00419656652957201, 0.11337225884199142, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1499132513999939, 0.1588381826877594, 0.006192722357809544, 0.06905046850442886, 0.021936854347586632, 0.04223879054188728, 0.01654554158449173, 0.012800824828445911, 0.001194898271933198, 0.011350413784384727, 0.0011690479004755616, 0.03650015965104103, 0.0330234132707119, 0.032408226281404495, 0.30060991644859314, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.10197536647319794, 0.32784661650657654, 0.22266407310962677, 0.37194594740867615, 0.4840903878211975, 0.2562866806983948, 0.20682689547538757, 0.01685171388089657, 0.02662164717912674, 0.01744299754500389, 0.07043293118476868, 0.06053447723388672, 0.13449640572071075, 0.0437617152929306, 0.15905345976352692, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.04155902937054634, 0.02725875750184059, 0.06621034443378448, 0.15740959346294403, 0.22226983308792114, 0.11737026274204254, 0.021176597103476524, 0.037896860390901566, 0.001983239781111479, 0.07737525552511215, 0.040612466633319855, 0.036445699632167816, 0.04206009954214096, 0.005294053349643946, 0.22695806622505188, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.3731417655944824, 0.020610323175787926, 0.04687204957008362, 0.19942151010036469, 0.0219199787825346, 0.023319954052567482, 0.607546865940094, 0.0038317576982080936, 0.05746426433324814, 0.0039819530211389065, 0.0020286834333091974, 0.023514816537499428, 0.0007224131841212511, 0.0017132725333794951, 0.31377115845680237, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.007707278709858656, 0.04994801804423332, 0.0602150596678257, 0.1843070536851883, 0.023052150383591652, 0.00867108628153801, 0.0030793596524745226, 0.008175634779036045, 0.3707427382469177, 0.032583341002464294, 0.030614105984568596, 0.003414844162762165, 0.0027733321767300367, 0.00039667857345193624, 0.06665757298469543, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.06275568902492523, 0.15385569632053375, 0.07121506333351135, 0.04657430946826935, 0.08974524587392807, 0.017753345891833305, 0.09537442773580551, 0.08409535884857178, 0.4617481529712677, 0.05371565744280815, 0.051210206001996994, 0.014556556940078735, 0.0261379461735487, 0.0015151489060372114, 0.25993233919143677, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.037524934858083725, 0.08964382112026215, 0.11503562331199646, 0.2385229468345642, 0.14595970511436462, 0.01507873460650444, 0.07354842126369476, 0.014194677583873272, 0.01029899064451456, 0.3145633935928345, 0.08443433046340942, 0.02799280546605587, 0.006364578381180763, 0.0011598452692851424, 0.25597554445266724, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03498825803399086, 0.003427299438044429, 0.012860815972089767, 0.00960747804492712, 0.0073430403135716915, 0.002194140339270234, 0.020218953490257263, 0.04016692563891411, 
0.0035721054300665855, 0.11439335346221924, 0.03179614990949631, 0.0055262502282857895, 0.08811097592115402, 0.0019241927657276392, 0.31578439474105835, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0003122057532891631, 0.0005657155998051167, 0.0003099576279055327, 0.018182117491960526, 8.608390635345131e-05, 0.00029685357003472745, 0.00030423246789723635, 0.0039575002156198025, 0.00041145391878671944, 0.0009832053910940886, 0.0007515411707572639, 0.006357411853969097, 0.3007054328918457, 0.00010537439811741933, 0.00161165336612612, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.052370160818099976, 0.019386928528547287, 0.0404941625893116, 0.16087706387043, 0.14014431834220886, 0.0561581589281559, 0.1907973736524582, 0.027806226164102554, 0.022970959544181824, 0.05846026912331581, 0.09902504831552505, 0.038958851248025894, 0.016928229480981827, 0.04114920645952225, 0.14461401104927063, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03517069295048714, 0.03549245744943619, 0.004381549544632435, 0.008797217160463333, 0.007323419209569693, 0.042320944368839264, 0.004849699325859547, 0.003679578425362706, 0.011580413207411766, 0.009367180056869984, 0.006541883572936058, 0.022973380982875824, 0.023761657997965813, 0.02892483025789261, 0.1581033319234848, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.01528994832187891, 0.20408181846141815, 0.11101088672876358, 0.08111120015382767, 0.07986893504858017, 0.010126215405762196, 0.020366966724395752, 0.1417536586523056, 0.04787333309650421, 0.04340335354208946, 0.2409791648387909, 0.04442436248064041, 0.005909040104597807, 0.014603852294385433, 0.18931475281715393, 0.13037645816802979, 0.08109150826931, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.21622280776500702, 0.09626477211713791, 0.10110790282487869, 0.31975099444389343, 0.2572377920150757, 0.630383312702179, 0.1336757242679596, 0.17725828289985657, 0.02378956414759159, 0.22253809869289398, 0.13939163088798523, 0.30914127826690674, 0.35968318581581116, 0.48164138197898865, 0.09301326423883438, 0.14859925210475922, 0.02925589494407177, 0.0505123995244503, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.168080672621727, 0.1516411453485489, 0.07150255143642426, 0.32225823402404785, 0.2490793913602829, 0.30686429142951965, 0.032337237149477005, 0.16698232293128967, 0.04405289515852928, 0.2310783565044403, 0.10561788827180862, 0.2769646644592285, 0.19830158352851868, 0.1653461754322052, 0.09653043746948242, 0.21387919783592224, 0.03206360712647438, 0.012896520085632801, 0.06630519032478333, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04038669914007187, 0.16624715924263, 0.3317047655582428, 0.3851986229419708, 0.42305275797843933, 0.008450526744127274, 0.09501849114894867, 0.24002836644649506, 0.4256587326526642, 0.15410973131656647, 0.19127053022384644, 0.04389801248908043, 0.030224177986383438, 0.05971052870154381, 0.11478950828313828, 0.15968731045722961, 0.046736959367990494, 0.014681101776659489, 0.01418250147253275, 0.011044399812817574, NaN, NaN, NaN, NaN, NaN, NaN, 
NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04527302458882332, 0.15370813012123108, 0.46266382932662964, 0.06791326403617859, 0.6029869914054871, 0.018879592418670654, 0.07514301687479019, 0.07948564738035202, 0.6243545413017273, 0.11254889518022537, 0.24916931986808777, 0.08612842112779617, 0.07598677277565002, 0.13317255675792694, 0.04299912229180336, 0.22570300102233887, 0.051045093685388565, 0.020206425338983536, 0.021926334127783775, 0.008406145498156548, 0.0702541247010231, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03695433586835861, 0.028389452025294304, 0.2721908688545227, 0.07653216272592545, 0.6730886697769165, 0.004614274017512798, 0.004165990743786097, 0.01533985324203968, 0.28992146253585815, 0.028840038925409317, 0.055076081305742264, 0.024787841364741325, 0.0010191021719947457, 0.0022868094965815544, 0.030124979093670845, 0.28555917739868164, 0.03329295665025711, 0.036049578338861465, 0.038853298872709274, 0.007190736476331949, 0.006643606815487146, 0.08228380233049393, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.005083801224827766, 0.09139324724674225, 0.28116321563720703, 0.08195066452026367, 0.6340349316596985, 0.012272918596863747, 0.0005934475339017808, 0.010692326352000237, 0.1514793336391449, 0.016046250239014626, 0.04672969505190849, 0.014393122866749763, 0.002580928150564432, 0.007409923244267702, 0.12582267820835114, 0.2511760890483856, 0.07463249564170837, 0.04988643527030945, 0.0701586976647377, 0.028143733739852905, 0.007391677238047123, 0.02261284738779068, 0.0737045407295227, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00605103699490428, 0.11548061668872833, 0.2870264947414398, 0.061026521027088165, 0.8064441084861755, 0.2189176380634308, 0.020241523161530495, 0.07779920846223831, 0.08952271938323975, 0.0073190852999687195, 0.02372264862060547, 0.038144610822200775, 0.07446137070655823, 0.09413070231676102, 0.030171062797307968, 0.15217745304107666, 0.19177564978599548, 0.125013530254364, 0.1473270058631897, 0.20325084030628204, 0.10669662803411484, 0.07946557551622391, 0.027662983164191246, 0.09494684636592865, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08316895365715027, 0.6715664267539978, 0.04549514129757881, 0.17856287956237793, 0.018127189949154854, 0.38010329008102417, 0.16956135630607605, 0.5726994872093201, 0.1473512202501297, 0.13756032288074493, 0.044131502509117126, 0.03872460126876831, 0.13646697998046875, 0.07963203638792038, 0.10255669057369232, 0.13806378841400146, 0.2514709234237671, 0.17176732420921326, 0.21858137845993042, 0.17882317304611206, 0.16198168694972992, 0.20351995527744293, 0.07158615440130234, 0.0266498401761055, 0.23213928937911987, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0817432552576065, 0.2031053900718689, 0.02472570165991783, 0.02598942257463932, 0.05427335575222969, 0.43315476179122925, 0.06398319453001022, 0.14792829751968384, 0.18555517494678497, 0.020227503031492233, 0.03572608157992363, 0.008726409636437893, 0.33127138018608093, 0.0956021174788475, 0.032814960926771164, 0.17152094841003418, 0.15314172208309174, 0.15820659697055817, 0.19208288192749023, 0.19640566408634186, 0.061033159494400024, 0.12321671098470688, 0.07748300582170486, 0.07906179875135422, 0.032524362206459045, 0.08073069155216217, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 
NaN], [0.36652442812919617, 0.4977355897426605, 0.09286413341760635, 0.21385566890239716, 0.18058304488658905, 0.4562758207321167, 0.4738945960998535, 0.2067655473947525, 0.17124009132385254, 0.035114847123622894, 0.05785587430000305, 0.03289380669593811, 0.3892229497432709, 0.2459530532360077, 0.0885753259062767, 0.11935991793870926, 0.25889015197753906, 0.181893989443779, 0.2521744966506958, 0.2510518431663513, 0.1320696324110031, 0.17421388626098633, 0.10352174937725067, 0.13144756853580475, 0.06071629375219345, 0.07381404936313629, 0.11898738145828247, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.3338637053966522, 0.241106316447258, 0.10183558613061905, 0.16975384950637817, 0.22215212881565094, 0.1208982765674591, 0.12069278955459595, 0.027770178392529488, 0.12589573860168457, 0.018161755055189133, 0.05639319866895676, 0.024462532252073288, 0.08646970242261887, 0.18506868183612823, 0.2994369864463806, 0.11384479701519012, 0.12307179719209671, 0.17695116996765137, 0.21105043590068817, 0.2652710974216461, 0.1994313895702362, 0.5530626177787781, 0.33474239706993103, 0.11353342235088348, 0.20157715678215027, 0.12058570981025696, 0.02405776083469391, 0.20302970707416534, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.24999171495437622, 0.7484717965126038, 0.1908620148897171, 0.6611655354499817, 0.24442408978939056, 0.0825357735157013, 0.5622089505195618, 0.4391622543334961, 0.045715928077697754, 0.2250336855649948, 0.3067566156387329, 0.014471310190856457, 0.06388252228498459, 0.21674634516239166, 0.13583892583847046, 0.1661912202835083, 0.3088836967945099, 0.3049609959125519, 0.34614017605781555, 0.3287224769592285, 0.19484750926494598, 0.49978625774383545, 0.2471936047077179, 0.14924246072769165, 0.2264283001422882, 0.11719675362110138, 0.028577886521816254, 0.03125511854887009, 0.04683076590299606, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05097173899412155, 0.16686855256557465, 0.15120531618595123, 0.3698476254940033, 0.35846272110939026, 0.6895467042922974, 0.8159933686256409, 0.843620777130127, 0.6904561519622803, 0.307870090007782, 0.450530469417572, 0.6275950074195862, 0.15986312925815582, 0.5293903350830078, 0.07888244837522507, 0.1382068395614624, 0.14312644302845, 0.15027517080307007, 0.2806132137775421, 0.10704077035188675, 0.15715429186820984, 0.3545873463153839, 0.2772214114665985, 0.11900671571493149, 0.16433128714561462, 0.08395379036664963, 0.0337035246193409, 0.08286106586456299, 0.029390821233391762, 0.07092607021331787, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.3532100319862366, 0.1141892597079277, 0.06207668036222458, 0.23437273502349854, 0.13035829365253448, 0.16457295417785645, 0.6610441207885742, 0.6354422569274902, 0.6703211069107056, 0.18266227841377258, 0.16635818779468536, 0.1048990935087204, 0.1468038111925125, 0.17976891994476318, 0.0709633082151413, 0.31265145540237427, 0.17018769681453705, 0.42172688245773315, 0.3373875319957733, 0.26503118872642517, 0.3668123483657837, 0.6080453991889954, 0.3421963155269623, 0.29850897192955017, 0.22005639970302582, 0.08626232296228409, 0.05660916119813919, 0.04967416450381279, 0.020023291930556297, 0.01626538299024105, 0.03365384787321091, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.18437133729457855, 0.20806346833705902, 0.06752406805753708, 0.15831130743026733, 0.3405534625053406, 0.0627271831035614, 0.3717433214187622, 0.3913803696632385, 0.5862330794334412, 0.29396724700927734, 0.02299528755247593, 0.060014016926288605, 0.08232607692480087, 
0.15418194234371185, 0.15275102853775024, 0.11847452819347382, 0.5065410137176514, 0.4161456227302551, 0.44356557726860046, 0.358999639749527, 0.34202155470848083, 0.6410406231880188, 0.5693260431289673, 0.3344528377056122, 0.3382241725921631, 0.16963228583335876, 0.12081613391637802, 0.09492655098438263, 0.06781262904405594, 0.059771545231342316, 0.013083304278552532, 0.15846344828605652, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07671413570642471, 0.17070698738098145, 0.13325846195220947, 0.07402658462524414, 0.6503690481185913, 0.1330946981906891, 0.165133535861969, 0.2397843301296234, 0.6370089054107666, 0.09848601371049881, 0.09929761290550232, 0.10903115570545197, 0.14141131937503815, 0.14783106744289398, 0.08112896233797073, 0.14143924415111542, 0.33810776472091675, 0.4273369610309601, 0.4442084729671478, 0.4867575168609619, 0.40271657705307007, 0.7919159531593323, 0.5796146988868713, 0.41502290964126587, 0.19611117243766785, 0.2659074366092682, 0.0590454526245594, 0.09533000737428665, 0.06579555571079254, 0.049002423882484436, 0.011413656175136566, 0.05989237129688263, 0.0694013461470604, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1416744738817215, 0.274202436208725, 0.13295260071754456, 0.20105819404125214, 0.3945937156677246, 0.333781898021698, 0.3556738793849945, 0.2839928865432739, 0.10343024134635925, 0.07706140726804733, 0.054361648857593536, 0.05752982571721077, 0.2817353904247284, 0.27278265357017517, 0.13429909944534302, 0.06363721936941147, 0.3402014374732971, 0.30108359456062317, 0.3598821461200714, 0.356340229511261, 0.2955020070075989, 0.3913557827472687, 0.34592464566230774, 0.3881937265396118, 0.23078370094299316, 0.49122318625450134, 0.3432621657848358, 0.1563359946012497, 0.12668228149414062, 0.1534397453069687, 0.06296171993017197, 0.07472987473011017, 0.07419107109308243, 0.08810260146856308, NaN, NaN, NaN, NaN, NaN, NaN], [0.22879131138324738, 0.1777554452419281, 0.09183042496442795, 0.14726729691028595, 0.1873711347579956, 0.05672184377908707, 0.08326486498117447, 0.01781904511153698, 0.0835406556725502, 0.02614605240523815, 0.06876543164253235, 0.03439611196517944, 0.0621294341981411, 0.16512615978717804, 0.26481878757476807, 0.06025628373026848, 0.1445734202861786, 0.2208743691444397, 0.22917300462722778, 0.34805941581726074, 0.30598515272140503, 0.6932811141014099, 0.6030279994010925, 0.2491629421710968, 0.46458470821380615, 0.5228609442710876, 0.2136632800102234, 0.610046923160553, 0.25265923142433167, 0.14038830995559692, 0.07342293113470078, 0.22653138637542725, 0.10003089159727097, 0.02225746400654316, 0.14559555053710938, NaN, NaN, NaN, NaN, NaN], [0.1532706916332245, 0.5982866883277893, 0.18050755560398102, 0.5800401568412781, 0.22030943632125854, 0.025230426341295242, 0.3744361996650696, 0.265155166387558, 0.03173244372010231, 0.2068646252155304, 0.27338433265686035, 0.012270096689462662, 0.05047086998820305, 0.14277896285057068, 0.15170519053936005, 0.0902293398976326, 0.5066702961921692, 0.45472872257232666, 0.45485398173332214, 0.5058757662773132, 0.3594079613685608, 0.7028806209564209, 0.5180745720863342, 0.25713953375816345, 0.5372852683067322, 0.6213670372962952, 0.2659974694252014, 0.3181111812591553, 0.5259383916854858, 0.33730512857437134, 0.13441412150859833, 0.36266574263572693, 0.10496268421411514, 0.02362431399524212, 0.020191077142953873, 0.04590708762407303, NaN, NaN, NaN, NaN], [0.04688200727105141, 0.12437571585178375, 0.1870293915271759, 0.4533093273639679, 0.3565751910209656, 0.5648568868637085, 0.7852934002876282, 0.7657470703125, 
0.5417794585227966, 0.4419334828853607, 0.632922887802124, 0.7103447914123535, 0.15686877071857452, 0.6169639825820923, 0.08483293652534485, 0.1059701219201088, 0.2303982675075531, 0.21762119233608246, 0.3580361306667328, 0.17096057534217834, 0.24843183159828186, 0.5131583213806152, 0.47260501980781555, 0.21650557219982147, 0.38561707735061646, 0.416827529668808, 0.1716565638780594, 0.3172723054885864, 0.29216328263282776, 0.47280052304267883, 0.38235870003700256, 0.1798420399427414, 0.1762932986021042, 0.04000748321413994, 0.08066289126873016, 0.03975420445203781, 0.08505715429782867, NaN, NaN, NaN], [0.2884610891342163, 0.10604135692119598, 0.07176870107650757, 0.2240629643201828, 0.12294583767652512, 0.10159854590892792, 0.6051279902458191, 0.5541971921920776, 0.5623130798339844, 0.16405576467514038, 0.18055777251720428, 0.13399486243724823, 0.12637703120708466, 0.18360036611557007, 0.09598042815923691, 0.2317487895488739, 0.2560827136039734, 0.5102789998054504, 0.4199059009552002, 0.44283756613731384, 0.5258800983428955, 0.732390284538269, 0.4491574466228485, 0.4244932234287262, 0.5298821926116943, 0.43037980794906616, 0.2800268232822418, 0.3093121647834778, 0.4250229299068451, 0.19317308068275452, 0.2640416920185089, 0.38813653588294983, 0.11181202530860901, 0.054203763604164124, 0.037284549325704575, 0.018739882856607437, 0.014264266937971115, 0.035236652940511703, NaN, NaN], [0.10626664012670517, 0.1478983461856842, 0.07806308567523956, 0.11814259737730026, 0.31690794229507446, 0.03372211009263992, 0.30042603611946106, 0.29277828335762024, 0.44479742646217346, 0.216581329703331, 0.023049354553222656, 0.0511498898267746, 0.08494822680950165, 0.14207273721694946, 0.16419102251529694, 0.08032029122114182, 0.6358892321586609, 0.5042787194252014, 0.5074477195739746, 0.5223307013511658, 0.5343775749206543, 0.703619122505188, 0.6657658815383911, 0.45647403597831726, 0.602655827999115, 0.5387927889823914, 0.39006462693214417, 0.39567169547080994, 0.43596506118774414, 0.41000646352767944, 0.269907683134079, 0.5412885546684265, 0.2038634866476059, 0.10306636989116669, 0.05501747503876686, 0.04515310004353523, 0.04695969074964523, 0.008877278305590153, 0.09985174983739853, NaN], [0.048457998782396317, 0.0638582855463028, 0.20956584811210632, 0.021124709397554398, 0.09014897048473358, 0.11662621796131134, 0.3483109474182129, 0.4503737986087799, 0.17136822640895844, 0.02997676283121109, 0.21708470582962036, 0.05856599286198616, 0.2859736979007721, 0.41663405299186707, 0.12262307107448578, 0.03129265457391739, 0.2636677324771881, 0.3672870099544525, 0.438161164522171, 0.7497870922088623, 0.43876102566719055, 0.6747432947158813, 0.5918557643890381, 0.5535795092582703, 0.7133825421333313, 0.7440239787101746, 0.3780657947063446, 0.4423457384109497, 0.6450315713882446, 0.5939705967903137, 0.7279283404350281, 0.4253756105899811, 0.4950290024280548, 0.13756991922855377, 0.08432447165250778, 0.11775307357311249, 0.12791647017002106, 0.07922011613845825, 0.04417572543025017, 0.3473970592021942]], [[0.1774463951587677, 0.26868411898612976, 0.03527391701936722, 0.01705012284219265, 0.00047759010340087116, 0.006241941824555397, 0.0031507122330367565, 0.2944689095020294, 0.038735195994377136, 0.003944840747863054, 0.004385389853268862, 0.004225992131978273, 0.03986744210124016, 0.00549504067748785, 0.07870971411466599, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.00027908835909329355, 0.005506355315446854, 
[... large block of machine-generated numeric data elided: nested arrays of floating-point weights (values in [0, 1]) with trailing zero padding and NaN entries, continuing from the preceding lines; no human-readable content to preserve ...]
0.5247420072555542, 0.4120633900165558, 0.3024104833602905, 0.35548633337020874, 0.5872392654418945, 0.15815261006355286, 0.7289484143257141, 0.7948301434516907, 0.9396543502807617, 0.9256777167320251, 0.08537369966506958, 0.03166399896144867, 0.03224433213472366, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.004588960204273462, 0.041907694190740585, 0.17755450308322906, 0.039724841713905334, 0.047663237899541855, 0.09274838864803314, 0.010110240429639816, 0.014862497337162495, 0.11161036789417267, 0.0490046888589859, 0.18517035245895386, 0.029471391811966896, 0.05094437301158905, 0.002971563721075654, 0.16300250589847565, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.07712388038635254, 0.042244281619787216, 0.004363007377833128, 0.0015959119191393256, 0.019252488389611244, 0.02118455246090889, 0.001846740604378283, 0.0012080060550943017, 0.0007866616360843182, 0.001261864323168993, 0.002815018408000469, 0.017323212698101997, 0.00286104716360569, 0.004067797679454088, 0.15733002126216888, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.176344633102417, 0.3271441161632538, 0.08498391509056091, 0.04002806171774864, 0.06676299124956131, 0.008946515619754791, 0.012590638361871243, 0.0061616976745426655, 0.010515754111111164, 0.042563267052173615, 0.024306243285536766, 0.009260479360818863, 0.0002838150830939412, 0.0009972971165552735, 0.0829070582985878, 0.13826748728752136, 0.016647184267640114, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.3345734477043152, 0.016792800277471542, 0.785018265247345, 0.16747814416885376, 0.3955724537372589, 0.09289640188217163, 0.041390396654605865, 0.004024161957204342, 0.04094661772251129, 0.023736434057354927, 0.20348279178142548, 0.041674140840768814, 0.012969214469194412, 0.03994787111878395, 0.04405270516872406, 0.12115656584501266, 0.053111400455236435, 0.35221540927886963, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.027460135519504547, 0.0009503767942078412, 0.8045902252197266, 0.05251304432749748, 0.4111766219139099, 0.08071836084127426, 0.01928381621837616, 0.0005491983611136675, 0.029575586318969727, 0.001678029540926218, 0.033282194286584854, 0.007144003175199032, 0.012064780108630657, 0.008930332958698273, 0.0033295771572738886, 0.06620940566062927, 0.0874415934085846, 0.3174281120300293, 0.09698687493801117, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.18455208837985992, 0.0566692017018795, 0.08522135764360428, 0.2798183560371399, 0.013304274529218674, 0.0006802850402891636, 0.09522412717342377, 0.0060977875255048275, 0.002369458321481943, 0.017453324049711227, 0.0036190226674079895, 2.9809654733981006e-05, 0.0002128492487827316, 0.0002820969675667584, 0.18610867857933044, 0.05510773882269859, 0.045387670397758484, 0.35701045393943787, 0.5011870265007019, 0.0787656381726265, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.6536933779716492, 0.3485175371170044, 0.2007695585489273, 0.8106443881988525, 0.12433423846960068, 0.008092332631349564, 0.6807736158370972, 0.40895989537239075, 0.04516575112938881, 0.1387551873922348, 0.004862201400101185, 
0.0003120531910099089, 0.00022667655139230192, 0.00031860917806625366, 0.07640787214040756, 0.05231153964996338, 0.1393265277147293, 0.34751832485198975, 0.15474379062652588, 0.1892920285463333, 0.06652400642633438, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08564082533121109, 0.05155009403824806, 0.10021068900823593, 0.5880905985832214, 0.0823356956243515, 0.0626063123345375, 0.7381499409675598, 0.566346287727356, 0.04188016802072525, 0.02469027414917946, 0.004355741199105978, 0.00042968738125637174, 2.4299803044414148e-05, 2.7212277927901596e-05, 0.001896930974908173, 0.04669328033924103, 0.038986966013908386, 0.38860636949539185, 0.09904015064239502, 0.3339899182319641, 0.027963249012827873, 0.04134462773799896, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03975995257496834, 0.012421448715031147, 0.08890707790851593, 0.605818510055542, 0.05048904940485954, 0.017510779201984406, 0.24702893197536469, 0.39587050676345825, 0.06098005548119545, 0.052625395357608795, 0.013424866832792759, 0.0005194320692680776, 0.000250102486461401, 0.0003063087642658502, 0.0010793216060847044, 0.20758312940597534, 0.07789289951324463, 0.047907259315252304, 0.006299893371760845, 0.2608397901058197, 0.044556185603141785, 0.061705876141786575, 0.034865181893110275, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11902385950088501, 0.011114073917269707, 0.22151720523834229, 0.2006509006023407, 0.03878694027662277, 0.01363028772175312, 0.3268369734287262, 0.04311302676796913, 0.8067907094955444, 0.34777864813804626, 0.25920552015304565, 0.09021251648664474, 0.035271789878606796, 0.0031717135570943356, 0.004271878860890865, 0.18052776157855988, 0.08179321140050888, 0.059846919029951096, 0.02793782763183117, 0.062999427318573, 0.04310278594493866, 0.024987775832414627, 0.015387488529086113, 0.132792130112648, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.006270309444516897, 0.0001492560259066522, 0.00045137249981053174, 0.0007612273329868913, 7.476524478988722e-05, 0.013270817697048187, 0.04344405606389046, 0.014117085374891758, 0.6041488647460938, 0.07304701954126358, 0.010559855960309505, 0.0026350386906415224, 0.02638809196650982, 0.002994539914652705, 0.00020572090579662472, 0.03587701544165611, 0.020078828558325768, 0.04571571201086044, 0.02593454346060753, 0.007220670115202665, 0.03280382603406906, 0.012364541180431843, 0.04736338183283806, 0.48638036847114563, 0.015403805300593376, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.002078789984807372, 0.000502656155731529, 0.00018232718866784126, 0.0008548289188183844, 0.0009249084978364408, 0.02029070071876049, 0.012032798491418362, 0.024348178878426552, 0.2300865352153778, 0.10343841463327408, 0.007660495117306709, 0.0012821657583117485, 0.0114271380007267, 0.0009412667131982744, 7.524124521296471e-05, 0.010417330078780651, 0.019508572295308113, 0.03964173421263695, 0.041229844093322754, 0.021899865940213203, 0.0029071751050651073, 0.010124437510967255, 0.08508285880088806, 0.40291228890419006, 0.4734281599521637, 0.015163381583988667, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.022463228553533554, 0.0013134862529113889, 0.00013891702110413462, 0.002816978842020035, 0.0011811865260824561, 0.0014538302784785628, 0.0005458829691633582, 0.0004073161107953638, 0.000992793939076364, 0.626685380935669, 0.1310541182756424, 
0.1785772740840912, 0.1327074021100998, 0.014590581879019737, 3.459410072537139e-05, 0.08744391798973083, 0.1107466071844101, 0.15557123720645905, 0.13837403059005737, 0.05803389474749565, 0.026755833998322487, 0.03754325956106186, 0.4220706820487976, 0.16102783381938934, 0.2859216034412384, 0.1457504779100418, 0.03281670808792114, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.004299411084502935, 0.00014757749158889055, 0.0013493087608367205, 0.003552102018147707, 0.004041418433189392, 0.004232631530612707, 0.00022051982523407787, 5.3625211876351386e-05, 0.008671559393405914, 0.2003454566001892, 0.2010745257139206, 0.20048564672470093, 0.327506959438324, 0.12215141952037811, 7.573522452730685e-05, 0.21633882820606232, 0.07441287487745285, 0.04740259423851967, 0.026924576610326767, 0.012407396920025349, 0.002398786135017872, 0.0038467273116111755, 0.13835540413856506, 0.06710492819547653, 0.026295386254787445, 0.17057135701179504, 0.013244924135506153, 0.46883779764175415, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.011497906409204006, 0.0014132088981568813, 0.002270179335027933, 0.006387166678905487, 5.5530636018374935e-05, 0.0020248510409146547, 0.0021348590962588787, 0.001147052156738937, 0.0024277162738144398, 0.3687064051628113, 0.5298402905464172, 0.006611559074372053, 0.3372868299484253, 0.2915361225605011, 0.0002606022753752768, 0.027107199653983116, 0.05742119997739792, 0.06533583253622055, 0.024222400039434433, 0.014050583355128765, 0.013653005473315716, 0.0030738371424376965, 0.04425956308841705, 0.06826918572187424, 0.011929179541766644, 0.14959540963172913, 0.16161218285560608, 0.5212987065315247, 0.041249219328165054, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.043351031839847565, 0.015730101615190506, 0.006545424461364746, 0.11301398277282715, 0.001535893650725484, 0.0002994980022776872, 0.002417969051748514, 0.0027875620871782303, 0.007663458585739136, 0.4366588592529297, 0.29866132140159607, 0.03879629448056221, 0.0005757116014137864, 0.10755223035812378, 0.15693426132202148, 0.12232528626918793, 0.02327316626906395, 0.043996360152959824, 0.010462167672812939, 0.05786772817373276, 0.006097386125475168, 0.001271827262826264, 0.022651376202702522, 0.03627351298928261, 0.030646052211523056, 0.03145253658294678, 0.18536151945590973, 0.10030946880578995, 0.3235938847064972, 0.09760642796754837, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05824243649840355, 0.00918568018823862, 0.004823020659387112, 0.12202360481023788, 0.001364732626825571, 0.009540650062263012, 0.017077280208468437, 0.02250218391418457, 0.031557418406009674, 0.39489659667015076, 0.4118596911430359, 0.4739699363708496, 0.04330656677484512, 0.22410848736763, 0.009354491718113422, 0.01696004532277584, 0.0005225083441473544, 0.012039890512824059, 0.0003213977033738047, 0.024568837136030197, 0.0005492557538673282, 6.035636397427879e-05, 0.0032521369867026806, 0.016784805804491043, 0.013033770024776459, 0.023488081991672516, 0.04594254866242409, 0.04732683673501015, 0.2366781234741211, 0.2578820288181305, 0.02447950839996338, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10114194452762604, 0.055991608649492264, 0.0056193675845861435, 0.044799599796533585, 0.005612906999886036, 0.0018076150445267558, 0.0035521595273166895, 0.003050913568586111, 0.014126029796898365, 0.18568304181098938, 0.044660091400146484, 0.8178999423980713, 0.12312521040439606, 0.22830259799957275, 0.0015339198289439082, 0.016271475702524185, 0.026037830859422684, 
0.05988215655088425, 0.04065781086683273, 0.0548781082034111, 0.0059303357265889645, 0.000490839418489486, 0.009792556054890156, 0.05564826726913452, 0.029693011194467545, 0.015783851966261864, 0.050408631563186646, 0.10483089834451675, 0.18894171714782715, 0.4590488076210022, 0.24355939030647278, 0.03408684581518173, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17329555749893188, 0.022842630743980408, 0.03050464764237404, 0.3040459156036377, 0.023058682680130005, 0.05675753578543663, 0.012084487825632095, 0.018060212954878807, 0.012510768137872219, 0.4205268621444702, 0.403047114610672, 0.5196431279182434, 0.14466160535812378, 0.15726853907108307, 0.003281315555796027, 0.011992339976131916, 0.02786487340927124, 0.025577154010534286, 0.02912752889096737, 0.009845648892223835, 0.0007121131638996303, 0.001387864351272583, 0.015649031847715378, 0.05334821715950966, 0.05039743706583977, 0.0003855754912365228, 0.07798124849796295, 0.03745294734835625, 0.16697214543819427, 0.29521557688713074, 0.2776513993740082, 0.29445046186447144, 0.031993161886930466, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.21814380586147308, 0.013853680342435837, 0.0011839027283713222, 0.02006133459508419, 0.0059941732324659824, 0.004335244186222553, 0.0006587213138118386, 0.0008069095201790333, 6.766151636838913e-05, 0.4439576268196106, 0.16648612916469574, 0.7347545623779297, 0.19459886848926544, 0.05657987296581268, 0.0006026092451065779, 0.11517049372196198, 0.11416894942522049, 0.19162771105766296, 0.14611610770225525, 0.060761958360672, 0.02055470645427704, 0.021888524293899536, 0.20655019581317902, 0.047658227384090424, 0.055987950414419174, 0.01683689095079899, 0.005808014422655106, 0.045862384140491486, 0.09340663254261017, 0.10908356308937073, 0.18944555521011353, 0.26804569363594055, 0.20485185086727142, 0.037772081792354584, NaN, NaN, NaN, NaN, NaN, NaN], [0.034262340515851974, 0.0017182001611217856, 0.005656392779201269, 0.017169898375868797, 0.0156857930123806, 0.01468763966113329, 0.0007699507405050099, 0.00017933807976078242, 0.002019587904214859, 0.09474337100982666, 0.21286551654338837, 0.39837440848350525, 0.44769343733787537, 0.30061447620391846, 0.0009720441303215921, 0.24184046685695648, 0.07921410351991653, 0.056290365755558014, 0.026794791221618652, 0.016941547393798828, 0.0021516080014407635, 0.0023830668069422245, 0.05685606598854065, 0.02070370689034462, 0.003236053278669715, 0.01165463775396347, 0.004370343871414661, 0.030780060216784477, 0.00907946564257145, 0.06188458576798439, 0.04407832771539688, 0.006142587400972843, 0.14762946963310242, 0.013672620058059692, 0.4999893307685852, NaN, NaN, NaN, NaN, NaN], [0.1974877417087555, 0.05350746586918831, 0.02080627717077732, 0.07140190154314041, 0.0007820951868779957, 0.021851971745491028, 0.023295408114790916, 0.011020028032362461, 0.0015720969531685114, 0.3204348385334015, 0.5890824198722839, 0.011122598312795162, 0.40923523902893066, 0.5521805882453918, 0.009284045547246933, 0.03566991165280342, 0.0538097508251667, 0.09943600744009018, 0.028607800602912903, 0.020965654402971268, 0.013461945578455925, 0.002478980924934149, 0.02911236882209778, 0.02446376532316208, 0.0022762087173759937, 0.010774179361760616, 0.04047773778438568, 0.06471210718154907, 0.0026813328731805086, 0.07523855566978455, 0.030470186844468117, 0.0345987044274807, 0.1238497719168663, 0.17781274020671844, 0.4970780611038208, 0.04515520855784416, NaN, NaN, NaN, NaN], [0.04384012520313263, 0.020103074610233307, 0.00601673498749733, 0.10121199488639832, 0.0015372235793620348, 
0.00047879578778520226, 0.0028034253045916557, 0.0035304632037878036, 0.0019347126362845302, 0.15543726086616516, 0.10060140490531921, 0.012154079042375088, 0.00020098914683330804, 0.049742307513952255, 0.15931616723537445, 0.12716706097126007, 0.02434932254254818, 0.05787394568324089, 0.013031681068241596, 0.06681805849075317, 0.007088592275977135, 0.0018475945107638836, 0.021072670817375183, 0.024636711925268173, 0.010089303366839886, 0.0076353950425982475, 0.05158482864499092, 0.009980393573641777, 0.034229546785354614, 0.01627102866768837, 0.008032353594899178, 0.013575052842497826, 0.04940066114068031, 0.19428585469722748, 0.10819438844919205, 0.2976790964603424, 0.08516447991132736, NaN, NaN, NaN], [0.33183732628822327, 0.07794758677482605, 0.02364480309188366, 0.3878714144229889, 0.007764760870486498, 0.055411770939826965, 0.07855504751205444, 0.09397301822900772, 0.02721172571182251, 0.38145557045936584, 0.42047446966171265, 0.5078706741333008, 0.03859835863113403, 0.25985077023506165, 0.0625251829624176, 0.01713084802031517, 0.000499976216815412, 0.019638467580080032, 0.00048709739348851144, 0.03356647491455078, 0.0008144291932694614, 0.00011953162174904719, 0.003664336632937193, 0.013800683431327343, 0.004805452190339565, 0.004433726891875267, 0.011711561121046543, 0.003556638490408659, 0.01588965393602848, 0.025807680562138557, 0.00022126971452962607, 0.004036479629576206, 0.00837762001901865, 0.04655361920595169, 0.04086336866021156, 0.22630761563777924, 0.2765483856201172, 0.02425519935786724, NaN, NaN], [0.4473247230052948, 0.3730325996875763, 0.029895052313804626, 0.15908104181289673, 0.02762797847390175, 0.008889964781701565, 0.016516737639904022, 0.012883803807199001, 0.01523641124367714, 0.22003965079784393, 0.05771813541650772, 0.8456536531448364, 0.1770154982805252, 0.31127816438674927, 0.007925343699753284, 0.010901566594839096, 0.020337969064712524, 0.07802019268274307, 0.0504593625664711, 0.06312800198793411, 0.009868033230304718, 0.000861799344420433, 0.010114955715835094, 0.052247028797864914, 0.012602821923792362, 0.005399123765528202, 0.01934058591723442, 0.013776490464806557, 0.010564911179244518, 0.04300173744559288, 0.008748980239033699, 0.0006391598144546151, 0.006108305882662535, 0.05087457224726677, 0.09035929292440414, 0.18751013278961182, 0.4462290108203888, 0.28552356362342834, 0.05451636388897896, NaN], [0.2188224196434021, 0.06026163697242737, 0.01674255169928074, 0.1205059364438057, 0.017392028123140335, 0.033714599907398224, 0.013199009001255035, 0.035441260784864426, 0.006878681946545839, 0.5097362399101257, 0.5390803217887878, 0.7098195552825928, 0.20610427856445312, 0.34404870867729187, 0.06464894115924835, 0.1367119550704956, 0.02979014255106449, 0.04602046683430672, 0.022530242800712585, 0.009278235025703907, 0.01184787880629301, 0.010125648230314255, 0.02445557340979576, 0.052750833332538605, 0.013119504787027836, 0.0006633299053646624, 0.007243738044053316, 0.02398994006216526, 0.00908573716878891, 0.013761860318481922, 0.007176807615906, 0.00677318312227726, 0.0021949538495391607, 0.01309704128652811, 0.09677710384130478, 0.12711098790168762, 0.1613820642232895, 0.37058699131011963, 0.3504316806793213, 0.02586444839835167]], [[6.113462859502761e-06, 0.5065946578979492, 7.261813152581453e-05, 5.1066386498122354e-14, 1.0490246824277965e-15, 1.4956003015903496e-12, 2.5734427609724886e-13, 2.1143946469237562e-06, 9.544867651811728e-08, 4.2543565892394497e-10, 6.215519418595328e-12, 1.687761909396901e-11, 1.6993320528513323e-08, 
1.0583119935958507e-09, 9.857150189418462e-07, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [4.727198188447801e-08, 0.002272214274853468, 0.8730366826057434, 0.0016238681273534894, 9.849362297975617e-11, 6.310171162720105e-14, 1.3311845115798748e-12, 1.350557283785747e-07, 1.07800769910682e-05, 3.4101576602552086e-05, 7.529693561991735e-07, 3.7022258592145363e-09, 3.1551092294357375e-10, 8.851498527195911e-12, 1.024629546009237e-05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [6.003397223786067e-10, 5.335852165444521e-06, 0.00445933174341917, 0.5796651840209961, 5.976808097329922e-05, 2.377180230439535e-09, 1.7792844021063958e-12, 1.2140626282075573e-09, 6.417224529542409e-09, 2.601910637167748e-06, 1.1842810181406094e-06, 1.8266834445057611e-07, 1.3081095096012518e-09, 1.5776791765370612e-12, 4.7676843678345904e-05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.4071971206038626e-15, 2.3560551770727793e-14, 9.98394700246763e-11, 1.7167060661904543e-07, 0.2774648666381836, 1.6012703781598248e-05, 9.760837530760607e-15, 4.654387315338889e-18, 8.039692137064508e-20, 2.1508527635127157e-16, 1.789740057545064e-11, 2.4233797191186568e-08, 2.7592322870972907e-10, 4.956549239646573e-15, 1.5411848153235042e-06, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.9919477308935618e-13, 5.266535346254387e-16, 1.2917133013982517e-14, 7.221083175856791e-10, 8.195231930585578e-05, 0.5564944744110107, 4.117699063499458e-06, 5.438900198273533e-13, 2.4172004338169554e-20, 9.57835365503234e-22, 9.376302678036402e-17, 3.235451073724249e-10, 6.101883442966027e-09, 9.971044129253315e-11, 1.6162671201414014e-08, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [9.771466125130246e-08, 3.17872256294649e-11, 3.1429036890379125e-13, 5.901367481980172e-16, 4.2342058748090494e-09, 0.0012305855052545667, 0.6103256940841675, 2.2161180822877213e-05, 7.972257402844019e-12, 6.481494664823834e-19, 5.35928561114305e-19, 7.863773244772346e-14, 1.1593314752644801e-07, 8.808668212623161e-07, 1.1730364235518209e-07, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.6939844799400703e-10, 3.892770337188267e-07, 2.2438891023046637e-10, 2.095593632707407e-18, 1.8655412772298346e-14, 2.206185598652155e-07, 3.0316745323943906e-05, 0.33891788125038147, 5.437008439912461e-06, 1.3213468337612382e-14, 2.5347562276209975e-18, 1.0659246862729562e-18, 2.6392999114346893e-13, 9.868956762915104e-10, 1.6170986327779246e-06, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.3015508670832787e-09, 4.1474245904282725e-07, 7.619819371029735e-06, 9.079691751061325e-13, 5.725895077835787e-16, 1.0568446176517903e-14, 8.978999488373773e-11, 2.253716047562193e-05, 0.9323674440383911, 0.0001553743495605886, 1.1094852814252931e-10, 4.251380123255501e-17, 3.4548606558270072e-18, 1.563022274271835e-14, 1.7832363141678798e-07, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.2218349942916262e-10, 4.9370779464652514e-08, 
1.0212672805209877e-06, 3.802215486903293e-11, 4.1323817879847246e-16, 3.8503187577578586e-16, 6.2032051316354e-15, 3.2203126920649083e-07, 8.202762546716258e-05, 0.5051153898239136, 1.6483796571264975e-05, 2.317061202194298e-13, 9.134085045449695e-19, 4.959048342554486e-21, 1.9839136555788173e-08, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [3.5615963439117673e-14, 6.311461336200308e-12, 7.572167781688677e-09, 7.864790063649707e-08, 5.871175941252194e-13, 4.399392566282849e-15, 3.6105855357745724e-20, 8.408651243829376e-14, 2.915925279012299e-09, 2.7294316168990918e-05, 0.31493836641311646, 1.4271394093157141e-06, 7.57530499374999e-14, 1.0444343699767344e-21, 5.65783730976932e-09, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.619628042792698e-10, 6.862534152052291e-11, 7.238428190170509e-10, 5.1994692995549485e-08, 8.193378420173758e-08, 6.734891755399985e-09, 1.47457238341411e-14, 5.793711288450045e-15, 1.5065480465795492e-14, 1.167909147170576e-08, 0.0003541565383784473, 0.5504465699195862, 2.5677532903500833e-05, 4.9321430864142715e-14, 1.3459792569392448e-07, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [8.003913504195381e-11, 5.626729984720136e-12, 4.9737857062137625e-12, 1.4365373474101162e-11, 1.165467935493325e-07, 3.263785401941277e-05, 9.4434834951862e-11, 2.6144878938953817e-15, 6.540743544149476e-19, 2.5930401594030658e-17, 1.8366722587259687e-09, 1.8794700736179948e-05, 0.49058014154434204, 8.066950840657228e-07, 1.3585024589701788e-06, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.0801989728040362e-12, 2.2359935084037552e-13, 1.1691597126203823e-12, 1.0214807062303036e-16, 2.4270561688882752e-12, 4.4484740890915475e-10, 1.1468358207533669e-10, 1.5131759777478604e-13, 3.7208958865722007e-20, 6.888861115537483e-21, 1.5888746801787275e-18, 3.2241334168431335e-12, 5.685043561243219e-06, 0.3912107050418854, 3.0407140694244106e-10, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [5.397048425948014e-07, 2.3629811494174646e-06, 8.614414923613367e-07, 8.006720286779512e-13, 4.92412575016192e-14, 2.066644277931573e-08, 0.00031528103863820434, 0.011093947105109692, 3.7555511767095595e-07, 1.151808547627739e-13, 5.505821095062543e-16, 1.6971218267519683e-12, 5.383023108151974e-06, 0.8731740117073059, 0.04139598086476326, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.6266164779663086, 0.3128010928630829, 0.06246759742498398, 0.00042505442979745567, 0.008534153923392296, 0.09425555169582367, 0.2709643542766571, 0.686626672744751, 0.3142872750759125, 0.10107265412807465, 0.015935143455863, 0.012286541052162647, 0.14970052242279053, 0.3989029824733734, 0.022492708638310432, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.24012988805770874, 0.6692726612091064, 0.08029869198799133, 0.41845017671585083, 0.08128808438777924, 0.09738753736019135, 0.15100885927677155, 0.2691691815853119, 0.013517879880964756, 0.21848294138908386, 0.16758716106414795, 0.12734578549861908, 0.32224464416503906, 0.12471552193164825, 0.07385692000389099, 
0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13747748732566833, 0.012865100987255573, 0.3056560158729553, 0.3759651184082031, 0.20075583457946777, 0.056869279593229294, 0.27502477169036865, 0.09038521349430084, 0.09535539150238037, 0.27579623460769653, 0.15189220011234283, 0.6071571111679077, 0.0820951759815216, 0.09481122344732285, 0.09779953956604004, 0.13988038897514343, 0.003474950324743986, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.007538634352385998, 0.02957071363925934, 0.011847163550555706, 0.055522944778203964, 0.04100131243467331, 0.031534671783447266, 0.06567902117967606, 0.09044305235147476, 0.007193693891167641, 0.06334451586008072, 0.07378207892179489, 0.07786792516708374, 0.28214019536972046, 0.08070375770330429, 0.20607011020183563, 0.14879919588565826, 0.018745053559541702, 0.07372914999723434, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.005881547927856445, 0.008371960371732712, 0.010823756456375122, 0.024797217920422554, 0.024142105132341385, 0.01083815935999155, 0.008304014801979065, 0.006388344801962376, 0.009114595130085945, 0.022048065438866615, 0.1306026130914688, 0.23451638221740723, 0.3918500244617462, 0.08784151822328568, 0.2650633752346039, 0.030327370390295982, 0.02692173607647419, 0.46947386860847473, 0.09036581218242645, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.20629070699214935, 0.2529377341270447, 0.028870999813079834, 0.049127642065286636, 0.04690879210829735, 0.11594393104314804, 0.15515393018722534, 0.06585636734962463, 0.0420556403696537, 0.1996643990278244, 0.028717953711748123, 0.7190893292427063, 0.30376943945884705, 0.22654840350151062, 0.12926629185676575, 0.164228156208992, 0.0009850627975538373, 0.0044541023671627045, 0.0005622706958092749, 0.024160074070096016, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.01586613617837429, 0.15566423535346985, 0.015082520432770252, 0.009204044006764889, 0.002680863719433546, 0.07106906920671463, 0.08370621502399445, 0.05749649554491043, 0.03059268370270729, 0.012942377477884293, 0.0011753733269870281, 0.00916373822838068, 0.0020018015056848526, 0.049308281391859055, 0.19197486340999603, 0.020124448463320732, 0.0011880549136549234, 0.0042731426656246185, 3.242780803702772e-05, 0.6858344078063965, 0.023040860891342163, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03849078342318535, 0.08146823942661285, 0.03517843410372734, 0.025976145640015602, 0.02364599145948887, 0.1389763057231903, 0.02619975060224533, 0.034312427043914795, 0.02985706366598606, 0.029806064441800117, 0.00684476038441062, 0.03280223533511162, 0.030126189813017845, 0.10321015119552612, 0.23163792490959167, 0.0017230550292879343, 3.356653905939311e-05, 0.001307086437009275, 1.4968540199333802e-05, 0.5564903616905212, 0.236929789185524, 0.007688341196626425, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2772977352142334, 0.05161405727267265, 0.04358568787574768, 0.047931231558322906, 0.04583681374788284, 0.08128579705953598, 0.15782645344734192, 0.0856042429804802, 0.10767779499292374, 0.11355230212211609, 0.041377030313014984, 0.252811074256897, 0.05780917406082153, 0.19973745942115784, 
0.22427907586097717, 0.1612924486398697, 0.00029754414572380483, 0.0029063820838928223, 0.0015110797248780727, 0.16695675253868103, 0.3453270196914673, 0.07193248718976974, 0.006359610706567764, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.023119861260056496, 0.02037731558084488, 0.0453791618347168, 0.1060030460357666, 0.006244942545890808, 0.0085020512342453, 0.012060720473527908, 0.014560479670763016, 0.00689319521188736, 0.011241135187447071, 0.023835573345422745, 0.02693312056362629, 0.011436404660344124, 0.019489392638206482, 0.30997538566589355, 0.1910298615694046, 0.01051796693354845, 0.0018660163041204214, 0.0012154864380136132, 0.022663934156298637, 0.008557457476854324, 0.016767704859375954, 0.05246622860431671, 0.08816055208444595, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.045414164662361145, 0.005229660775512457, 0.011418518610298634, 0.009312640875577927, 0.0002147085906472057, 0.12653864920139313, 0.05854451283812523, 0.11896014213562012, 0.0156405046582222, 0.010270207189023495, 0.0032450463622808456, 0.015787174925208092, 0.011106730438768864, 0.007675709668546915, 0.3779195249080658, 0.24295811355113983, 0.0012021175352856517, 0.0005200211890041828, 0.00015996988804545254, 0.002627951791509986, 0.03450923040509224, 0.014827161096036434, 0.015967652201652527, 0.005632439162582159, 0.001854590023867786, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.007367350626736879, 0.012884993106126785, 0.01019106525927782, 0.011957473121583462, 0.054886650294065475, 0.09750530868768692, 0.029414953663945198, 0.08492925018072128, 0.17440666258335114, 0.003643231000751257, 0.00105402956251055, 0.02280060388147831, 0.0010922637302428484, 0.005130939185619354, 0.09500079602003098, 0.2492469847202301, 0.004325273912400007, 0.004784590099006891, 0.013903478160500526, 0.0013026667293161154, 0.003877879586070776, 0.017029188573360443, 0.01781909167766571, 0.05003270506858826, 0.026610376313328743, 0.008462576195597649, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.02996714971959591, 0.028387926518917084, 0.16122521460056305, 0.0898616760969162, 0.06381779164075851, 0.20551051199436188, 0.13175098598003387, 0.562389075756073, 0.04834860563278198, 0.013581722043454647, 0.03991095721721649, 0.10736902058124542, 0.03830268979072571, 0.05736052244901657, 0.27213579416275024, 0.25306010246276855, 0.0017952719936147332, 0.005404005758464336, 0.021692873910069466, 0.0005702165653929114, 9.544018394080922e-05, 0.001603480544872582, 0.001225438085384667, 0.036846794188022614, 0.001749897957779467, 0.016878794878721237, 0.021703237667679787, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03571658954024315, 0.012061648070812225, 0.08574458211660385, 0.022463832050561905, 0.12578466534614563, 0.07826194912195206, 0.06577891856431961, 0.13274507224559784, 0.06591502577066422, 0.05002211779356003, 0.03129255399107933, 0.27911075949668884, 0.31601372361183167, 0.10930214822292328, 0.30993908643722534, 0.055758021771907806, 0.000425096252001822, 0.0005783061496913433, 0.0011671994579955935, 0.00034630659501999617, 0.00031045774812810123, 0.0006358043756335974, 0.004018810577690601, 0.0004720573779195547, 0.006387148518115282, 0.038948215544223785, 0.40798652172088623, 0.0038703898899257183, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04630875587463379, 0.03141915798187256, 0.03061339072883129, 0.007028677500784397, 
0.008451082743704319, 0.02540888637304306, 0.012118873186409473, 0.09331455826759338, 0.0033372503239661455, 0.01357665192335844, 0.0069510783068835735, 0.017483821138739586, 0.033454760909080505, 0.014270796440541744, 0.44127020239830017, 0.29551389813423157, 0.006183725781738758, 0.0010477532632648945, 0.001470124931074679, 0.0028535614255815744, 0.003910644445568323, 0.004942604340612888, 0.003798475954681635, 0.01567114144563675, 0.060374900698661804, 0.006600319407880306, 0.010896215215325356, 0.009779008105397224, 0.007320093456655741, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1722828894853592, 0.15122008323669434, 0.056102070957422256, 0.09136570990085602, 0.02421834133565426, 0.045343294739723206, 0.034619707614183426, 0.030837759375572205, 0.019798463210463524, 0.04411705583333969, 0.05331422761082649, 0.09423463046550751, 0.1436629444360733, 0.13433872163295746, 0.1229754090309143, 0.1632017195224762, 0.00519327400252223, 0.00790441408753395, 0.0009941658936440945, 0.3241596221923828, 0.0008480648975819349, 0.0001429034018656239, 0.0012253100285306573, 0.0008457236108370125, 0.006411578040570021, 0.0016067628748714924, 0.003762597683817148, 0.029224932193756104, 0.07677540183067322, 0.06338826566934586, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.022473091259598732, 0.0489150770008564, 0.010993139818310738, 0.03897916153073311, 0.003662768052890897, 0.002051829593256116, 0.0037445707712322474, 0.016557298600673676, 0.014907213859260082, 0.004300208762288094, 0.004852794576436281, 0.0027131394017487764, 0.016001524403691292, 0.008091894909739494, 0.25544992089271545, 0.005401996895670891, 6.3005199990584515e-06, 0.0004310416697990149, 8.47076989884954e-06, 0.009243682958185673, 0.0008590375073254108, 4.37394373875577e-06, 6.523932825075462e-05, 8.531090134056285e-05, 0.0006816720124334097, 7.644478318979964e-05, 0.00018924157484434545, 0.0012375408550724387, 0.023784970864653587, 0.4309314787387848, 0.034907225519418716, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08012817800045013, 0.2898695766925812, 0.022246699780225754, 0.06057273969054222, 0.025327028706669807, 0.02957070618867874, 0.04002644121646881, 0.019245512783527374, 0.01995179057121277, 0.020330116152763367, 0.006697094067931175, 0.015452835708856583, 0.014569609425961971, 0.04013357311487198, 0.2585589587688446, 0.29775136709213257, 0.006892140489071608, 0.009814155288040638, 0.016249310225248337, 0.004830268211662769, 0.0035455955658107996, 0.0007549467263743281, 0.000541276705916971, 0.0031480982434004545, 0.001557780895382166, 0.0010192448971793056, 0.0018504501786082983, 0.002619183622300625, 0.1016833484172821, 0.03818811476230621, 0.06928347051143646, 0.0412699431180954, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.01832924410700798, 0.023918962106108665, 0.024782713502645493, 0.033514510840177536, 0.050549402832984924, 0.013098560273647308, 0.023091215640306473, 0.030541786924004555, 0.1064886748790741, 0.006106832530349493, 0.0024854408111423254, 0.018918434157967567, 0.0075035663321614265, 0.009370497427880764, 0.21452490985393524, 0.26683223247528076, 0.0017643374158069491, 0.02531762421131134, 0.047485485672950745, 0.0005023732082918286, 0.0011795219033956528, 0.002227108459919691, 0.0028741960413753986, 0.005215880926698446, 0.001946018310263753, 3.592624852899462e-05, 0.001338632428087294, 0.0025214410852640867, 0.07723907381296158, 0.012742026709020138, 0.25196006894111633, 0.052669085562229156, 0.020061112940311432, NaN, NaN, NaN, NaN, NaN, NaN, NaN], 
[0.027254067361354828, 0.020437292754650116, 0.14233240485191345, 0.08538791537284851, 0.03242940828204155, 0.0897425189614296, 0.08476056158542633, 0.2620556950569153, 0.02126460149884224, 0.023079702630639076, 0.03143052011728287, 0.04489685967564583, 0.046720463782548904, 0.03604652360081673, 0.23038896918296814, 0.3006725609302521, 0.0014043879928067327, 0.009936605580151081, 0.037061650305986404, 0.0005129858036525548, 5.274279828881845e-05, 0.0006371501949615777, 0.00048446646542288363, 0.015043019317090511, 0.0003374778898432851, 0.0015171451959758997, 0.001911269617266953, 0.0014702629996463656, 0.015123972669243813, 0.0006335150101222098, 0.0006853189552202821, 0.0006114236894063652, 0.013829384930431843, 0.010252222418785095, NaN, NaN, NaN, NaN, NaN, NaN], [0.042377930134534836, 0.017293933779001236, 0.08730384707450867, 0.030179454013705254, 0.12187745422124863, 0.05139933153986931, 0.047754548490047455, 0.066692054271698, 0.06521614640951157, 0.05196157470345497, 0.028108397498726845, 0.17703385651111603, 0.22747749090194702, 0.06955988705158234, 0.28824013471603394, 0.11150761693716049, 0.0006332705961540341, 0.0012255925685167313, 0.0022868558298796415, 0.0007688697660341859, 0.00046408100752159953, 0.0006869957433082163, 0.0021696356125175953, 0.0003113164857495576, 0.0013619231758639216, 0.004312699660658836, 0.1263500303030014, 0.0001710234791971743, 0.0024227115791291, 0.0006429344066418707, 0.008991677314043045, 0.01230061985552311, 0.025017380714416504, 0.33947470784187317, 0.0032216052059084177, NaN, NaN, NaN, NaN, NaN], [0.03372317552566528, 0.030876630917191505, 0.025082340463995934, 0.008588657714426517, 0.007454049773514271, 0.009771045297384262, 0.010381288826465607, 0.041183773428201675, 0.004549690056592226, 0.01619204692542553, 0.0060179769061505795, 0.009672058746218681, 0.022905999794602394, 0.009750566445291042, 0.30946746468544006, 0.31111404299736023, 0.0035644923336803913, 0.0013678895775228739, 0.0016790243098512292, 0.0035299588926136494, 0.004438228905200958, 0.004504224751144648, 0.0015486004995182157, 0.006104794796556234, 0.009403211995959282, 0.00038756802678108215, 0.001732571516185999, 0.00042684219079092145, 0.00029873420135118067, 0.02043243870139122, 0.02443091571331024, 0.011036018840968609, 0.0030384601559489965, 0.007405058480799198, 0.004648045636713505, 0.010011163540184498, NaN, NaN, NaN, NaN], [0.18900562822818756, 0.14908763766288757, 0.05840699374675751, 0.10216160118579865, 0.03072887472808361, 0.04109037667512894, 0.03799780085682869, 0.02909342385828495, 0.03500371053814888, 0.0757574513554573, 0.061073921620845795, 0.09956928342580795, 0.10441071540117264, 0.14136889576911926, 0.13095542788505554, 0.16896948218345642, 0.0033956619445234537, 0.009647470898926258, 0.0011160745052620769, 0.30864211916923523, 0.0008666384965181351, 0.0001862353819888085, 0.0007671809289604425, 0.0006719603552483022, 0.002030742121860385, 0.00038655498065054417, 0.0009093419066630304, 0.0015865613240748644, 0.007534818258136511, 0.009185722097754478, 0.00011195908882655203, 0.003075815038755536, 0.000886340974830091, 0.0034873690456151962, 0.021776562556624413, 0.11334169656038284, 0.0832705944776535, NaN, NaN, NaN], [0.014150185510516167, 0.03789284825325012, 0.007744992151856422, 0.02556411363184452, 0.0037681234534829855, 0.001123085618019104, 0.002939486177638173, 0.010072565637528896, 0.019109029322862625, 0.003645692951977253, 0.0027771664317697287, 0.002490789396688342, 0.007166225463151932, 0.005180294159799814, 0.2058444321155548, 
0.006588279269635677, 7.165617716964334e-06, 0.0005450915195979178, 1.0953889614029322e-05, 0.01959507167339325, 0.001590097788721323, 1.1096496564277913e-05, 7.439414184773341e-05, 9.72584675764665e-05, 0.00039174238918349147, 2.7912905352422968e-05, 4.964227991877124e-05, 7.256279786815867e-05, 0.00222678086720407, 0.04727102443575859, 0.0002576226834207773, 0.00020273383415769786, 7.391278631985188e-05, 0.00018598776659928262, 0.000617648009210825, 0.03195251524448395, 0.45461374521255493, 0.037591490894556046, NaN, NaN], [0.0469474196434021, 0.1743137687444687, 0.021908296272158623, 0.046387769281864166, 0.02985612489283085, 0.019742406904697418, 0.040140021592378616, 0.01437240932136774, 0.02856219932436943, 0.018488112837076187, 0.004136314615607262, 0.01038376335054636, 0.009851893410086632, 0.026245350018143654, 0.22488054633140564, 0.35417911410331726, 0.010997277684509754, 0.014662563800811768, 0.023722819983959198, 0.01071385107934475, 0.009427045471966267, 0.002653747797012329, 0.0011037624208256602, 0.005973298568278551, 0.0016420705942437053, 0.0009447215707041323, 0.001327668083831668, 0.0005524749867618084, 0.012130306102335453, 0.005379356909543276, 0.0037436189595609903, 0.0009285339619964361, 0.0002853046462405473, 0.0013114019529893994, 0.0012977200094610453, 0.08090774714946747, 0.034737478941679, 0.058711227029561996, 0.0672648623585701, NaN], [0.00832295510917902, 0.021339448168873787, 0.00394090311601758, 0.002333499025553465, 0.05547437444329262, 0.007243151310831308, 0.011641105636954308, 0.0331541933119297, 0.010278979316353798, 0.011881710961461067, 0.001766148954629898, 0.04899042472243309, 0.01878243498504162, 0.01244808267802, 0.15685127675533295, 0.18188641965389252, 0.00040442554745823145, 0.0015771333128213882, 0.005189571529626846, 8.387575689994264e-06, 0.0001226859458256513, 0.0011242604814469814, 0.0013583728577941656, 0.0030172227416187525, 0.00029841059586033225, 1.2829146726289764e-05, 0.001467264024540782, 0.001090237987227738, 0.002914785873144865, 0.0006871690275147557, 0.002592542441561818, 0.00021328746515791863, 6.871169898658991e-05, 0.002350796014070511, 0.0026233955286443233, 0.02620280720293522, 0.005966363474726677, 0.08270465582609177, 0.010547555983066559, 0.018362630158662796]]], [[[0.1393769532442093, 0.0735321119427681, 0.701509952545166, 0.10650816559791565, 0.05110495164990425, 0.021589145064353943, 0.0033319133799523115, 0.0014166238252073526, 0.01486207265406847, 0.006584684830158949, 0.002582702785730362, 0.0004108685825485736, 0.010701421648263931, 0.009390643797814846, 0.06290604919195175, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0030957262497395277, 0.0237117987126112, 0.7945073246955872, 0.09792613238096237, 0.2614360749721527, 0.179405078291893, 0.011310527101159096, 0.009954328648746014, 0.009489532560110092, 0.0005609119543805718, 0.000751268700696528, 0.0001462608779547736, 0.004604416899383068, 0.004964352585375309, 0.019775664433836937, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.002461136318743229, 0.024594180285930634, 0.009559455327689648, 0.055053047835826874, 0.30010533332824707, 0.4690517783164978, 0.03334644436836243, 0.0075769852846860886, 0.007821744307875633, 0.004109389614313841, 0.0022267017047852278, 0.000916018383577466, 0.0037954216822981834, 0.0007741246954537928, 0.004415341652929783, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0019876149017363787, 0.0012237336486577988, 0.00015556006110273302, 0.0003553472051862627, 0.4419420659542084, 0.6252713799476624, 0.02062046155333519, 0.0028509902767837048, 0.00548406969755888, 0.0003452444798313081, 0.0001962203241419047, 0.0008938669925555587, 0.0009214308229275048, 1.2216354662086815e-05, 0.0019377138232812285, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.00020824302919209003, 0.00021322975226212293, 4.6913473852328025e-06, 0.00017657040734775364, 0.0005752452998422086, 0.5289100408554077, 0.1970362812280655, 0.12947966158390045, 0.0005265067447908223, 0.000227929005632177, 6.233566091395915e-05, 0.0001991882745642215, 0.00032238851417787373, 0.0003627484547905624, 0.0016414258861914277, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0010278578847646713, 0.0029486939311027527, 0.00014835220645181835, 0.00036925319000147283, 0.00742883887141943, 0.03272741660475731, 0.8576475977897644, 0.03500620648264885, 0.2982224225997925, 0.0003585784579627216, 5.663683623424731e-05, 0.0011889662127941847, 0.00576341338455677, 0.003998933359980583, 0.03130826726555824, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.002113666385412216, 0.004151111003011465, 0.002428078791126609, 0.002119476906955242, 0.001100956811569631, 0.003687644377350807, 0.13543397188186646, 0.11922256648540497, 0.7567945718765259, 0.2570010721683502, 0.004903816152364016, 0.0001005519661703147, 0.000830159813631326, 0.001259618904441595, 0.14076685905456543, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0010344160255044699, 0.00660368800163269, 0.0025270660407841206, 0.00023567670723423362, 0.0004021638887934387, 0.0030120171140879393, 0.0016376315616071224, 0.0524386465549469, 0.7797302007675171, 0.1269131302833557, 0.004214781802147627, 0.0002750723797362298, 0.002267329953610897, 0.001067862962372601, 0.16698867082595825, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0009750229655764997, 0.0120720649138093, 0.0038384809158742428, 0.0036232813727110624, 0.004431525245308876, 0.0007613649941049516, 5.662842158926651e-05, 0.01338160876184702, 0.041878536343574524, 0.7091978788375854, 0.2535402476787567, 0.13969287276268005, 0.026510832831263542, 0.0006678565987385809, 0.015569130890071392, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0002093962684739381, 0.00030164673808030784, 0.00010105424007633701, 5.030819465901004e-06, 0.001411793869920075, 0.003664590884000063, 0.00017403968377038836, 0.0011218853760510683, 0.011106000281870365, 0.003924186807125807, 0.07315385341644287, 0.3008219599723816, 0.36353737115859985, 0.025737306103110313, 0.0060785748064517975, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0001716838014544919, 0.0008840822265483439, 4.3183892557863146e-05, 3.6494086543825688e-06, 0.0005770743009634316, 0.010045445524156094, 0.00010205945727648214, 6.57988857710734e-05, 0.0006949909729883075, 0.004452799912542105, 0.009000658988952637, 
[Embedded notebook cell output: nested arrays of per-head attention weights (floating-point values in [0, 1] with NaN padding for positions beyond the sequence length); the raw numeric data is omitted here.]
9.89354812190868e-05, 0.00020726426737383008, 0.3419104218482971, 0.017820989713072777, 1.0936159014818259e-05, 0.0006241680239327252, 4.3406893382780254e-05, 0.2565733790397644, 0.5255003571510315, 0.040596142411231995, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.006757077760994434, 0.1354868859052658, 0.002759847091510892, 0.009205225855112076, 0.0038083188701421022, 0.0014255000278353691, 0.0007299972930923104, 0.2051592320203781, 0.00020230394147802144, 0.001623967313207686, 0.006681961473077536, 0.0021689198911190033, 5.557909025810659e-05, 0.000162289768923074, 0.20840437710285187, 0.2143511176109314, 3.818454570136964e-05, 0.0006476931739598513, 0.00012842394062317908, 0.007853559218347073, 0.008102592080831528, 0.0005345920799300075, 0.00793861411511898, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.010027364827692509, 0.02789497748017311, 0.0041139991953969, 0.012661347165703773, 0.0013435317669063807, 0.0034407242201268673, 0.0064836894161999226, 0.007366063538938761, 0.29601985216140747, 0.053567804396152496, 0.040060218423604965, 0.004607491660863161, 0.00018677859043236822, 3.186250978615135e-05, 0.10952453315258026, 0.00014670012751594186, 7.536429620813578e-06, 0.0001294321846216917, 0.00024457855033688247, 0.00022483686916530132, 0.001284220488741994, 0.0014163334853947163, 0.5552030801773071, 0.006061996798962355, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.19971387088298798, 0.012958711944520473, 0.001638519112020731, 0.17775660753250122, 0.0022716999519616365, 0.03685721755027771, 0.06948257982730865, 0.005452410783618689, 0.037147630006074905, 0.19678887724876404, 0.21911752223968506, 0.02466990426182747, 0.0004891769494861364, 6.33890085737221e-05, 0.21250228583812714, 0.09223808348178864, 0.004348577931523323, 0.013163902796804905, 0.018216131255030632, 0.035016678273677826, 0.11075899004936218, 0.1728493720293045, 0.19621391594409943, 0.029301786795258522, 0.46166056394577026, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05692211166024208, 0.036700569093227386, 0.0015533106634393334, 0.01848980039358139, 0.002404581755399704, 0.008354752324521542, 0.023693444207310677, 0.02836945652961731, 0.29948922991752625, 0.005321406293660402, 0.0022319734562188387, 0.0005214664852246642, 0.00019869217067025602, 5.8369230828247964e-05, 0.008838840760290623, 0.11309938877820969, 0.004489036742597818, 0.0485633909702301, 0.021462395787239075, 0.4192940890789032, 0.26214849948883057, 0.22032421827316284, 0.0067114257253706455, 0.010406548157334328, 0.11692964285612106, 0.23004111647605896, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.011123275384306908, 0.003955129534006119, 0.0015235289465636015, 0.011223106645047665, 0.002481319010257721, 0.000903434120118618, 0.0006720115779899061, 0.00024289102293550968, 0.010115177370607853, 0.26232361793518066, 0.014199022203683853, 0.0005582758458331227, 0.0001542939426144585, 5.357913687475957e-05, 0.050008371472358704, 0.14281870424747467, 0.000545236689504236, 0.003893920686095953, 0.0005153689999133348, 0.01790653169155121, 0.004868220537900925, 0.0031487985979765654, 0.0011714915744960308, 0.0043698386289179325, 0.020373020321130753, 0.02358497679233551, 0.2682037353515625, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.025191567838191986, 0.009952094405889511, 0.015023785643279552, 0.0893990620970726, 
0.006299919448792934, 0.0077370950020849705, 0.0004422276106197387, 0.00010742250742623582, 0.001807618304155767, 0.052116382867097855, 0.33116668462753296, 0.0029348258394747972, 0.004942082799971104, 0.0017646296182647347, 0.009777115657925606, 0.09794370085000992, 0.0018320194212719798, 0.000285644200630486, 3.260145604144782e-05, 0.00041393720312044024, 0.0043053096160292625, 0.002047628629952669, 0.0003047001373488456, 0.002447759034112096, 0.0016152235912159085, 0.024524936452507973, 0.29461416602134705, 0.014563476666808128, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12133541703224182, 0.0033125760965049267, 0.008441481739282608, 0.0257105715572834, 0.005432062782347202, 0.020603680983185768, 0.0008238950395025313, 0.00019463927310425788, 0.0001117472565965727, 0.011082900688052177, 0.4118730425834656, 0.0024717452470213175, 0.21560189127922058, 0.015253315679728985, 0.03452993184328079, 0.13817672431468964, 0.0034516772720962763, 0.002911344636231661, 0.0003800573176704347, 0.001462712767533958, 0.001961951842531562, 0.0040230052545666695, 0.0023086154833436012, 0.002483226591721177, 0.028553131967782974, 0.014239847660064697, 0.18359807133674622, 0.09542248398065567, 0.2067933827638626, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00568122835829854, 0.003583817044273019, 0.0009402501164004207, 0.0034319525584578514, 0.014700439758598804, 0.00014027200813870877, 5.928567406954244e-05, 0.0005310353590175509, 0.001004774123430252, 0.00433507701382041, 0.003991644363850355, 0.0015378128737211227, 6.231402221601456e-05, 0.02625701017677784, 0.15481357276439667, 0.14011409878730774, 0.01466476172208786, 0.09487155824899673, 0.03769487887620926, 0.062972791492939, 0.003495296463370323, 0.0004466120735742152, 0.0044098952785134315, 0.056031279265880585, 0.12585759162902832, 0.04736572876572609, 0.02727479301393032, 0.06542934477329254, 0.563940703868866, 0.024195805191993713, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00503728911280632, 0.004739185329526663, 0.021364033222198486, 0.04603096470236778, 0.004565324168652296, 0.021244995296001434, 0.07592181116342545, 0.027910754084587097, 0.008603491820394993, 0.004941265098750591, 0.03103908710181713, 0.035909827798604965, 0.01818632334470749, 0.04406380280852318, 0.17931725084781647, 0.05395817384123802, 6.747527368133888e-05, 0.0018676340114325285, 0.0002809480356518179, 0.03275269269943237, 0.005758063402026892, 9.199039777740836e-05, 0.00011598093260545284, 0.0015754709020256996, 0.026104740798473358, 0.009686414152383804, 0.001081737456843257, 0.0017741151386871934, 0.49180474877357483, 0.007121484261006117, 0.013531914912164211, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.21416018903255463, 0.005411786492913961, 0.02111194096505642, 0.07001130282878876, 0.04736214876174927, 0.09187527745962143, 0.1399366855621338, 0.030981194227933884, 0.02342112548649311, 0.07424263656139374, 0.02716991677880287, 0.5710572600364685, 0.007255392149090767, 0.005560784600675106, 0.054831843823194504, 0.03839295729994774, 0.0002068357716780156, 0.006204192526638508, 0.0054313126020133495, 0.011207946576178074, 0.0013116636546328664, 0.008276019245386124, 0.002269806107506156, 0.004080863669514656, 0.01488969475030899, 0.0006726597202941775, 0.009391524828970432, 0.039596475660800934, 0.19840312004089355, 0.043704546988010406, 0.31202515959739685, 0.23529505729675293, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.3339015245437622, 0.03176174685359001, 0.25991618633270264, 0.31748515367507935, 
0.17923809587955475, 0.2977932095527649, 0.14185847342014313, 0.09826549887657166, 0.4168005883693695, 0.09961694478988647, 0.1390676498413086, 0.191667839884758, 0.0443519689142704, 0.10075851529836655, 0.08045557886362076, 0.07469534128904343, 0.001304430770687759, 0.0239309910684824, 0.008060658350586891, 0.021029237657785416, 0.015191669575870037, 0.006979105528444052, 0.0016427322989329696, 0.002132130553945899, 0.015241370536386967, 0.0018563566263765097, 0.035101406276226044, 0.06515936553478241, 0.27313047647476196, 0.10352547466754913, 0.2570805549621582, 0.45083746314048767, 0.1295340657234192, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.018510108813643456, 0.0015040059806779027, 0.011199833825230598, 0.021222928538918495, 0.02421635016798973, 0.004175371024757624, 0.0007807075162418187, 0.0005349562270566821, 0.0038052168674767017, 0.3727143108844757, 0.022828511893749237, 0.01009275484830141, 0.0012628438416868448, 0.0009096930734813213, 0.10904579609632492, 0.19253067672252655, 0.0008209676598198712, 0.004669400863349438, 0.00047802351764403284, 0.013135433197021484, 0.0034620855003595352, 0.0016354827675968409, 0.0008273401763290167, 0.0018895546672865748, 0.009773027151823044, 0.006215384230017662, 0.2356690764427185, 0.01036232803016901, 0.06144833192229271, 0.008870624005794525, 0.024212215095758438, 0.008509873412549496, 0.01347219105809927, 0.35532569885253906, NaN, NaN, NaN, NaN, NaN, NaN], [0.05896773934364319, 0.023542853072285652, 0.0776505172252655, 0.15385140478610992, 0.011508575640618801, 0.0939982458949089, 0.0018089915392920375, 0.0003290986060164869, 0.0005636389250867069, 0.029514340683817863, 0.35146546363830566, 0.007090898230671883, 0.012099701911211014, 0.006742698606103659, 0.052738532423973083, 0.10910779982805252, 0.002221200615167618, 0.0001436042075511068, 1.1848528629343491e-05, 0.0001887700636871159, 0.0020721519831568003, 0.0009632316650822759, 0.00014056939107831568, 0.0007320817094296217, 0.0006829273188486695, 0.007395991589874029, 0.2889891564846039, 0.007074101362377405, 0.0002627878566272557, 0.004363438580185175, 0.0018575063440948725, 0.00557676050812006, 0.012322820723056793, 0.31134024262428284, 0.027276715263724327, NaN, NaN, NaN, NaN, NaN], [0.18205131590366364, 0.00472951028496027, 0.03192766383290291, 0.059333182871341705, 0.028221452608704567, 0.033883631229400635, 0.00131422549020499, 0.0001085989861167036, 5.632251122733578e-05, 0.004554648417979479, 0.2950275242328644, 0.0014449548907577991, 0.2329740822315216, 0.0520821250975132, 0.1361607313156128, 0.18170765042304993, 0.003209297079592943, 0.0023912524338811636, 0.00020479358499869704, 0.0009326079743914306, 0.0013757160631939769, 0.0021110770758241415, 0.0008730489062145352, 0.000792569131590426, 0.01825624145567417, 0.0059272306971251965, 0.11984144151210785, 0.05654650926589966, 0.08423373848199844, 0.024963613599538803, 0.027966396883130074, 0.1777324080467224, 0.005578523967415094, 0.14623191952705383, 0.11331525444984436, 0.2157108038663864, NaN, NaN, NaN, NaN], [0.0063572716899216175, 0.002779513830319047, 0.0009721479145810008, 0.0035897656343877316, 0.019835324957966805, 0.00021187934908084571, 8.435463678324595e-05, 0.00043589723645709455, 0.0004945950931869447, 0.004414541646838188, 0.0027602717746049166, 0.0008482423145323992, 5.171148222871125e-05, 0.021799515932798386, 0.15211130678653717, 0.1515214741230011, 0.008395697921514511, 0.0657893642783165, 0.019086696207523346, 0.05097401514649391, 0.0016111076110973954, 0.00021851839846931398, 0.002003778237849474, 
0.01669292151927948, 0.06321260333061218, 0.015100682154297829, 0.010209205560386181, 0.015906400978565216, 0.30131736397743225, 0.012282183393836021, 0.09666845202445984, 0.00808996893465519, 0.03798958286643028, 0.013879657723009586, 0.047733187675476074, 0.5371345281600952, 0.020763304084539413, NaN, NaN, NaN], [0.005286877974867821, 0.008391096256673336, 0.025823507457971573, 0.030178312212228775, 0.00857502967119217, 0.042816706001758575, 0.07608389109373093, 0.03679429367184639, 0.0067360359244048595, 0.0038807345554232597, 0.03710461035370827, 0.037315309047698975, 0.018847206607460976, 0.0415174663066864, 0.15352587401866913, 0.07945924997329712, 4.7485355025855824e-05, 0.0020416006445884705, 0.00022757358965463936, 0.013386114500463009, 0.001981395063921809, 3.6917605029884726e-05, 2.620528539409861e-05, 0.0003202208608854562, 0.009042860940098763, 0.0030785591807216406, 0.0011855574557557702, 0.0005728560499846935, 0.20002734661102295, 0.00213914574123919, 0.002927121240645647, 0.004968173801898956, 0.0065933396108448505, 0.002585601294413209, 0.002817549044266343, 0.547335147857666, 0.006171087268739939, 0.018697692081332207, NaN, NaN], [0.2992006242275238, 0.008802352473139763, 0.027079692110419273, 0.08564624935388565, 0.11560814827680588, 0.22971339523792267, 0.1826445311307907, 0.033842965960502625, 0.06175734102725983, 0.11205370724201202, 0.04016120731830597, 0.5851526856422424, 0.016921253874897957, 0.011652404442429543, 0.08951538056135178, 0.059381648898124695, 0.00026094831991940737, 0.007586375344544649, 0.006061093881726265, 0.0039266073144972324, 0.0004965912085026503, 0.003665223019197583, 0.0008195870905183256, 0.0014654117403551936, 0.0045553394593298435, 0.00032001128420233727, 0.004615657962858677, 0.017150992527604103, 0.07922492176294327, 0.012805018573999405, 0.1320599913597107, 0.09461667388677597, 0.003555287839844823, 0.019601207226514816, 0.047796737402677536, 0.29085052013397217, 0.04383813217282295, 0.32529252767562866, 0.24933147430419922, NaN], [0.12446854263544083, 0.0009617851465009153, 0.004788657650351524, 0.0008746102685108781, 0.16037316620349884, 0.003065474098548293, 0.0056405095383524895, 0.005250739399343729, 0.05696318671107292, 0.013819074258208275, 0.028642717748880386, 0.0011808956041932106, 0.08446037769317627, 0.03008313849568367, 0.13710428774356842, 0.13618361949920654, 0.0007103006355464458, 0.025071904063224792, 0.004419561009854078, 0.001962232170626521, 0.0023795748129487038, 0.002366183791309595, 0.0003890783409588039, 0.00022811641974840313, 0.0010611300822347403, 0.001608739490620792, 0.028126444667577744, 0.005591525696218014, 0.0024579197634011507, 0.004123267717659473, 0.0409882515668869, 0.010364435613155365, 0.010518459603190422, 0.09771004319190979, 0.037823982536792755, 0.019979961216449738, 0.018303534016013145, 0.22492042183876038, 0.09256016463041306, 0.005498841404914856]], [[0.09139528125524521, 0.1232069656252861, 0.06926427036523819, 0.03596228361129761, 0.08677947521209717, 0.3523865342140198, 0.17220446467399597, 0.3048216700553894, 0.24129998683929443, 0.008230631239712238, 0.012852879241108894, 0.0024019270204007626, 0.003931952640414238, 0.002576343482360244, 0.13348431885242462, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.005495021585375071, 0.009821278043091297, 0.006606503389775753, 0.0009270968730561435, 0.022634856402873993, 0.02637101709842682, 0.03666122257709503, 0.003247066168114543, 0.03138025477528572, 
0.0023785934317857027, 0.007012520916759968, 0.0027185468934476376, 0.001623710268177092, 0.009003029204905033, 0.24841202795505524, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.004891206510365009, 0.01856830157339573, 0.01660238206386566, 0.05400720611214638, 0.2678459584712982, 0.21548990905284882, 0.0901486948132515, 0.14165979623794556, 0.4387242794036865, 0.0060303402133286, 0.03774549812078476, 0.022296983748674393, 0.014843892306089401, 0.003844154067337513, 0.0701230987906456, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.009136357344686985, 0.005524215288460255, 0.002000550739467144, 0.004360574297606945, 0.06230698525905609, 0.032116882503032684, 0.14447683095932007, 0.11250873655080795, 0.12456412613391876, 0.017903752624988556, 0.03641437739133835, 0.030236193910241127, 0.03817100450396538, 0.0020203718449920416, 0.24235397577285767, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.011458649300038815, 0.0028747334145009518, 0.0048751854337751865, 0.0034302298445254564, 0.032581884413957596, 0.009492963552474976, 0.29646721482276917, 0.024549754336476326, 0.5199102163314819, 0.07497825473546982, 0.039336495101451874, 0.23366358876228333, 0.2855432629585266, 0.0047793262638151646, 0.131587415933609, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0048281243070960045, 0.014400148764252663, 0.00021499136346392334, 0.00015902110317256302, 0.0008502291166223586, 0.005816742777824402, 0.03721616789698601, 0.31765323877334595, 0.006985681131482124, 9.90723492577672e-05, 0.0015535155544057488, 0.002471775049343705, 0.00966054666787386, 0.002636645222082734, 0.15553238987922668, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.01824354939162731, 0.02838711440563202, 0.0006440957658924162, 0.00040316785452887416, 0.00041587575105950236, 0.0021029487252235413, 0.07766012847423553, 0.3384210765361786, 0.005884509067982435, 0.02229108288884163, 0.02292727865278721, 0.00326070049777627, 0.002748187631368637, 0.004811563994735479, 0.08466839045286179, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0009052195237018168, 0.00028935770387761295, 0.00010135041520697996, 4.4237076508579776e-05, 9.765469440026209e-05, 0.0003226006228942424, 0.0006174442823976278, 0.003764552064239979, 0.001191335148178041, 0.0005841490346938372, 0.001988127361983061, 0.0019700597040355206, 0.0006354944198392332, 0.0011416736524552107, 0.25631290674209595, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.007226317655295134, 0.015471585094928741, 0.027516253292560577, 0.0063530029729008675, 0.015222059562802315, 0.004327190574258566, 0.010739101096987724, 0.0023785619996488094, 0.053105201572179794, 0.0674574077129364, 0.31870341300964355, 0.4986713230609894, 0.027042971923947334, 0.0736011192202568, 0.116986483335495, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.015794623643159866, 0.009404269978404045, 0.017993446439504623, 0.003823975333943963, 0.004969433881342411, 
0.03679484874010086, 0.04242165759205818, 0.017222637310624123, 0.1201641708612442, 0.016131659969687462, 0.3518509864807129, 0.3061373829841614, 0.0458594486117363, 0.15943044424057007, 0.17968055605888367, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.006380036938935518, 0.028477374464273453, 0.006851766724139452, 0.005024573765695095, 0.02579522877931595, 0.052536945790052414, 0.0111169358715415, 0.0038714397232979536, 0.008046599105000496, 0.008921324275434017, 0.011395278386771679, 0.10255969315767288, 0.21638940274715424, 0.44467252492904663, 0.05895284563302994, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.010142950341105461, 0.001643709372729063, 0.002422438468784094, 0.0009472724632360041, 0.0033483330626040697, 0.003415578044950962, 0.03889569267630577, 0.005287462379783392, 0.00042015319922938943, 0.0010667687747627497, 0.00740370387211442, 0.00895014964044094, 0.0067735291086137295, 0.017782215029001236, 0.26753443479537964, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.11724554747343063, 0.0023070531897246838, 0.004510094877332449, 0.0014967885799705982, 0.007825762964785099, 0.00018500315491110086, 0.013543304987251759, 0.0012864026939496398, 0.0007778326398693025, 0.00044295378029346466, 0.001640060218051076, 0.0014512997586280107, 0.002360806567594409, 0.2112705558538437, 0.19457924365997314, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.09882069379091263, 0.014871560037136078, 0.005077258683741093, 0.0014827846316620708, 0.005620975513011217, 0.0024449406191706657, 0.07368315756320953, 0.06950978189706802, 0.0017206794582307339, 0.00039900749106891453, 0.0006052122334949672, 0.0005968212499283254, 0.004762541502714157, 0.0232950821518898, 0.2500154376029968, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.001020739320665598, 0.001402992638759315, 0.0006185534875839949, 0.0003395593084860593, 0.0013021298218518496, 0.0008022591937333345, 0.003452729433774948, 0.0026675688568502665, 0.0021077031269669533, 0.0008018113439902663, 0.0017594166565686464, 0.0005115982494316995, 0.0007778447470627725, 0.0008368113776668906, 0.13888627290725708, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.005261753685772419, 0.005328452680259943, 0.1075906753540039, 0.007504252251237631, 0.18196941912174225, 0.2677178680896759, 0.18533208966255188, 0.041308093816041946, 0.04052837938070297, 0.0018225060775876045, 0.004738607443869114, 0.028365809470415115, 0.07867489755153656, 0.032602421939373016, 0.14697469770908356, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.024903474375605583, 0.2637169063091278, 0.01148936152458191, 0.01806865818798542, 0.010384032502770424, 0.05497525632381439, 0.01011874619871378, 6.159161421237513e-05, 0.03404803201556206, 0.01315199863165617, 0.004086918197572231, 0.033981483429670334, 0.0007253359071910381, 0.0010365481721237302, 0.023150891065597534, 0.11621169000864029, 0.2792567312717438, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 
NaN, NaN], [0.03176039457321167, 0.002004105830565095, 0.011469452641904354, 0.003235333366319537, 0.011606591753661633, 0.01332010142505169, 0.007885226979851723, 0.0010319099528715014, 0.0026684575714170933, 0.003885145066305995, 0.002207087352871895, 0.010414022952318192, 0.015553043223917484, 0.01973811537027359, 0.1639232188463211, 0.16788142919540405, 0.08717074245214462, 0.024576181545853615, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.24842531979084015, 0.031220050528645515, 0.028132880106568336, 0.029530569911003113, 0.01766534335911274, 0.36354437470436096, 0.06892471760511398, 0.02528339996933937, 0.01102821622043848, 0.15825842320919037, 0.13755246996879578, 0.07390110194683075, 0.19022952020168304, 0.1824880689382553, 0.1432848572731018, 0.14762163162231445, 0.09094145894050598, 0.023598572239279747, 0.2273045778274536, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0013664831640198827, 0.001714985934086144, 0.0013615208445116878, 0.0015855998499318957, 0.0011547008762136102, 0.007221538573503494, 0.01537399459630251, 0.020302001386880875, 0.0011185031617060304, 0.001242821803316474, 0.0004577837826218456, 0.0013307477347552776, 6.100967220845632e-05, 3.943840420106426e-05, 0.16435295343399048, 0.10424397885799408, 0.7145561575889587, 0.21233327686786652, 0.5272893309593201, 0.04291817173361778, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0006725311395712197, 0.000846685899887234, 0.001614874112419784, 0.000348375499015674, 0.0019150535808876157, 0.01370947528630495, 0.026421356946229935, 0.08118636161088943, 0.0008913385099731386, 0.0004401778569445014, 0.0003709472657646984, 0.0007744845934212208, 0.002328733913600445, 0.0003664834948722273, 0.14579549431800842, 0.11001076549291611, 0.4734446108341217, 0.06134912371635437, 0.2925608456134796, 0.02150837518274784, 0.19962187111377716, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.011207095347344875, 0.029191432520747185, 0.015348215587437153, 0.012354064732789993, 0.002485303906723857, 0.7150441408157349, 0.0764552503824234, 0.14450958371162415, 0.0016117440536618233, 0.008765846490859985, 0.011787951923906803, 0.002862851833924651, 0.022502094507217407, 0.007210019044578075, 0.007054056040942669, 0.17212024331092834, 0.1419786959886551, 0.05631781369447708, 0.2185172289609909, 0.002532752463594079, 0.0032626313623040915, 0.18381445109844208, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.006926322355866432, 0.0050496323965489864, 0.010020078159868717, 0.021360181272029877, 0.0027102867607027292, 0.028520535677671432, 0.05918040871620178, 0.23060235381126404, 0.019199691712856293, 0.09477535635232925, 0.013206732459366322, 0.0014817069750279188, 0.0153219448402524, 0.01803957298398018, 0.07950127124786377, 0.09107878059148788, 0.12160263955593109, 0.2150201052427292, 0.3705081045627594, 0.07164584845304489, 0.05021890252828598, 0.14392021298408508, 0.39638784527778625, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.009242678992450237, 0.05580667033791542, 0.014326682314276695, 0.04630666971206665, 0.010674487799406052, 0.5850453972816467, 0.4108324944972992, 0.4116209149360657, 0.007144990377128124, 0.20661039650440216, 0.037308260798454285, 0.054067905992269516, 0.037599414587020874, 
0.03113422356545925, 0.22261686623096466, 0.2121918499469757, 0.20806513726711273, 0.15205760300159454, 0.38131871819496155, 0.1009124368429184, 0.09936784207820892, 0.07077471911907196, 0.05006752535700798, 0.14871110022068024, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0023711349349468946, 0.019731320440769196, 0.027566438540816307, 0.03758935630321503, 0.022646954283118248, 0.06538618355989456, 0.01152126956731081, 0.014797273091971874, 0.003413880243897438, 0.024214325472712517, 0.019466044381260872, 0.007235943805426359, 0.0008611958473920822, 0.0011126803001388907, 0.268255352973938, 0.21685828268527985, 0.23333710432052612, 0.06609098613262177, 0.12803798913955688, 0.1004808098077774, 0.025170300155878067, 0.04069148004055023, 0.10828333348035812, 0.10351972281932831, 0.29450517892837524, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08772679418325424, 0.02003292553126812, 0.09465871006250381, 0.41126132011413574, 0.07995565980672836, 0.5143890976905823, 0.1155472919344902, 0.01320470031350851, 0.02149844542145729, 0.06702866405248642, 0.6884661316871643, 0.09638151526451111, 0.35587188601493835, 0.2170087993144989, 0.019593046978116035, 0.05205162987112999, 0.22306090593338013, 0.049221184104681015, 0.061203524470329285, 0.09776578843593597, 0.06183243915438652, 0.17444021999835968, 0.321644127368927, 0.054029058665037155, 0.2629997134208679, 0.2757931053638458, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.01343127153813839, 0.0019279895350337029, 0.01925632171332836, 0.04226915165781975, 0.005290344823151827, 0.5555825233459473, 0.06846548616886139, 0.006453313864767551, 0.019162334501743317, 0.0017575293313711882, 0.2967261075973511, 0.11721283942461014, 0.4438721835613251, 0.1899448037147522, 0.007863422855734825, 0.05800137668848038, 0.32540804147720337, 0.13333332538604736, 0.05756821855902672, 0.12640602886676788, 0.11846329271793365, 0.2918737828731537, 0.3632459342479706, 0.18816226720809937, 0.6433262228965759, 0.3291742205619812, 0.12170911580324173, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12789316475391388, 0.004323228262364864, 0.03538274019956589, 0.05581461265683174, 0.020947236567735672, 0.09860846400260925, 0.11394336074590683, 0.010361305437982082, 0.011101406998932362, 0.33580121397972107, 0.13689599931240082, 0.038663506507873535, 0.19725953042507172, 0.10533706098794937, 0.008538279682397842, 0.11078674346208572, 0.40781712532043457, 0.06261185556650162, 0.05779192969202995, 0.18194560706615448, 0.1120922714471817, 0.5645142793655396, 0.33037880063056946, 0.18058234453201294, 0.6155731678009033, 0.21430827677249908, 0.044265877455472946, 0.20548948645591736, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.007053391542285681, 0.012331487610936165, 0.008611395955085754, 0.031008008867502213, 0.004283395130187273, 0.0029549654573202133, 0.00849387887865305, 0.008564120158553123, 0.02629040740430355, 0.009985123760998249, 0.00761940935626626, 0.003499145619571209, 0.0015691317385062575, 0.005600257311016321, 0.5214234590530396, 0.08288691937923431, 0.2962968051433563, 0.2819015085697174, 0.19574381411075592, 0.1136796846985817, 0.07755676656961441, 0.20596812665462494, 0.3330870270729065, 0.21944326162338257, 0.22804425656795502, 0.1688224822282791, 0.2872299253940582, 0.13759873807430267, 0.09907422959804535, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0007030746201053262, 0.0001308645587414503, 
0.0001913319865707308, 0.00016671058256179094, 0.000299752748105675, 0.0001608166057849303, 0.004501530434936285, 0.0010771069210022688, 0.003937124740332365, 0.001599485520273447, 0.0007339937728829682, 0.0030779645312577486, 3.4502605558373034e-05, 9.700484952190891e-05, 0.15641583502292633, 0.11118441820144653, 0.6110438108444214, 0.6292654871940613, 0.5805363655090332, 0.22765980660915375, 0.4274957776069641, 0.6573506593704224, 0.6816673278808594, 0.5361799597740173, 0.320940226316452, 0.3845328688621521, 0.6242536306381226, 0.41633498668670654, 0.12922972440719604, 0.01991792768239975, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.027913473546504974, 0.10055015236139297, 0.005828284192830324, 0.007361504249274731, 0.0010143647668883204, 0.000654859293717891, 0.0101061025634408, 0.029607031494379044, 0.04485415667295456, 0.09235014766454697, 0.05163425952196121, 0.03075464628636837, 0.027050884440541267, 0.021472401916980743, 0.18064866960048676, 0.10675505548715591, 0.1912444829940796, 0.23975566029548645, 0.32351911067962646, 0.046362437307834625, 0.08004549145698547, 0.3363644778728485, 0.2706483006477356, 0.26792168617248535, 0.2952979505062103, 0.4496033787727356, 0.1126319095492363, 0.5116660594940186, 0.015820369124412537, 0.030236991122364998, 0.03603934869170189, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0011193754617124796, 0.03864011913537979, 0.0033454783260822296, 0.0006957795703783631, 0.001480268081650138, 0.0012079592561349273, 0.00020605533791240305, 0.0011212154058739543, 0.0015670693246647716, 0.0014121911954134703, 0.0012700740480795503, 0.0019415348069742322, 0.001359732006676495, 0.0011440571397542953, 0.23876120150089264, 0.2233639359474182, 0.0911012589931488, 0.12918633222579956, 0.17958812415599823, 0.037158817052841187, 0.06043876335024834, 0.43303725123405457, 0.3349981904029846, 0.09061599522829056, 0.23225362598896027, 0.1514965295791626, 0.09056703746318817, 0.2480165809392929, 0.056160230189561844, 0.015552842989563942, 0.007365798112004995, 0.17054231464862823, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.012943120673298836, 0.020876264199614525, 0.04825761169195175, 0.03707631304860115, 0.015636419877409935, 0.11923719942569733, 0.021652603521943092, 0.026653259992599487, 0.020431919023394585, 0.03287035599350929, 0.10921605676412582, 0.11103712767362595, 0.08490956574678421, 0.05352960154414177, 0.1791488379240036, 0.09585364907979965, 0.22669152915477753, 0.08040254563093185, 0.0638674795627594, 0.15364862978458405, 0.13237975537776947, 0.3887532651424408, 0.5357696413993835, 0.07155110687017441, 0.4139500856399536, 0.05426981300115585, 0.1238613948225975, 0.07816720753908157, 0.14353296160697937, 0.021915707737207413, 0.02897939831018448, 0.22262324392795563, 0.4835837185382843, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.010143280029296875, 0.0011783033842220902, 0.07699523866176605, 0.04151652753353119, 0.013031265698373318, 0.6595657467842102, 0.04001229628920555, 0.015414847061038017, 0.05828738585114479, 0.00582495890557766, 0.39538952708244324, 0.3540988564491272, 0.5535411834716797, 0.14920510351657867, 0.05510678142309189, 0.05190133675932884, 0.3522363007068634, 0.14802464842796326, 0.07656959444284439, 0.12417534738779068, 0.17628712952136993, 0.33604755997657776, 0.38481405377388, 0.20552395284175873, 0.5797679424285889, 0.3262830972671509, 0.19466114044189453, 0.045280374586582184, 0.2712458372116089, 0.041196610778570175, 0.08666794002056122, 0.3327068090438843, 0.1922111064195633, 0.10969121754169464, NaN, NaN, NaN, NaN, NaN, 
NaN], [0.10365689545869827, 0.011393263004720211, 0.09083462506532669, 0.05552159622311592, 0.021694108843803406, 0.23093751072883606, 0.12655670940876007, 0.02638416364789009, 0.016898566856980324, 0.4334920644760132, 0.1302367001771927, 0.07987051457166672, 0.26015403866767883, 0.07882147282361984, 0.06412448734045029, 0.10818891227245331, 0.3937702178955078, 0.030490810051560402, 0.030189264565706253, 0.11243001371622086, 0.07142115384340286, 0.3648340702056885, 0.2467786818742752, 0.13009557127952576, 0.5037410855293274, 0.18716548383235931, 0.08825942128896713, 0.23451530933380127, 0.24434491991996765, 0.03496113047003746, 0.04431905224919319, 0.3934983015060425, 0.31427451968193054, 0.05462265387177467, 0.2524711489677429, NaN, NaN, NaN, NaN, NaN], [0.0009046280756592751, 0.006186267826706171, 0.001710598124191165, 0.0040000369772315025, 0.0010556421475484967, 0.00010012275743065402, 0.000467440317152068, 0.00034073027200065553, 0.012450831942260265, 0.001776019111275673, 0.0016348852077499032, 0.0004490323772188276, 0.00023723821504972875, 0.0005369102582335472, 0.2610536217689514, 0.06088699772953987, 0.23725801706314087, 0.2046121060848236, 0.14171433448791504, 0.06688592582941055, 0.06064169481396675, 0.14286598563194275, 0.21723276376724243, 0.13491223752498627, 0.2083195000886917, 0.15285742282867432, 0.34066644310951233, 0.18166381120681763, 0.10532425343990326, 0.06318715214729309, 0.052211396396160126, 0.20970472693443298, 0.20715771615505219, 0.28281068801879883, 0.13935938477516174, 0.11923542618751526, NaN, NaN, NaN, NaN], [0.00040706052095629275, 5.995776882627979e-05, 0.00011266738147241995, 0.00010974665929097682, 0.00022393744438886642, 7.468188414350152e-05, 0.00239625689573586, 0.0004222780407872051, 0.002755024004727602, 0.0011263962369412184, 0.0004159261588938534, 0.0013214137870818377, 1.3015362128498964e-05, 3.146446033497341e-05, 0.15343648195266724, 0.09884612262248993, 0.5530695915222168, 0.6301063299179077, 0.5187459588050842, 0.28427499532699585, 0.33059176802635193, 0.49595603346824646, 0.6107674241065979, 0.387560099363327, 0.3283739984035492, 0.3905918300151825, 0.5949583053588867, 0.2912430167198181, 0.19163259863853455, 0.03091937117278576, 0.3911139667034149, 0.3233675956726074, 0.421701043844223, 0.6310504674911499, 0.4068542718887329, 0.13317596912384033, 0.02126597985625267, NaN, NaN, NaN], [0.02487853355705738, 0.06922142952680588, 0.005931189749389887, 0.005149703938513994, 0.0007503133383579552, 0.00046759017277508974, 0.004864065907895565, 0.010271446779370308, 0.03885169327259064, 0.0494176521897316, 0.032662954181432724, 0.015474021434783936, 0.005468437913805246, 0.0031831569503992796, 0.16160887479782104, 0.07192745804786682, 0.09934075176715851, 0.15662430226802826, 0.18248029053211212, 0.021172231063246727, 0.037516966462135315, 0.12766626477241516, 0.09711621701717377, 0.09662153571844101, 0.1303528994321823, 0.3114719092845917, 0.1600099802017212, 0.265144020318985, 0.011710498481988907, 0.02471126988530159, 0.012725233100354671, 0.12533646821975708, 0.446529746055603, 0.11092787981033325, 0.45893827080726624, 0.011159577406942844, 0.028070949018001556, 0.024378135800361633, NaN, NaN], [0.0006016235565766692, 0.010655699297785759, 0.0012552555417641997, 0.0004406629304867238, 0.0006771506741642952, 0.0004804672207683325, 8.584682655055076e-05, 0.00018533790716901422, 0.0020008538849651814, 0.0008522755815647542, 0.0005471827462315559, 0.0006654397584497929, 0.0003326669684611261, 0.00020969027536921203, 0.18202657997608185, 
0.21178482472896576, 0.0713806003332138, 0.12116114795207977, 0.16551871597766876, 0.025692136958241463, 0.03932836279273033, 0.255863755941391, 0.20887790620326996, 0.05500240623950958, 0.14075487852096558, 0.158308207988739, 0.10016348958015442, 0.22940821945667267, 0.06542190909385681, 0.016673747450113297, 0.011679067276418209, 0.21266934275627136, 0.27460965514183044, 0.08977667987346649, 0.1985965520143509, 0.05640871822834015, 0.014301197603344917, 0.004748867359012365, 0.1251523643732071, NaN], [0.0006660889484919608, 0.0011989487102255225, 0.006168409250676632, 0.0007392434636130929, 0.002072105184197426, 0.0013732375809922814, 0.001215140800923109, 8.942947169998661e-05, 0.0032219376880675554, 0.00034276655060239136, 0.0006051870877854526, 0.0004003554640803486, 0.0006330502219498158, 9.228585986420512e-05, 0.13989190757274628, 0.11377177387475967, 0.4656391441822052, 0.26672884821891785, 0.20802536606788635, 0.1860857605934143, 0.16829806566238403, 0.19711202383041382, 0.3023360073566437, 0.035885076969861984, 0.11114621162414551, 0.21048156917095184, 0.27827921509742737, 0.11178875714540482, 0.13154125213623047, 0.3096882104873657, 0.09530708193778992, 0.2201821655035019, 0.1989239901304245, 0.27841058373451233, 0.15223632752895355, 0.2206900417804718, 0.34536775946617126, 0.09229245036840439, 0.24595825374126434, 0.2865155339241028]], [[0.04622220993041992, 0.12740419805049896, 0.05372706800699234, 0.5582705140113831, 0.030120277777314186, 0.3703221380710602, 0.020304178819060326, 0.3357560634613037, 0.11819478869438171, 0.0765489861369133, 0.09261158853769302, 0.03858334198594093, 0.13079233467578888, 0.0447748564183712, 0.11706516146659851, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0919138491153717, 0.05798470228910446, 0.02827676385641098, 0.34965166449546814, 0.05504997447133064, 0.1526506543159485, 0.09941896051168442, 0.4367760419845581, 0.061004042625427246, 0.5390062928199768, 0.28723591566085815, 0.15840129554271698, 0.2018149495124817, 0.11561664938926697, 0.1249081939458847, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.032068803906440735, 0.0549696609377861, 0.018587671220302582, 0.2202640324831009, 0.0011182812741026282, 0.03810814768075943, 0.027008401229977608, 0.3763306438922882, 0.11146998405456543, 0.16719762980937958, 0.13283231854438782, 0.014421377331018448, 0.07254088670015335, 0.007401765324175358, 0.20662666857242584, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.10753453522920609, 0.479284405708313, 0.009764611721038818, 0.0431443527340889, 0.0008862981921993196, 0.03188035264611244, 0.00600279588252306, 0.43093177676200867, 0.08460848033428192, 0.18502341210842133, 0.038902610540390015, 0.030237559229135513, 0.1820157915353775, 0.03367093205451965, 0.14427724480628967, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.013928310945630074, 0.032752107828855515, 0.0024797581136226654, 0.10617181658744812, 0.0002726189268287271, 0.011333486996591091, 0.005626056343317032, 0.05421115458011627, 0.020341530442237854, 0.0548044852912426, 0.027503041550517082, 0.005752534605562687, 0.033552803099155426, 0.008454940281808376, 0.388910174369812, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.15046736598014832, 0.296213299036026, 0.044096194207668304, 0.05168119817972183, 0.02727358601987362, 0.04717152938246727, 0.0016543868696317077, 0.035376399755477905, 0.027143586426973343, 0.0870317667722702, 0.05812281742691994, 0.06705813109874725, 0.3147181272506714, 0.39039844274520874, 0.23394177854061127, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.14644725620746613, 0.5605929493904114, 0.11812092363834381, 0.5902084112167358, 0.021858595311641693, 0.10718227922916412, 0.007383488584309816, 0.019886687397956848, 0.06570647656917572, 0.10820640623569489, 0.1357717514038086, 0.025582531467080116, 0.077891044318676, 0.061965201050043106, 0.164744034409523, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.049012791365385056, 0.35138410329818726, 0.26388463377952576, 0.7301797866821289, 0.014552393928170204, 0.24720129370689392, 0.0041521950624883175, 0.07795857638120651, 0.014070906676352024, 0.04667593538761139, 0.1480453461408615, 0.010990227572619915, 0.20039354264736176, 0.17517414689064026, 0.0717916414141655, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.09980960935354233, 0.4834202826023102, 0.20237547159194946, 0.5161312222480774, 0.2011035680770874, 0.31254804134368896, 0.023049525916576385, 0.09284620732069016, 0.030714770779013634, 0.009841320104897022, 0.03625232353806496, 0.02249438874423504, 0.030981028452515602, 0.01249231118708849, 0.19809871912002563, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.2242409735918045, 0.5898000001907349, 0.2996082305908203, 0.6961580514907837, 0.3950251638889313, 0.824604332447052, 0.0551396869122982, 0.5436567068099976, 0.06683327257633209, 0.03568824753165245, 0.060814060270786285, 0.00592254800722003, 0.012778226286172867, 0.017990900203585625, 0.1082865446805954, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03427329286932945, 0.7018846869468689, 0.18350760638713837, 0.5559015274047852, 0.03810380771756172, 0.7226935029029846, 0.05184842646121979, 0.881024181842804, 0.06315085291862488, 0.03384441137313843, 0.014913397841155529, 0.002015632577240467, 0.008405282162129879, 0.0011906703002750874, 0.2768104076385498, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.022437993437051773, 0.7336767315864563, 0.2893984615802765, 0.7315550446510315, 0.021726222708821297, 0.3247562646865845, 0.05117126554250717, 0.7097986340522766, 0.03149837628006935, 0.017582548782229424, 0.017906883731484413, 0.004864181391894817, 0.0014982494758442044, 0.0005988480988889933, 0.17147301137447357, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.279982328414917, 0.427709698677063, 0.4798988997936249, 0.811837911605835, 0.5607104301452637, 0.3233453035354614, 0.03364620357751846, 0.48738226294517517, 0.20507316291332245, 0.2806957960128784, 0.20560167729854584, 0.021487781777977943, 0.0051806773990392685, 0.018182942643761635, 0.10378202050924301, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0], [0.15081651508808136, 0.5779510736465454, 0.21354816854000092, 0.8126901984214783, 0.041816346347332, 0.5376638174057007, 0.02729017473757267, 0.45972490310668945, 0.1708957701921463, 0.17148789763450623, 0.06268936395645142, 0.0045938147231936455, 0.0036332160234451294, 0.0009066996863111854, 0.10311751067638397, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.009540104307234287, 0.03889232128858566, 0.016071060672402382, 0.08366316556930542, 0.004574422258883715, 0.029401082545518875, 0.00834547821432352, 0.0893266350030899, 0.14732055366039276, 0.09065960347652435, 0.14173488318920135, 0.042114999145269394, 0.004022075328975916, 0.003513866104185581, 0.1347859650850296, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.17597882449626923, 0.03865775838494301, 0.04927876219153404, 0.19269852340221405, 0.07631995528936386, 0.03202155977487564, 0.04315444082021713, 0.0381813645362854, 0.14437337219715118, 0.14268529415130615, 0.12548406422138214, 0.22065725922584534, 0.007455701474100351, 0.012540786527097225, 0.13194040954113007, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12168548256158829, 0.12690430879592896, 0.03319493681192398, 0.044549524784088135, 0.022643521428108215, 0.12293753027915955, 0.012858373112976551, 0.056580886244773865, 0.0409478023648262, 0.5390252470970154, 0.04499629884958267, 0.010665545240044594, 0.0012580851325765252, 0.0006077282596379519, 0.16003872454166412, 0.13124778866767883, 0.015335792675614357, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.004976227879524231, 0.0016218257369473577, 0.10218203067779541, 0.005807417444884777, 0.025330372154712677, 0.00805770605802536, 0.0010953968157991767, 0.007808555383235216, 0.03332183510065079, 0.01014297641813755, 0.0378553569316864, 0.0012688467977568507, 0.0070253219455480576, 0.006525768432766199, 0.1611432433128357, 0.19323189556598663, 0.005229663103818893, 0.005805561784654856, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.018298039212822914, 0.043392445892095566, 0.026758581399917603, 0.06685060262680054, 0.007846164517104626, 0.0070086256600916386, 0.0011090404586866498, 0.0016357558779418468, 0.015295942313969135, 0.022091375663876534, 0.08676162362098694, 0.0013220091350376606, 0.0007799563463777304, 0.0005145008908584714, 0.5814905166625977, 0.06695510447025299, 0.08997365087270737, 0.32878753542900085, 0.35321861505508423, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.16791731119155884, 0.01838838867843151, 0.03170344606041908, 0.04746389389038086, 0.024931352585554123, 0.002624210435897112, 0.3320338726043701, 0.32248422503471375, 0.021048149093985558, 0.02857070416212082, 0.11922428011894226, 4.079664358869195e-05, 0.0002566495386417955, 0.0005197013379074633, 0.1538068950176239, 0.1452476531267166, 0.07996584475040436, 0.2002653181552887, 0.13149262964725494, 0.005022347904741764, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03376027196645737, 0.001082546659745276, 0.003266592975705862, 0.006257645785808563, 0.023632841184735298, 0.00021245618700049818, 0.033721838146448135, 
[Large numeric dump omitted: nested matrices of probabilities (values in [0, 1] with 0.0/NaN padding for positions past the sequence length), apparently per-layer, per-head attention weights serialized in a notebook output cell.]
0.036994293332099915, 0.07189996540546417, 0.04466360807418823, 0.12661096453666687, 0.2742233872413635, 0.042464204132556915, 0.2022491842508316, 0.0666579008102417, 0.032761361449956894, 0.03407268971204758, 0.3113752603530884, 0.5905517935752869, 0.21839523315429688, 0.043745849281549454, 0.02789805829524994, 0.042396336793899536, 0.08724991232156754, 0.07408890873193741, 0.010044119320809841, 0.12108539044857025, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0012156351003795862, 0.0009695529006421566, 0.021633058786392212, 0.003243132960051298, 0.017804604023694992, 0.006560572423040867, 0.00960883591324091, 0.043045539408922195, 0.008467147126793861, 0.0006170565611682832, 0.0028031598776578903, 0.004630656447261572, 1.7895566998049617e-05, 0.00023196694382932037, 0.14134538173675537, 0.14857184886932373, 0.38842764496803284, 0.16100677847862244, 0.1839173436164856, 0.03719957172870636, 0.5251989364624023, 0.25831982493400574, 0.06345110386610031, 0.01966739259660244, 0.013820506632328033, 0.10135386884212494, 0.06285497546195984, 0.037499457597732544, 0.09235794097185135, 0.06518241763114929, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.3736850321292877, 0.29077818989753723, 0.43184730410575867, 0.4823248088359833, 0.7379603385925293, 0.5093098282814026, 0.5006043910980225, 0.3135696351528168, 0.5183887481689453, 0.13794882595539093, 0.04961319640278816, 0.12779268622398376, 0.1589212864637375, 0.22346213459968567, 0.1422436237335205, 0.15810954570770264, 0.08897967636585236, 0.2754043936729431, 0.11542505025863647, 0.7166418433189392, 0.6856120824813843, 0.15602687001228333, 0.03588242083787918, 0.10233978182077408, 0.06907100230455399, 0.13906386494636536, 0.06064911186695099, 0.02474391460418701, 0.09316151589155197, 0.5409220457077026, 0.18577302992343903, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15325459837913513, 0.1614270806312561, 0.4186149537563324, 0.16462315618991852, 0.44647181034088135, 0.7114150524139404, 0.12785741686820984, 0.04132780805230141, 0.047578196972608566, 0.12349404394626617, 0.3133608400821686, 0.35326144099235535, 0.30924320220947266, 0.31196898221969604, 0.028064150363206863, 0.07972963899374008, 0.06995329260826111, 0.2565014958381653, 0.11985079944133759, 0.5429201126098633, 0.3072132468223572, 0.04467121511697769, 0.06233014911413193, 0.06391221284866333, 0.06306523084640503, 0.04008801653981209, 0.16940940916538239, 0.21208623051643372, 0.3237960636615753, 0.4987465739250183, 0.14530567824840546, 0.42085787653923035, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06399086862802505, 0.06306004524230957, 0.1948489397764206, 0.12845031917095184, 0.26295408606529236, 0.38098499178886414, 0.0839061513543129, 0.02110268920660019, 0.07144157588481903, 0.01679118163883686, 0.14834797382354736, 0.479995995759964, 0.24741992354393005, 0.2288939356803894, 0.04729384183883667, 0.057688161730766296, 0.05957844480872154, 0.09227755665779114, 0.06308872997760773, 0.6051628589630127, 0.41719216108322144, 0.06513097882270813, 0.11441777646541595, 0.2576654255390167, 0.039566945284605026, 0.04989808052778244, 0.41204503178596497, 0.6269510388374329, 0.0653882622718811, 0.2309982180595398, 0.05030554160475731, 0.12162061780691147, 0.2016562819480896, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.041305530816316605, 0.00217662681825459, 0.29091107845306396, 0.12698692083358765, 0.3031243085861206, 0.1103614866733551, 0.14891935884952545, 0.018863126635551453, 0.033797744661569595, 0.008303376846015453, 0.009713392704725266, 0.31765925884246826, 
0.4755025804042816, 0.4005468487739563, 0.10761724412441254, 0.08513950556516647, 0.05776134505867958, 0.44855204224586487, 0.15441171824932098, 0.37962910532951355, 0.43142464756965637, 0.21386101841926575, 0.07478547096252441, 0.22071515023708344, 0.1727379858493805, 0.06471506506204605, 0.1414414495229721, 0.20356127619743347, 0.23849359154701233, 0.28116941452026367, 0.22387196123600006, 0.24124523997306824, 0.10411572456359863, 0.14086224138736725, NaN, NaN, NaN, NaN, NaN, NaN], [0.4954506754875183, 0.04642331227660179, 0.603453516960144, 0.26468321681022644, 0.3210473358631134, 0.15078485012054443, 0.027168329805135727, 0.004181328695267439, 0.10826757550239563, 0.10845811665058136, 0.053085505962371826, 0.20335085690021515, 0.12072784453630447, 0.17107200622558594, 0.059424202889204025, 0.09857918322086334, 0.08268877118825912, 0.17155912518501282, 0.08326277136802673, 0.3910389840602875, 0.23102693259716034, 0.0706368237733841, 0.04062340036034584, 0.34264665842056274, 0.40400993824005127, 0.14310938119888306, 0.07597656548023224, 0.059025220572948456, 0.46083009243011475, 0.6441643834114075, 0.8002472519874573, 0.34466618299484253, 0.10859531164169312, 0.04317509010434151, 0.042760394513607025, NaN, NaN, NaN, NaN, NaN], [0.21408557891845703, 0.03960772231221199, 0.43507251143455505, 0.10961537808179855, 0.42240580916404724, 0.06637464463710785, 0.08428787440061569, 0.03856734186410904, 0.0027873425278812647, 0.012926235795021057, 0.019708000123500824, 0.017574653029441833, 0.10679914057254791, 0.20499441027641296, 0.14648839831352234, 0.07982634007930756, 0.027687683701515198, 0.01305405143648386, 0.01568622700870037, 0.15395750105381012, 0.36470726132392883, 0.09429053217172623, 0.02618592418730259, 0.00988653302192688, 0.03718657046556473, 0.057223062962293625, 0.036843542009592056, 0.008861655369400978, 0.039983998984098434, 0.5628355145454407, 0.5858935713768005, 0.11540589481592178, 0.07112369686365128, 0.022479010745882988, 0.0049066911451518536, 0.07443748414516449, NaN, NaN, NaN, NaN], [0.002137779025360942, 0.0005492505733855069, 0.03787382319569588, 0.004300523083657026, 0.03090864233672619, 0.003432363970205188, 0.010591491125524044, 0.028211969882249832, 0.003533262060955167, 0.0003883022291120142, 0.0014010752784088254, 0.0010855919681489468, 8.133743904181756e-06, 7.628504681633785e-05, 0.13786831498146057, 0.13230623304843903, 0.39635705947875977, 0.12619565427303314, 0.23844560980796814, 0.04749276116490364, 0.5552228093147278, 0.304650217294693, 0.16151569783687592, 0.05923860892653465, 0.03940735384821892, 0.37161606550216675, 0.13852664828300476, 0.1098584458231926, 0.421970933675766, 0.059641290456056595, 0.35413044691085815, 0.2336989790201187, 0.21869167685508728, 0.04408164322376251, 0.03093402087688446, 0.08392708003520966, 0.038801465183496475, NaN, NaN, NaN], [0.39364972710609436, 0.15414100885391235, 0.5289453864097595, 0.2158767729997635, 0.8369554877281189, 0.5879349708557129, 0.29191306233406067, 0.1240038275718689, 0.0375535674393177, 0.006134674418717623, 0.003127586329355836, 0.02892274223268032, 0.023530103266239166, 0.026029296219348907, 0.16074688732624054, 0.06938444077968597, 0.08034616708755493, 0.1555827558040619, 0.07347460091114044, 0.4763748347759247, 0.40589335560798645, 0.07265187799930573, 0.022002995014190674, 0.0527057945728302, 0.07314148545265198, 0.11090734601020813, 0.03504399210214615, 0.0172868762165308, 0.14030121266841888, 0.3467526137828827, 0.21038202941417694, 0.6312639117240906, 0.1208876520395279, 0.020520374178886414, 
0.014591614715754986, 0.03736459091305733, 0.22129306197166443, 0.05682671070098877, NaN, NaN], [0.2684386968612671, 0.29252222180366516, 0.6921796798706055, 0.1771971732378006, 0.6445736885070801, 0.7333542704582214, 0.14767038822174072, 0.04686985909938812, 0.030383678153157234, 0.06000908464193344, 0.1879548877477646, 0.5258318781852722, 0.3533342778682709, 0.3370157778263092, 0.05586722865700722, 0.08218587934970856, 0.08353152126073837, 0.244074746966362, 0.15340235829353333, 0.5709766745567322, 0.4268343448638916, 0.06391507387161255, 0.13458560407161713, 0.14046461880207062, 0.13024689257144928, 0.043825987726449966, 0.1802380084991455, 0.2593124508857727, 0.4235299825668335, 0.23401854932308197, 0.23376718163490295, 0.4458163380622864, 0.1644086241722107, 0.22351105511188507, 0.25077733397483826, 0.28149890899658203, 0.3320602774620056, 0.05098887160420418, 0.4388013482093811, NaN], [0.0015460141003131866, 0.010688474401831627, 0.09971211850643158, 0.017146917060017586, 0.1899741291999817, 0.03437719866633415, 0.022833971306681633, 0.015900788828730583, 0.05731913447380066, 0.0008445536368526518, 0.0073861475102603436, 0.06343144923448563, 0.11084617674350739, 0.11975067108869553, 0.13715405762195587, 0.13887250423431396, 0.1972966492176056, 0.3352757692337036, 0.30585116147994995, 0.6380553841590881, 0.5158089995384216, 0.3850407004356384, 0.3912012279033661, 0.2877788245677948, 0.30187875032424927, 0.20025724172592163, 0.34020906686782837, 0.47167572379112244, 0.3815076947212219, 0.5385518074035645, 0.20663535594940186, 0.37741178274154663, 0.29376763105392456, 0.3577961027622223, 0.21765607595443726, 0.14290691912174225, 0.3544510304927826, 0.07646653801202774, 0.1391337811946869, 0.019570577889680862]], [[0.010500228963792324, 0.7224081754684448, 0.030353030189871788, 0.00683749420568347, 0.007232841569930315, 0.018554184585809708, 0.0004432629211805761, 0.02719983458518982, 0.0006519495509564877, 0.0012597806053236127, 0.006804677192121744, 0.0011734187137335539, 0.003679303452372551, 0.010371293872594833, 0.019012004137039185, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0004097823693882674, 0.007568135391920805, 0.05432860180735588, 0.08570658415555954, 0.005480978172272444, 0.0009473124518990517, 0.000799189496319741, 0.0012391285272315145, 0.00044785221689380705, 0.0009745006100274622, 0.013956908136606216, 0.00011593959061428905, 0.004404959734529257, 0.0031790253706276417, 0.20507724583148956, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.022728245705366135, 0.0194535069167614, 0.024020839482545853, 0.023168254643678665, 0.45748311281204224, 0.5855799913406372, 0.21754446625709534, 0.1001717820763588, 0.0221620611846447, 0.0033511894289404154, 0.03508710116147995, 0.20201759040355682, 0.2973189353942871, 0.04947788640856743, 0.0494859553873539, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.010499863885343075, 0.004784405697137117, 0.0035181313287466764, 0.007238015066832304, 0.4155227243900299, 0.8333501219749451, 0.07475034892559052, 0.20445603132247925, 0.005854693241417408, 0.001852003508247435, 0.02841898612678051, 0.243921160697937, 0.10275343060493469, 0.13816815614700317, 0.07406751066446304, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 
[0.00768234534189105, 0.012151399627327919, 0.0006104251369833946, 0.0018971813842654228, 0.08389636874198914, 0.7291921973228455, 0.2573831081390381, 0.13359335064888, 0.0011000150116160512, 0.0005446228897199035, 0.036390628665685654, 0.06110000237822533, 0.1527252048254013, 0.14593005180358887, 0.05624886974692345, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0037335127126425505, 0.004452059045433998, 0.00018280810036230832, 0.016856878995895386, 0.0016014263965189457, 0.05306785926222801, 0.5318921208381653, 0.2889253497123718, 0.0004385874199215323, 0.007465890143066645, 0.0005691659171134233, 0.008836256340146065, 0.00793292187154293, 0.0033322598319500685, 0.1706118881702423, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.00023320072796195745, 0.0486629419028759, 0.0005405444535426795, 0.005952970590442419, 0.0009982762858271599, 0.004001363180577755, 0.009125707671046257, 0.6945337057113647, 0.006549985148012638, 0.007807720452547073, 0.003924727905541658, 0.004149672109633684, 0.003537258366122842, 0.001676861196756363, 0.11541670560836792, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0021667596884071827, 0.0005287157837301493, 0.009149480611085892, 0.024324318394064903, 0.0018866003956645727, 0.0003624066011980176, 0.0004668526817113161, 0.0064473398961126804, 0.0217228215187788, 0.0031395854894071817, 0.0052951243706047535, 0.004629157949239016, 0.003511544084176421, 0.0017145106103271246, 0.2705381214618683, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0036477160174399614, 0.018601393327116966, 0.00400471780449152, 0.016223786398768425, 0.015442389994859695, 0.030637366697192192, 0.04816145822405815, 0.009263478219509125, 0.08580432087182999, 0.07024423778057098, 0.17587034404277802, 0.2670482397079468, 0.10741393268108368, 0.11723090708255768, 0.197556272149086, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0067135002464056015, 0.005400336813181639, 0.002429268090054393, 0.0005210567032918334, 0.0009090648964047432, 0.056922394782304764, 0.006305574905127287, 0.02051912061870098, 0.009087055921554565, 0.0029723523184657097, 0.5903128385543823, 0.4623943269252777, 0.5148944854736328, 0.10147220641374588, 0.10177940130233765, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.016283290460705757, 0.004236595239490271, 0.00024049253261182457, 0.00013081195356789976, 0.004825976211577654, 0.03370611369609833, 0.030076656490564346, 0.006495397537946701, 0.015585500746965408, 0.0006116450531408191, 0.009124655276536942, 0.7220618724822998, 0.5160555839538574, 0.16948190331459045, 0.04205150157213211, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.04056651145219803, 0.05449386313557625, 0.007923644036054611, 0.00034379694261588156, 0.0072999089024960995, 0.005707062315195799, 0.018278487026691437, 0.00924981851130724, 0.0004191468469798565, 0.0015566512010991573, 0.0019580996595323086, 0.06517467647790909, 0.4938390851020813, 0.1360015720129013, 0.14540629088878632, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.02595147117972374, 0.0358305424451828, 0.021912503987550735, 0.01559682097285986, 0.0029425774700939655, 0.008820675313472748, 0.259022980928421, 0.24083182215690613, 0.0008326273527927697, 0.009937180206179619, 0.008380424231290817, 0.0008840225636959076, 0.11912944912910461, 0.5976794362068176, 0.17433230578899384, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.024576334282755852, 0.01131413970142603, 0.0036256120074540377, 0.007047882303595543, 0.015460383147001266, 0.007877636700868607, 0.035456594079732895, 0.017273712903261185, 0.0020541276317089796, 0.005268692504614592, 0.003138576401397586, 0.0058868261985480785, 0.09279357641935349, 0.45485755801200867, 0.2460370808839798, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.02016485668718815, 0.03839857131242752, 0.0345035195350647, 0.005700604524463415, 0.03111962042748928, 0.03698137030005455, 0.056010663509368896, 0.043163470923900604, 0.004449993837624788, 0.000997284660115838, 0.006035848520696163, 0.0027079761493951082, 0.009604639373719692, 0.02099894918501377, 0.13394789397716522, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.021257108077406883, 0.04756314679980278, 0.05559564009308815, 0.030912479385733604, 0.2625647187232971, 0.138688862323761, 0.027820995077490807, 0.05787678435444832, 0.3002224862575531, 0.018701573833823204, 0.027547171339392662, 0.19844435155391693, 0.1917300671339035, 0.07151354849338531, 0.16648255288600922, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.4235764741897583, 0.10086580365896225, 0.07221788167953491, 0.13654322922229767, 0.04923773929476738, 0.06516944617033005, 0.07642015814781189, 0.147566020488739, 0.013325832784175873, 0.07923475652933121, 0.03588176146149635, 0.02368854358792305, 0.12847480177879333, 0.04384613409638405, 0.18713882565498352, 0.10658828914165497, 0.44162610173225403, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.8895729184150696, 0.7431688904762268, 0.3041851818561554, 0.5492796897888184, 0.7013789415359497, 0.2035668045282364, 0.4541507959365845, 0.17740322649478912, 0.37418368458747864, 0.7257221937179565, 0.3302299678325653, 0.32646968960762024, 0.4535413682460785, 0.2710181474685669, 0.06444819271564484, 0.14346696436405182, 0.1105659008026123, 0.04705679044127464, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.18918083608150482, 0.07354198396205902, 0.03709281235933304, 0.039312511682510376, 0.2119109183549881, 0.32255253195762634, 0.06547961384057999, 0.022612132132053375, 0.0069438498467206955, 0.04682554677128792, 0.04775600507855415, 0.10260774195194244, 0.060122229158878326, 0.07651683688163757, 0.11037445813417435, 0.14569434523582458, 0.006359750870615244, 0.06321832537651062, 0.009962446056306362, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05778415873646736, 0.1888784021139145, 0.12087801843881607, 0.08340981602668762, 0.2725185453891754, 0.956253707408905, 0.6455949544906616, 0.6532288789749146, 0.3585406243801117, 0.18532338738441467, 
0.18782632052898407, 0.09142936766147614, 0.8097347617149353, 0.3558001220226288, 0.037162330001592636, 0.14614860713481903, 0.0770370289683342, 0.14572308957576752, 0.11918944120407104, 0.003047030884772539, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04896414652466774, 0.25620371103286743, 0.11985385417938232, 0.0157163105905056, 0.14219185709953308, 0.22957918047904968, 0.36173656582832336, 0.07001917064189911, 0.3676673173904419, 0.12105175852775574, 0.22853095829486847, 0.07480601221323013, 0.5630075335502625, 0.8219463229179382, 0.12425509095191956, 0.16211360692977905, 0.1199408695101738, 0.008137544617056847, 0.026895001530647278, 0.022997038438916206, 0.0004772362008225173, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04714362695813179, 0.01630709134042263, 0.04501143842935562, 0.03696214035153389, 0.036871057003736496, 0.14248797297477722, 0.08399422466754913, 0.03027486614882946, 0.0030259382911026478, 0.019033554941415787, 0.2224818617105484, 0.033125121146440506, 0.02079186774790287, 0.04913722351193428, 0.46250322461128235, 0.1276824176311493, 0.05415544658899307, 0.008876973763108253, 0.006533092353492975, 0.16286829113960266, 0.4191088378429413, 0.11241274327039719, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.033912286162376404, 0.0072718155570328236, 0.013269636780023575, 0.010754123330116272, 0.003932052757591009, 0.022333307191729546, 0.05135813727974892, 0.17082874476909637, 0.004249163903295994, 0.009168761782348156, 0.00692910747602582, 0.00042953240335918963, 0.008801857940852642, 0.008872170932590961, 0.02866899035871029, 0.1310766041278839, 0.09720440953969955, 0.005617472343146801, 0.018550021573901176, 0.07474999874830246, 0.03211009502410889, 0.01561786886304617, 0.5897646546363831, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.026226887479424477, 0.006219716742634773, 0.016528652980923653, 0.019500089809298515, 0.009756595827639103, 0.01771577261388302, 0.10877248644828796, 0.07924166321754456, 0.026382839307188988, 0.007807224057614803, 0.018975039944052696, 0.009491248056292534, 0.042680755257606506, 0.025040525943040848, 0.31068748235702515, 0.07142644375562668, 0.019657818600535393, 0.044225241988897324, 0.006672952324151993, 0.015112369321286678, 0.03715437650680542, 0.012035970576107502, 0.08684496581554413, 0.5578015446662903, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0181743074208498, 0.0022439020685851574, 0.027739310637116432, 0.07926302403211594, 0.007397042121738195, 0.01831221394240856, 0.057637136429548264, 0.025927647948265076, 0.03431807458400726, 0.03189869597554207, 0.20874466001987457, 0.006929311901330948, 0.08810199052095413, 0.09789149463176727, 0.25120988488197327, 0.06384367495775223, 0.009399783797562122, 0.06692944467067719, 0.013825987465679646, 0.01438650768250227, 0.11814092099666595, 0.025182364508509636, 0.04756484180688858, 0.4922580420970917, 0.010614832863211632, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0006848929915577173, 0.00015734595945104957, 0.0022563491947948933, 0.00281638465821743, 0.00390908308327198, 0.012311742641031742, 0.006667551584541798, 0.010898235253989697, 0.18826207518577576, 0.0010989188449457288, 0.003811799455434084, 0.0007082286756485701, 0.0025871950201690197, 0.0005297476891428232, 0.004719105549156666, 
0.21570175886154175, 0.004600263200700283, 0.0039491499774158, 0.0010213260538876057, 0.00511409854516387, 0.00780195789411664, 0.0035460677463561296, 0.06005942076444626, 0.002209970960393548, 0.0011990047059953213, 0.010184505954384804, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.008918036706745625, 0.01932302489876747, 0.1743663251399994, 0.04276113957166672, 0.17357498407363892, 0.05217360332608223, 0.01903947815299034, 0.006896412931382656, 0.02532179281115532, 0.019349897280335426, 0.14434273540973663, 0.2454780638217926, 0.06247624009847641, 0.03444024175405502, 0.2827233076095581, 0.15804870426654816, 0.10358668118715286, 0.018792977556586266, 0.0036350360605865717, 0.02226737141609192, 0.007843486964702606, 0.002713214373216033, 0.3624168336391449, 0.00397031893953681, 0.013842551037669182, 0.05391863361001015, 0.040338534861803055, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.014348846860229969, 0.006216275505721569, 0.06011093780398369, 0.05047134682536125, 0.013856974430382252, 0.08402124047279358, 0.0029483914840966463, 0.0018935499247163534, 0.004232283215969801, 0.022591279819607735, 0.34387707710266113, 0.06330335885286331, 0.20501238107681274, 0.1859048306941986, 0.0244001317769289, 0.0703621581196785, 0.01676221750676632, 0.03283774480223656, 0.005265639629215002, 0.016811830922961235, 0.008307189680635929, 0.0008217993890866637, 0.06662888079881668, 0.006444453727453947, 0.0015952866524457932, 0.03341786190867424, 0.28674793243408203, 0.09830270707607269, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.016000788658857346, 0.003648907644674182, 0.07618206739425659, 0.26581478118896484, 0.00828572828322649, 0.01491115428507328, 0.006984202191233635, 0.00572665361687541, 0.007784067187458277, 0.03336494415998459, 0.19996345043182373, 0.0026567107997834682, 0.14645317196846008, 0.1677580624818802, 0.0739188864827156, 0.00274313404224813, 0.01220498327165842, 0.001565106911584735, 0.014617281965911388, 0.0015394951915368438, 0.00014163085143081844, 0.0032730719540268183, 0.04253724217414856, 0.01929563470184803, 0.0011092370841652155, 0.008900013752281666, 0.14250728487968445, 0.44352540373802185, 0.012739983387291431, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.033913157880306244, 0.5720782279968262, 0.09820353239774704, 0.06329890340566635, 0.10058190673589706, 0.8026418685913086, 0.08380495011806488, 0.37448471784591675, 0.04885341227054596, 0.01422097533941269, 0.32552391290664673, 0.701602578163147, 0.9988673329353333, 0.9602208137512207, 0.015194611623883247, 0.12441921979188919, 0.09727630764245987, 0.031539320945739746, 0.0390433706343174, 0.004017204977571964, 0.003718326799571514, 0.06902258098125458, 0.21229486167430878, 0.1692674309015274, 0.507585346698761, 0.24224399030208588, 0.4713107943534851, 0.22175242006778717, 0.1071210727095604, 0.001354279462248087, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.01701497472822666, 0.004510161932557821, 0.04222021996974945, 0.131240576505661, 0.007172171492129564, 0.0009335885988548398, 0.0025300730485469103, 0.0012859954731538892, 0.013300590217113495, 0.05520036071538925, 0.2908037602901459, 0.0021335158962756395, 0.11976832151412964, 0.046004947274923325, 0.029495948925614357, 0.11131177842617035, 0.045754965394735336, 0.13187335431575775, 0.021390099078416824, 0.2008819729089737, 0.1753949522972107, 0.029810786247253418, 0.1191062182188034, 0.0330519825220108, 0.021209293976426125, 0.007793682627379894, 0.004569755867123604, 
0.21031485497951508, 0.08390634506940842, 0.11696453392505646, 0.2920413017272949, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0007848403765819967, 0.002563882153481245, 0.003471110016107559, 0.009534057229757309, 0.012083875946700573, 0.006908607203513384, 0.0028729254845529795, 0.0018324146512895823, 0.009593485854566097, 0.008395246230065823, 0.009609236381947994, 0.05064208433032036, 0.00595981115475297, 0.002902570180594921, 0.2071433663368225, 0.28942060470581055, 0.004874760750681162, 0.02575746178627014, 0.03629674017429352, 0.0339069589972496, 0.06067432835698128, 0.06949229538440704, 0.17600718140602112, 0.04042575880885124, 0.0021073101088404655, 0.002125136088579893, 0.0013297069817781448, 0.013164625503122807, 0.019647862762212753, 0.0625171884894371, 0.003036472015082836, 0.15673543512821198, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.008253121748566628, 0.01393465232104063, 0.03316362947225571, 0.045629892498254776, 0.015712177380919456, 0.15894818305969238, 0.02510240487754345, 0.013996893540024757, 0.6886083483695984, 0.014645315706729889, 0.04062162712216377, 0.02812274731695652, 0.10265076905488968, 0.10770027339458466, 0.07716524600982666, 0.29843398928642273, 0.006499151699244976, 0.002175502711907029, 0.00474061444401741, 0.012194045819342136, 0.024305779486894608, 0.05332900583744049, 0.20892387628555298, 0.06725459545850754, 0.0056669809855520725, 0.023831704631447792, 0.0038352743722498417, 0.008001168258488178, 0.00692057004198432, 0.006051996257156134, 0.0008782879449427128, 0.0244371946901083, 0.05294432491064072, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0017006727866828442, 0.008613905869424343, 0.08540165424346924, 0.014788517728447914, 0.11802737414836884, 0.058780014514923096, 0.008085138164460659, 0.003584004705771804, 0.06396479159593582, 0.006658769678324461, 0.02042919024825096, 0.3806440234184265, 0.01375669613480568, 0.01512871216982603, 0.1676391214132309, 0.19362471997737885, 0.05030333995819092, 0.012831996195018291, 0.0028119448106735945, 0.011659904383122921, 0.0070129260420799255, 0.002673238283023238, 0.1857692450284958, 0.0015845311572775245, 0.003893241984769702, 0.009055504575371742, 0.013083641417324543, 0.009338575415313244, 0.007860029116272926, 0.009482803754508495, 0.019751103594899178, 0.03845033049583435, 0.03947525471448898, 0.03009573556482792, NaN, NaN, NaN, NaN, NaN, NaN], [0.017164628952741623, 0.028738657012581825, 0.06823595613241196, 0.08604145050048828, 0.04855107143521309, 0.24198594689369202, 0.008688676171004772, 0.003311790293082595, 0.059665460139513016, 0.08214288204908371, 0.34741461277008057, 0.15404720604419708, 0.18822570145130157, 0.19501997530460358, 0.062469229102134705, 0.08181142061948776, 0.013090993277728558, 0.025600923225283623, 0.0045991819351911545, 0.007844633422791958, 0.0066622160375118256, 0.0006054755649529397, 0.01805841363966465, 0.0025927021633833647, 0.0006796378293074667, 0.012531430460512638, 0.18806973099708557, 0.04688132554292679, 0.005460845306515694, 0.053047653287649155, 0.013497358188033104, 0.040136244148015976, 0.022071214392781258, 0.31691932678222656, 0.07654344290494919, NaN, NaN, NaN, NaN, NaN], [0.04490135982632637, 0.02318926900625229, 0.15967297554016113, 0.36984479427337646, 0.027114713564515114, 0.1867561787366867, 0.04668368771672249, 0.02171866036951542, 0.05653616786003113, 0.08818016946315765, 0.14142879843711853, 0.002535451203584671, 0.06232175603508949, 0.12099058926105499, 0.16113655269145966, 0.003571689361706376, 0.007330529857426882, 0.0009176949388347566, 
0.011351491324603558, 0.0005700239562429488, 0.0001114286933443509, 0.0023790227714926004, 0.011217805556952953, 0.004490875173360109, 0.00038650527130812407, 0.0025467458181083202, 0.048559535294771194, 0.22723886370658875, 0.0019670024048537016, 0.0002542402071412653, 0.027445662766695023, 0.015111691318452358, 0.029036840423941612, 0.2144545316696167, 0.4208240211009979, 0.013829981908202171, NaN, NaN, NaN, NaN], [0.07898441702127457, 0.817236065864563, 0.29267793893814087, 0.16063392162322998, 0.31295838952064514, 0.9265751838684082, 0.1967003047466278, 0.5436303615570068, 0.2332589328289032, 0.04864489659667015, 0.5440958142280579, 0.8931991457939148, 0.9993566870689392, 0.9798612594604492, 0.03687797114253044, 0.11162849515676498, 0.06633912026882172, 0.017337389290332794, 0.030477523803710938, 0.0024834000505506992, 0.001867939718067646, 0.03932232782244682, 0.1628599613904953, 0.14192035794258118, 0.2944621741771698, 0.21811458468437195, 0.42557209730148315, 0.2638176381587982, 0.14630424976348877, 0.0005040403339080513, 0.32521945238113403, 0.2411627173423767, 0.28287336230278015, 0.40539565682411194, 0.1682160645723343, 0.08244442939758301, 0.001218001707457006, NaN, NaN, NaN], [0.051174335181713104, 0.009388554841279984, 0.15813162922859192, 0.3707107603549957, 0.02142486348748207, 0.01361497025936842, 0.01679075136780739, 0.00489152641966939, 0.08238242566585541, 0.07653495669364929, 0.14888693392276764, 0.003932347521185875, 0.1416105329990387, 0.05760091543197632, 0.13266737759113312, 0.20973265171051025, 0.07712213695049286, 0.20427735149860382, 0.025535617023706436, 0.4053865373134613, 0.41131824254989624, 0.030548784881830215, 0.060146916657686234, 0.012079673819243908, 0.01592317223548889, 0.0048461491242051125, 0.0021770852617919445, 0.09957096725702286, 0.1170588806271553, 0.13386258482933044, 0.16141492128372192, 0.004613581579178572, 0.015190798789262772, 0.003683852730318904, 0.1389266699552536, 0.07006954401731491, 0.1815212517976761, 0.17825333774089813, NaN, NaN], [0.00042274355655536056, 0.0019217034569010139, 0.0013128711143508554, 0.004135955590754747, 0.004101510625332594, 0.004091422073543072, 0.0013299065176397562, 0.0007323773461394012, 0.006002569571137428, 0.003528070170432329, 0.004258603788912296, 0.04385730251669884, 0.006557406857609749, 0.0025679266545921564, 0.1728060394525528, 0.3360293209552765, 0.0046190484426915646, 0.024437543004751205, 0.03736568242311478, 0.023848971351981163, 0.05927197262644768, 0.0542423352599144, 0.09209144860506058, 0.023972967639565468, 0.000766670098528266, 0.0006589474505744874, 0.0007115502958185971, 0.00637162895873189, 0.012912634760141373, 0.014624576084315777, 0.0019432539120316505, 0.05897590517997742, 0.0038116518408060074, 0.0016802565660327673, 0.011611220426857471, 0.025170182809233665, 0.04455949738621712, 0.0020357028115540743, 0.14134161174297333, NaN], [0.0034927180968225002, 0.014745223335921764, 0.025302981957793236, 0.04650698974728584, 0.0658985823392868, 0.10278132557868958, 0.009682145901024342, 0.010841106064617634, 0.1757735013961792, 0.03157021477818489, 0.006062814965844154, 0.2611170709133148, 0.3153221011161804, 0.08490109443664551, 0.13624651730060577, 0.187117338180542, 0.005916869733482599, 0.020901108160614967, 0.0559980571269989, 0.0324174202978611, 0.008547084406018257, 0.044511571526527405, 0.04880741238594055, 0.05289075896143913, 0.038245368748903275, 0.003611604683101177, 0.002279189880937338, 0.01790045015513897, 0.008863909170031548, 0.01127588003873825, 0.005861865822225809, 
0.17173975706100464, 0.009364882484078407, 0.005221609957516193, 0.012455414980649948, 0.007264893501996994, 0.016177698969841003, 0.008824422955513, 0.18642237782478333, 0.0006185321253724396]], [[0.11855445802211761, 0.018203705549240112, 0.014699782244861126, 0.005997231230139732, 0.012317956425249577, 0.005482070613652468, 0.020501872524619102, 0.04173066467046738, 0.028033137321472168, 0.007907108403742313, 0.13633504509925842, 0.11779958009719849, 0.02402079664170742, 0.08686818182468414, 0.19919154047966003, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.015789268538355827, 0.07802969217300415, 0.024552250280976295, 0.007203033193945885, 0.015197299420833588, 0.0086579704657197, 0.005928180180490017, 0.015956610441207886, 0.019966211169958115, 0.002508557867258787, 0.048071712255477905, 0.0452260747551918, 0.027286410331726074, 0.034357864409685135, 0.19209280610084534, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.7560696601867676, 0.09646204113960266, 0.24264514446258545, 0.03150765225291252, 0.15196740627288818, 0.027980739250779152, 0.025865402072668076, 0.037002913653850555, 0.02429634891450405, 0.014392002485692501, 0.11331582069396973, 0.2883520722389221, 0.24113057553768158, 0.5529852509498596, 0.13967400789260864, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.6593953371047974, 0.14735713601112366, 0.007992099039256573, 0.03938791900873184, 0.047611087560653687, 0.002478603972122073, 0.00756214139983058, 0.01120123453438282, 0.017771385610103607, 0.011085578240454197, 0.01766165718436241, 0.07185176759958267, 0.01590064913034439, 0.05699647217988968, 0.22524236142635345, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.8214750289916992, 0.5506035089492798, 0.04117008298635483, 0.00517136137932539, 0.5628769993782043, 0.013714980334043503, 0.018153639510273933, 0.019494647160172462, 0.02796507254242897, 0.003693098435178399, 0.052905939519405365, 0.024033749476075172, 0.017759546637535095, 0.154443621635437, 0.2181331366300583, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.47579920291900635, 0.4996025860309601, 0.02201933227479458, 0.032786499708890915, 0.003352785250172019, 0.402157723903656, 0.028392860665917397, 0.03425603359937668, 0.017302367836236954, 0.007774383760988712, 0.03628184646368027, 0.015436487272381783, 0.09682580828666687, 0.09163853526115417, 0.1807471215724945, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.6324970722198486, 0.5132108926773071, 0.14723047614097595, 0.10531618446111679, 0.14770705997943878, 0.01965152472257614, 0.16446776688098907, 0.023718399927020073, 0.014144167304039001, 0.003392518265172839, 0.03989372402429581, 0.048702552914619446, 0.05385157838463783, 0.06003360450267792, 0.2021118402481079, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.2804942727088928, 0.4447323679924011, 0.40719398856163025, 0.15280602872371674, 0.5485119223594666, 0.006256175693124533, 0.005905789323151112, 0.0894087627530098, 0.014159541577100754, 0.0037697115913033485, 0.08780182898044586, 
0.04568948596715927, 0.08344046771526337, 0.08309336006641388, 0.1791403889656067, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.38668709993362427, 0.3767029941082001, 0.5765653848648071, 0.14457443356513977, 0.830109715461731, 0.558448314666748, 0.2105703204870224, 0.015437009744346142, 0.0802588015794754, 0.0035789015237241983, 0.009509528055787086, 0.011719968169927597, 0.04601259157061577, 0.015442220494151115, 0.02989899180829525, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.42374563217163086, 0.4557475447654724, 0.5995064973831177, 0.22240440547466278, 0.8298278450965881, 0.26192477345466614, 0.5618261694908142, 0.2755923569202423, 0.03321446478366852, 0.014314521104097366, 0.030895033851265907, 0.0061126528307795525, 0.0033166268840432167, 0.0021476708352565765, 0.12580153346061707, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.4742293357849121, 0.32335561513900757, 0.5931060910224915, 0.0772920548915863, 0.3757626712322235, 0.211185023188591, 0.42018893361091614, 0.37329575419425964, 0.26276469230651855, 0.012583179399371147, 0.3317490220069885, 0.002885210793465376, 0.011435287073254585, 0.00757939275354147, 0.1435183733701706, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.21439705789089203, 0.17853425443172455, 0.32548797130584717, 0.06489395350217819, 0.64824378490448, 0.1159982681274414, 0.19616922736167908, 0.27417391538619995, 0.6047332286834717, 0.1810707151889801, 0.034782104194164276, 0.10310898721218109, 0.0316632017493248, 0.025309519842267036, 0.09833981841802597, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.19860051572322845, 0.10174965113401413, 0.08606765419244766, 0.053267233073711395, 0.11251617968082428, 0.2378872036933899, 0.16651752591133118, 0.1490997076034546, 0.4605393707752228, 0.18029887974262238, 0.1883857697248459, 0.007075145840644836, 0.25310245156288147, 0.08171047270298004, 0.15088772773742676, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.2976968586444855, 0.21286718547344208, 0.04716610535979271, 0.025928588584065437, 0.1317281424999237, 0.12927810847759247, 0.2939497232437134, 0.23276808857917786, 0.5986261367797852, 0.05386120826005936, 0.05668044835329056, 0.025143466889858246, 0.007965278811752796, 0.03647890314459801, 0.16275253891944885, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.34472423791885376, 0.33325105905532837, 0.5841152667999268, 0.8456752300262451, 0.4377557933330536, 0.4159393310546875, 0.33224907517433167, 0.1488359123468399, 0.2203720510005951, 0.7425854206085205, 0.7086009383201599, 0.5293036699295044, 0.2777566909790039, 0.22530661523342133, 0.09936152398586273, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.01888529770076275, 0.5547894835472107, 0.0062187607400119305, 0.02304725907742977, 0.007431741803884506, 0.05333258956670761, 0.13557927310466766, 0.09608769416809082, 0.011193820275366306, 0.006900292821228504, 0.007560353726148605, 0.018807610496878624, 
0.018169475719332695, 0.07717052102088928, 0.1439915895462036, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.045791856944561005, 0.14471176266670227, 0.057932548224925995, 0.15441685914993286, 0.011981116607785225, 0.030152589082717896, 0.13976308703422546, 0.003811573376879096, 0.010053272359073162, 0.1557283103466034, 0.05080341920256615, 0.00967743806540966, 0.003085661679506302, 0.003445286303758621, 0.08783376961946487, 0.12484697252511978, 0.1276315450668335, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.010936958715319633, 0.0031021125614643097, 0.009866965003311634, 0.09017129242420197, 0.02775183692574501, 0.0016267865430563688, 0.01958146132528782, 0.003049993421882391, 0.009465858340263367, 0.022049162536859512, 0.013875926844775677, 0.002902107546105981, 0.0008567434852011502, 0.0034160439390689135, 0.13799139857292175, 0.15841424465179443, 0.03031034581363201, 0.02654799446463585, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10994840413331985, 0.15032780170440674, 0.0035718681756407022, 0.1491042822599411, 0.020450405776500702, 0.013510379940271378, 0.47067153453826904, 0.6447877883911133, 0.18023402988910675, 0.1876010298728943, 0.011866661719977856, 0.006677938625216484, 0.0005242988117970526, 0.004238110035657883, 0.29615819454193115, 0.13769303262233734, 0.09575259685516357, 0.025977646932005882, 0.052591271698474884, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06992093473672867, 0.2791251242160797, 0.006900451611727476, 0.053067900240421295, 0.010168666951358318, 0.0023874202743172646, 0.05137968435883522, 0.06462283432483673, 0.11192043125629425, 0.10690896213054657, 0.009735661558806896, 0.04335656389594078, 0.0031411510426551104, 0.011707558296620846, 0.14929862320423126, 0.15085087716579437, 0.15096567571163177, 0.09222358465194702, 0.028469638898968697, 0.0012114758137613535, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.24040630459785461, 0.43853774666786194, 0.0175826046615839, 0.06282828748226166, 0.03055599145591259, 0.20223812758922577, 0.5439046025276184, 0.8139520287513733, 0.30283859372138977, 0.4911571145057678, 0.09772597998380661, 0.1337594985961914, 0.08667796850204468, 0.03606351464986801, 0.12256386131048203, 0.16431185603141785, 0.07204771786928177, 0.05053501948714256, 0.012478960677981377, 0.05114812031388283, 0.00039714027661830187, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03999294713139534, 0.1864590346813202, 0.003897173795849085, 0.04184543341398239, 0.0012414547381922603, 0.025941016152501106, 0.05348599702119827, 0.5434274673461914, 0.012460692785680294, 0.31306707859039307, 0.06930337846279144, 0.0021947044879198074, 0.023592861369252205, 0.04260588437318802, 0.01969532109797001, 0.1666734665632248, 0.06891340762376785, 0.013632094487547874, 0.018171580508351326, 0.002599227475002408, 0.0009873181115835905, 0.0006481229793280363, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.053744781762361526, 0.006899113766849041, 0.0563664473593235, 0.12695427238941193, 0.012777185067534447, 0.08455551415681839, 0.11441048979759216, 0.13062608242034912, 0.19371363520622253, 0.6254263520240784, 
0.24294114112854004, 0.020724456757307053, 0.019838949665427208, 0.022365091368556023, 0.1131007969379425, 0.14423918724060059, 0.12251336872577667, 0.10176724940538406, 0.33380815386772156, 0.1583750993013382, 0.023372141644358635, 0.026839546859264374, 0.06730155646800995, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11661048978567123, 0.35882315039634705, 0.03118491731584072, 0.06881216168403625, 0.014698721468448639, 0.0038598491810262203, 0.1485612690448761, 0.39066970348358154, 0.07792866975069046, 0.22571811079978943, 0.040231697261333466, 0.265895277261734, 0.2000368982553482, 0.1125464141368866, 0.24931347370147705, 0.2790219187736511, 0.15446610748767853, 0.015893638134002686, 0.03619629144668579, 0.003051391802728176, 0.00038247412885539234, 0.0007123185787349939, 0.010222047567367554, 0.0010863485513255, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03291217237710953, 0.23853188753128052, 0.04644821211695671, 0.031600918620824814, 0.045192934572696686, 0.0019951597787439823, 0.11113008856773376, 0.36339887976646423, 0.010439107194542885, 0.20188210904598236, 0.027288423851132393, 0.21054767072200775, 0.04143378138542175, 0.0853629931807518, 0.2336580902338028, 0.26870372891426086, 0.10405707359313965, 0.00916238222271204, 0.058617573231458664, 0.0049601029604673386, 0.0005682760966010392, 0.004407011903822422, 0.03309918940067291, 0.0036104319151490927, 0.12174393236637115, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07334253191947937, 0.14656193554401398, 0.004660916980355978, 0.03353964164853096, 0.00998624786734581, 0.00235390174202621, 0.04832129552960396, 0.031250230967998505, 0.0017524310387670994, 0.10710166394710541, 0.04863408952951431, 0.11276239901781082, 0.00949337612837553, 0.024303043261170387, 0.5020502805709839, 0.05985519662499428, 0.14893494546413422, 0.09544339030981064, 0.18974637985229492, 0.1120084673166275, 0.28269606828689575, 0.4275827407836914, 0.12184610962867737, 0.40095797181129456, 0.08120625466108322, 0.27448615431785583, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15921767055988312, 0.18694822490215302, 0.011401425115764141, 0.15920288860797882, 0.0017978762043640018, 0.00600996520370245, 0.1401643455028534, 0.08585444837808609, 0.05989503860473633, 0.2726706564426422, 0.041456613689661026, 0.0019109381828457117, 0.0026012342423200607, 0.00675933575257659, 0.05683350935578346, 0.06809581816196442, 0.09586934000253677, 0.10229554027318954, 0.057183876633644104, 0.25635847449302673, 0.19582371413707733, 0.4237477481365204, 0.37648820877075195, 0.48733898997306824, 0.20777222514152527, 0.24944597482681274, 0.45371755957603455, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.6248686909675598, 0.8166397213935852, 0.05456394702196121, 0.3034517765045166, 0.0032548136077821255, 0.03656908869743347, 0.3933179974555969, 0.635881781578064, 0.4090532660484314, 0.6309216618537903, 0.09238837659358978, 0.01225167978554964, 0.0038302247412502766, 0.05015851929783821, 0.4316881597042084, 0.05513762682676315, 0.16880887746810913, 0.02300925739109516, 0.03029457852244377, 0.032050080597400665, 0.0745139941573143, 0.08332593739032745, 0.5048279166221619, 0.051856089383363724, 0.16889351606369019, 0.22218117117881775, 0.29087209701538086, 0.03443009778857231, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.6506885886192322, 0.26984432339668274, 0.19192098081111908, 
[… raw serialized numeric data continues here: large nested arrays of floating-point values padded with 0.0/NaN (apparently saved notebook cell output, e.g. per-head weight matrices); omitted …]
0.05304509028792381, 0.03063105419278145, 0.007391192018985748, 0.001299944007769227, 0.0022179351653903723, 0.0017378581687808037, NaN, NaN, NaN, NaN, NaN], [0.02612869068980217, 0.003477374091744423, 0.007765303365886211, 0.0023155075032263994, 0.018893033266067505, 0.022398637607693672, 0.09549611806869507, 0.004012360703200102, 0.0013466936070472002, 0.0021441734861582518, 0.0004924506065435708, 0.006835760548710823, 0.011635211296379566, 0.023846328258514404, 0.22376547753810883, 0.3587647080421448, 0.13152657449245453, 0.3170546591281891, 0.1872878074645996, 0.17338471114635468, 0.16099165380001068, 0.050314128398895264, 0.07316549867391586, 0.1506616473197937, 0.027928102761507034, 0.013985591009259224, 0.03077181987464428, 0.00928373821079731, 0.01458327379077673, 0.34401679039001465, 0.1675042062997818, 0.008024912327528, 0.00340651860460639, 0.001158604514785111, 0.0004595925274770707, 0.0022153020836412907, NaN, NaN, NaN, NaN], [0.08347997069358826, 0.014491320587694645, 0.015744350850582123, 0.0043899440206587315, 0.05038629099726677, 0.008546282537281513, 0.06458569318056107, 0.03869106248021126, 0.0615551732480526, 0.0002168803766835481, 0.0014501431724056602, 0.00013847390073351562, 1.5032101146061905e-05, 0.0007368824444711208, 0.13783538341522217, 0.18021628260612488, 0.21554027497768402, 0.22428971529006958, 0.28362634778022766, 0.0019759181886911392, 0.19364571571350098, 0.3129161596298218, 0.05571373924612999, 0.43670228123664856, 0.5364305973052979, 0.045233964920043945, 0.02291695959866047, 0.15668357908725739, 0.03788933902978897, 0.0009749932214617729, 0.15011590719223022, 0.009233620017766953, 0.023490505293011665, 0.0018092861864715815, 0.01433361042290926, 0.002351803006604314, 0.00025271173217333853, NaN, NaN, NaN], [0.072405144572258, 0.036094967275857925, 0.060353852808475494, 0.1382489949464798, 0.03810955956578255, 0.1803218573331833, 0.3716851472854614, 0.04992733895778656, 0.002898369450122118, 0.0008571037324145436, 0.00035707451752386987, 0.02692999318242073, 0.003073085332289338, 0.009645520709455013, 0.17640869319438934, 0.18984580039978027, 0.30305740237236023, 0.22004783153533936, 0.5488721132278442, 0.023633448407053947, 0.10360189527273178, 0.8517335653305054, 0.6748489141464233, 0.77315753698349, 0.4876308739185333, 0.2048063576221466, 0.14540305733680725, 0.08473058044910431, 0.012403973378241062, 0.06795734912157059, 0.17164894938468933, 0.18992502987384796, 0.12247806042432785, 0.011528578586876392, 0.009636401198804379, 0.0008312705904245377, 0.013430905528366566, 0.011612125672399998, NaN, NaN], [0.30767515301704407, 0.17313888669013977, 0.17682777345180511, 0.3453424274921417, 0.2732711434364319, 0.18888972699642181, 0.2821650207042694, 0.011036374606192112, 0.013345124199986458, 0.030917862430214882, 0.037141598761081696, 0.14430613815784454, 0.09504004567861557, 0.16429893672466278, 0.0962204858660698, 0.3384567201137543, 0.062264904379844666, 0.014819102361798286, 0.14853152632713318, 0.0019540644716471434, 0.003596463706344366, 0.001872691442258656, 0.11878995597362518, 0.02639206312596798, 0.009769541211426258, 0.011811794713139534, 0.006684192456305027, 0.045877717435359955, 0.019279729574918747, 0.005480214022099972, 0.003932234365493059, 0.006437724456191063, 0.0240105502307415, 0.0011211916571483016, 0.004233745392411947, 0.001469226786866784, 0.0013713098596781492, 0.00014342667418532073, 0.0008160521974787116, NaN], [0.038221023976802826, 0.4632723033428192, 0.022520000115036964, 0.005303966347128153, 0.07163825631141663, 
0.030774233862757683, 0.006099082063883543, 0.008936556056141853, 0.02098681591451168, 0.004558844491839409, 0.0029896388296037912, 0.018592750653624535, 0.20478543639183044, 0.08578886091709137, 0.1358346790075302, 0.1837155818939209, 0.5941455364227295, 0.2251758873462677, 0.3662757873535156, 0.039659783244132996, 0.3226933479309082, 0.014135366305708885, 0.028798755258321762, 0.10863638669252396, 0.34925851225852966, 0.03930900990962982, 0.08864527195692062, 0.10118203610181808, 0.05801505595445633, 0.11320658773183823, 0.05595846846699715, 0.0026757779996842146, 0.007132661063224077, 0.010286321863532066, 0.015962811186909676, 0.004528969060629606, 0.01888921484351158, 0.004036444239318371, 0.00027040645363740623, 0.0002387895801803097]], [[0.278582364320755, 0.012074317783117294, 0.4035726487636566, 0.05818924307823181, 0.5308449864387512, 0.7759386301040649, 0.6032847166061401, 0.04120228812098503, 0.6623223423957825, 0.4034832715988159, 0.2541539669036865, 0.023309720680117607, 0.054716046899557114, 0.3570294678211212, 0.004749305546283722, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03977029398083687, 0.025161603465676308, 0.4579423666000366, 0.3708552420139313, 0.767479419708252, 0.5835962295532227, 0.5609359741210938, 0.14304085075855255, 0.8166816234588623, 0.848468542098999, 0.5771627426147461, 0.07112090289592743, 0.12416274100542068, 0.618628740310669, 0.06885465234518051, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.004083612468093634, 0.0006101519684307277, 0.12011494487524033, 0.04229450225830078, 0.17203551530838013, 0.013333754613995552, 0.01874622330069542, 0.021773431450128555, 0.8914079666137695, 0.25239333510398865, 0.2674473226070404, 0.0986163467168808, 0.10968483239412308, 0.05420238524675369, 0.020816486328840256, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.00974054355174303, 0.009372939355671406, 0.016473596915602684, 0.12944141030311584, 0.06805374473333359, 0.019993484020233154, 0.038472987711429596, 0.21791628003120422, 0.8550615310668945, 0.2646826505661011, 0.7350810766220093, 0.17277619242668152, 0.36265626549720764, 0.3741258382797241, 0.06228891760110855, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0007183643756434321, 0.0016902177594602108, 0.0015671673463657498, 0.000663107552099973, 0.015286565758287907, 0.000776923552621156, 0.007700319401919842, 0.11482121050357819, 0.7658083438873291, 0.5443719625473022, 0.22170989215373993, 0.027013972401618958, 0.025342080742120743, 0.049981117248535156, 0.0074298488907516, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.011776593513786793, 0.00668947771191597, 0.05204532667994499, 0.026732588186860085, 0.007738037500530481, 0.19347773492336273, 0.08661007881164551, 0.02065080776810646, 0.8265263438224792, 0.77967369556427, 0.8155033588409424, 0.7568296194076538, 0.6889008283615112, 0.7797287106513977, 0.04647013917565346, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03701920434832573, 0.011276619508862495, 0.026248518377542496, 0.01771446317434311, 0.046063318848609924, 0.020064320415258408, 0.23005641996860504, 
0.032302577048540115, 0.6365551948547363, 0.6746889352798462, 0.6497765183448792, 0.5260909199714661, 0.6955898404121399, 0.8770567178726196, 0.04424796253442764, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.3583561182022095, 0.034818924963474274, 0.1010005921125412, 0.08171684294939041, 0.0902533084154129, 0.0273053590208292, 0.029195906594395638, 0.10516665875911713, 0.5163984894752502, 0.7107389569282532, 0.5390304327011108, 0.6552954316139221, 0.648922324180603, 0.8148984909057617, 0.13771982491016388, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.04790134355425835, 0.016352321952581406, 0.004838719964027405, 0.039540428668260574, 0.004614146891981363, 0.10033231228590012, 0.05411757901310921, 0.012187371961772442, 0.25466611981391907, 0.4822390675544739, 0.22996564209461212, 0.2013523131608963, 0.3018202781677246, 0.325538694858551, 0.10763657093048096, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.18817435204982758, 0.007200991734862328, 0.0915139690041542, 0.00800582580268383, 0.007660675328224897, 0.27090781927108765, 0.08786749839782715, 0.014442713931202888, 0.017244037240743637, 0.8212726712226868, 0.22018176317214966, 0.05063365772366524, 0.16457810997962952, 0.059498634189367294, 0.11578860878944397, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1423795521259308, 0.008703344501554966, 0.2208349108695984, 0.02527845837175846, 0.027401143684983253, 0.09980836510658264, 0.024800043553113937, 0.009310302324593067, 0.11915526539087296, 0.048824433237314224, 0.23738479614257812, 0.04641610383987427, 0.11649724096059799, 0.03864651918411255, 0.200869619846344, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.19247660040855408, 0.028833042830228806, 0.1872357279062271, 0.03232081979513168, 0.031028537079691887, 0.3644941747188568, 0.11239293217658997, 0.0803447812795639, 0.13423573970794678, 0.07468846440315247, 0.009079186245799065, 0.19545331597328186, 0.09625646471977234, 0.07526607811450958, 0.1802312582731247, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1263553649187088, 0.009648445062339306, 0.47829046845436096, 0.22347994148731232, 0.2749265432357788, 0.23197446763515472, 0.05249631777405739, 0.01617230661213398, 0.3326357305049896, 0.1497221142053604, 0.04782721772789955, 0.011572148650884628, 0.1354474574327469, 0.0791783407330513, 0.15636207163333893, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.166306734085083, 0.04561271890997887, 0.48400574922561646, 0.31743937730789185, 0.4171416163444519, 0.1806352734565735, 0.04328177124261856, 0.022486848756670952, 0.1779668778181076, 0.03957689553499222, 0.009708160534501076, 0.01422630064189434, 0.013467496261000633, 0.06257133930921555, 0.22838094830513, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.39438390731811523, 0.20185884833335876, 0.19486168026924133, 0.053202297538518906, 0.29429352283477783, 0.31667405366897583, 0.3313867747783661, 0.37864530086517334, 
0.4971301257610321, 0.178373321890831, 0.16689708828926086, 0.16029801964759827, 0.22925321757793427, 0.22496484220027924, 0.11296840012073517, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.04784957319498062, 0.004609245341271162, 0.006819143425673246, 0.0166594497859478, 0.006965316366404295, 0.000989345251582563, 0.006434451788663864, 0.005414100829511881, 0.027048002928495407, 0.008730669505894184, 0.003844247665256262, 0.0032386775128543377, 0.00916406698524952, 0.02474893629550934, 0.20862001180648804, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07474544644355774, 0.14463284611701965, 0.06348620355129242, 0.11649901419878006, 0.010943777859210968, 0.05790672451257706, 0.023460205644369125, 0.09132371097803116, 0.013804412446916103, 0.11923354864120483, 0.04609918221831322, 0.0031168698333203793, 0.02482042834162712, 0.018085025250911713, 0.06715727597475052, 0.12851747870445251, 0.06451001763343811, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07159372419118881, 0.23599489033222198, 0.6269188523292542, 0.2670744061470032, 0.07840307801961899, 0.7659233808517456, 0.4897821247577667, 0.7919513583183289, 0.47275444865226746, 0.20698092877864838, 0.5493778586387634, 0.516223669052124, 0.5164197683334351, 0.6560667753219604, 0.10535097867250443, 0.16148854792118073, 0.04709945246577263, 0.0016553826862946153, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.030506769195199013, 0.030577607452869415, 0.37364113330841064, 0.17907775938510895, 0.011576596647500992, 0.0018289608415216208, 0.0013806972419843078, 0.0006740305689163506, 0.006688407156616449, 0.02554805763065815, 0.1984224021434784, 0.0020999175030738115, 0.0001219362675328739, 0.0009508132934570312, 0.00851912796497345, 0.12575848400592804, 0.13552792370319366, 0.1085570901632309, 0.11512085795402527, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.6425503492355347, 0.21330313384532928, 0.8213226199150085, 0.6104346513748169, 0.4307103455066681, 0.005470798350870609, 0.1284545361995697, 0.017213305458426476, 0.14068865776062012, 0.2507726550102234, 0.6069697737693787, 0.17266355454921722, 0.10257546603679657, 0.4255537688732147, 0.07138645648956299, 0.14333586394786835, 0.24668441712856293, 0.19262480735778809, 0.13920731842517853, 0.0020065978169441223, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.4833258390426636, 0.07765677571296692, 0.6261626482009888, 0.5845412611961365, 0.457427054643631, 0.012895571999251842, 0.037013884633779526, 0.0045295762829482555, 0.030468540266156197, 0.08583686500787735, 0.4300892949104309, 0.6064226627349854, 0.07339996099472046, 0.02218388393521309, 0.11548874527215958, 0.1578390896320343, 0.19358907639980316, 0.02251395769417286, 0.04702039062976837, 0.018520673736929893, 0.0005939522525295615, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.47047996520996094, 0.06838852912187576, 0.42273014783859253, 0.6319702863693237, 0.4177776277065277, 0.0021309976000338793, 0.00800495408475399, 0.0009326375438831747, 0.00536699453368783, 0.07440605759620667, 0.2710660994052887, 0.5013447999954224, 0.021646764129400253, 
0.07749785482883453, 0.039263706654310226, 0.14088943600654602, 0.05360155552625656, 0.043673839420080185, 0.0087194312363863, 0.14876413345336914, 0.3311525881290436, 0.029076436534523964, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.5323148965835571, 0.13256511092185974, 0.352451890707016, 0.6556484699249268, 0.4897412359714508, 0.22345507144927979, 0.17913641035556793, 0.12689323723316193, 0.025374194607138634, 0.169284388422966, 0.17072416841983795, 0.08815333992242813, 0.10821512341499329, 0.18704712390899658, 0.05398408696055412, 0.11886978894472122, 0.08032860606908798, 0.053777631372213364, 0.06359982490539551, 0.49348562955856323, 0.7690801620483398, 0.032007213681936264, 0.00921344943344593, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14081209897994995, 0.02785991132259369, 0.37397870421409607, 0.3742114305496216, 0.4757237732410431, 0.0011322007048875093, 0.0019287536852061749, 0.00011125820310553536, 0.00032575102522969246, 0.0042410544119775295, 0.007025705184787512, 0.007957610301673412, 0.0022035131696611643, 0.0008391661685891449, 0.0013405061326920986, 0.013988303020596504, 0.031309448182582855, 0.021422432735562325, 0.015959911048412323, 0.13852538168430328, 0.7482463121414185, 0.1306946873664856, 0.0026366086676716805, 0.006285007111728191, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17781563103199005, 0.10205524414777756, 0.04494810104370117, 0.011432765983045101, 0.0031803075689822435, 0.6873405575752258, 0.1935015618801117, 0.2538544535636902, 0.0006125010550022125, 0.0012519293231889606, 0.0009674279135651886, 0.0007319907890632749, 0.006560447160154581, 0.0005926102166995406, 0.045413821935653687, 0.02759428508579731, 0.1341203898191452, 0.1143924742937088, 0.04895513132214546, 0.2507959306240082, 0.47495928406715393, 0.24884849786758423, 0.04048554226756096, 0.06435439735651016, 0.02207104302942753, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.24551935493946075, 0.010881111957132816, 0.16116493940353394, 0.28567203879356384, 0.017490731552243233, 0.03198051080107689, 0.25225502252578735, 0.04009091481566429, 0.1379493623971939, 0.030329206958413124, 0.00725751556456089, 0.0005535308737307787, 0.0001769027003319934, 0.0002177381538785994, 0.11288075149059296, 0.08376637101173401, 0.08644555509090424, 0.08414626121520996, 0.08246676623821259, 0.09393073618412018, 0.2536129355430603, 0.09570588916540146, 0.057335685938596725, 0.27625876665115356, 0.23640654981136322, 0.22554923593997955, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2663186192512512, 0.0841110497713089, 0.39283427596092224, 0.3631373345851898, 0.12446267902851105, 0.0023146900348365307, 0.05166012421250343, 0.025394057855010033, 0.09723125398159027, 0.2633029520511627, 0.09458169341087341, 0.0066002910025417805, 0.0024958536960184574, 0.0033851033076643944, 0.0521465502679348, 0.16592197120189667, 0.037314873188734055, 0.020350072532892227, 0.005164262373000383, 0.009123047813773155, 0.005826999898999929, 0.003451529424637556, 0.017567342147231102, 0.055315494537353516, 0.2317170798778534, 0.05933540314435959, 0.06010079011321068, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.032533496618270874, 0.005542360246181488, 0.14801643788814545, 0.028237437829375267, 0.09192534536123276, 0.002004631096497178, 0.0014868990983814, 0.0018816014053300023, 0.026168106123805046, 0.03666744753718376, 
0.2621643543243408, 0.27366670966148376, 0.011460919864475727, 0.012693443335592747, 0.006134080700576305, 0.07053745537996292, 0.19491763412952423, 0.06705262511968613, 0.08265279233455658, 0.006405644118785858, 0.0031596925109624863, 0.005410268437117338, 0.030676638707518578, 0.08307406306266785, 0.20774710178375244, 0.4213918149471283, 0.23337899148464203, 0.08583765476942062, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.028670914471149445, 0.004855436272919178, 0.1069486141204834, 0.02764085866510868, 0.11977140605449677, 0.002686614403501153, 0.007388734724372625, 0.00704799173399806, 0.05677136406302452, 0.0688808336853981, 0.16234178841114044, 0.10548661649227142, 0.1935848444700241, 0.06036479026079178, 0.0025575226172804832, 0.13580749928951263, 0.17484943568706512, 0.09017936140298843, 0.11502011120319366, 0.015199831686913967, 0.008567527867853642, 0.04639086127281189, 0.16773870587348938, 0.16907723248004913, 0.43436557054519653, 0.2870768904685974, 0.10786425322294235, 0.08931463956832886, 0.011009148322045803, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04708265885710716, 0.030478408560156822, 0.0932990089058876, 0.24881142377853394, 0.1139858141541481, 0.03301549330353737, 0.12353643029928207, 0.18121947348117828, 0.3742617964744568, 0.11242274194955826, 0.2673158049583435, 0.05749531090259552, 0.00021243211813271046, 0.005648713558912277, 0.14063234627246857, 0.1727631837129593, 0.039101891219615936, 0.0065339612774550915, 0.0278339721262455, 0.004674504045397043, 0.014613990671932697, 0.03457005321979523, 0.04850766807794571, 0.02412491664290428, 0.009369020350277424, 0.022906647995114326, 0.04899173229932785, 0.01023520715534687, 0.0022774694953113794, 7.664388976991177e-05, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0034641579259186983, 0.015587975271046162, 0.04098831117153168, 0.025328122079372406, 0.012870541773736477, 0.002695741830393672, 0.0012444279855117202, 0.005834754556417465, 0.005115050356835127, 0.10742342472076416, 0.29450723528862, 0.004624508786946535, 0.028462348505854607, 0.09151851385831833, 0.02349407598376274, 0.08213489502668381, 0.3905046880245209, 0.07204636186361313, 0.08312273025512695, 0.02625700645148754, 0.02937941811978817, 0.04131421819329262, 0.05289716273546219, 0.16493423283100128, 0.290347158908844, 0.47713640332221985, 0.44352003931999207, 0.11574649810791016, 0.0847686156630516, 0.047198787331581116, 0.1300322264432907, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00187075010035187, 0.017386021092534065, 0.0033179710153490305, 0.00216178921982646, 0.0006196821923367679, 0.0036519868299365044, 0.020315727218985558, 0.0735914558172226, 0.011879049241542816, 0.05418893322348595, 0.04255518689751625, 0.006776698864996433, 0.007105604745447636, 0.005562894977629185, 0.20312508940696716, 0.056048911064863205, 0.04177262261509895, 0.18134142458438873, 0.04556399583816528, 0.1435631662607193, 0.2900937497615814, 0.07549438625574112, 0.08105770498514175, 0.08377190679311752, 0.011481991037726402, 0.017289845272898674, 0.006863615941256285, 0.013694294728338718, 0.13657283782958984, 0.0735873132944107, 0.3659329116344452, 0.0919225886464119, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.018124327063560486, 0.011053304187953472, 0.041496749967336655, 0.08067373931407928, 0.008039752952754498, 0.27361106872558594, 0.12004023045301437, 0.14489491283893585, 0.05115145817399025, 0.09850911796092987, 0.102595254778862, 0.03553636744618416, 0.03690872713923454, 0.062350839376449585, 0.18180564045906067, 
0.06230737641453743, 0.038521286100149155, 0.05914388969540596, 0.03398321941494942, 0.13657090067863464, 0.19265799224376678, 0.07424072921276093, 0.08660972863435745, 0.10718739032745361, 0.16533604264259338, 0.0767570361495018, 0.03204379230737686, 0.028188396245241165, 0.21943823993206024, 0.11997849494218826, 0.2698959410190582, 0.12308003753423691, 0.45223531126976013, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12148405611515045, 0.0812632218003273, 0.2165963500738144, 0.1931358426809311, 0.08697410672903061, 0.006551810074597597, 0.06685828417539597, 0.03445844352245331, 0.0957593098282814, 0.40685340762138367, 0.14669549465179443, 0.05295614153146744, 0.013317806646227837, 0.016840115189552307, 0.07654187083244324, 0.18667352199554443, 0.0350969135761261, 0.030425790697336197, 0.0065561928786337376, 0.028277983888983727, 0.010725672356784344, 0.005219776649028063, 0.03378060460090637, 0.04241056367754936, 0.18939200043678284, 0.06338198482990265, 0.08136797696352005, 0.004227515775710344, 0.024540461599826813, 0.057830944657325745, 0.038525767624378204, 0.0177453625947237, 0.06933332234621048, 0.08866386860609055, NaN, NaN, NaN, NaN, NaN, NaN], [0.00987213384360075, 0.006524993572384119, 0.026135168969631195, 0.011839349754154682, 0.033334147185087204, 0.0041054473258554935, 0.0015945311170071363, 0.0032734640408307314, 0.04142798110842705, 0.08157128095626831, 0.26105597615242004, 0.34578391909599304, 0.018666768446564674, 0.02866668626666069, 0.00917118415236473, 0.04736897721886635, 0.0950922816991806, 0.05233628675341606, 0.0639958381652832, 0.009022187441587448, 0.002768130972981453, 0.005348078906536102, 0.016458049416542053, 0.03350484371185303, 0.1584910899400711, 0.3849281072616577, 0.30566492676734924, 0.08282434195280075, 0.02534077689051628, 0.01897522434592247, 0.013481524772942066, 0.08136109262704849, 0.25969398021698, 0.2513872981071472, 0.07361149042844772, NaN, NaN, NaN, NaN, NaN], [0.024172252044081688, 0.01827125810086727, 0.0764245018362999, 0.024589890614151955, 0.045055974274873734, 0.08366040140390396, 0.049236495047807693, 0.16330885887145996, 0.05235174670815468, 0.18916647136211395, 0.2596777379512787, 0.12284716963768005, 0.3776375353336334, 0.3416304290294647, 0.00993264652788639, 0.15279658138751984, 0.09928575158119202, 0.0573631152510643, 0.10790141671895981, 0.026906443759799004, 0.012519991025328636, 0.06774256378412247, 0.1448669582605362, 0.07826853543519974, 0.4991803467273712, 0.34429702162742615, 0.12145370990037918, 0.10719165205955505, 0.008088642731308937, 0.007662023417651653, 0.013441860675811768, 0.13362208008766174, 0.34251537919044495, 0.10342243313789368, 0.07045409828424454, 0.010391364805400372, NaN, NaN, NaN, NaN], [0.03498423844575882, 0.015507807955145836, 0.05400218814611435, 0.2035217136144638, 0.06879755109548569, 0.01839861460030079, 0.1265679895877838, 0.19229170680046082, 0.28682830929756165, 0.19846217334270477, 0.19391797482967377, 0.03128731623291969, 0.00016305393364746124, 0.003939830232411623, 0.1374405473470688, 0.1865139603614807, 0.02971193566918373, 0.005512321833521128, 0.039164237678050995, 0.007472363766282797, 0.012969624251127243, 0.03476016968488693, 0.0836154893040657, 0.050758667290210724, 0.017821883782744408, 0.08676476776599884, 0.13045690953731537, 0.03245873004198074, 0.009119128808379173, 7.800521416356787e-05, 0.0006276130443438888, 0.0024839011020958424, 0.06682475656270981, 0.06347990781068802, 0.009879485704004765, 0.0017003080574795604, 6.444661266868934e-05, NaN, NaN, NaN], [0.013754391111433506, 
0.07632532715797424, 0.05588589236140251, 0.060033075511455536, 0.015113652683794498, 0.024528013542294502, 0.0056539555080235004, 0.025407979264855385, 0.0030256062746047974, 0.3076882064342499, 0.2846599221229553, 0.01613902486860752, 0.07589408755302429, 0.25697121024131775, 0.08533195406198502, 0.029208103194832802, 0.15452517569065094, 0.02615012601017952, 0.034968301653862, 0.030517179518938065, 0.023491270840168, 0.02012590691447258, 0.01683984510600567, 0.047155413776636124, 0.1569623053073883, 0.34555378556251526, 0.29876279830932617, 0.06633269041776657, 0.090775266289711, 0.05117363482713699, 0.14964616298675537, 0.024973956868052483, 0.22028914093971252, 0.5953715443611145, 0.10930891335010529, 0.05826140195131302, 0.08348876982927322, 0.2024080604314804, NaN, NaN], [0.0015476603293791413, 0.017548631876707077, 0.0017550711054354906, 0.0017123925499618053, 0.0004861274501308799, 0.0013240363914519548, 0.007671059109270573, 0.03281305357813835, 0.0013763409806415439, 0.060824256390333176, 0.04298469424247742, 0.011416267603635788, 0.012759965844452381, 0.012971585616469383, 0.16966485977172852, 0.023966457694768906, 0.008770916610956192, 0.0534873865544796, 0.015555462799966335, 0.07408829033374786, 0.12750747799873352, 0.026930494233965874, 0.023400133475661278, 0.02665247581899166, 0.00316479685716331, 0.004739005118608475, 0.002742160577327013, 0.006070322822779417, 0.09564805775880814, 0.029174519702792168, 0.5144217014312744, 0.05911846086382866, 0.020064763724803925, 0.0023497287184000015, 0.004584830719977617, 0.10225256532430649, 0.05520752817392349, 0.4466201066970825, 0.09660884737968445, NaN], [0.005211545154452324, 0.0055291797034442425, 0.0040288688614964485, 0.011110500432550907, 0.002710954286158085, 0.0645279660820961, 0.01716793328523636, 0.025083528831601143, 0.010282285511493683, 0.009002536535263062, 0.0011292833369225264, 0.0045064822770655155, 0.007478337734937668, 0.004868943244218826, 0.13875910639762878, 0.18986307084560394, 0.036011889576911926, 0.08335232734680176, 0.12826237082481384, 0.08758756518363953, 0.027860891073942184, 0.10198243707418442, 0.0981309786438942, 0.17985263466835022, 0.11864234507083893, 0.08274368196725845, 0.1066904067993164, 0.051979877054691315, 0.06548189371824265, 0.03337343409657478, 0.0824524462223053, 0.012718076817691326, 0.0349668525159359, 0.03024965338408947, 0.01082769688218832, 0.0127665214240551, 0.014164488762617111, 0.01925024762749672, 0.0028478982858359814, 0.0007362329051829875]], [[0.12737327814102173, 0.10940374433994293, 0.05123003572225571, 0.7807462215423584, 0.0676276683807373, 0.02884089946746826, 0.05574861168861389, 0.5975708961486816, 0.07044392824172974, 0.5009010434150696, 0.31273892521858215, 0.07660850137472153, 0.29424503445625305, 0.028401609510183334, 0.07683643698692322, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03750006482005119, 0.429240882396698, 0.15060469508171082, 0.2604650557041168, 0.037177786231040955, 0.1944778561592102, 0.07849539071321487, 0.6716934442520142, 0.06105323135852814, 0.07711976766586304, 0.20997941493988037, 0.028168758377432823, 0.12550987303256989, 0.030995607376098633, 0.0958443135023117, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.15516091883182526, 0.07278051972389221, 0.11765316128730774, 0.7884857058525085, 0.11075033247470856, 0.051856692880392075, 0.18673725426197052, 0.2268398553133011, 
0.013722711242735386, 0.6478350162506104, 0.5306386947631836, 0.3090885877609253, 0.22243055701255798, 0.16200464963912964, 0.13070979714393616, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.21811531484127045, 0.7140333652496338, 0.018219277262687683, 0.764274001121521, 0.15804116427898407, 0.03280843421816826, 0.11008237302303314, 0.09874711185693741, 0.0423860140144825, 0.5652360320091248, 0.14938808977603912, 0.2869919240474701, 0.39966318011283875, 0.1259765923023224, 0.0577625073492527, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.11744663864374161, 0.1893559694290161, 0.05823011323809624, 0.03701714053750038, 0.15626470744609833, 0.08588159829378128, 0.26269999146461487, 0.41053518652915955, 0.007210245821624994, 0.3749772906303406, 0.4537068009376526, 0.6417111158370972, 0.1666039228439331, 0.13084180653095245, 0.14052902162075043, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.3613002598285675, 0.240200012922287, 0.044567547738552094, 0.04614294692873955, 0.0021214759908616543, 0.17616558074951172, 0.11286458373069763, 0.11203286051750183, 0.009014172479510307, 0.10163455456495285, 0.0949772298336029, 0.06209810823202133, 0.11910365521907806, 0.04125094786286354, 0.1871420443058014, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.2914785146713257, 0.381010502576828, 0.08399549126625061, 0.4511452913284302, 0.048780620098114014, 0.008560722693800926, 0.1541443020105362, 0.12101723253726959, 0.02183164842426777, 0.18665823340415955, 0.13169258832931519, 0.13539372384548187, 0.14286382496356964, 0.031125182285904884, 0.2064482420682907, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.3084108829498291, 0.4568510055541992, 0.068343386054039, 0.40243175625801086, 0.04035715013742447, 0.028490515425801277, 0.006473515648394823, 0.6036491990089417, 0.14769236743450165, 0.09462843090295792, 0.04651549458503723, 0.08334364742040634, 0.08459941297769547, 0.022403797134757042, 0.13448290526866913, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.4981050491333008, 0.13424238562583923, 0.16773013770580292, 0.5160816311836243, 0.029790958389639854, 0.22989192605018616, 0.568993866443634, 0.056374672800302505, 0.08792523294687271, 0.2900378406047821, 0.12431738525629044, 0.017185388132929802, 0.05061684548854828, 0.020683959126472473, 0.13275840878486633, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.33482691645622253, 0.4720645546913147, 0.20652346312999725, 0.6004944443702698, 0.1402488797903061, 0.13250590860843658, 0.13873517513275146, 0.5260767936706543, 0.01182119082659483, 0.1017654612660408, 0.047682080417871475, 0.04534589499235153, 0.10121697187423706, 0.0026118881069123745, 0.13006491959095, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.27261805534362793, 0.5674196481704712, 0.08154824376106262, 0.8736060261726379, 0.4724165201187134, 0.1720387041568756, 0.13692085444927216, 0.40960294008255005, 0.06138879805803299, 0.0898643285036087, 
0.15986473858356476, 0.04882661625742912, 0.09858791530132294, 0.005254920106381178, 0.09166211634874344, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.33052578568458557, 0.40956470370292664, 0.44244009256362915, 0.8809638619422913, 0.26719745993614197, 0.38818857073783875, 0.40750059485435486, 0.4857279658317566, 0.04656125605106354, 0.08998580276966095, 0.02227160707116127, 0.42457664012908936, 0.06242617964744568, 0.019552020356059074, 0.08343644440174103, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.20678018033504486, 0.17620769143104553, 0.3081345558166504, 0.6112105250358582, 0.534289538860321, 0.19626931846141815, 0.17160479724407196, 0.4079393148422241, 0.027630727738142014, 0.07990976423025131, 0.0661839172244072, 0.022294294089078903, 0.11108729988336563, 0.024492109194397926, 0.12739884853363037, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.2302674651145935, 0.4147239625453949, 0.3118293881416321, 0.3454154133796692, 0.20178626477718353, 0.3381562829017639, 0.1571493148803711, 0.4487079083919525, 0.02096635475754738, 0.11857040971517563, 0.09038619697093964, 0.01401298213750124, 0.06377796083688736, 0.029106009751558304, 0.10548537224531174, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0850413590669632, 0.2905830442905426, 0.047175440937280655, 0.009145522490143776, 0.014412813819944859, 0.03387918695807457, 0.04852135106921196, 0.2856408655643463, 0.03688584640622139, 0.02503933012485504, 0.030300520360469818, 0.020876996219158173, 0.004409631714224815, 0.0025441893376410007, 0.1292814165353775, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.01263146661221981, 0.08983241021633148, 0.002674827352166176, 0.0008326905663125217, 0.0032944290433079004, 0.06790440529584885, 0.02327594719827175, 0.08626140654087067, 0.0010102109517902136, 0.0009567838278599083, 0.001915089669637382, 0.019144434481859207, 0.060631223022937775, 0.04236740246415138, 0.2042645514011383, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12322216480970383, 0.14532910287380219, 0.08289580047130585, 0.07800436019897461, 0.016899574548006058, 0.20651613175868988, 0.15389330685138702, 0.08048079907894135, 0.023754820227622986, 0.08939354121685028, 0.05408218502998352, 0.0083498889580369, 0.16772767901420593, 0.03971855714917183, 0.029394451528787613, 0.12774905562400818, 0.07772441953420639, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.002537816995754838, 0.0036866364534944296, 0.0026212686207145452, 0.0010326605988666415, 0.0028582154773175716, 0.0016078348271548748, 0.0024177017621695995, 0.004757970105856657, 0.007405414246022701, 0.0004943490494042635, 0.0008183143800124526, 0.0020540759433060884, 0.0008841927628964186, 0.0009274804615415633, 0.13894422352313995, 0.058547187596559525, 0.7868303656578064, 0.02677525207400322, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.18076959252357483, 0.11159703880548477, 0.07333940267562866, 0.12368053197860718, 0.1442640721797943, 
0.3224244713783264, 0.2286587655544281, 0.10576390475034714, 0.0873323604464531, 0.0707816481590271, 0.07077325880527496, 0.024980774149298668, 0.015894055366516113, 0.01236753724515438, 0.034113459289073944, 0.12958122789859772, 0.05996095389127731, 0.20109553635120392, 0.07473170012235641, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.008514223620295525, 0.006442691199481487, 0.003549255197867751, 0.00919315591454506, 0.0011393448803573847, 0.0005870977183803916, 0.02400296926498413, 0.03577389195561409, 0.006469632964581251, 0.004828252829611301, 0.0027150637470185757, 9.597353346180171e-05, 0.00011822552187368274, 0.000396552961319685, 0.1521017998456955, 0.11586850136518478, 0.18037959933280945, 0.354478657245636, 0.6275972127914429, 0.01217791810631752, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0016907083336263895, 9.336868970422074e-05, 0.0023900996893644333, 0.0018071996746584773, 0.001690928009338677, 0.0010278637055307627, 0.008010926656425, 0.0018918663263320923, 0.0009378245449624956, 0.0005185406771488488, 0.00012474792310968041, 0.00014544214354828, 2.7525844416231848e-05, 2.095987474604044e-05, 0.12926018238067627, 0.04329086095094681, 0.2822243273258209, 0.5110569596290588, 0.8230794668197632, 0.28263914585113525, 0.006951561663299799, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08279342949390411, 0.00717265997081995, 0.01113244891166687, 0.030300047248601913, 0.03227340802550316, 0.02679654024541378, 0.2711687386035919, 0.12656770646572113, 0.0010184150887653232, 0.0069296094588935375, 0.006689318455755711, 0.00307065830565989, 0.004024384077638388, 0.006041096989065409, 0.12722525000572205, 0.15041278302669525, 0.01652364432811737, 0.09004879742860794, 0.1228649914264679, 0.03705046698451042, 0.03279988467693329, 0.012472960166633129, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.09468965977430344, 0.010531323030591011, 0.1253902167081833, 0.09483902901411057, 0.060478318482637405, 0.1959676593542099, 0.5850688219070435, 0.11734473705291748, 0.08924026787281036, 0.031869061291217804, 0.04437774419784546, 0.004531644284725189, 0.19630968570709229, 0.04580901935696602, 0.04253998026251793, 0.005692727863788605, 0.004583822097629309, 0.011303454637527466, 0.06351188570261002, 0.07110948860645294, 0.03377191722393036, 0.8937738537788391, 0.1077374666929245, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03443194553256035, 0.006786322686821222, 0.08545193076133728, 0.2555176913738251, 0.16119416058063507, 0.3760574460029602, 0.3180745542049408, 0.0858285129070282, 0.0052651395089924335, 0.035345133394002914, 0.0046972003765404224, 0.00805696938186884, 0.0738091915845871, 0.004572577308863401, 0.028640231117606163, 0.1957636922597885, 0.00532554043456912, 0.2672942280769348, 0.07843183726072311, 0.01169322058558464, 0.006695515010505915, 0.022856300696730614, 0.03495524823665619, 0.2056257426738739, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.26599034667015076, 0.06405031681060791, 0.39913085103034973, 0.7390084862709045, 0.8533709049224854, 0.0830850899219513, 0.22198519110679626, 0.15359464287757874, 0.0286090150475502, 0.1338224709033966, 0.06985709816217422, 0.03841168060898781, 0.1308237761259079, 0.01580808497965336, 0.010780439712107182, 0.21948350965976715, 
0.003219911362975836, 0.13064762949943542, 0.017335020005702972, 0.004487968049943447, 0.006097455509006977, 0.0023269150406122208, 0.014221499674022198, 0.1740167737007141, 0.05570632219314575, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.16064751148223877, 0.5348425507545471, 0.09399141371250153, 0.3709404170513153, 0.3757614493370056, 0.2272261530160904, 0.2699662148952484, 0.46868544816970825, 0.09081633388996124, 0.07856583595275879, 0.054298948496580124, 0.10659310221672058, 0.05178465321660042, 0.012835889123380184, 0.19243957102298737, 0.027252521365880966, 0.05625513195991516, 0.024279700592160225, 0.009296371601521969, 0.04113621264696121, 0.04445572942495346, 0.05016031116247177, 0.300394743680954, 0.219209223985672, 0.5284181833267212, 0.13528388738632202, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.33067551255226135, 0.40668511390686035, 0.03748138248920441, 0.16017457842826843, 0.02931954525411129, 0.1285390406847, 0.43687552213668823, 0.6227295398712158, 0.016583241522312164, 0.054699335247278214, 0.43602558970451355, 0.028376825153827667, 0.1860552728176117, 0.202489972114563, 0.03443598374724388, 0.16918426752090454, 0.005196947604417801, 0.010393726639449596, 0.0008839815272949636, 0.18853645026683807, 0.23955073952674866, 0.03703731670975685, 0.018581384792923927, 0.07692746073007584, 0.05213537812232971, 0.05520249530673027, 0.03837481513619423, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.025147954002022743, 0.023277895525097847, 0.036982107907533646, 0.030706623569130898, 0.00253032217733562, 0.08060919493436813, 0.062497250735759735, 0.22720953822135925, 0.015824737027287483, 0.020865583792328835, 0.051981136202812195, 0.016274577006697655, 0.3496847152709961, 0.19709302484989166, 0.00854758732020855, 0.21910618245601654, 0.012340836226940155, 0.011061819270253181, 0.004421355202794075, 0.01345156505703926, 0.015948239713907242, 0.001919197733514011, 0.0006712953327223659, 0.0014401280786842108, 0.0009498890140093863, 0.0011606297921389341, 0.0013843519845977426, 0.005138876382261515, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0009813109645619988, 0.0007951235747896135, 0.007896890863776207, 0.006039812229573727, 0.001424357295036316, 0.003153599100187421, 0.0010362794855609536, 0.006138501223176718, 0.00410880520939827, 0.003359388094395399, 0.008728301152586937, 0.0021525975316762924, 0.2318088710308075, 0.017491629347205162, 0.0005464124260470271, 0.12592341005802155, 0.022789308801293373, 0.01544136367738247, 0.05098855495452881, 0.006733328104019165, 0.0011512627825140953, 0.0067494111135602, 0.03519098460674286, 0.08756479620933533, 0.04847756400704384, 0.13774195313453674, 0.07365753501653671, 0.19525301456451416, 0.019442297518253326, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.008814784698188305, 0.009578033350408077, 0.008741176687180996, 0.002597709419205785, 0.0019302073633298278, 0.02750723622739315, 0.010486552491784096, 0.061721935868263245, 0.05738110467791557, 0.0038812088314443827, 0.08735688030719757, 0.00500333309173584, 3.085857315454632e-05, 0.005531619768589735, 0.14116442203521729, 0.04374772310256958, 0.10635814815759659, 0.1203576922416687, 0.4972172677516937, 0.09716533124446869, 0.05867829546332359, 0.13453392684459686, 0.39353471994400024, 0.6331138610839844, 0.33491814136505127, 0.5983138680458069, 0.3633559048175812, 0.6357010006904602, 0.7792285084724426, 0.005659972317516804, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 
NaN, NaN, NaN], [0.015857994556427002, 0.010374038480222225, 0.002225207630544901, 0.002974742790684104, 0.0010843537747859955, 0.007387869525700808, 0.006818806286901236, 0.0318806953728199, 0.1651621013879776, 0.21757511794567108, 0.2911650240421295, 0.08204617351293564, 0.016449127346277237, 0.10985822230577469, 0.0020742996130138636, 0.05199728533625603, 0.014302223920822144, 0.13574257493019104, 0.05407930538058281, 0.010633953846991062, 0.007459194865077734, 0.0004102779785171151, 0.01107444055378437, 0.16451390087604523, 0.19313758611679077, 0.018386593088507652, 0.03492085263133049, 0.1390746384859085, 0.6526300311088562, 0.08304706960916519, 0.27643677592277527, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.01972219906747341, 0.20374125242233276, 0.0031293979845941067, 0.004390338435769081, 0.031924858689308167, 0.06048818305134773, 0.0774247944355011, 0.7845978140830994, 0.15838612616062164, 0.06142642721533775, 0.0820784792304039, 0.20785683393478394, 0.46646884083747864, 0.42270010709762573, 0.053927596658468246, 0.0008206118363887072, 0.0011099595576524734, 0.0005428412696346641, 0.0013029578840360045, 0.0009422241128049791, 0.001036918954923749, 0.00015340711979661137, 0.003300317795947194, 0.0019372785463929176, 0.003245894331485033, 0.0010756017873063684, 0.0009867959888651967, 0.04242069274187088, 0.25679609179496765, 0.03714281693100929, 0.46563825011253357, 0.052469443529844284, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.026567673310637474, 0.2768426239490509, 0.016553064808249474, 0.07253812253475189, 0.029352964833378792, 0.034967049956321716, 0.09283487498760223, 0.5970632433891296, 0.02342795394361019, 0.04057195410132408, 0.06215028092265129, 0.2966896891593933, 0.4489157795906067, 0.24187524616718292, 0.048112284392118454, 0.0011551693314686418, 0.0015016108518466353, 0.00018865184392780066, 0.0004620797117240727, 0.001353209256194532, 0.001276124152354896, 0.001269699539989233, 0.02504812367260456, 0.016660472378134727, 0.007664685603231192, 0.000621759332716465, 0.0039494638331234455, 0.05373308062553406, 0.5797222256660461, 0.04267296567559242, 0.3308492600917816, 0.22605444490909576, 0.03655111417174339, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14453455805778503, 0.4129781723022461, 0.021322425454854965, 0.11776001751422882, 0.008680691011250019, 0.12525556981563568, 0.1459336131811142, 0.4943058490753174, 0.041365865617990494, 0.06633096933364868, 0.48416346311569214, 0.027247071266174316, 0.10342812538146973, 0.15874288976192474, 0.04535134881734848, 0.18345873057842255, 0.006115049123764038, 0.007153322920203209, 0.00125643250066787, 0.15791349112987518, 0.17755654454231262, 0.06167090684175491, 0.028255566954612732, 0.04990806803107262, 0.014394938945770264, 0.013118196278810501, 0.02539716847240925, 0.00894339382648468, 0.04024626687169075, 0.05642623454332352, 0.04561464861035347, 0.029457826167345047, 0.09210912138223648, 0.1002524197101593, NaN, NaN, NaN, NaN, NaN, NaN], [0.03164434805512428, 0.10487183183431625, 0.019769076257944107, 0.0709872916340828, 0.0046073514968156815, 0.12636253237724304, 0.06114564463496208, 0.5786424875259399, 0.17960773408412933, 0.15923625230789185, 0.14680741727352142, 0.04373620077967644, 0.20528176426887512, 0.14476445317268372, 0.03252548724412918, 0.2828649580478668, 0.011994204483926296, 0.006339475512504578, 0.0030444697476923466, 0.006948052905499935, 0.008767204359173775, 0.0014567734906449914, 0.00018795454525388777, 0.00020330831466708332, 0.0001539710647193715, 0.0004007722018286586, 0.0012242270167917013, 
[... large serialized output omitted: nested arrays of per-layer, per-head attention probabilities (one row per decoded position), with 0.0 for masked/padded positions and NaN for positions not yet decoded ...]
0.18746882677078247, 0.11770728975534439, 0.13333363831043243, 0.006719768047332764, 0.04288880154490471, 0.001412510173395276, 0.058754052966833115, 0.14280158281326294, 0.13529875874519348, 0.08268098533153534, 0.02367851696908474, 0.1494951695203781, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.01403640117496252, 0.014278309419751167, 0.1034439280629158, 0.022417087107896805, 0.10706920921802521, 0.018271848559379578, 0.046350300312042236, 0.04233889281749725, 0.037542134523391724, 0.0005760823260061443, 0.004724643658846617, 0.233056902885437, 0.2574465572834015, 0.1892177164554596, 0.21611936390399933, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.032590243965387344, 0.14464972913265228, 0.1993260532617569, 0.12327495217323303, 0.27639931440353394, 0.011173157021403313, 0.012838426046073437, 0.0802190750837326, 0.0400678850710392, 0.013469994999468327, 0.025247203186154366, 0.30583158135414124, 0.6397863626480103, 0.258308470249176, 0.08317234367132187, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.007401467300951481, 0.04209339618682861, 0.1104009672999382, 0.04737341031432152, 0.06253770738840103, 0.0023836863692849874, 0.05026397854089737, 0.01439946424216032, 0.006556188687682152, 0.001721409265883267, 0.01908556930720806, 0.022761031985282898, 0.01600046642124653, 0.22344018518924713, 0.2855986952781677, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.00031611474696546793, 0.010241325944662094, 0.005327185150235891, 0.007503898814320564, 0.009216651320457458, 0.08986854553222656, 0.0022410263773053885, 0.04830501973628998, 0.013246790505945683, 0.0036830154713243246, 0.001605262397788465, 0.004246865399181843, 0.005818811245262623, 0.00778583250939846, 0.2319662719964981, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.00028042105259373784, 0.004604758229106665, 0.008834331296384335, 0.010530425235629082, 0.04934454336762428, 0.3239482641220093, 0.02964387647807598, 0.041019540280103683, 0.028070107102394104, 0.002580034313723445, 0.0034616885241121054, 0.006594499107450247, 0.07731658220291138, 0.01784621551632881, 0.10414844751358032, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.002352550160139799, 0.00811008270829916, 0.007519579492509365, 0.09616736322641373, 0.00784054771065712, 0.06404154002666473, 0.025837063789367676, 0.06720300018787384, 0.008001329377293587, 0.016075177118182182, 0.0036620565224438906, 0.031110821291804314, 0.1529460847377777, 0.03003939613699913, 0.19531111419200897, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.014062762260437012, 0.03979215770959854, 0.0070105125196278095, 0.010145032778382301, 0.023933248594403267, 0.08613994717597961, 0.027301009744405746, 0.007488427218049765, 0.04610109701752663, 0.00706111453473568, 0.005716769024729729, 0.008516461588442326, 0.04168170318007469, 0.004054774064570665, 0.3198099434375763, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0027477010153234005, 
0.009237049147486687, 0.005884162615984678, 0.004349177703261375, 0.039300523698329926, 0.06504905968904495, 0.005921225529164076, 0.05048412084579468, 0.004538795445114374, 0.019958311691880226, 0.08035917580127716, 0.1339075267314911, 0.45191076397895813, 0.1108468547463417, 0.15996994078159332, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0004566281568259001, 0.0044615683145821095, 0.008062957786023617, 0.0003266451822128147, 0.032452184706926346, 0.004190187435597181, 0.0009983428753912449, 0.0015420016134157777, 0.025539150461554527, 0.0009114624699577689, 0.001308016013354063, 0.11249691247940063, 0.5262115597724915, 0.16036535799503326, 0.02284345217049122, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.006384413689374924, 0.006966868881136179, 0.013256898149847984, 0.008146845735609531, 0.005910678766667843, 0.005924733821302652, 0.0029809526167809963, 0.004338744096457958, 0.0021091948729008436, 0.02691148780286312, 0.09123647958040237, 0.0904775932431221, 0.10420377552509308, 0.019918829202651978, 0.21981710195541382, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.004395737312734127, 0.0342060811817646, 0.08344801515340805, 0.012639162130653858, 0.07537969946861267, 0.00383414002135396, 0.007808698806911707, 0.007516762241721153, 0.0023650380317121744, 0.055798787623643875, 0.025632014498114586, 0.040716953575611115, 0.16482838988304138, 0.13848447799682617, 0.17180821299552917, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0016022673808038235, 0.013307235203683376, 0.012306403368711472, 0.0029055906925350428, 0.06092625483870506, 0.01653674617409706, 0.008309547789394855, 0.00395687622949481, 0.002493055537343025, 0.0038927635177969933, 0.009680269286036491, 0.23031921684741974, 0.35693949460983276, 0.1708209365606308, 0.050492819398641586, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.009627100080251694, 0.006502249743789434, 0.0023533182684332132, 0.0021814347710460424, 0.007286426145583391, 0.024909881874918938, 0.01453662570565939, 0.010449647903442383, 0.0028000103775411844, 0.001988302916288376, 0.001580765936523676, 0.013102496974170208, 0.001836722600273788, 0.0008430163725279272, 0.15720587968826294, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.010018138214945793, 0.02516627125442028, 0.027397310361266136, 0.005101055838167667, 0.025938771665096283, 0.13529063761234283, 0.02690303698182106, 0.11719205975532532, 0.027814749628305435, 0.019565219059586525, 0.07996311038732529, 0.0991574078798294, 0.16288702189922333, 0.1113416850566864, 0.22370746731758118, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05219842493534088, 0.1440066546201706, 0.27922260761260986, 0.2058621197938919, 0.11230742931365967, 0.6016822457313538, 0.20846855640411377, 0.04777589067816734, 0.20611444115638733, 0.15481434762477875, 0.11950203776359558, 0.02679699845612049, 0.0639302060008049, 0.047183193266391754, 0.04897741973400116, 0.147435262799263, 0.06894105672836304, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 
NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.01555164996534586, 0.0014379153726622462, 0.01706753298640251, 0.003720618085935712, 0.10093016922473907, 0.027928827330470085, 0.015380543656647205, 0.0025812943931668997, 0.020822137594223022, 0.014309070073068142, 0.017923271283507347, 0.0120958611369133, 0.014481468126177788, 0.009491728618741035, 0.15904544293880463, 0.18660759925842285, 0.013697005808353424, 0.050341442227363586, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11612647771835327, 0.0010205605067312717, 0.020188286900520325, 0.027076182886958122, 0.09822120517492294, 0.3221674859523773, 0.1250218003988266, 0.002691123867407441, 0.005359187722206116, 0.04976291581988335, 0.023232540115714073, 0.04237976670265198, 0.028708819299936295, 0.049411751329898834, 0.005618311930447817, 0.14907698333263397, 0.12682567536830902, 0.14014844596385956, 0.024977339431643486, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0470837838947773, 0.007497857324779034, 0.004583081230521202, 0.022991856560111046, 0.0278051495552063, 0.00051211251411587, 0.0627230703830719, 0.011764267459511757, 0.010903585702180862, 0.07272983342409134, 0.011678352952003479, 0.09392477571964264, 0.01558940764516592, 0.03351595252752304, 0.2068868726491928, 0.20074230432510376, 0.11179281026124954, 0.012457489967346191, 0.01455892063677311, 0.011106430552899837, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0024584962520748377, 8.163625898305327e-05, 0.00016154914919752628, 0.0002508168399799615, 0.0019916424062103033, 0.0004536219348665327, 0.0036078437697142363, 0.0008641426684334874, 0.00021941671730019152, 0.0014423344982787967, 0.0004360634775366634, 0.004383172374218702, 0.0009428760386072099, 0.0009436326217837632, 0.14683274924755096, 0.20768699049949646, 0.16985096037387848, 0.19526726007461548, 0.016829432919621468, 0.05647609382867813, 0.022808711975812912, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.02989446185529232, 0.007703323382884264, 0.12996061146259308, 0.025068828836083412, 0.2812304198741913, 0.0071953474543988705, 0.0021352169569581747, 0.0025125211104750633, 0.0014658492291346192, 0.007028855849057436, 0.0448734275996685, 0.09462164342403412, 0.0503704659640789, 0.11768583953380585, 0.12974096834659576, 0.14349573850631714, 0.41078659892082214, 0.5100967288017273, 0.04046756774187088, 0.2924310266971588, 0.07987978309392929, 0.007180717773735523, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.16756094992160797, 0.028098214417696, 0.20756086707115173, 0.2207580953836441, 0.10928753018379211, 0.13773545622825623, 0.2233184576034546, 0.1774815022945404, 0.13830231130123138, 0.20932619273662567, 0.18267595767974854, 0.05961548537015915, 0.07697918266057968, 0.18739080429077148, 0.06796090304851532, 0.11146429926156998, 0.3579395115375519, 0.7730652093887329, 0.5723751783370972, 0.2817910611629486, 0.25461745262145996, 0.060240793973207474, 0.08399515599012375, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.017068415880203247, 0.00098085415083915, 0.010854640044271946, 0.006490680854767561, 0.29060667753219604, 0.006710599176585674, 0.0118483304977417, 0.0008181483135558665, 0.00011296885350020602, 0.0034601599909365177, 0.005098147317767143, 
0.010750477202236652, 0.010399019345641136, 0.009376241825520992, 0.017405353486537933, 0.13904383778572083, 0.44345301389694214, 0.1345542073249817, 0.05706587806344032, 0.7818705439567566, 0.04436418041586876, 0.015915511175990105, 0.31926584243774414, 0.26167550683021545, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1331326961517334, 0.019769106060266495, 0.01612294837832451, 0.028521019965410233, 0.007509702816605568, 0.2665199935436249, 0.19958320260047913, 0.1385747790336609, 0.0059373765252530575, 0.08046255260705948, 0.052418529987335205, 0.004961848258972168, 0.10941796749830246, 0.06705309450626373, 0.17611992359161377, 0.12236351519823074, 0.40148651599884033, 0.12099923938512802, 0.38539087772369385, 0.6352627873420715, 0.0574735552072525, 0.027495326474308968, 0.25199854373931885, 0.07788273692131042, 0.1824284791946411, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.019668979570269585, 0.0081618782132864, 0.12552350759506226, 0.0802406370639801, 0.07089362293481827, 0.18871739506721497, 0.12778939306735992, 0.04829992726445198, 0.04307088255882263, 0.02314154990017414, 0.14194107055664062, 0.05861861631274223, 0.19650596380233765, 0.11930099874734879, 0.18420156836509705, 0.0776049941778183, 0.26076433062553406, 0.12800094485282898, 0.15216867625713348, 0.36678510904312134, 0.31404268741607666, 0.13151897490024567, 0.1709745228290558, 0.2591820955276489, 0.18929390609264374, 0.08235450834035873, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00538466265425086, 0.0270208939909935, 0.18066750466823578, 0.06076826527714729, 0.035171061754226685, 0.411039799451828, 0.09634009003639221, 0.26394954323768616, 0.1915867179632187, 0.03318370133638382, 0.3213040828704834, 0.10995125770568848, 0.5320225954055786, 0.4394112527370453, 0.15243512392044067, 0.08287283033132553, 0.26698997616767883, 0.29562729597091675, 0.13922370970249176, 0.3693794012069702, 0.22139106690883636, 0.612119734287262, 0.1618482619524002, 0.40734153985977173, 0.10604425519704819, 0.2217203825712204, 0.14197519421577454, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0030147582292556763, 0.00625306461006403, 0.017102748155593872, 0.008551767095923424, 0.0727200135588646, 0.015153692103922367, 0.0023096217773854733, 0.011201570741832256, 0.002435098635032773, 0.006847116630524397, 0.016829995438456535, 0.12519565224647522, 0.3878204822540283, 0.13249750435352325, 0.028183329850435257, 0.0676846131682396, 0.5803259611129761, 0.47128230333328247, 0.2430339902639389, 0.43893957138061523, 0.5822793245315552, 0.9563859105110168, 0.5092246532440186, 0.7397804260253906, 0.6675750613212585, 0.2242172360420227, 0.046741336584091187, 0.09371624141931534, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.066617950797081, 0.006649812217801809, 0.04142908379435539, 0.13957993686199188, 0.025706114247441292, 0.08231058716773987, 0.08377126604318619, 0.02330365777015686, 0.04652002453804016, 0.11060080677270889, 0.09014575183391571, 0.07117310166358948, 0.15938407182693481, 0.1624550223350525, 0.05356656014919281, 0.16273218393325806, 0.4245251417160034, 0.44257473945617676, 0.1064363345503807, 0.22264361381530762, 0.638583779335022, 0.7456080913543701, 0.17856015264987946, 0.09681503474712372, 0.3901955187320709, 0.4154786765575409, 0.10903800278902054, 0.0281606987118721, 0.027353502810001373, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.004379222169518471, 0.0002637936850078404, 
0.0022587613202631474, 0.006711117923259735, 0.0006837267428636551, 0.007989797741174698, 0.02997850626707077, 0.045127563178539276, 0.008224103599786758, 0.0034686585422605276, 0.0038658890407532454, 0.00034815416438505054, 7.646608719369397e-05, 0.00017854337056633085, 0.14325816929340363, 0.2541956901550293, 0.2554672658443451, 0.13483673334121704, 0.33163735270500183, 0.11067650467157364, 0.3400806486606598, 0.4272999167442322, 0.2955835163593292, 0.293487548828125, 0.2820315957069397, 0.17141510546207428, 0.08369391411542892, 0.012903732247650623, 0.010530934669077396, 0.015047149732708931, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.25216665863990784, 0.1422366499900818, 0.10172943770885468, 0.3735504150390625, 0.0612066313624382, 0.06238102167844772, 0.11154207587242126, 0.031159698963165283, 0.011768986470997334, 0.4107469618320465, 0.1557808816432953, 0.07179611176252365, 0.186580628156662, 0.18789765238761902, 0.099563829600811, 0.07456009835004807, 0.09125705808401108, 0.20381297171115875, 0.09053967893123627, 0.6734579801559448, 0.8927901983261108, 0.9854956865310669, 0.19160649180412292, 0.848483681678772, 0.3795100748538971, 0.0351644828915596, 0.06069617718458176, 0.0190274715423584, 0.13319239020347595, 0.1618155688047409, 0.029784632846713066, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0073658498004078865, 0.1486257165670395, 0.03456511348485947, 0.0081891855224967, 0.009660922922194004, 0.09341325610876083, 0.010183881968259811, 0.09390538185834885, 0.005950886756181717, 0.019719628617167473, 0.060451164841651917, 0.021925343200564384, 0.19991156458854675, 0.17004182934761047, 0.15761280059814453, 0.13663174211978912, 0.5250937938690186, 0.20416004955768585, 0.37758082151412964, 0.7281314134597778, 0.24714940786361694, 0.006291824858635664, 0.029336191713809967, 0.258807897567749, 0.17944614589214325, 0.2768983840942383, 0.49996671080589294, 0.6760725975036621, 0.0684136375784874, 0.9500845074653625, 0.04427658021450043, 0.027829600498080254, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0057948376052081585, 0.023180164396762848, 0.018019115552306175, 0.008233858272433281, 0.005580522585660219, 0.09526203572750092, 0.025384269654750824, 0.05396068096160889, 0.022398412227630615, 0.010895788669586182, 0.02884012460708618, 0.008390026167035103, 0.1754663735628128, 0.0998048186302185, 0.1692073941230774, 0.05520259216427803, 0.4062710404396057, 0.11698392778635025, 0.09814880043268204, 0.8328142166137695, 0.46247926354408264, 0.07190129905939102, 0.3418641984462738, 0.14486591517925262, 0.025201991200447083, 0.042143724858760834, 0.4074908196926117, 0.1494714319705963, 0.17342594265937805, 0.908286988735199, 0.5950636863708496, 0.14296366274356842, 0.20851416885852814, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0038264640606939793, 0.023839879781007767, 0.12264026701450348, 0.02543032169342041, 0.01467527449131012, 0.22457416355609894, 0.02885078825056553, 0.18430863320827484, 0.08557040989398956, 0.016987022012472153, 0.3513573110103607, 0.04023189842700958, 0.40384334325790405, 0.4235673248767853, 0.16652488708496094, 0.08497714251279831, 0.5087416172027588, 0.4508724510669708, 0.33144411444664, 0.600685715675354, 0.523800790309906, 0.4743403494358063, 0.10964386910200119, 0.6009643077850342, 0.29714730381965637, 0.1661888062953949, 0.10026849061250687, 0.19036318361759186, 0.07889659702777863, 0.29447081685066223, 0.5917950868606567, 0.05482999235391617, 0.0994495078921318, 0.08629819005727768, NaN, NaN, NaN, NaN, NaN, NaN], [0.006266402080655098, 
0.015031179413199425, 0.02853887900710106, 0.010518345981836319, 0.09044987708330154, 0.021657679229974747, 0.0031435268465429544, 0.020945381373167038, 0.004824943374842405, 0.0127853499725461, 0.04820985347032547, 0.12459135800600052, 0.5573670268058777, 0.2566193640232086, 0.05160163715481758, 0.04716389998793602, 0.6635201573371887, 0.5744545459747314, 0.33429521322250366, 0.755266010761261, 0.7800281643867493, 0.9541771411895752, 0.5776658058166504, 0.8714791536331177, 0.9158549308776855, 0.2818737030029297, 0.06938906759023666, 0.10379814356565475, 0.3064776659011841, 0.7474142909049988, 0.7715258002281189, 0.37782159447669983, 0.057383324950933456, 0.013433223590254784, 0.03400390222668648, NaN, NaN, NaN, NaN, NaN], [0.3002758324146271, 0.08866846561431885, 0.06544900685548782, 0.25531354546546936, 0.028160221874713898, 0.12210531532764435, 0.16810676455497742, 0.0764283761382103, 0.17981933057308197, 0.3050864636898041, 0.2806880474090576, 0.13050490617752075, 0.19047558307647705, 0.3216065764427185, 0.07704814523458481, 0.1486319750547409, 0.22267495095729828, 0.42902871966362, 0.07982667535543442, 0.5459871888160706, 0.9060689210891724, 0.8350642919540405, 0.10920917987823486, 0.4773065447807312, 0.7826967239379883, 0.5733710527420044, 0.26356616616249084, 0.040332335978746414, 0.031653065234422684, 0.8572309613227844, 0.5636150240898132, 0.07464684545993805, 0.03465104475617409, 0.03009859099984169, 0.008700854144990444, 0.005375253036618233, NaN, NaN, NaN, NaN], [0.005926316604018211, 0.0003559965989552438, 0.0015365411527454853, 0.005924532189965248, 0.0005743101937696338, 0.007415232714265585, 0.024156678467988968, 0.045611582696437836, 0.009969166480004787, 0.003380746114999056, 0.003106702584773302, 0.0003880919248331338, 4.0538176108384505e-05, 0.00014580521383322775, 0.13770556449890137, 0.25873932242393494, 0.5196211338043213, 0.3300914764404297, 0.5837901830673218, 0.4101006090641022, 0.7175306677818298, 0.6572118401527405, 0.6919461488723755, 0.6594171524047852, 0.7066829204559326, 0.46555259823799133, 0.3380126953125, 0.05317035689949989, 0.053740378469228745, 0.031323984265327454, 0.30507126450538635, 0.1422475129365921, 0.03319966048002243, 0.08714800328016281, 0.01252773217856884, 0.006611488293856382, 0.007115270011126995, NaN, NaN, NaN], [0.1617586314678192, 0.29556339979171753, 0.028325924649834633, 0.059843577444553375, 0.009868957102298737, 0.03965649753808975, 0.07811643928289413, 0.06809397041797638, 0.009963614866137505, 0.11740529537200928, 0.08369920402765274, 0.039758261293172836, 0.13982373476028442, 0.1197674348950386, 0.13220268487930298, 0.011579165235161781, 0.05381239950656891, 0.044945720583200455, 0.035533830523490906, 0.6624263525009155, 0.8997865319252014, 0.9679857492446899, 0.17051655054092407, 0.940772533416748, 0.6132625341415405, 0.01721411757171154, 0.04632151871919632, 0.010550450533628464, 0.08354383707046509, 0.12839946150779724, 0.02755529060959816, 0.44050073623657227, 0.04286862909793854, 0.01342833787202835, 0.003870438551530242, 0.026607532054185867, 0.02663758397102356, 0.005111980251967907, NaN, NaN], [0.012153265066444874, 0.16048333048820496, 0.041802890598773956, 0.00796045083552599, 0.018259191885590553, 0.10963782668113708, 0.009757153689861298, 0.07023902982473373, 0.01128031499683857, 0.030125515535473824, 0.0943576917052269, 0.02206866256892681, 0.1321137398481369, 0.19507774710655212, 0.1400403380393982, 0.13300661742687225, 0.5851269960403442, 0.20284885168075562, 0.5700805187225342, 0.7479174137115479, 
0.39722636342048645, 0.004733124747872353, 0.0698152482509613, 0.6515945196151733, 0.5409151315689087, 0.25820717215538025, 0.4583084285259247, 0.6744768619537354, 0.3421478569507599, 0.9633424878120422, 0.1852269172668457, 0.04996338114142418, 0.5482219457626343, 0.296283096075058, 0.48366567492485046, 0.06441208720207214, 0.9149421453475952, 0.02780383825302124, 0.0073219588957726955, NaN], [0.005033975467085838, 0.01824766956269741, 0.015512547455728054, 0.006673634983599186, 0.005676268134266138, 0.04240407794713974, 0.023996027186512947, 0.1038113459944725, 0.02023463323712349, 0.0080516142770648, 0.052543867379426956, 0.1188565045595169, 0.05977800861001015, 0.05786403268575668, 0.13343320786952972, 0.14593175053596497, 0.2687321603298187, 0.04604685679078102, 0.30660173296928406, 0.3806478679180145, 0.38105660676956177, 0.15303322672843933, 0.014211257919669151, 0.05383581668138504, 0.20604565739631653, 0.2462100237607956, 0.5718756914138794, 0.5113963484764099, 0.21981710195541382, 0.4276719391345978, 0.5577609539031982, 0.4118191599845886, 0.31598320603370667, 0.5468451976776123, 0.4359907805919647, 0.2059280127286911, 0.3916337192058563, 0.2548142671585083, 0.2198532670736313, 0.026425611227750778]], [[0.060514166951179504, 0.09119007736444473, 0.5136731863021851, 0.024349171668291092, 0.41056114435195923, 0.043175265192985535, 0.016160618513822556, 0.12711943686008453, 0.029147693887352943, 0.01592664048075676, 0.04504424333572388, 0.03736018016934395, 0.026280265301465988, 0.042564861476421356, 0.13562467694282532, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.009338664822280407, 0.09596994519233704, 0.12376897037029266, 0.01794583536684513, 0.059337858110666275, 0.04990454390645027, 0.003890786785632372, 0.07171432673931122, 0.0057785604149103165, 0.005389686673879623, 0.009663187898695469, 0.014342015609145164, 0.020640142261981964, 0.04060304909944534, 0.16408833861351013, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.07689530402421951, 0.027863014489412308, 0.15549975633621216, 0.2693096697330475, 0.73520827293396, 0.03749871999025345, 0.3640631139278412, 0.14002074301242828, 0.16656053066253662, 0.02643253095448017, 0.0061660525389015675, 0.054253485053777695, 0.14240022003650665, 0.14975441992282867, 0.13701564073562622, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.21953634917736053, 0.22122228145599365, 0.04846278205513954, 0.07968296110630035, 0.3619323670864105, 0.03181222453713417, 0.6669740080833435, 0.3975786566734314, 0.11174946278333664, 0.15518029034137726, 0.004886193200945854, 0.010736972093582153, 0.07725195586681366, 0.09191425889730453, 0.1523013859987259, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0740056112408638, 0.054083533585071564, 0.027193741872906685, 0.014972379431128502, 0.04523617774248123, 0.012482533231377602, 0.4212614595890045, 0.25695085525512695, 0.3699147403240204, 0.013461914844810963, 0.08041262626647949, 0.015268572606146336, 0.627507209777832, 0.13811761140823364, 0.19850368797779083, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.029503263533115387, 0.09333665668964386, 0.016309864819049835, 0.1364656686782837, 
0.03873518481850624, 0.019083604216575623, 0.758955180644989, 0.6250144243240356, 0.10551930963993073, 0.0059091635048389435, 0.001959211425855756, 0.004587537609040737, 0.0029548059683293104, 0.011073557659983635, 0.10497581213712692, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0038599083200097084, 0.03815716505050659, 0.004112291149795055, 0.0037336996756494045, 0.02896580658853054, 0.003606554586440325, 0.2724342346191406, 0.5795999765396118, 0.041377726942300797, 0.01812332309782505, 0.006642999593168497, 0.006629596464335918, 0.018780261278152466, 0.00801254715770483, 0.11063171178102493, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.023342538625001907, 0.1589166522026062, 0.01254882663488388, 0.01894153468310833, 0.04743911698460579, 0.015340029262006283, 0.06989605724811554, 0.22605817019939423, 0.016811540350317955, 0.014681086875498295, 0.0061398339457809925, 0.02630683407187462, 0.032653048634529114, 0.05358496680855751, 0.18197578191757202, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.01728241890668869, 0.12100599706172943, 0.003952578641474247, 0.038103699684143066, 0.00803869217634201, 0.017839567735791206, 0.040644098073244095, 0.014622771181166172, 0.07288665324449539, 0.4550913870334625, 0.18886235356330872, 0.2150641530752182, 0.487347275018692, 0.42817094922065735, 0.12942945957183838, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.011775199323892593, 0.1349712610244751, 0.005470172502100468, 0.003098055487498641, 0.028361253440380096, 0.03303566575050354, 0.007174484897404909, 0.015601159073412418, 0.006606224924325943, 0.08859884738922119, 0.18040567636489868, 0.31761303544044495, 0.2462366670370102, 0.4818485677242279, 0.12394269555807114, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.05270439758896828, 0.1637289971113205, 0.009510326199233532, 0.008013473823666573, 0.14090411365032196, 0.011389089748263359, 0.013123652897775173, 0.023534703999757767, 0.009078129194676876, 0.02855684608221054, 0.026650836691260338, 0.39132389426231384, 0.16291603446006775, 0.25967708230018616, 0.10212607681751251, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.19571052491664886, 0.10246216505765915, 0.02142595686018467, 0.012254489585757256, 0.00365867605432868, 0.007110960781574249, 0.020346596837043762, 0.03192196041345596, 0.00833944883197546, 0.07423693686723709, 0.09786227345466614, 0.08075869083404541, 0.1330210417509079, 0.26891645789146423, 0.17930860817432404, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.11616674810647964, 0.175978422164917, 0.00425378605723381, 0.017427049577236176, 0.011484457179903984, 0.030517226085066795, 0.08637198060750961, 0.1500588357448578, 0.0009573447750881314, 0.044167183339595795, 0.005869577638804913, 0.0011607500491663814, 0.014711305499076843, 0.027834221720695496, 0.18594378232955933, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.11675343662500381, 0.17556257545948029, 
0.016423039138317108, 0.02097608894109726, 0.06606884300708771, 0.06371303647756577, 0.09760221093893051, 0.2481643557548523, 0.0015754855703562498, 0.03009907715022564, 0.03618617355823517, 0.012020162306725979, 0.17486301064491272, 0.22630257904529572, 0.2108311653137207, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.004961065016686916, 0.011551961302757263, 0.006318831816315651, 0.002851473866030574, 0.003461753251031041, 0.011111320927739143, 0.004611799493432045, 0.004697122145444155, 0.0026004482060670853, 0.0010426584631204605, 0.0060967751778662205, 0.01239971723407507, 0.004622939508408308, 0.002610035240650177, 0.15716104209423065, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1022859737277031, 0.17571765184402466, 0.1416551172733307, 0.11749783158302307, 0.09062699973583221, 0.07838433235883713, 0.09344526380300522, 0.3238999545574188, 0.11371968686580658, 0.10100032389163971, 0.09302259236574173, 0.0389624647796154, 0.16697892546653748, 0.1419355273246765, 0.1285012662410736, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.24028724431991577, 0.14351274073123932, 0.051798444241285324, 0.16382630169391632, 0.04226303845643997, 0.020662518218159676, 0.11527843773365021, 0.29321926832199097, 0.02218940667808056, 0.0878078043460846, 0.10535410046577454, 0.011972848325967789, 0.07032275199890137, 0.04715458303689957, 0.0739566907286644, 0.1684475541114807, 0.01643766649067402, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2799055874347687, 0.11053244769573212, 0.1936434954404831, 0.029654914513230324, 0.3583168685436249, 0.552708625793457, 0.34459343552589417, 0.33612802624702454, 0.17023301124572754, 0.19969996809959412, 0.18768110871315002, 0.6793866157531738, 0.791401207447052, 0.7463385462760925, 0.09094473719596863, 0.20323613286018372, 0.02236698381602764, 0.0030780781526118517, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1572730988264084, 0.12077052146196365, 0.0489557608962059, 0.1575693041086197, 0.05669395253062248, 0.21311312913894653, 0.07387427985668182, 0.12006285786628723, 0.06427917629480362, 0.05486075580120087, 0.09722346067428589, 0.0672946497797966, 0.519307017326355, 0.15919242799282074, 0.07895061373710632, 0.15523119270801544, 0.029148569330573082, 0.04869325831532478, 0.027081435546278954, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.056666091084480286, 0.13304737210273743, 0.023897293955087662, 0.04679059237241745, 0.045941345393657684, 0.32384783029556274, 0.44531556963920593, 0.533463716506958, 0.08588721603155136, 0.10118058323860168, 0.027683693915605545, 0.15270595252513885, 0.45412689447402954, 0.19033603370189667, 0.009601723402738571, 0.20906439423561096, 0.016835892572999, 0.005647255107760429, 0.004844226874411106, 0.00019458922906778753, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.026866083964705467, 0.01856745034456253, 0.00889106560498476, 0.023431263864040375, 0.014423922635614872, 0.06721587479114532, 0.30465173721313477, 0.5084072351455688, 0.06748852878808975, 0.09416066110134125, 0.028160765767097473, 0.08301042765378952, 
0.13479003310203552, 0.08470122516155243, 0.14269311726093292, 0.19736447930335999, 0.01826038584113121, 0.012854915112257004, 0.09684289991855621, 0.0006958578014746308, 4.3345058656996116e-05, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07283831387758255, 0.02513016201555729, 0.513066828250885, 0.1692790985107422, 0.12089971452951431, 0.05420007184147835, 0.019427694380283356, 0.038392528891563416, 0.31973040103912354, 0.29048243165016174, 0.4046151340007782, 0.10607112944126129, 0.0885496586561203, 0.07017665356397629, 0.1372956782579422, 0.16369424760341644, 0.023256592452526093, 0.01855486072599888, 0.06154748797416687, 0.06098903343081474, 0.10795246064662933, 0.023746412247419357, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.27857187390327454, 0.3617483973503113, 0.2938012182712555, 0.22770966589450836, 0.06824903935194016, 0.055705904960632324, 0.2735913395881653, 0.10727421194314957, 0.15245027840137482, 0.12983311712741852, 0.2781352400779724, 0.010307536460459232, 0.09433942288160324, 0.07780664414167404, 0.13000918924808502, 0.19143380224704742, 0.11398851871490479, 0.03716170787811279, 0.07628969103097916, 0.38886839151382446, 0.24263328313827515, 0.13712459802627563, 0.02201412245631218, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.09918209165334702, 0.053455647081136703, 0.645177960395813, 0.40746453404426575, 0.08205579966306686, 0.11053493618965149, 0.09200509637594223, 0.0519426129758358, 0.15867555141448975, 0.14363400638103485, 0.08945868164300919, 0.009240956045687199, 0.05626320466399193, 0.024817338213324547, 0.10628006607294083, 0.2130274772644043, 0.007986752316355705, 0.02235114760696888, 0.0019427334191277623, 0.005593507084995508, 0.012699572369456291, 0.006745419930666685, 0.06126464158296585, 0.14077326655387878, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.21029417216777802, 0.16975507140159607, 0.4791514277458191, 0.5080997347831726, 0.14877668023109436, 0.04306463524699211, 0.02225780300796032, 0.027854960411787033, 0.09907854348421097, 0.17716829478740692, 0.027767561376094818, 0.04010230675339699, 0.1045137569308281, 0.07445494085550308, 0.1349247545003891, 0.22579564154148102, 0.013292824849486351, 0.10215212404727936, 0.005943832919001579, 0.013894540257751942, 0.01404587086290121, 0.02319374494254589, 0.10344905406236649, 0.1325504034757614, 0.008661924861371517, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05318222567439079, 0.11344952136278152, 0.09562063962221146, 0.10165436565876007, 0.11442670226097107, 0.07387696951627731, 0.04448265954852104, 0.12469986081123352, 0.10296554863452911, 0.029610879719257355, 0.006854650564491749, 0.06481806933879852, 0.038151390850543976, 0.029200172051787376, 0.19021393358707428, 0.1733061671257019, 0.07715445756912231, 0.2302267998456955, 0.05804288014769554, 0.07560069113969803, 0.23177897930145264, 0.2901765704154968, 0.042333029210567474, 0.08450006693601608, 0.04456959664821625, 0.015471314080059528, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.024841444566845894, 0.16249340772628784, 0.20643305778503418, 0.09402812272310257, 0.0850510448217392, 0.023708872497081757, 0.027868179604411125, 0.16653721034526825, 0.2575382590293884, 0.07176022976636887, 0.04638299718499184, 0.019721999764442444, 0.08340867608785629, 0.04306621477007866, 0.19255293905735016, 
0.16428759694099426, 0.01361166127026081, 0.2167942076921463, 0.03707392141222954, 0.09917350113391876, 0.2872558534145355, 0.08793877810239792, 0.03127053380012512, 0.051127880811691284, 0.02603980340063572, 0.12251178920269012, 0.06466985493898392, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.24242781102657318, 0.4547469913959503, 0.7904132008552551, 0.7443370819091797, 0.4808639585971832, 0.2640213668346405, 0.06001711264252663, 0.24681034684181213, 0.5675581097602844, 0.2725449204444885, 0.247804656624794, 0.029579274356365204, 0.19247104227542877, 0.09198179841041565, 0.18542104959487915, 0.2214493751525879, 0.0034381633158773184, 0.025536755099892616, 0.005642351228743792, 0.0024517737329006195, 0.00733930105343461, 0.0003064426709897816, 0.024970028549432755, 0.0009503457695245743, 0.0013023557839915156, 0.012362079694867134, 0.002213133964687586, 0.0037243058905005455, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10456986725330353, 0.23679938912391663, 0.29603201150894165, 0.2020668387413025, 0.14429134130477905, 0.4285147190093994, 0.3221139907836914, 0.592944860458374, 0.47945162653923035, 0.273953914642334, 0.2270997315645218, 0.05125115066766739, 0.15167200565338135, 0.14498752355575562, 0.03565559163689613, 0.21803884208202362, 0.044672977179288864, 0.15033316612243652, 0.24480289220809937, 0.0010314357932657003, 0.006885815411806107, 0.017953861504793167, 0.09280995279550552, 0.09214792400598526, 0.01309943851083517, 0.026278402656316757, 0.029330603778362274, 0.10137840360403061, 0.0009828503243625164, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.005393329542130232, 0.004602347034960985, 0.02125353366136551, 0.017772456631064415, 0.029431374743580818, 0.06670433282852173, 0.07382840663194656, 0.05640842020511627, 0.2022721767425537, 0.02110537886619568, 0.006757265422493219, 0.0065305884927511215, 0.00012849831546191126, 0.0015581984771415591, 0.14312443137168884, 0.28474918007850647, 0.005827821791172028, 0.0010850036051124334, 0.005180059466511011, 0.00018831032502930611, 0.002925402717664838, 0.0029562395066022873, 0.005281978752464056, 0.002952893264591694, 0.013548285700380802, 0.01663871854543686, 0.02234998345375061, 0.001472283387556672, 0.00024227210087701678, 9.911999950418249e-05, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03693488612771034, 0.3099628686904907, 0.02452116832137108, 0.038606833666563034, 0.04603191837668419, 0.056979674845933914, 0.014461892656981945, 0.021202413365244865, 0.4372372031211853, 0.02073492854833603, 0.005594322457909584, 0.11605570465326309, 0.05724794790148735, 0.01605997234582901, 0.1753198802471161, 0.11472342163324356, 0.017006950452923775, 0.03429265320301056, 0.05351921543478966, 0.010289198718965054, 0.02545105293393135, 0.002036151010543108, 0.08590202778577805, 0.007977829314768314, 0.008050770498812199, 0.02079172432422638, 0.07815419882535934, 0.25072064995765686, 0.11726108938455582, 0.04080193489789963, 0.020839283242821693, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17487157881259918, 0.2829012870788574, 0.22657853364944458, 0.2227388322353363, 0.09278897941112518, 0.05522100254893303, 0.023270972073078156, 0.031554628163576126, 0.32194823026657104, 0.13948096334934235, 0.09803083539009094, 0.2809208631515503, 0.14969345927238464, 0.03018103539943695, 0.10283161699771881, 0.25351014733314514, 0.018978603184223175, 0.013279697857797146, 0.14657457172870636, 0.0005683518829755485, 0.003044809214770794, 0.0003673452010843903, 0.0009085922501981258, 
0.00026260188315063715, 6.703466351609677e-05, 0.00393629027530551, 0.0411190427839756, 0.014572926796972752, 0.0009043514728546143, 0.001453216653317213, 0.001335341832600534, 0.0036634530406445265, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06711219251155853, 0.13971862196922302, 0.10573939234018326, 0.08062157034873962, 0.22173365950584412, 0.04757346957921982, 0.02002648264169693, 0.06195787340402603, 0.09553409367799759, 0.04351034387946129, 0.015184497460722923, 0.17841440439224243, 0.07658158242702484, 0.04646967723965645, 0.1461518555879593, 0.2249869406223297, 0.0773954764008522, 0.10561174154281616, 0.3267342746257782, 0.011780736967921257, 0.03227663040161133, 0.09185110032558441, 0.03840579837560654, 0.01289159432053566, 0.002641883445903659, 0.03386297821998596, 0.16820214688777924, 0.06345225125551224, 0.027306171134114265, 0.007737002335488796, 0.018253128975629807, 0.0508209764957428, 0.015562118031084538, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.015694430097937584, 0.09081663191318512, 0.2731003761291504, 0.09780610352754593, 0.06437630951404572, 0.024092676118016243, 0.017730340361595154, 0.09997125715017319, 0.24317535758018494, 0.06615940481424332, 0.05322461575269699, 0.013002216815948486, 0.10308460891246796, 0.03947872668504715, 0.16966252028942108, 0.17073971033096313, 0.01119090337306261, 0.07090220600366592, 0.026190776377916336, 0.04357914999127388, 0.10384812206029892, 0.05681576952338219, 0.008270802907645702, 0.011212479323148727, 0.016114890575408936, 0.1306251734495163, 0.04437248408794403, 0.022720789536833763, 0.0017881430685520172, 0.005742507986724377, 0.03271590173244476, 0.12170897424221039, 0.18442584574222565, 0.07238933444023132, NaN, NaN, NaN, NaN, NaN, NaN], [0.19514591991901398, 0.2590837776660919, 0.7111572027206421, 0.6245842576026917, 0.2279123067855835, 0.21324849128723145, 0.0465325303375721, 0.16129039227962494, 0.5552195906639099, 0.24888396263122559, 0.16995932161808014, 0.017819084227085114, 0.13601525127887726, 0.04923256114125252, 0.1924036145210266, 0.2460513859987259, 0.004599481821060181, 0.030415518209338188, 0.006707339081913233, 0.001940727117471397, 0.0018293699249625206, 0.0002438600640743971, 0.021702459082007408, 0.00019114103633910418, 0.0004616644873749465, 0.02795419655740261, 0.007376548834145069, 0.009364028461277485, 0.0008695388678461313, 0.027626920491456985, 0.002984545426443219, 0.0021758046932518482, 0.005276597570627928, 0.0015223525697365403, 0.0046029179356992245, NaN, NaN, NaN, NaN, NaN], [0.11466818302869797, 0.23749157786369324, 0.22078867256641388, 0.21260471642017365, 0.1054922342300415, 0.38443663716316223, 0.35735341906547546, 0.3432110548019409, 0.45766645669937134, 0.30316272377967834, 0.15794025361537933, 0.23222389817237854, 0.18522031605243683, 0.12369272857904434, 0.062224190682172775, 0.1682240217924118, 0.15532228350639343, 0.17499232292175293, 0.31528380513191223, 0.0016938054468482733, 0.0013859918108209968, 0.0071086762472987175, 0.08609996736049652, 0.02145048975944519, 0.00334079097956419, 0.08546027541160583, 0.16909679770469666, 0.5000762343406677, 0.012536582536995411, 0.0033327846322208643, 0.01681024581193924, 0.01291667390614748, 0.11205089092254639, 0.06917328387498856, 0.24062496423721313, 0.003104837378486991, NaN, NaN, NaN, NaN], [0.004928229842334986, 0.004764902405440807, 0.014567935839295387, 0.014073353260755539, 0.020878629758954048, 0.04901519790291786, 0.05124438554048538, 0.042454566806554794, 0.19801755249500275, 0.018003307282924652, 0.004736864008009434, 
0.006620202213525772, 0.00011398878996260464, 0.001381832524202764, 0.13761556148529053, 0.30163663625717163, 0.008585775271058083, 0.0018221536884084344, 0.004949942696839571, 0.0002661931503098458, 0.0017199779395014048, 0.00286088977009058, 0.004591777920722961, 0.0013412131229415536, 0.009152509272098541, 0.029603971168398857, 0.059182800352573395, 0.004352512303739786, 0.0009281163802370429, 0.00013420419418253005, 0.0015637356555089355, 0.004895435180515051, 0.0020298720337450504, 0.016267914324998856, 0.0014363413210958242, 0.00015049855574034154, 4.989441003999673e-05, NaN, NaN, NaN], [0.013776288367807865, 0.25124475359916687, 0.00789756141602993, 0.00910337083041668, 0.005072988104075193, 0.015830766409635544, 0.005818341393023729, 0.011153762228786945, 0.14152461290359497, 0.008211367763578892, 0.002360414480790496, 0.06666377186775208, 0.057822320610284805, 0.009000283665955067, 0.13980405032634735, 0.1420876681804657, 0.030559053644537926, 0.035777460783720016, 0.0549585185945034, 0.010907668620347977, 0.018195953220129013, 0.005288956221193075, 0.07946551591157913, 0.003352995030581951, 0.00945360492914915, 0.03057919070124626, 0.20277532935142517, 0.5438944697380066, 0.2487112432718277, 0.11027072370052338, 0.03672702983021736, 0.009589559398591518, 0.03681262582540512, 0.12653782963752747, 0.3100517988204956, 0.04488144814968109, 0.07299992442131042, 0.024292031303048134, NaN, NaN], [0.25532495975494385, 0.3110601603984833, 0.28066542744636536, 0.29941898584365845, 0.09561395645141602, 0.06004221364855766, 0.0257351566106081, 0.04446575790643692, 0.3475395441055298, 0.2538500130176544, 0.25107017159461975, 0.4736424386501312, 0.29699820280075073, 0.06975124776363373, 0.11745814979076385, 0.2571920156478882, 0.012253361754119396, 0.00982633139938116, 0.09085621684789658, 0.00026428516139276326, 0.001174133620224893, 0.00010905979434028268, 0.0006958161829970777, 9.435929678147659e-05, 1.889842314994894e-05, 0.0019355103140696883, 0.03233037516474724, 0.014144179411232471, 0.0034062752965837717, 0.0014896523207426071, 0.0032966958824545145, 0.0043079969473183155, 0.002425077836960554, 0.0237245112657547, 0.017915409058332443, 0.0004631538176909089, 0.0033925946336239576, 0.0019653798080980778, 0.0010656031081452966, NaN], [0.06876020133495331, 0.07319146394729614, 0.08357107639312744, 0.06905727088451385, 0.010884120129048824, 0.012632370926439762, 0.04344229772686958, 0.06033884361386299, 0.05559740215539932, 0.048808641731739044, 0.06204793229699135, 0.017201891168951988, 0.028970519080758095, 0.021960163488984108, 0.13179059326648712, 0.25252944231033325, 0.012149164453148842, 0.019892947748303413, 0.013666713610291481, 0.05940697342157364, 0.04882493242621422, 0.025430571287870407, 0.00045668394886888564, 0.0054928152821958065, 0.005623141769319773, 0.004253733437508345, 0.014798035845160484, 0.012909402139484882, 0.011927488259971142, 0.007018915377557278, 0.021986471489071846, 0.016502689570188522, 0.002887164242565632, 0.006932961288839579, 0.007926056161522865, 0.015145027078688145, 0.005945136770606041, 0.016453862190246582, 0.011257275938987732, 0.0009747393196448684]], [[0.027552247047424316, 0.013821233063936234, 0.004237555433064699, 0.0007387229125015438, 0.0009859473211690784, 0.001997306477278471, 0.002160864183679223, 0.009250090457499027, 0.0009738927474245429, 0.0009403586154803634, 0.003406830132007599, 0.0010056114988401532, 0.008306043222546577, 0.06191018968820572, 0.18169914186000824, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0056476471945643425, 0.0617278628051281, 0.026225095614790916, 0.009516767226159573, 0.019543437287211418, 0.011766157113015652, 0.0015307252760976553, 0.004000868182629347, 0.006223553325980902, 0.02180931344628334, 0.02397397719323635, 0.025289250537753105, 0.01872297003865242, 0.05591608211398125, 0.17309869825839996, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.5742589831352234, 0.02769068442285061, 0.03131784498691559, 0.008496972732245922, 0.005279624368995428, 0.0009009581408463418, 0.013010378926992416, 0.009255914948880672, 0.08095329999923706, 0.0017015798948705196, 0.0027918636333197355, 0.01474103331565857, 0.07241056859493256, 0.2960302531719208, 0.1991364061832428, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.3870091140270233, 0.24428580701351166, 0.004871743265539408, 0.01251932606101036, 0.004600874613970518, 0.007045479491353035, 0.011942178010940552, 0.06100638955831528, 0.06223933771252632, 0.00421120086684823, 0.0017708303639665246, 0.010406754910945892, 0.016386834904551506, 0.038040366023778915, 0.25559180974960327, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.6136646866798401, 0.2692064642906189, 0.043582458049058914, 0.00652115186676383, 0.05291604623198509, 0.006654517259448767, 0.03398957848548889, 0.03886384516954422, 0.13169772922992706, 0.002106831641867757, 0.005907678045332432, 0.01888049766421318, 0.04876947030425072, 0.2226717472076416, 0.22327177226543427, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.685612678527832, 0.0861489400267601, 0.03236214071512222, 0.16196951270103455, 0.03394145518541336, 0.05551951378583908, 0.027528556063771248, 0.06770895421504974, 0.19389298558235168, 0.03780713677406311, 0.0038191182538866997, 0.05989958345890045, 0.13479465246200562, 0.24111053347587585, 0.15613426268100739, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.6876600384712219, 0.0606975182890892, 0.05783677101135254, 0.05387236177921295, 0.11914167553186417, 0.004756046459078789, 0.031782086938619614, 0.011465699411928654, 0.1448838710784912, 0.09538520872592926, 0.007872258313000202, 0.033316925168037415, 0.09786565601825714, 0.08940181881189346, 0.23629719018936157, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.5363585352897644, 0.11579979956150055, 0.10718797892332077, 0.21453110873699188, 0.030864767730236053, 0.026318436488509178, 0.03807519003748894, 0.12262200564146042, 0.08015674352645874, 0.06537020206451416, 0.004594390746206045, 0.015254726633429527, 0.06485987454652786, 0.039039257913827896, 0.16586215794086456, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.6220377087593079, 0.17304541170597076, 0.23731492459774017, 0.32412996888160706, 0.2203587144613266, 0.09306959062814713, 0.2822628319263458, 0.008407875895500183, 0.14113475382328033, 0.022416740655899048, 0.005183607805520296, 0.0005837879725731909, 0.00799399521201849, 0.006284625735133886, 0.12005029618740082, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0398109070956707, 0.026653334498405457, 0.5938559174537659, 0.12655052542686462, 0.04707850515842438, 0.018195422366261482, 0.010826833546161652, 0.023274976760149002, 0.14916135370731354, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.12666325271129608, 0.047387395054101944, 0.04497509077191353, 0.23918962478637695, 0.016611548140645027, 0.009305250830948353, 0.02713325433433056, 0.030590379610657692, 0.4573454260826111, 0.17728003859519958, 0.08635216951370239, 0.05938294902443886, 0.008936652913689613, 0.028742672875523567, 0.15077541768550873, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03701020032167435, 0.037774376571178436, 0.1161394715309143, 0.09335700422525406, 0.015312368050217628, 0.026739761233329773, 0.013009096495807171, 0.005902147851884365, 0.07189750671386719, 0.00625182269141078, 0.056744903326034546, 0.06423129141330719, 0.06661844998598099, 0.02100159414112568, 0.2252311259508133, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.12698857486248016, 0.15100647509098053, 0.08910781890153885, 0.09401589632034302, 0.14288602769374847, 0.07712502032518387, 0.1496707946062088, 0.23784373700618744, 0.024656152352690697, 0.07261883467435837, 0.11269068717956543, 0.10889188945293427, 0.23155105113983154, 0.10633593797683716, 0.14060717821121216, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.33520859479904175, 0.17541100084781647, 0.043081097304821014, 0.07071122527122498, 0.031066332012414932, 0.05302952229976654, 0.13712948560714722, 0.0819549486041069, 0.010218805633485317, 0.05350261554121971, 0.03376028686761856, 0.016291575506329536, 0.04384060204029083, 0.016914406791329384, 0.06937505304813385, 0.1729947179555893, 0.014742943458259106, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2972787618637085, 0.14542943239212036, 0.2801832854747772, 0.6946116089820862, 0.3750338852405548, 0.09368664771318436, 0.11078806221485138, 0.124379463493824, 0.028408339247107506, 0.3442523181438446, 0.15075638890266418, 0.08511755615472794, 0.32891392707824707, 0.12337944656610489, 0.05913665145635605, 0.11518532782793045, 0.28854820132255554, 0.0005498379468917847, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06821048259735107, 0.007578656077384949, 0.033511072397232056, 0.039627932012081146, 0.016393400728702545, 0.20925503969192505, 0.15704192221164703, 0.024064799770712852, 0.005696912761777639, 0.01698312722146511, 0.15042142570018768, 0.0017041407991200686, 0.016995420679450035, 0.005758653394877911, 0.015053601935505867, 0.12768876552581787, 0.007979520596563816, 0.05741023272275925, 0.14377589523792267, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05268644914031029, 0.018480738624930382, 0.006206580437719822, 0.01908770017325878, 0.009213676676154137, 0.012446015141904354, 0.2606332302093506, 0.15275397896766663, 0.004711512941867113, 0.01064901053905487, 0.00940486416220665, 0.00429189158603549, 0.014810611493885517, 0.012880465015769005, 0.15466143190860748, 0.25598737597465515, 0.03471918776631355, 0.08263758569955826, 0.03616967797279358, 
0.0012629067059606314, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.017502065747976303, 0.09008979797363281, 0.045234303921461105, 0.04321402683854103, 0.014162504114210606, 0.2841097414493561, 0.10382679849863052, 0.4497845470905304, 0.042821191251277924, 0.03918898105621338, 0.06416238099336624, 0.04602029174566269, 0.2197093665599823, 0.07547488063573837, 0.13285692036151886, 0.29742351174354553, 0.10481993854045868, 0.07552393525838852, 0.008401650935411453, 0.3407011330127716, 0.028353586792945862, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.02909473329782486, 0.05293780937790871, 0.025932423770427704, 0.061369478702545166, 0.12287095934152603, 0.12207728624343872, 0.20267462730407715, 0.3647293746471405, 0.036313559859991074, 0.028358493000268936, 0.054471470415592194, 0.007501897402107716, 0.10796680301427841, 0.05851392075419426, 0.12157665193080902, 0.17861823737621307, 0.07256677001714706, 0.1795390099287033, 0.04586997628211975, 0.27750420570373535, 0.0032322825863957405, 0.09472999721765518, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.02889016829431057, 0.05256107077002525, 0.05110660940408707, 0.09513585269451141, 0.049980901181697845, 0.07343146204948425, 0.21190620958805084, 0.10279127210378647, 0.1787082403898239, 0.022944355383515358, 0.03947293758392334, 0.008258121088147163, 0.09723227471113205, 0.030062679201364517, 0.14898137748241425, 0.1281835287809372, 0.008169662207365036, 0.10209551453590393, 0.22781534492969513, 0.13339588046073914, 0.022249281406402588, 0.2580547630786896, 0.0071509419940412045, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.027054987847805023, 0.06796294450759888, 0.02347770519554615, 0.04540639370679855, 0.13579830527305603, 0.1935206949710846, 0.09281998127698898, 0.22921815514564514, 0.012567882426083088, 0.02752627059817314, 0.05939676612615585, 0.00633750855922699, 0.24427738785743713, 0.10302533209323883, 0.18246731162071228, 0.19490991532802582, 0.0105251120403409, 0.07082764059305191, 0.07746586948633194, 0.10047772526741028, 0.007984980009496212, 0.045915842056274414, 0.030714787542819977, 0.09154831618070602, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13923436403274536, 0.07431720942258835, 0.06541924923658371, 0.14132679998874664, 0.10506866127252579, 0.06156519800424576, 0.21440355479717255, 0.06509862840175629, 0.02759510651230812, 0.10144857317209244, 0.13265900313854218, 0.048845868557691574, 0.16166719794273376, 0.1116088330745697, 0.15105699002742767, 0.2116595059633255, 0.006228659767657518, 0.09237925708293915, 0.33000993728637695, 0.06037600710988045, 0.06468494236469269, 0.028822004795074463, 0.015993207693099976, 0.023504862561821938, 0.014777855016291142, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14352908730506897, 0.10288456827402115, 0.05261845886707306, 0.1541282832622528, 0.05661991983652115, 0.12065587192773819, 0.10697692632675171, 0.15951323509216309, 0.1055477038025856, 0.14385449886322021, 0.23090383410453796, 0.08539394289255142, 0.09938428550958633, 0.08322764188051224, 0.11896289885044098, 0.11546289920806885, 0.0627092570066452, 0.1015198826789856, 0.17440570890903473, 0.11644574254751205, 0.15138378739356995, 0.17151175439357758, 0.07174428552389145, 0.1994275599718094, 0.20994937419891357, 0.08254047483205795, NaN, NaN, NaN, 
NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.24387870728969574, 0.11191204935312271, 0.06428070366382599, 0.3038298189640045, 0.14750736951828003, 0.1200045570731163, 0.46686112880706787, 0.3116493225097656, 0.10273779183626175, 0.10795925557613373, 0.1416371762752533, 0.09460661560297012, 0.27618303894996643, 0.09149192273616791, 0.10828596353530884, 0.13584046065807343, 0.09117304533720016, 0.15590398013591766, 0.10968183726072311, 0.5585501790046692, 0.07535546272993088, 0.2762793302536011, 0.32588398456573486, 0.3246583938598633, 0.41251155734062195, 0.043567951768636703, 0.0185235645622015, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1039203479886055, 0.05052376165986061, 0.051659513264894485, 0.18036356568336487, 0.11265069991350174, 0.047071922570466995, 0.3453211784362793, 0.29340654611587524, 0.007079527713358402, 0.06730296462774277, 0.08055143058300018, 0.02563900128006935, 0.19650228321552277, 0.060815099626779556, 0.13184599578380585, 0.1674133688211441, 0.12648360431194305, 0.27492284774780273, 0.24355122447013855, 0.8769406676292419, 0.6096609234809875, 0.4704851806163788, 0.055198147892951965, 0.6140321493148804, 0.2705269455909729, 0.07450747489929199, 0.04471021145582199, 0.05369797348976135, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1947154402732849, 0.003113611601293087, 0.028957238420844078, 0.026910793036222458, 0.017121652141213417, 0.08169777691364288, 0.32467299699783325, 0.05661681666970253, 0.007502032909542322, 0.02869880571961403, 0.020577264949679375, 0.0070375413633883, 0.16551434993743896, 0.06083058565855026, 0.06852211803197861, 0.035074394196271896, 0.012203776277601719, 0.2713678479194641, 0.27628132700920105, 0.5399907231330872, 0.3242804706096649, 0.5765586495399475, 0.02925838902592659, 0.3159044086933136, 0.11935708671808243, 0.16010764241218567, 0.31936678290367126, 0.22831447422504425, 0.09149928390979767, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.018467016518115997, 0.004791167099028826, 0.015553582459688187, 0.021664531901478767, 0.025298617780208588, 0.1971224695444107, 0.13395515084266663, 0.1881190687417984, 0.05309745669364929, 0.018728721886873245, 0.018886514008045197, 0.023248562589287758, 0.008927382528781891, 0.03253133222460747, 0.130488321185112, 0.1354324370622635, 0.08839684724807739, 0.010535157285630703, 0.3809414505958557, 0.006101538427174091, 0.04204240441322327, 0.6714356541633606, 0.02054513990879059, 0.44751474261283875, 0.5217893123626709, 0.16833685338497162, 0.4138224124908447, 0.5945862531661987, 0.14406909048557281, 0.000551112403627485, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.4018593430519104, 0.09619066119194031, 0.047895513474941254, 0.0887020081281662, 0.04670756310224533, 0.17605426907539368, 0.21604543924331665, 0.1403813511133194, 0.0010993692558258772, 0.07762767374515533, 0.0958188846707344, 0.1024225577712059, 0.06565871089696884, 0.04857100546360016, 0.1717240959405899, 0.26645413041114807, 0.038747917860746384, 0.15441381931304932, 0.6166976094245911, 0.04416924715042114, 0.07849516719579697, 0.41569313406944275, 0.018940549343824387, 0.18770581483840942, 0.11268321424722672, 0.0962471142411232, 0.028718965128064156, 0.019747000187635422, 0.011864973232150078, 0.07090434432029724, 0.02976600080728531, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.31909966468811035, 0.26355716586112976, 0.16833621263504028, 0.334572434425354, 0.18670302629470825, 0.11206400394439697, 0.46585598587989807, 0.15377958118915558, 
0.014857469126582146, 0.07049962878227234, 0.1590365469455719, 0.09933225810527802, 0.23580892384052277, 0.09940709918737411, 0.11795931309461594, 0.26584282517433167, 0.03641113266348839, 0.24681606888771057, 0.03326011076569557, 0.5612249970436096, 0.11044078320264816, 0.038705065846443176, 0.07638699561357498, 0.20042885839939117, 0.41367095708847046, 0.16446417570114136, 0.05500950291752815, 0.0458536334335804, 0.038293108344078064, 0.05886702984571457, 0.005421455018222332, 0.03447017818689346, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.3361136317253113, 0.18450267612934113, 0.10482683777809143, 0.3672127425670624, 0.09347432106733322, 0.06302808225154877, 0.17493662238121033, 0.11965186893939972, 0.06742112338542938, 0.13331438601016998, 0.26999813318252563, 0.03264465183019638, 0.07908355444669724, 0.09376725554466248, 0.11511774361133575, 0.052208781242370605, 0.10399425774812698, 0.2661847770214081, 0.06582632660865784, 0.5218088626861572, 0.41107869148254395, 0.18652401864528656, 0.10915308445692062, 0.2499890774488449, 0.21385571360588074, 0.11996328830718994, 0.2169666439294815, 0.17541900277137756, 0.34852319955825806, 0.29904353618621826, 0.3583068549633026, 0.0660485103726387, 0.0772518739104271, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.271436870098114, 0.16103556752204895, 0.09723401814699173, 0.3494490087032318, 0.1582973301410675, 0.11393263936042786, 0.41371721029281616, 0.2938876152038574, 0.08068472146987915, 0.08301044255495071, 0.11968915909528732, 0.07779402285814285, 0.24559125304222107, 0.07589462399482727, 0.1087639182806015, 0.1452419012784958, 0.08285138756036758, 0.20162978768348694, 0.10332676023244858, 0.7324197292327881, 0.1815183311700821, 0.27558720111846924, 0.41944485902786255, 0.4614993929862976, 0.7035390734672546, 0.14779764413833618, 0.07484183460474014, 0.09274464100599289, 0.1956741362810135, 0.4027537703514099, 0.17018413543701172, 0.15845544636249542, 0.03217604011297226, 0.027846908196806908, NaN, NaN, NaN, NaN, NaN, NaN], [0.1091129332780838, 0.08970999717712402, 0.08557470142841339, 0.23009367287158966, 0.13180004060268402, 0.0638015940785408, 0.31095248460769653, 0.2814267873764038, 0.0075759077444672585, 0.039292845875024796, 0.06780961900949478, 0.013560868799686432, 0.15987654030323029, 0.04180291295051575, 0.12740370631217957, 0.06803880631923676, 0.0777740478515625, 0.3149954080581665, 0.17862020432949066, 0.9274848103523254, 0.6797788739204407, 0.28538215160369873, 0.04841757193207741, 0.524702250957489, 0.33268001675605774, 0.06556227803230286, 0.08207366615533829, 0.08443650603294373, 0.19301387667655945, 0.68314129114151, 0.7843886613845825, 0.24039600789546967, 0.0983721911907196, 0.035574402660131454, 0.04086223617196083, NaN, NaN, NaN, NaN, NaN], [0.4568881392478943, 0.01152532733976841, 0.12744615972042084, 0.16633041203022003, 0.05682089552283287, 0.22013583779335022, 0.46718865633010864, 0.06831676512956619, 0.011846139095723629, 0.051503561437129974, 0.07631707936525345, 0.017341753467917442, 0.16032609343528748, 0.06682911515235901, 0.06364742666482925, 0.004222579766064882, 0.012189013883471489, 0.38177239894866943, 0.23501808941364288, 0.3822557032108307, 0.273560494184494, 0.28252631425857544, 0.039307549595832825, 0.41269388794898987, 0.3037600517272949, 0.1617780327796936, 0.33094146847724915, 0.37525615096092224, 0.1388353556394577, 0.8142803907394409, 0.5916069149971008, 0.18943282961845398, 0.08566068857908249, 0.11778654158115387, 0.1818830519914627, 0.04465563967823982, NaN, NaN, NaN, NaN], [0.0270079392939806, 
0.003701634705066681, 0.024473953992128372, 0.035727839916944504, 0.031186459586024284, 0.22590965032577515, 0.1764952838420868, 0.1725662350654602, 0.06108492240309715, 0.017804577946662903, 0.01644762232899666, 0.018474329262971878, 0.0059660994447767735, 0.026993868872523308, 0.12890712916851044, 0.0780838280916214, 0.07355974614620209, 0.01093215774744749, 0.22770193219184875, 0.008550305850803852, 0.06503485888242722, 0.5060688257217407, 0.02145100012421608, 0.43843212723731995, 0.6872871518135071, 0.1969044953584671, 0.45010682940483093, 0.7415768504142761, 0.3103433847427368, 0.001054091495461762, 0.20113487541675568, 0.21400661766529083, 0.41673052310943604, 0.3260871469974518, 0.620118260383606, 0.12724098563194275, 0.0004952864837832749, NaN, NaN, NaN], [0.32686647772789, 0.10561588406562805, 0.10599718242883682, 0.08397059142589569, 0.05158340185880661, 0.22573474049568176, 0.19403943419456482, 0.08219113945960999, 0.0007591660832986236, 0.028280239552259445, 0.06139420345425606, 0.03943438082933426, 0.025857241824269295, 0.027251310646533966, 0.1435350626707077, 0.3314567506313324, 0.06341477483510971, 0.5618032217025757, 0.642646074295044, 0.27415919303894043, 0.23788774013519287, 0.38833677768707275, 0.08984735608100891, 0.42147237062454224, 0.6564009785652161, 0.2928015887737274, 0.1047874391078949, 0.1023104265332222, 0.06365151703357697, 0.39097070693969727, 0.14560170471668243, 0.23420175909996033, 0.08592629432678223, 0.02493405155837536, 0.011453422717750072, 0.006046658381819725, 0.1451905518770218, 0.005812718998640776, NaN, NaN], [0.21139562129974365, 0.21867576241493225, 0.17973701655864716, 0.29884445667266846, 0.19560806453227997, 0.11132223159074783, 0.28179141879081726, 0.10507592558860779, 0.014165982604026794, 0.04481332749128342, 0.1297360062599182, 0.07738039642572403, 0.2323194295167923, 0.09134778380393982, 0.12234959006309509, 0.21756824851036072, 0.03937938064336777, 0.3266570568084717, 0.05877631530165672, 0.5281912088394165, 0.11102446913719177, 0.03890432044863701, 0.10487684607505798, 0.2815292179584503, 0.4750865697860718, 0.3058159351348877, 0.11602579057216644, 0.12021853774785995, 0.06692790240049362, 0.1190272718667984, 0.019106050953269005, 0.21307361125946045, 0.15337608754634857, 0.06824280321598053, 0.040861621499061584, 0.032932352274656296, 0.052440475672483444, 0.005818615201860666, 0.0524408333003521, NaN], [0.2484172284603119, 0.2714419662952423, 0.13623963296413422, 0.33317360281944275, 0.14056812226772308, 0.16453251242637634, 0.23482279479503632, 0.2797185182571411, 0.08398787677288055, 0.13855448365211487, 0.19988903403282166, 0.12159004807472229, 0.21263501048088074, 0.1342880129814148, 0.11613592505455017, 0.21100056171417236, 0.13406150043010712, 0.10563220083713531, 0.15389345586299896, 0.10192565619945526, 0.07836726307868958, 0.22881029546260834, 0.05055452138185501, 0.24765580892562866, 0.48160815238952637, 0.2201593518257141, 0.1761431246995926, 0.21236160397529602, 0.20979638397693634, 0.10962515324354172, 0.09009265154600143, 0.0623038187623024, 0.17415094375610352, 0.13285446166992188, 0.11576873064041138, 0.10801524668931961, 0.0743527039885521, 0.03413216769695282, 0.027520645409822464, 0.06626196205615997]], [[0.0034671342000365257, 0.05013812705874443, 0.16192083060741425, 0.3595426082611084, 0.20735634863376617, 0.08139260113239288, 0.009979248046875, 0.05037669837474823, 0.0023427342530339956, 6.08037480560597e-05, 0.003484810469672084, 0.023961462080478668, 0.38460296392440796, 0.24992075562477112, 
0.13989195227622986, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.6699675917625427, 0.09382463991641998, 0.2939082980155945, 0.17940783500671387, 0.06414232403039932, 0.05161670595407486, 0.09315118193626404, 0.0025183490943163633, 0.0024716362822800875, 0.00784118939191103, 0.06077995523810387, 0.010742363519966602, 0.027031319215893745, 0.033606547862291336, 0.020909229293465614, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.2646949589252472, 0.029353437945246696, 0.21451972424983978, 0.10881441831588745, 0.06597915291786194, 0.0030848400201648474, 0.011694483458995819, 0.021679535508155823, 0.002872215351089835, 0.013158812187612057, 0.002100167330354452, 6.679360376438126e-05, 0.004520595073699951, 0.019191764295101166, 0.15631338953971863, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.040224652737379074, 0.02035309188067913, 0.3179875612258911, 0.11730892956256866, 0.5032125115394592, 0.4173433780670166, 0.2045394331216812, 0.3468436896800995, 0.0142394183203578, 0.034110911190509796, 0.0166803989559412, 0.0005183254834264517, 0.014372344128787518, 0.013749183155596256, 0.07609989494085312, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0153636634349823, 0.002009550342336297, 0.5970484614372253, 0.5668097734451294, 0.03708057850599289, 0.030387206003069878, 0.003990367520600557, 0.00021067907800897956, 0.0006718098884448409, 0.004241611808538437, 0.01157804112881422, 0.0002699779870454222, 0.0015558624872937799, 0.0029094237834215164, 0.04601351544260979, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03574535250663757, 0.009626551531255245, 0.4402237832546234, 0.2294078767299652, 0.26443710923194885, 0.01504121907055378, 0.016090886667370796, 0.007329131942242384, 0.002309221774339676, 0.0030864060390740633, 0.0026519321836531162, 0.0004272839578334242, 0.0011082548880949616, 0.01614256016910076, 0.03275791555643082, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [6.553631828865036e-05, 0.000357702374458313, 0.08750326931476593, 0.01436514500528574, 0.006815748754888773, 0.6623476147651672, 0.0034670215100049973, 0.0015547194052487612, 0.00029766204534098506, 1.8653441657079384e-05, 0.0003687080170493573, 0.00015007570618763566, 0.0009929342195391655, 0.00030579339363612235, 0.0016504023224115372, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0004548979632090777, 7.145033305278048e-05, 0.025678247213363647, 0.00989772193133831, 0.007979623042047024, 0.6904858946800232, 0.04177143797278404, 0.0005172804230824113, 0.00045151059748604894, 9.678980859462172e-05, 0.0003766386944334954, 0.00020437331113498658, 0.0009936039568856359, 0.0004823105991818011, 0.001104293274693191, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.02770741656422615, 0.15481999516487122, 0.0164713803678751, 0.029219333082437515, 0.01727348566055298, 0.0033895254600793123, 0.08395758271217346, 0.08886045962572098, 0.06561290472745895, 0.23454923927783966, 
0.01131775975227356, 0.00014876923523843288, 0.021633606404066086, 0.032435301691293716, 0.2441566288471222, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0002423129917588085, 0.0011915951035916805, 0.0022339578717947006, 0.006169029977172613, 0.0026169228367507458, 0.006970150861889124, 0.0023872333113104105, 0.020186979323625565, 0.5034035444259644, 0.061859097331762314, 0.01802009530365467, 0.08541904389858246, 0.11395227909088135, 0.12879255414009094, 0.06123032420873642, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0016445622313767672, 0.0006882954621687531, 0.0003155411686748266, 0.0014561355346813798, 0.0007120753289200366, 0.00010650769399944693, 0.0005508221802301705, 0.004306118004024029, 0.4519909620285034, 0.2298276424407959, 0.04858560487627983, 0.008956322446465492, 0.005770590156316757, 0.011063157580792904, 0.0306133683770895, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0032223593443632126, 0.0006265831179916859, 0.002176017500460148, 0.010606854222714901, 0.0010762742022052407, 6.259929068619385e-05, 0.0013370343949645758, 0.0014808439882472157, 0.030783534049987793, 0.7491747736930847, 0.34058046340942383, 0.00350938574410975, 0.02303031086921692, 0.0742756798863411, 0.006112673785537481, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.010601752437651157, 0.009935700334608555, 0.0694134384393692, 0.14514312148094177, 0.01701076701283455, 0.0001025431411108002, 0.003628269536420703, 0.007610301487147808, 0.1447119563817978, 0.2691461443901062, 0.7685887217521667, 0.06739932298660278, 0.05600086599588394, 0.567065417766571, 0.01997430995106697, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0020818221382796764, 0.006225256249308586, 0.007747206371277571, 0.02054281160235405, 0.00644321832805872, 0.00019787036580964923, 0.0007576930802315474, 0.0013290452770888805, 0.1748982071876526, 0.20870953798294067, 0.6057864427566528, 0.2165842056274414, 0.10265108197927475, 0.12960675358772278, 0.026959752663969994, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0929064005613327, 0.3412420153617859, 0.13197122514247894, 0.20421825349330902, 0.6308890581130981, 0.08085004985332489, 0.35388287901878357, 0.3416491150856018, 0.024628864601254463, 0.013967287726700306, 0.0762757882475853, 0.26007020473480225, 0.3328040838241577, 0.09019435197114944, 0.014360385946929455, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1659475415945053, 0.1821746528148651, 0.2680368423461914, 0.3257308900356293, 0.2135642170906067, 0.10952500998973846, 0.23729652166366577, 0.15246635675430298, 0.09328519552946091, 0.22413431107997894, 0.22322525084018707, 0.11237151175737381, 0.18681256473064423, 0.1572018712759018, 0.06837792694568634, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14290380477905273, 0.026570750400424004, 0.14845344424247742, 0.26635152101516724, 0.12476544827222824, 0.1522083431482315, 0.287058562040329, 0.16522644460201263, 
0.21008911728858948, 0.3761942982673645, 0.12840349972248077, 0.0757022351026535, 0.39944273233413696, 0.379029244184494, 0.1911974847316742, 0.0702696219086647, 0.2507307231426239, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00885845348238945, 0.005625984165817499, 0.0020030708983540535, 0.005766861606389284, 0.001782223698683083, 0.004346099682152271, 0.014438317157328129, 0.010037342086434364, 0.0175970196723938, 0.0067982920445501804, 0.003056151093915105, 0.005088370759040117, 0.0035549686290323734, 0.002117584692314267, 0.17935973405838013, 0.028418319299817085, 0.003963488154113293, 0.4144974946975708, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04871530085802078, 0.2322341799736023, 0.043161727488040924, 0.046935759484767914, 0.04166096821427345, 0.048159919679164886, 0.2838554382324219, 0.5679410696029663, 0.17445935308933258, 0.05776107683777809, 0.14550535380840302, 0.04300517588853836, 0.2332015484571457, 0.28196635842323303, 0.4675023853778839, 0.13786309957504272, 0.03506092354655266, 0.02415982447564602, 0.10726116597652435, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03277377411723137, 0.28776609897613525, 0.0018310850718989968, 0.006392122711986303, 0.0034063432831317186, 0.0006021481240168214, 0.02006486989557743, 0.09552518278360367, 0.02804744802415371, 0.060428690165281296, 0.004742977675050497, 0.018782831728458405, 0.016696294769644737, 0.023774143308401108, 0.16262513399124146, 0.011229841969907284, 0.008138949982821941, 0.04613415151834488, 0.2518063187599182, 0.013397655449807644, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.006045958958566189, 0.0958699956536293, 0.007954242639243603, 0.011606856249272823, 0.004544504452496767, 0.010406642220914364, 0.011899203062057495, 0.07300186902284622, 0.002370428293943405, 0.012239865958690643, 0.020374998450279236, 0.012496876530349255, 0.024265890941023827, 0.0274967048317194, 0.1423870474100113, 0.0016812672838568687, 0.012760624289512634, 0.002261990448459983, 0.2769384980201721, 0.03090759925544262, 0.0014064738061279058, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.008809137158095837, 0.13565093278884888, 0.03191651031374931, 0.0483417883515358, 0.028707973659038544, 0.039296794682741165, 0.018359076231718063, 0.07145766168832779, 0.13921810686588287, 0.01646633818745613, 0.06145479157567024, 0.028490308672189713, 0.056069642305374146, 0.13838331401348114, 0.19134177267551422, 0.11822758615016937, 0.07095540314912796, 0.030966516584157944, 0.03516996279358864, 0.2070395052433014, 0.02684318646788597, 0.2317354679107666, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.39272594451904297, 0.39728477597236633, 0.32111606001853943, 0.41796234250068665, 0.15293559432029724, 0.04586965963244438, 0.16940170526504517, 0.022719532251358032, 0.14239482581615448, 0.5121501088142395, 0.19016578793525696, 0.06530822068452835, 0.29211705923080444, 0.14742477238178253, 0.11553633958101273, 0.23311708867549896, 0.026411496102809906, 0.011159970425069332, 0.03808103874325752, 0.017219573259353638, 0.006694006733596325, 0.001702688867226243, 0.009211051277816296, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.009060109965503216, 
0.08736205101013184, 0.03623565658926964, 0.046393588185310364, 0.04293924570083618, 0.049119193106889725, 0.018734706565737724, 0.10957584530115128, 0.04821338504552841, 0.02008068934082985, 0.029284991323947906, 0.015971768647432327, 0.05779576674103737, 0.21830672025680542, 0.21264111995697021, 0.1427604705095291, 0.06787170469760895, 0.04101337492465973, 0.04024908319115639, 0.2669386863708496, 0.04579312726855278, 0.07587221264839172, 0.10059545934200287, 0.18715938925743103, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.02833615615963936, 0.24966742098331451, 0.06237170845270157, 0.03993965685367584, 0.10454770177602768, 0.019859671592712402, 0.03772445023059845, 0.19178973138332367, 0.012827831320464611, 0.03533304110169411, 0.024230163544416428, 0.054630037397146225, 0.032379381358623505, 0.08906079828739166, 0.17152637243270874, 0.059837497770786285, 0.10673120617866516, 0.06554628908634186, 0.047321293503046036, 0.26084935665130615, 0.05379262939095497, 0.09055614471435547, 0.09319713711738586, 0.334230899810791, 0.23545128107070923, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.015255320817232132, 0.21888743340969086, 0.1253896951675415, 0.08362822234630585, 0.12500159442424774, 0.02890017069876194, 0.03405824303627014, 0.07477163523435593, 0.0229325033724308, 0.01863025315105915, 0.044950928539037704, 0.0560457706451416, 0.04699615016579628, 0.08650227636098862, 0.1548503190279007, 0.06699422001838684, 0.48348554968833923, 0.10470042377710342, 0.2643885016441345, 0.49639153480529785, 0.11732041090726852, 0.061902400106191635, 0.1530170738697052, 0.11711295694112778, 0.23237623274326324, 0.09402092546224594, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.011826024390757084, 0.10608652234077454, 0.04723645746707916, 0.057715099304914474, 0.03395959734916687, 0.028910892084240913, 0.011586843058466911, 0.050380002707242966, 0.030421555042266846, 0.00583301018923521, 0.015118762850761414, 0.014350258745253086, 0.01606619358062744, 0.025515934452414513, 0.18496018648147583, 0.050390250980854034, 0.2627623975276947, 0.057036180049180984, 0.10587681084871292, 0.22481703758239746, 0.07078704982995987, 0.028480585664510727, 0.47086307406425476, 0.03990349546074867, 0.16108965873718262, 0.02393723465502262, 0.06960758566856384, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.015032858587801456, 0.5077551603317261, 0.07541441917419434, 0.08020945638418198, 0.10545077919960022, 0.2137133628129959, 0.01040775515139103, 0.09528981149196625, 0.09038985520601273, 0.012094871141016483, 0.025733938440680504, 0.06706724315881729, 0.03145073354244232, 0.09538157284259796, 0.34148263931274414, 0.29633763432502747, 0.1570599228143692, 0.07358378916978836, 0.08321648091077805, 0.01657349243760109, 0.02100137248635292, 0.019902318716049194, 0.5162196755409241, 0.03987365961074829, 0.018146652728319168, 0.026169516146183014, 0.00614600395783782, 0.07103840261697769, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.32250380516052246, 0.7984310388565063, 0.3962976634502411, 0.40014326572418213, 0.3554738759994507, 0.47898975014686584, 0.10853014886379242, 0.20243746042251587, 0.127571240067482, 0.2699570655822754, 0.16473528742790222, 0.08001074939966202, 0.03713205084204674, 0.14643853902816772, 0.4229389429092407, 0.1833065152168274, 0.0826280415058136, 0.06509751826524734, 0.017351830378174782, 0.08598462492227554, 0.028223805129528046, 0.03195580840110779, 
0.045467328280210495, 0.041934747248888016, 0.016390223056077957, 0.05298775061964989, 0.05077003315091133, 0.2718433141708374, 0.04039132222533226, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.023898553103208542, 0.03448064997792244, 0.007101188413798809, 0.020377272740006447, 0.09085186570882797, 0.008504875935614109, 0.01689869724214077, 0.021393392235040665, 0.03013733960688114, 0.004040753003209829, 0.000672544410917908, 0.0007860396872274578, 0.0003324192948639393, 0.0003073772240895778, 0.13160185515880585, 0.09722712635993958, 0.09857381135225296, 0.2290657013654709, 0.162257120013237, 0.3208743929862976, 0.7083525657653809, 0.08285251259803772, 0.05820265784859657, 0.14296579360961914, 0.06442547589540482, 0.3963678479194641, 0.1963234394788742, 0.13509824872016907, 0.0551372766494751, 0.1773844212293625, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.025859396904706955, 0.29733914136886597, 0.09033425897359848, 0.06196272000670433, 0.10889838635921478, 0.14661002159118652, 0.034964289516210556, 0.07059973478317261, 0.007527152542024851, 0.007617437280714512, 0.006072000600397587, 0.0492180734872818, 0.0069811418652534485, 0.011496509425342083, 0.22706106305122375, 0.1786596029996872, 0.03035295568406582, 0.011360704898834229, 0.0041356864385306835, 0.02253635786473751, 0.032254207879304886, 0.05765725299715996, 0.06512543559074402, 0.26075252890586853, 0.14487245678901672, 0.06064848601818085, 0.02561355009675026, 0.06785233318805695, 0.08367668837308884, 0.11658230423927307, 0.21664968132972717, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.014849718660116196, 0.1462036818265915, 0.11065799742937088, 0.06219353526830673, 0.08005399256944656, 0.016894571483135223, 0.010269397869706154, 0.02562439627945423, 0.009192260913550854, 0.009821194224059582, 0.015785057097673416, 0.019254932180047035, 0.01222837995737791, 0.011684795841574669, 0.16154925525188446, 0.02336198277771473, 0.027563903480768204, 0.02503703534603119, 0.002219978952780366, 0.024155667051672935, 0.005802824627608061, 0.011775066144764423, 0.03527237847447395, 0.0438326895236969, 0.16127318143844604, 0.07829897105693817, 0.04636809974908829, 0.16168944537639618, 0.17395752668380737, 0.5116502642631531, 0.11367138475179672, 0.24585914611816406, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.01973692700266838, 0.11480830609798431, 0.07148479670286179, 0.05237298831343651, 0.0777522474527359, 0.019268590956926346, 0.01592963933944702, 0.01235677395015955, 0.06519288569688797, 0.019938096404075623, 0.03185376524925232, 0.0271891038864851, 0.01742159202694893, 0.040164995938539505, 0.1837940812110901, 0.14312313497066498, 0.6151867508888245, 0.2511911392211914, 0.34089455008506775, 0.21357816457748413, 0.06974375993013382, 0.04017443582415581, 0.4436698257923126, 0.0627409890294075, 0.029346130788326263, 0.06214871257543564, 0.07426106929779053, 0.37162381410598755, 0.1908751130104065, 0.2730017304420471, 0.09601876139640808, 0.07787502557039261, 0.1985486000776291, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.006014276295900345, 0.07228019088506699, 0.029915854334831238, 0.031709808856248856, 0.01963544264435768, 0.01660715602338314, 0.00532315531745553, 0.03606380149722099, 0.029185649007558823, 0.0046777487732470036, 0.01710142381489277, 0.013257446698844433, 0.01389795821160078, 0.02201540581882, 0.16183340549468994, 0.05929486081004143, 0.1356429159641266, 0.08288607001304626, 0.1716676652431488, 0.17707081139087677, 0.11502664536237717, 0.023076828569173813, 0.41179341077804565, 0.03153251111507416, 
0.08080360293388367, 0.03793509677052498, 0.0956316813826561, 0.40457794070243835, 0.3355584144592285, 0.2116643786430359, 0.2117510586977005, 0.0911363810300827, 0.13469243049621582, 0.08244834095239639, NaN, NaN, NaN, NaN, NaN, NaN], [0.008549164049327374, 0.34144893288612366, 0.03957316279411316, 0.03764811158180237, 0.04039980471134186, 0.07271253317594528, 0.00613941578194499, 0.04612124711275101, 0.0911136344075203, 0.008750539273023605, 0.01715807057917118, 0.03749352693557739, 0.024577608332037926, 0.06848984956741333, 0.2503378689289093, 0.34530380368232727, 0.14280815422534943, 0.08469259738922119, 0.20386184751987457, 0.018106382340192795, 0.025206930935382843, 0.03376462310552597, 0.665645956993103, 0.06945709139108658, 0.030968131497502327, 0.031062953174114227, 0.015101979486644268, 0.10170532017946243, 0.03453005850315094, 0.05652596056461334, 0.028510402888059616, 0.036133769899606705, 0.04489430412650108, 0.010548176243901253, 0.07425779104232788, NaN, NaN, NaN, NaN, NaN], [0.1472499966621399, 0.4703251123428345, 0.2558133602142334, 0.283985435962677, 0.21470209956169128, 0.17662864923477173, 0.07007063925266266, 0.06038873642683029, 0.20766907930374146, 0.26984694600105286, 0.16889145970344543, 0.27114859223365784, 0.03473396599292755, 0.13903996348381042, 0.2962591350078583, 0.21361097693443298, 0.09641434252262115, 0.0472431480884552, 0.030436551198363304, 0.12823571264743805, 0.024378983303904533, 0.03781319037079811, 0.04478050768375397, 0.04302188381552696, 0.031242409721016884, 0.06916327774524689, 0.08240062743425369, 0.2609483301639557, 0.04106062278151512, 0.01303931511938572, 0.014160559512674809, 0.011109860613942146, 0.034855347126722336, 0.10407929867506027, 0.21024775505065918, 0.08525354415178299, NaN, NaN, NaN, NaN], [0.020655758678913116, 0.020222418010234833, 0.006879583932459354, 0.019070995971560478, 0.07609020173549652, 0.006032301113009453, 0.015974652022123337, 0.01717195473611355, 0.05267442390322685, 0.004277344327419996, 0.0005684247589670122, 0.0007490122807212174, 0.0002994663082063198, 0.0002370573638472706, 0.12958088517189026, 0.056013792753219604, 0.04104574769735336, 0.13420559465885162, 0.14404895901679993, 0.30753612518310547, 0.5552563667297363, 0.06356479972600937, 0.02527950517833233, 0.09324341267347336, 0.03306487947702408, 0.2522013187408447, 0.14255186915397644, 0.09901494532823563, 0.06439376622438431, 0.10042564570903778, 0.43083739280700684, 0.20968028903007507, 0.35324180126190186, 0.2700602114200592, 0.23262809216976166, 0.11776822060346603, 0.14138048887252808, NaN, NaN, NaN], [0.009374987334012985, 0.23445867002010345, 0.05258592590689659, 0.020285839214920998, 0.024131227284669876, 0.0535256564617157, 0.01552440132945776, 0.032435644418001175, 0.006646827794611454, 0.005740212742239237, 0.005195626523345709, 0.07125341892242432, 0.0043562185019254684, 0.01014760322868824, 0.17807012796401978, 0.1699744164943695, 0.02438814751803875, 0.00377153092995286, 0.0020952692721039057, 0.017941365018486977, 0.009907160885632038, 0.04197421669960022, 0.08005423098802567, 0.16825814545154572, 0.08759146183729172, 0.037892259657382965, 0.02378804422914982, 0.12696562707424164, 0.21072204411029816, 0.039158232510089874, 0.12900760769844055, 0.018357207998633385, 0.09957201033830643, 0.024237502366304398, 0.12091250717639923, 0.2524404227733612, 0.044468626379966736, 0.19958341121673584, NaN, NaN], [0.018758203834295273, 0.11843696236610413, 0.09101122617721558, 0.0610043928027153, 0.06165887042880058, 0.012400476261973381, 
0.011786350980401039, 0.021215293556451797, 0.014211799949407578, 0.011016220785677433, 0.02130991406738758, 0.02418670989573002, 0.015627985820174217, 0.013993974775075912, 0.14536960422992706, 0.016944430768489838, 0.011726072989404202, 0.017351148650050163, 0.0028529188130050898, 0.013441222719848156, 0.005811003036797047, 0.010734970681369305, 0.020825698971748352, 0.04144507274031639, 0.0777476355433464, 0.07330787181854248, 0.0589311420917511, 0.1305314600467682, 0.09686601907014847, 0.49986732006073, 0.09861493855714798, 0.24486178159713745, 0.2709232568740845, 0.08328418433666229, 0.1665872186422348, 0.2741791903972626, 0.5570544600486755, 0.09308093041181564, 0.18428745865821838, NaN], [0.03985379636287689, 0.12957410514354706, 0.13386031985282898, 0.10592924803495407, 0.09455320239067078, 0.03913174197077751, 0.052976641803979874, 0.03812992200255394, 0.11070051789283752, 0.042073190212249756, 0.05433963984251022, 0.058929286897182465, 0.03380222246050835, 0.05054538697004318, 0.1317562311887741, 0.043635401874780655, 0.027883753180503845, 0.11735352873802185, 0.09225393831729889, 0.11462916433811188, 0.1478782296180725, 0.04645288363099098, 0.049018505960702896, 0.08540874719619751, 0.16189652681350708, 0.081883005797863, 0.13365384936332703, 0.17616337537765503, 0.16547891497612, 0.3400772511959076, 0.14388780295848846, 0.2768324613571167, 0.1609276533126831, 0.18515954911708832, 0.2950800061225891, 0.32982173562049866, 0.4366631507873535, 0.3681013882160187, 0.34051525592803955, 0.05319627374410629]], [[0.014275058172643185, 0.006687531713396311, 0.3026585280895233, 0.06917963922023773, 0.2396276444196701, 0.6229325532913208, 0.15904799103736877, 0.13992713391780853, 0.10272591561079025, 0.6685669422149658, 0.22624024748802185, 0.09492585808038712, 0.40837499499320984, 0.2735627591609955, 0.011893448419868946, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.021194536238908768, 0.020265106111764908, 0.1736137419939041, 0.08712188154459, 0.3174395263195038, 0.3545694649219513, 0.3640749752521515, 0.11553992331027985, 0.3069344758987427, 0.7487083673477173, 0.45964598655700684, 0.41950592398643494, 0.6157799363136292, 0.47228363156318665, 0.04039919748902321, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.008898869156837463, 0.002019912237301469, 0.021509699523448944, 0.0182319525629282, 0.07474909722805023, 0.02385670319199562, 0.013716273009777069, 0.008799813687801361, 0.3437807857990265, 0.008914400823414326, 0.012629772536456585, 0.10342472046613693, 0.0370708666741848, 0.023541903123259544, 0.18654775619506836, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.01223641075193882, 0.003142833709716797, 0.006001354195177555, 0.003996475599706173, 0.0579916350543499, 0.01896491087973118, 0.01948327198624611, 0.013184066861867905, 0.30560916662216187, 0.015957718715071678, 0.016950437799096107, 0.06207568570971489, 0.044481322169303894, 0.01894378289580345, 0.19150091707706451, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.003971019294112921, 0.0012432326329872012, 0.005908531602472067, 0.0021760377567261457, 0.002044213702902198, 0.01004379615187645, 0.01574278064072132, 0.026324355974793434, 0.4105670154094696, 0.05117517337203026, 0.02775881439447403, 
0.023424910381436348, 0.009920927695930004, 0.011210974305868149, 0.16597995162010193, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.007421860471367836, 0.006305157672613859, 0.011464249342679977, 0.020268600434064865, 0.025753991678357124, 0.031131377443671227, 0.03418951481580734, 0.0052986773662269115, 0.5788748264312744, 0.46168622374534607, 0.07252157479524612, 0.06022901460528374, 0.017210712656378746, 0.04054110497236252, 0.15131165087223053, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.001541785546578467, 0.0008907613810151815, 0.004846525378525257, 0.001811343478038907, 0.0069520194083452225, 0.008084121160209179, 0.021458715200424194, 0.02802192233502865, 0.3832707405090332, 0.25552085041999817, 0.014592574909329414, 0.01065820176154375, 0.012523604556918144, 0.010731800459325314, 0.22416816651821136, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.004116748925298452, 0.0016883857315406203, 0.014749680645763874, 0.00869818776845932, 0.01003838051110506, 0.007631313521414995, 0.02068890631198883, 0.027104953303933144, 0.13497500121593475, 0.6378710865974426, 0.10288828611373901, 0.0942029282450676, 0.028772620484232903, 0.05935161933302879, 0.21764545142650604, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.06222981959581375, 0.01881357654929161, 0.00486758491024375, 0.015509632416069508, 0.0009378677350468934, 0.004574655555188656, 0.005093523766845465, 0.0076056248508393764, 0.02507362887263298, 0.02107030339539051, 0.007815904915332794, 0.010442771948873997, 0.011698074638843536, 0.006942160427570343, 0.31572407484054565, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.01727244071662426, 0.009210732765495777, 0.005953751504421234, 0.0013454181607812643, 0.005081892944872379, 0.04435739293694496, 0.006434922106564045, 0.0007962443050928414, 0.0007702711154706776, 0.16453301906585693, 0.5625144839286804, 0.34227296710014343, 0.6355522871017456, 0.6161591410636902, 0.02771596610546112, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.12786830961704254, 0.008172453381121159, 0.0017843057867139578, 0.004017683211714029, 0.007877650670707226, 0.0018398476531729102, 0.01566770300269127, 0.0026914728805422783, 0.0035052604507654905, 0.0037441153544932604, 0.011492998339235783, 0.10472051054239273, 0.01954079605638981, 0.025050928816199303, 0.24727097153663635, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1465907245874405, 0.037033673375844955, 0.013877127319574356, 0.00413108617067337, 0.00966043584048748, 0.02326187677681446, 0.04576379433274269, 0.010370912030339241, 0.05009477958083153, 0.002161832293495536, 0.012562266550958157, 0.08835282921791077, 0.018735390156507492, 0.07781965285539627, 0.21298982203006744, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.018177246674895287, 0.009594686329364777, 0.010616189800202847, 0.003939185757189989, 0.020018288865685463, 0.006944165099412203, 0.014553648419678211, 0.014575640670955181, 
0.031773608177900314, 0.0201406329870224, 0.008282337337732315, 0.02822018228471279, 0.008926213718950748, 0.030271533876657486, 0.18345791101455688, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.029857823625206947, 0.018949948251247406, 0.0061294399201869965, 0.002908851485699415, 0.00919707678258419, 0.00952958408743143, 0.01205661240965128, 0.00758303003385663, 0.05086279660463333, 0.007759919855743647, 0.006360263098031282, 0.02717713639140129, 0.006157578434795141, 0.027468249201774597, 0.21562480926513672, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.035946138203144073, 0.021175134927034378, 0.025809520855545998, 0.0228139478713274, 0.02454732172191143, 0.008901212364435196, 0.01817207969725132, 0.024075007066130638, 0.042662542313337326, 0.10151555389165878, 0.03429628908634186, 0.025050567463040352, 0.015684176236391068, 0.028640326112508774, 0.23519039154052734, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.038382355123758316, 0.16509199142456055, 0.03795319423079491, 0.018471574410796165, 0.017937200143933296, 0.20822547376155853, 0.036850690841674805, 0.07025959342718124, 0.026183662936091423, 0.008891633711755276, 0.011525453999638557, 0.06559614092111588, 0.10240377485752106, 0.05705304443836212, 0.19186913967132568, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.18736660480499268, 0.12802250683307648, 0.06000450998544693, 0.07085607945919037, 0.02492770366370678, 0.13308653235435486, 0.01379183866083622, 0.01460492704063654, 0.018005041405558586, 0.18972568213939667, 0.18918126821517944, 0.05261359363794327, 0.08419474214315414, 0.039842329919338226, 0.12843605875968933, 0.1755252629518509, 0.00892956368625164, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.003212069161236286, 0.04924406483769417, 0.010131219401955605, 0.0015629208646714687, 0.009065762162208557, 0.04507109895348549, 0.003221129300072789, 0.07382506877183914, 0.0011923180427402258, 0.004047631751745939, 0.006328214425593615, 0.012952281162142754, 0.0641837865114212, 0.02541324496269226, 0.1715373396873474, 0.18403629958629608, 0.12486936897039413, 0.01289399154484272, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.002438034862279892, 0.0007996301865205169, 0.10929557681083679, 0.030698396265506744, 0.007961505092680454, 0.21520712971687317, 0.0018748894799500704, 0.0015670642023906112, 0.00039643081254325807, 0.0017966092564165592, 0.010619523003697395, 0.0026792865246534348, 0.0035868084523826838, 0.001077426946721971, 0.003137440187856555, 0.07995349168777466, 0.1140136644244194, 0.16089488565921783, 0.271826833486557, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04913554713129997, 0.023452362045645714, 0.16805477440357208, 0.2746557891368866, 0.369334876537323, 0.025402046740055084, 0.03595297038555145, 0.27975642681121826, 0.005478397477418184, 0.044800374656915665, 0.028408128768205643, 0.025396348908543587, 0.1202942430973053, 0.22760754823684692, 0.12602998316287994, 0.19368642568588257, 0.20833823084831238, 0.38513559103012085, 0.0724099725484848, 0.026710418984293938, NaN, 
NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0008230121457017958, 0.006709535606205463, 0.005090394522994757, 0.005009432788938284, 0.0009200142812915146, 0.002589132636785507, 0.003276216797530651, 0.011904137209057808, 0.0009605096420273185, 0.0016532291192561388, 0.001647727913223207, 0.0010296034161001444, 0.00474548852071166, 0.004530362784862518, 0.14385877549648285, 0.2920932173728943, 0.20408804714679718, 0.47836723923683167, 0.009784400463104248, 0.41401228308677673, 0.0022880665492266417, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.011407818645238876, 0.11073090881109238, 0.11066732555627823, 0.07063236832618713, 0.2326628416776657, 0.057718440890312195, 0.005228970665484667, 0.12933272123336792, 0.010014788247644901, 0.0034599530044943094, 0.015450170263648033, 0.004393222741782665, 0.010258005000650883, 0.00790967233479023, 0.16524673998355865, 0.2459677904844284, 0.013399376533925533, 0.165635347366333, 0.0016970435390248895, 0.00861914549022913, 0.0019094902090728283, 0.006659353617578745, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.024886149913072586, 0.019822845235466957, 0.050577834248542786, 0.042761147022247314, 0.013624369166791439, 0.03171548992395401, 0.03447520360350609, 0.057101696729660034, 0.018126925453543663, 0.012612801045179367, 0.056599393486976624, 0.005686976481229067, 0.022324958816170692, 0.021004129201173782, 0.18438492715358734, 0.1659669429063797, 0.3024148941040039, 0.4638516902923584, 0.19814886152744293, 0.06386706978082657, 0.37022748589515686, 0.096834197640419, 0.004976118449121714, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.012148641981184483, 0.047028496861457825, 0.07792042940855026, 0.1455426812171936, 0.3985011875629425, 0.08270914107561111, 0.0031603944953531027, 0.07123681157827377, 0.020226983353495598, 0.005742877256125212, 0.009367674589157104, 0.007002389058470726, 0.013849785551428795, 0.006732230074703693, 0.14449873566627502, 0.23605915904045105, 0.015010624192655087, 0.29689958691596985, 0.002272083656862378, 0.02557971514761448, 0.04829570651054382, 0.03933914750814438, 0.012097989208996296, 0.005491157062351704, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.029934342950582504, 0.04287242144346237, 0.10493571311235428, 0.10647397488355637, 0.01039193756878376, 0.1410648375749588, 0.06155749782919884, 0.08983614295721054, 0.05490254610776901, 0.038721270859241486, 0.021267540752887726, 0.05536682903766632, 0.019229264929890633, 0.008436290547251701, 0.15105655789375305, 0.2229652851819992, 0.011020033620297909, 0.07613904774188995, 0.00492003234103322, 0.11613531410694122, 0.12462546676397324, 0.03799906745553017, 0.029671484604477882, 0.022334527224302292, 0.003809461137279868, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.009979508817195892, 0.08308109641075134, 0.026161497458815575, 0.023276647552847862, 0.0017319537000730634, 0.056630972772836685, 0.012614267878234386, 0.041058339178562164, 0.026752248406410217, 0.01169703807681799, 0.011314285919070244, 0.007283498533070087, 0.05053415521979332, 0.019243547692894936, 0.16277745366096497, 0.30055463314056396, 0.03860635682940483, 0.08235271275043488, 0.12519411742687225, 0.07496307790279388, 0.24307869374752045, 0.02970520593225956, 0.043270040303468704, 0.01804984174668789, 0.008444367907941341, 
0.04573319852352142, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04712976887822151, 0.24274323880672455, 0.053717970848083496, 0.06948067992925644, 0.009206406772136688, 0.0471884086728096, 0.010105792433023453, 0.05801715701818466, 0.01891178824007511, 0.07684698700904846, 0.07729421555995941, 0.042662668973207474, 0.10241091996431351, 0.038032110780477524, 0.15563422441482544, 0.361846923828125, 0.0072926427237689495, 0.07028269022703171, 0.038334887474775314, 0.02117738127708435, 0.035939738154411316, 0.03011121228337288, 0.01985063962638378, 0.03699057549238205, 0.0448327511548996, 0.07655268162488937, 0.03217002749443054, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.009955390356481075, 0.06358544528484344, 0.028598172590136528, 0.04170457646250725, 0.01363537646830082, 0.011423949152231216, 0.003101062262430787, 0.04170127958059311, 0.01145926769822836, 0.01274544931948185, 0.020664334297180176, 0.15329574048519135, 0.20515742897987366, 0.07666952162981033, 0.13521607220172882, 0.18510019779205322, 0.0857149139046669, 0.2959531545639038, 0.10870446264743805, 0.034602705389261246, 0.04019882157444954, 0.02403290942311287, 0.05409723520278931, 0.04566982761025429, 0.19149497151374817, 0.23549742996692657, 0.074503093957901, 0.01255789864808321, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.006747167091816664, 0.006801524665206671, 0.007903891615569592, 0.00237295706756413, 0.0009535709978081286, 0.0006887177005410194, 0.0011137888068333268, 0.0005580680444836617, 0.004365934059023857, 0.0043631866574287415, 0.004836279433220625, 0.0014166004257276654, 0.1882382482290268, 0.04424351081252098, 0.006875277496874332, 0.03710656613111496, 0.054964251816272736, 0.037898506969213486, 0.3724515438079834, 0.058691613376140594, 0.03363177552819252, 0.06933214515447617, 0.05247700959444046, 0.15643684566020966, 0.589249849319458, 0.349843829870224, 0.29659491777420044, 0.2287619560956955, 0.05358140170574188, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0040101236663758755, 0.00047035442548803985, 0.0008357138140127063, 0.009736553765833378, 0.00025759977870620787, 2.9679033104912378e-05, 0.008525178767740726, 0.0036214631982147694, 0.0009930779924616218, 0.0008531230851076543, 0.0029921825043857098, 7.93160234024981e-06, 6.746472354279831e-05, 0.0017078705132007599, 0.13162609934806824, 0.2688547670841217, 0.1434442549943924, 0.18350595235824585, 0.07485228031873703, 0.0647219642996788, 0.04773847386240959, 0.14254990220069885, 0.03905782103538513, 0.2126167118549347, 0.24802155792713165, 0.30339401960372925, 0.17472584545612335, 0.03891041502356529, 0.02338952198624611, 0.026767900213599205, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.021027032285928726, 0.04388788715004921, 0.07337366044521332, 0.13240061700344086, 0.005691900383681059, 0.08179081231355667, 0.010154702700674534, 0.019539857283234596, 0.013572044670581818, 0.03972425311803818, 0.14196330308914185, 0.0491810142993927, 0.029326222836971283, 0.024830663576722145, 0.1775946319103241, 0.1340402513742447, 0.12347351759672165, 0.42842522263526917, 0.0631304681301117, 0.06392616778612137, 0.1770109236240387, 0.11116458475589752, 0.04706185683608055, 0.09571156650781631, 0.3872493505477905, 0.5415271520614624, 0.14801958203315735, 0.013348261825740337, 0.016769861802458763, 0.019784821197390556, 0.012107723392546177, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.020570920780301094, 0.07008225470781326, 0.05771828070282936, 
0.10093566030263901, 0.0037175160832703114, 0.10588520765304565, 0.008791210129857063, 0.07720224559307098, 0.037850137799978256, 0.016810759902000427, 0.0763774886727333, 0.06772230565547943, 0.10185997188091278, 0.02133399061858654, 0.1501101702451706, 0.3128407299518585, 0.02314484678208828, 0.20690661668777466, 0.0038596922531723976, 0.10119188576936722, 0.375572144985199, 0.077932208776474, 0.16011959314346313, 0.07805528491735458, 0.020400837063789368, 0.2237216979265213, 0.1006372720003128, 0.022764090448617935, 0.005061473231762648, 0.0205483790487051, 0.0018506759079173207, 0.001139476546086371, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.027059482410550117, 0.22707954049110413, 0.13379518687725067, 0.08346803486347198, 0.011664706282317638, 0.1994924694299698, 0.013729198835790157, 0.07924864441156387, 0.10303384810686111, 0.02253318764269352, 0.06352351605892181, 0.13561668992042542, 0.3492315113544464, 0.13069112598896027, 0.12187084555625916, 0.5802629590034485, 0.17577120661735535, 0.22907592356204987, 0.3224048614501953, 0.21584153175354004, 0.3719359040260315, 0.08852899819612503, 0.18978306651115417, 0.06894023716449738, 0.008546161465346813, 0.34136468172073364, 0.44251179695129395, 0.07915834337472916, 0.27557075023651123, 0.0915302038192749, 0.0036887326277792454, 0.0038842300418764353, 0.015524323098361492, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.038929592818021774, 0.2334582358598709, 0.12089657783508301, 0.17347271740436554, 0.023068996146321297, 0.04853734001517296, 0.008499456569552422, 0.0867975577712059, 0.02351396717131138, 0.04524386301636696, 0.12492679059505463, 0.06575564295053482, 0.10587428510189056, 0.055128976702690125, 0.1414995789527893, 0.5194967985153198, 0.010316978208720684, 0.10247951745986938, 0.03023943491280079, 0.02351299114525318, 0.05376119539141655, 0.03751303628087044, 0.02858700230717659, 0.03933052346110344, 0.026450933888554573, 0.16396890580654144, 0.08825679868459702, 0.01957540772855282, 0.02957809716463089, 0.0652899444103241, 0.003373907646164298, 0.007670924998819828, 0.004321575630456209, 0.024295708164572716, NaN, NaN, NaN, NaN, NaN, NaN], [0.011872883886098862, 0.08469298481941223, 0.054403409361839294, 0.08831894397735596, 0.02684788778424263, 0.021699469536542892, 0.0027920349966734648, 0.05190650746226311, 0.006984782870858908, 0.008844600059092045, 0.02751598134636879, 0.22613400220870972, 0.15431185066699982, 0.06476734578609467, 0.1412026435136795, 0.2508450150489807, 0.1962328553199768, 0.3596697747707367, 0.1504865288734436, 0.029224414378404617, 0.0663013905286789, 0.043777331709861755, 0.06269483268260956, 0.06556038558483124, 0.2250475436449051, 0.35171735286712646, 0.22191122174263, 0.018188640475273132, 0.026326660066843033, 0.017122289165854454, 0.0037187051493674517, 0.024730468168854713, 0.035062648355960846, 0.09351257234811783, 0.011442800983786583, NaN, NaN, NaN, NaN, NaN], [0.015115483663976192, 0.08628259599208832, 0.023322032764554024, 0.012461238540709019, 0.0028755213133990765, 0.010226217098534107, 0.0010302395094186068, 0.002081838669255376, 0.003762529231607914, 0.013111302629113197, 0.0290949996560812, 0.013309521600604057, 0.22778895497322083, 0.05992528051137924, 0.00796937569975853, 0.007168593350797892, 0.033368390053510666, 0.00873665139079094, 0.16062632203102112, 0.028196215629577637, 0.02527499757707119, 0.06866460293531418, 0.0198657363653183, 0.1544157713651657, 0.2752910256385803, 0.14698350429534912, 0.1242247000336647, 0.13061578571796417, 0.010920656844973564, 0.0055906628258526325, 
0.006986986380070448, 0.030699225142598152, 0.36674854159355164, 0.2189747393131256, 0.2510429620742798, 0.04264682158827782, NaN, NaN, NaN, NaN], [0.0057023135013878345, 0.0003758604871109128, 0.0009645622340030968, 0.01432577334344387, 0.00027227052487432957, 3.7724938010796905e-05, 0.007459490094333887, 0.0037525389343500137, 0.001061747083440423, 0.0008801367366686463, 0.0023195864632725716, 8.150678695528768e-06, 4.0667833673069254e-05, 0.001007204526104033, 0.12961283326148987, 0.317547470331192, 0.16016888618469238, 0.1976199448108673, 0.10644932836294174, 0.09830258786678314, 0.07801979035139084, 0.301817923784256, 0.05034731701016426, 0.32512444257736206, 0.2241876721382141, 0.4657731354236603, 0.2891538441181183, 0.08093820512294769, 0.06031876429915428, 0.06730521470308304, 0.14267991483211517, 0.289673775434494, 0.1076083853840828, 0.2949788272380829, 0.0365237332880497, 0.015645001083612442, 0.03993191570043564, NaN, NaN, NaN], [0.017900969833135605, 0.026770949363708496, 0.15903817117214203, 0.31877970695495605, 0.014844128862023354, 0.10845804959535599, 0.00868347566574812, 0.015460771508514881, 0.008762474171817303, 0.01190071552991867, 0.07999671250581741, 0.053750935941934586, 0.013735906220972538, 0.020958656445145607, 0.15606556832790375, 0.17233391106128693, 0.22507980465888977, 0.300968736410141, 0.03457535058259964, 0.06539295613765717, 0.2556630074977875, 0.12555503845214844, 0.08745130896568298, 0.10011813044548035, 0.13041436672210693, 0.501103937625885, 0.14929187297821045, 0.03132137656211853, 0.02265048772096634, 0.03383776918053627, 0.006481703836470842, 0.011523596942424774, 0.35894638299942017, 0.1662973165512085, 0.034177642315626144, 0.02702290564775467, 0.036704160273075104, 0.014952532015740871, NaN, NaN], [0.022256335243582726, 0.07135839015245438, 0.07359576225280762, 0.12423767894506454, 0.006224590353667736, 0.13500085473060608, 0.008429165929555893, 0.08156562596559525, 0.02983916364610195, 0.013062523677945137, 0.10225346684455872, 0.04065772891044617, 0.06899033486843109, 0.012502058409154415, 0.13831046223640442, 0.4115316569805145, 0.042032964527606964, 0.21366682648658752, 0.010602481663227081, 0.11737099289894104, 0.5779745578765869, 0.13523340225219727, 0.2636784315109253, 0.170937180519104, 0.020469455048441887, 0.3112620711326599, 0.17165400087833405, 0.044973500072956085, 0.006653682328760624, 0.053596071898937225, 0.008654352277517319, 0.002382548525929451, 0.02675137296319008, 0.09427332878112793, 0.01890433207154274, 0.002222384326159954, 0.018390605226159096, 0.0013299400452524424, 0.0009657714981585741, NaN], [0.016071150079369545, 0.06728275120258331, 0.025518205016851425, 0.023689931258559227, 0.0069392030127346516, 0.04150809720158577, 0.00898416806012392, 0.016712933778762817, 0.005143268499523401, 0.020111138001084328, 0.03020956739783287, 0.01359627302736044, 0.018198341131210327, 0.01637156493961811, 0.1379418522119522, 0.38502925634384155, 0.1563987135887146, 0.13578397035598755, 0.1404726654291153, 0.14828255772590637, 0.28480827808380127, 0.15350891649723053, 0.09994281083345413, 0.06321649998426437, 0.030282480642199516, 0.13266463577747345, 0.1722954362630844, 0.07113035768270493, 0.024887708947062492, 0.016665330156683922, 0.03949398547410965, 0.020136239007115364, 0.01368448045104742, 0.09379612654447556, 0.030771953985095024, 0.011002926155924797, 0.007083212956786156, 0.009242233820259571, 0.007993990555405617, 0.018528543412685394]], [[0.29903000593185425, 0.5539957880973816, 0.06723504513502121, 0.06922264397144318, 
0.12363186478614807, 0.04431891441345215, 0.10694187879562378, 0.08094406872987747, 0.15170463919639587, 0.05897890776395798, 0.026665056124329567, 0.04277891665697098, 0.011532573029398918, 0.016366619616746902, 0.08233406394720078, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.030788322910666466, 0.06814564764499664, 0.1441766321659088, 0.42568475008010864, 0.23481200635433197, 0.09723259508609772, 0.20801249146461487, 0.2833361029624939, 0.12989479303359985, 0.09075285494327545, 0.02217184565961361, 0.10632100701332092, 0.07123817503452301, 0.18399499356746674, 0.11842577904462814, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.21215111017227173, 0.2570435404777527, 0.03298918902873993, 0.11753708124160767, 0.2531988024711609, 0.2834656238555908, 0.13087181746959686, 0.14389817416667938, 0.06408312171697617, 0.023736948147416115, 0.043677639216184616, 0.007582403719425201, 0.08098249137401581, 0.042930904775857925, 0.09848955273628235, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.24232596158981323, 0.4370230436325073, 0.27921250462532043, 0.32216426730155945, 0.14763100445270538, 0.1446210741996765, 0.041608523577451706, 0.05782362446188927, 0.03667302429676056, 0.015881532803177834, 0.09886573255062103, 0.0007486737449653447, 0.022804880514740944, 0.01436265092343092, 0.04328664019703865, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0417991504073143, 0.06808368116617203, 0.22980956733226776, 0.06044253334403038, 0.09120408445596695, 0.3664403557777405, 0.01738058589398861, 0.026107804849743843, 0.16878005862236023, 0.007388730999082327, 0.6907519698143005, 0.00283504044637084, 0.004864559043198824, 0.017621232196688652, 0.04920867085456848, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.07025078684091568, 0.08007846027612686, 0.18737106025218964, 0.08649075031280518, 0.14398247003555298, 0.03926409035921097, 0.10999412834644318, 0.10028164088726044, 0.2733333110809326, 0.07497494667768478, 0.6277027726173401, 0.03760387748479843, 0.07242996245622635, 0.04469411447644234, 0.0635850802063942, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.18292218446731567, 0.29889917373657227, 0.16216641664505005, 0.041324593126773834, 0.08738134056329727, 0.03374062106013298, 0.10780933499336243, 0.1685270518064499, 0.3661736249923706, 0.13795819878578186, 0.7607439160346985, 0.022037923336029053, 0.11896573007106781, 0.017960727214813232, 0.09792909026145935, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.29104405641555786, 0.7119240164756775, 0.16990531980991364, 0.02345188707113266, 0.15646961331367493, 0.008449066430330276, 0.06418811529874802, 0.018176060169935226, 0.3091927766799927, 0.08911041170358658, 0.3005200922489166, 0.04236089810729027, 0.2996547222137451, 0.08733220398426056, 0.07523740082979202, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.046947941184043884, 0.14375551044940948, 0.004344047512859106, 0.0067795743234455585, 
0.02948000282049179, 0.08397668600082397, 0.06400846689939499, 0.18865461647510529, 0.023663662374019623, 0.08527978509664536, 0.02815503440797329, 0.04117048531770706, 0.5833349823951721, 0.0677085593342781, 0.23153413832187653, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.08349642902612686, 0.4532567262649536, 0.004409583285450935, 0.009004302322864532, 0.007938031107187271, 0.13749390840530396, 0.1858609914779663, 0.31525370478630066, 0.018453413620591164, 0.12712040543556213, 0.04680929332971573, 0.12408707290887833, 0.13737666606903076, 0.12311573326587677, 0.142713725566864, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.05042501911520958, 0.07026762515306473, 0.0020696106366813183, 0.010109566152095795, 0.07710029184818268, 0.05610239878296852, 0.05948542803525925, 0.19247274100780487, 0.001940111513249576, 0.05155838653445244, 0.04620450362563133, 0.20989066362380981, 0.485702246427536, 0.4166657328605652, 0.18102103471755981, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.09080760926008224, 0.09187275916337967, 0.012195594608783722, 0.021634280681610107, 0.019499676302075386, 0.09054076671600342, 0.11008334904909134, 0.23214302957057953, 0.0423310361802578, 0.034868963062763214, 0.06751228123903275, 0.049237679690122604, 0.03915484994649887, 0.08995199203491211, 0.1941523253917694, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0706457570195198, 0.10473088920116425, 0.039385173469781876, 0.02697153575718403, 0.04372800514101982, 0.06655491143465042, 0.23491710424423218, 0.19935868680477142, 0.036273516714572906, 0.06345809996128082, 0.020782677456736565, 0.12393849343061447, 0.05726756155490875, 0.041495081037282944, 0.15982753038406372, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.039186086505651474, 0.11076691001653671, 0.03891725465655327, 0.009549588896334171, 0.01825849525630474, 0.051163915544748306, 0.1146436408162117, 0.1649821698665619, 0.03586947172880173, 0.06679365783929825, 0.09092967957258224, 0.14827685058116913, 0.10948126018047333, 0.10746686905622482, 0.1515202671289444, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.14541134238243103, 0.05313154682517052, 0.01991144008934498, 0.08764121681451797, 0.014597749337553978, 0.03937898576259613, 0.04872390255331993, 0.04689335823059082, 0.04558950290083885, 0.051970891654491425, 0.02520112879574299, 0.022838978096842766, 0.00921469647437334, 0.00801294855773449, 0.21471147239208221, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.029921628534793854, 0.09876842796802521, 0.1324968934059143, 0.09236511588096619, 0.02831152267754078, 0.08077768236398697, 0.03118293546140194, 0.1750149130821228, 0.015778981149196625, 0.07032441347837448, 0.22269371151924133, 0.07579661160707474, 0.029184984043240547, 0.053061336278915405, 0.18562854826450348, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07805982232093811, 0.05365234240889549, 0.2842547595500946, 0.2606758773326874, 
0.21293140947818756, 0.02651267871260643, 0.08033362030982971, 0.07913534343242645, 0.17101624608039856, 0.12522375583648682, 0.14315897226333618, 0.16815446317195892, 0.0695369690656662, 0.13316825032234192, 0.19111928343772888, 0.17860974371433258, 0.0018437139224261045, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11272483319044113, 0.11636882275342941, 0.45685258507728577, 0.0910579040646553, 0.3091263473033905, 0.12632955610752106, 0.1822080761194229, 0.18498732149600983, 0.6353387832641602, 0.08394157886505127, 0.3285849094390869, 0.4818887710571289, 0.08592816442251205, 0.3495768904685974, 0.07449600845575333, 0.20284786820411682, 0.0034877806901931763, 0.08334594964981079, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2834128737449646, 0.1102365031838417, 0.1840669959783554, 0.5708534121513367, 0.3157653212547302, 0.041008107364177704, 0.038309745490550995, 0.03211268410086632, 0.6102551817893982, 0.20786605775356293, 0.21116787195205688, 0.10018377006053925, 0.04653669148683548, 0.17929011583328247, 0.11314841359853745, 0.1494244486093521, 0.3379342555999756, 0.0649241954088211, 0.006597604602575302, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.5993789434432983, 0.0908532664179802, 0.49218761920928955, 0.41100576519966125, 0.18825526535511017, 0.4342217445373535, 0.12116678059101105, 0.10673660039901733, 0.822167158126831, 0.4385586380958557, 0.6995345950126648, 0.18085956573486328, 0.1357179582118988, 0.2864921987056732, 0.034255724400281906, 0.2969810962677002, 0.005403619725257158, 0.054099179804325104, 0.0006044544279575348, 0.009600944817066193, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.858432412147522, 0.34460219740867615, 0.7778953909873962, 0.7743141651153564, 0.4405529797077179, 0.4761039614677429, 0.6155950427055359, 0.06873662024736404, 0.7323919534683228, 0.7086790204048157, 0.6720118522644043, 0.45794978737831116, 0.1628962755203247, 0.4249861538410187, 0.040913816541433334, 0.32280662655830383, 0.01735025830566883, 0.15535852313041687, 0.00028658873634412885, 0.016427762806415558, 0.001579301548190415, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04546767473220825, 0.0383436344563961, 0.10268200188875198, 0.20100316405296326, 0.185649111866951, 0.08432896435260773, 0.060354892164468765, 0.07717668265104294, 0.3201402723789215, 0.04503992572426796, 0.088813915848732, 0.3990366756916046, 0.1564548909664154, 0.08066049963235855, 0.11440145969390869, 0.016787199303507805, 0.10643576830625534, 0.24800433218479156, 0.4802894592285156, 0.03762362524867058, 0.06816797703504562, 0.10676699876785278, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.21178147196769714, 0.043018583208322525, 0.1065564677119255, 0.10858221352100372, 0.05675008147954941, 0.06700197607278824, 0.12675313651561737, 0.058651700615882874, 0.18508696556091309, 0.05493801832199097, 0.037313126027584076, 0.19010567665100098, 0.07823225855827332, 0.034572359174489975, 0.16783590614795685, 0.22070105373859406, 0.03063296526670456, 0.12860903143882751, 0.04803713783621788, 0.06528759002685547, 0.3172104060649872, 0.012414618395268917, 0.008628717623651028, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], 
[0.053469568490982056, 0.03894811123609543, 0.06651152670383453, 0.10646583139896393, 0.08985435962677002, 0.07578439265489578, 0.03395741805434227, 0.09802807122468948, 0.190333291888237, 0.07748086005449295, 0.07400990277528763, 0.6643930077552795, 0.07830479741096497, 0.07947986572980881, 0.11464671790599823, 0.0170818492770195, 0.2921580374240875, 0.24774892628192902, 0.2979756295681, 0.16657015681266785, 0.03825104981660843, 0.39123743772506714, 0.0541624091565609, 0.01715947687625885, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1680978536605835, 0.06724530458450317, 0.16071708500385284, 0.2987021803855896, 0.11997595429420471, 0.007637033239006996, 0.05953739956021309, 0.06456195563077927, 0.07405640929937363, 0.11493658274412155, 0.07269633561372757, 0.12183233350515366, 0.019239120185375214, 0.0931614562869072, 0.15387272834777832, 0.06952934712171555, 0.09443160146474838, 0.3155873417854309, 0.2511345446109772, 0.20146684348583221, 0.17959536612033844, 0.500001072883606, 0.3407229483127594, 0.15127938985824585, 0.026401039212942123, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.09433168172836304, 0.05311369523406029, 0.44581180810928345, 0.2857709527015686, 0.11141614615917206, 0.04973546415567398, 0.10592624545097351, 0.0732862576842308, 0.26435965299606323, 0.07302475720643997, 0.17637307941913605, 0.06760746240615845, 0.052111051976680756, 0.29667070508003235, 0.11431443691253662, 0.12491581588983536, 0.08139167726039886, 0.045777399092912674, 0.07585746794939041, 0.05243801325559616, 0.09790124744176865, 0.17415514588356018, 0.44996151328086853, 0.13761505484580994, 0.06580806523561478, 0.1016187071800232, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07687122374773026, 0.10929025709629059, 0.4687592387199402, 0.20397132635116577, 0.26744040846824646, 0.03514130413532257, 0.033296968787908554, 0.08783485740423203, 0.22074763476848602, 0.08713625371456146, 0.12920482456684113, 0.05166565254330635, 0.07679110020399094, 0.17419996857643127, 0.1387287825345993, 0.03772348165512085, 0.0006561332265846431, 0.04040418565273285, 0.23337695002555847, 0.0037602160591632128, 0.1251135915517807, 0.07994246482849121, 0.0032252452801913023, 0.044697076082229614, 0.05314825102686882, 0.16676445305347443, 0.42838534712791443, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.061203911900520325, 0.12594261765480042, 0.353413462638855, 0.22131817042827606, 0.41015592217445374, 0.11432977020740509, 0.010031531564891338, 0.048355478793382645, 0.27572426199913025, 0.07773520797491074, 0.2322542816400528, 0.1527126431465149, 0.05797232687473297, 0.09810248017311096, 0.16366761922836304, 0.008380687795579433, 0.11938491463661194, 0.03761400282382965, 0.10612092912197113, 0.004111893475055695, 0.07536520808935165, 0.06150262430310249, 0.010061400011181831, 0.01712355576455593, 0.026476707309484482, 0.05440329760313034, 0.37643373012542725, 0.12204637378454208, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10230414569377899, 0.03857935592532158, 0.05230129137635231, 0.14396332204341888, 0.09251677989959717, 0.03541665896773338, 0.005624003708362579, 0.014271721243858337, 0.042375415563583374, 0.13543996214866638, 0.061749108135700226, 0.00788076315075159, 0.1602918803691864, 0.07564403861761093, 0.09375559538602829, 0.0973815768957138, 0.1330094188451767, 0.2356250286102295, 0.23801013827323914, 0.16962124407291412, 0.3808935284614563, 0.19062454998493195, 
0.12487400323152542, 0.4241224527359009, 0.1858355700969696, 0.1843334436416626, 0.17186462879180908, 0.1674181967973709, 0.03679514676332474, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.705120861530304, 0.026186510920524597, 0.8528315424919128, 0.8252069354057312, 0.24319231510162354, 0.07270172983407974, 0.09487330913543701, 0.07207771390676498, 0.4722364544868469, 0.7067926526069641, 0.8624283075332642, 0.07399676740169525, 0.0075901346281170845, 0.016478050500154495, 0.12560917437076569, 0.28161293268203735, 0.39586660265922546, 0.35408592224121094, 0.26687130331993103, 0.036089953035116196, 0.12106626480817795, 0.05175312981009483, 0.6374836564064026, 0.06537415832281113, 0.01867927983403206, 0.03261437267065048, 0.05161871388554573, 0.026679201051592827, 0.0063977655954658985, 0.0581950880587101, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.27840110659599304, 0.06363435834646225, 0.3689763844013214, 0.33064448833465576, 0.25749024748802185, 0.1453908383846283, 0.03645810857415199, 0.00836147554218769, 0.3977815508842468, 0.41805213689804077, 0.17756043374538422, 0.05318059027194977, 0.011340576224029064, 0.020938394591212273, 0.05934957042336464, 0.052721865475177765, 0.30848002433776855, 0.24953237175941467, 0.2790854275226593, 0.7654650807380676, 0.6871634125709534, 0.13210926949977875, 0.673875629901886, 0.04467727988958359, 0.018614191561937332, 0.08283445239067078, 0.0906965509057045, 0.06073237210512161, 0.12131030112504959, 0.06997358053922653, 0.3489122688770294, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17816129326820374, 0.10609658807516098, 0.17893879115581512, 0.28182876110076904, 0.15060719847679138, 0.03372456133365631, 0.04276707395911217, 0.050946421921253204, 0.04137968271970749, 0.16634012758731842, 0.16395889222621918, 0.24548840522766113, 0.05229371041059494, 0.09448723495006561, 0.12793652713298798, 0.03943483531475067, 0.28613966703414917, 0.07243800908327103, 0.8744964599609375, 0.029915155842900276, 0.331167072057724, 0.4079437255859375, 0.5431530475616455, 0.3259604275226593, 0.1150238886475563, 0.3324905335903168, 0.44221389293670654, 0.2450132817029953, 0.12577538192272186, 0.11014749854803085, 0.1900990903377533, 0.042790502309799194, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14424489438533783, 0.0705854520201683, 0.24214811623096466, 0.24549053609371185, 0.19939330220222473, 0.02639644220471382, 0.021373553201556206, 0.024115193635225296, 0.08405331522226334, 0.14685925841331482, 0.15661610662937164, 0.06219787895679474, 0.032059792429208755, 0.09036684036254883, 0.15146715939044952, 0.06558705866336823, 0.020870981737971306, 0.007642277050763369, 0.028054187074303627, 0.010532653890550137, 0.10334379225969315, 0.12033270299434662, 0.1911371499300003, 0.30930495262145996, 0.04741071164608002, 0.06516209989786148, 0.09313901513814926, 0.24243950843811035, 0.15116305649280548, 0.09231718629598618, 0.47254911065101624, 0.053373783826828, 0.18162642419338226, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06650430709123611, 0.10705426335334778, 0.3146411180496216, 0.1647443175315857, 0.23945462703704834, 0.035643309354782104, 0.026562364771962166, 0.09605439007282257, 0.19827118515968323, 0.1037423387169838, 0.14283734560012817, 0.08165161311626434, 0.07012972235679626, 0.11072988063097, 0.13417953252792358, 0.017124762758612633, 0.00014164860476739705, 0.01482362300157547, 0.13952724635601044, 0.0008921221597120166, 0.07150562852621078, 0.037848807871341705, 0.0009583857608959079, 0.0160027127712965, 0.01657933183014393, 
0.09754330664873123, 0.3402610719203949, 0.02766183763742447, 0.011668790131807327, 0.019427720457315445, 0.01879642903804779, 0.06977814435958862, 0.23379765450954437, 0.41046860814094543, NaN, NaN, NaN, NaN, NaN, NaN], [0.06460674107074738, 0.10897383838891983, 0.18354696035385132, 0.20187535881996155, 0.38844820857048035, 0.04722803831100464, 0.010622762143611908, 0.04332485795021057, 0.31279584765434265, 0.11892355233430862, 0.20366235077381134, 0.1460915356874466, 0.041410893201828, 0.060890424996614456, 0.16885291039943695, 0.0033047832548618317, 0.043024010956287384, 0.009507044218480587, 0.05758155509829521, 0.0012058177962899208, 0.04777836054563522, 0.038867104798555374, 0.0027761561796069145, 0.008453112095594406, 0.011027430184185505, 0.021058345213532448, 0.3453521430492401, 0.05058252438902855, 0.004837945103645325, 0.0014179014833644032, 0.06873936206102371, 0.10687354952096939, 0.21186815202236176, 0.44615596532821655, 0.10872229933738708, NaN, NaN, NaN, NaN, NaN], [0.08445128798484802, 0.07278266549110413, 0.017734743654727936, 0.12906457483768463, 0.17354236543178558, 0.01439378596842289, 0.0032682251185178757, 0.009051240049302578, 0.02403325028717518, 0.17859239876270294, 0.05114053934812546, 0.026160510256886482, 0.17188863456249237, 0.059929899871349335, 0.12745818495750427, 0.05260666832327843, 0.09784732013940811, 0.08957145363092422, 0.40504154562950134, 0.2393025904893875, 0.37446328997612, 0.33926665782928467, 0.06915906071662903, 0.28494811058044434, 0.18951286375522614, 0.21801336109638214, 0.2963850796222687, 0.09700386226177216, 0.02254888415336609, 0.016780056059360504, 0.3380737006664276, 0.17247304320335388, 0.15711140632629395, 0.27414536476135254, 0.12462585419416428, 0.05461693927645683, NaN, NaN, NaN, NaN], [0.6940725445747375, 0.016104217618703842, 0.8427497148513794, 0.8075915575027466, 0.2572270333766937, 0.04667792096734047, 0.07690176367759705, 0.06650352478027344, 0.4641934931278229, 0.7403572797775269, 0.892522931098938, 0.08286882191896439, 0.00509345019236207, 0.009769911877810955, 0.1252693384885788, 0.4168609082698822, 0.5786882042884827, 0.4795728027820587, 0.4880480170249939, 0.07741907238960266, 0.22295767068862915, 0.10229793190956116, 0.7397969365119934, 0.09120289236307144, 0.02111845649778843, 0.040493883192539215, 0.06478337198495865, 0.029333919286727905, 0.01266437117010355, 0.08807221800088882, 0.12442159652709961, 0.019878262653946877, 0.02248454838991165, 0.045759230852127075, 0.02396523579955101, 0.002620323793962598, 0.04143214225769043, NaN, NaN, NaN], [0.47638654708862305, 0.08160793781280518, 0.2188907116651535, 0.3983159363269806, 0.3041192293167114, 0.0773146003484726, 0.041229549795389175, 0.00785501953214407, 0.20719125866889954, 0.6323855519294739, 0.1790589690208435, 0.15920953452587128, 0.005728188902139664, 0.011172757484018803, 0.10331764072179794, 0.05813424289226532, 0.29987069964408875, 0.06046860292553902, 0.2948205769062042, 0.6036045551300049, 0.4684220552444458, 0.10851431638002396, 0.5970842242240906, 0.03630568087100983, 0.009022231213748455, 0.034897517412900925, 0.044963937252759933, 0.06918716430664062, 0.06464210897684097, 0.027029458433389664, 0.39741793274879456, 0.1858920007944107, 0.0860959067940712, 0.03553689271211624, 0.03651457652449608, 0.07401836663484573, 0.02850046567618847, 0.457316130399704, NaN, NaN], [0.3162515461444855, 0.12029282748699188, 0.1898643672466278, 0.3138664960861206, 0.22235795855522156, 0.03812789171934128, 0.07994988560676575, 0.07006566971540451, 0.06856126338243484, 
0.2470276951789856, 0.2142392098903656, 0.4667101502418518, 0.07071195542812347, 0.09391427785158157, 0.11791101843118668, 0.011862307786941528, 0.06274299323558807, 0.019264375790953636, 0.7077140212059021, 0.009838010184466839, 0.08938813954591751, 0.2665976285934448, 0.21134285628795624, 0.19931168854236603, 0.029879093170166016, 0.11873869597911835, 0.2187809944152832, 0.10740162432193756, 0.03893040865659714, 0.02778119407594204, 0.17118902504444122, 0.03705315291881561, 0.41107529401779175, 0.3035467863082886, 0.1782693862915039, 0.062172479927539825, 0.04369974508881569, 0.43116021156311035, 0.04090215638279915, NaN], [0.15722334384918213, 0.11492010205984116, 0.22595097124576569, 0.17283931374549866, 0.11246844381093979, 0.07424511015415192, 0.1308857947587967, 0.1509532928466797, 0.12219540029764175, 0.14498494565486908, 0.13763099908828735, 0.16327989101409912, 0.12245305627584457, 0.21428720653057098, 0.12265608459711075, 0.13294808566570282, 0.07747184485197067, 0.06700501590967178, 0.24500344693660736, 0.07035010308027267, 0.06088097393512726, 0.15465889871120453, 0.22422827780246735, 0.20946520566940308, 0.06346394866704941, 0.1416163444519043, 0.10671631991863251, 0.07756247371435165, 0.14874279499053955, 0.2551397681236267, 0.18877547979354858, 0.07302238047122955, 0.24805422127246857, 0.1228112131357193, 0.08095405995845795, 0.12022056430578232, 0.20888803899288177, 0.1654488444328308, 0.07207347452640533, 0.12261014431715012]], [[0.009874092414975166, 0.0475393682718277, 0.0700187012553215, 0.05995699018239975, 0.023110831156373024, 0.04304451867938042, 0.02397323027253151, 0.09104450792074203, 0.13320927321910858, 0.0718994140625, 0.16378211975097656, 0.06306017935276031, 0.03516274318099022, 0.06407153606414795, 0.1927335411310196, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.007679122034460306, 0.008519956842064857, 0.023641018196940422, 0.036320336163043976, 0.005810021422803402, 0.002834178740158677, 0.01027101743966341, 0.005131446290761232, 0.05288401618599892, 0.022729018703103065, 0.02885960415005684, 0.007142365910112858, 0.005423326510936022, 0.00592823838815093, 0.23125353455543518, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.17363575100898743, 0.08529574424028397, 0.018747013062238693, 0.09323837608098984, 0.07366655766963959, 0.2784116566181183, 0.6226999759674072, 0.6422466039657593, 0.18433590233325958, 0.44911590218544006, 0.07703087478876114, 0.23628254234790802, 0.37835898995399475, 0.3362680971622467, 0.10061702132225037, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.039354946464300156, 0.028671007603406906, 0.0009692042949609458, 0.010166235268115997, 0.003592043649405241, 0.024686597287654877, 0.0576656274497509, 0.10543617606163025, 0.069565050303936, 0.23999209702014923, 0.0370241142809391, 0.07099387794733047, 0.08031197637319565, 0.0629396140575409, 0.19831009209156036, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.07821620255708694, 0.07413192838430405, 0.008470119908452034, 0.005837618373334408, 0.016890503466129303, 0.34118980169296265, 0.6424257159233093, 0.5736639499664307, 0.18751046061515808, 0.08286380022764206, 0.013973995111882687, 0.16452431678771973, 0.6265572905540466, 0.24633896350860596, 
0.03771306574344635, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.08601168543100357, 0.11519530415534973, 0.00501672737300396, 0.0384475477039814, 0.0009856059914454818, 0.020220156759023666, 0.4602939486503601, 0.41334664821624756, 0.011432202532887459, 0.039776530116796494, 0.004202698357403278, 0.012451107613742352, 0.012797003611922264, 0.0109980758279562, 0.22371669113636017, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.05821564793586731, 0.2493630200624466, 0.017187682911753654, 0.007334073074162006, 0.002277297666296363, 0.012770043686032295, 0.014771709218621254, 0.06810285151004791, 0.008148171938955784, 0.093966543674469, 0.03078475221991539, 0.016961626708507538, 0.009818210266530514, 0.005369590129703283, 0.2805846929550171, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0315314382314682, 0.006441309116780758, 0.005187691655009985, 0.0023020647931843996, 0.001103160553611815, 0.0010285694152116776, 0.0036586276255548, 0.0034369472414255142, 0.02540425956249237, 0.018933216109871864, 0.011261656880378723, 0.014689027331769466, 0.0047272746451199055, 0.003173592034727335, 0.27608010172843933, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.052501752972602844, 0.03902341425418854, 0.022159013897180557, 0.15980832278728485, 0.04565480723977089, 0.04961955174803734, 0.10487794876098633, 0.03556728735566139, 0.011893571354448795, 0.350600004196167, 0.8153157234191895, 0.696418821811676, 0.19642634689807892, 0.7945331335067749, 0.025074943900108337, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.008775658905506134, 0.0231929961591959, 0.001974506536498666, 0.02221933752298355, 0.002016729209572077, 0.03464629501104355, 0.020560195669531822, 0.015741808339953423, 0.024821357801556587, 0.03194829449057579, 0.062133170664310455, 0.009445058181881905, 0.008440939709544182, 0.031038939952850342, 0.24359388649463654, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.15448324382305145, 0.15535393357276917, 0.0009195139864459634, 0.02347545325756073, 0.010745828039944172, 0.05933469906449318, 0.0886014774441719, 0.09891750663518906, 0.008176282048225403, 0.17814745008945465, 0.04613054543733597, 0.10348650068044662, 0.06132601201534271, 0.10257216542959213, 0.2144334316253662, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1637454628944397, 0.3587695062160492, 0.013175190426409245, 0.027070751413702965, 0.009701711125671864, 0.027045298367738724, 0.06057014688849449, 0.08674251288175583, 0.018084047362208366, 0.012978773564100266, 0.04984384402632713, 0.0746963769197464, 0.21545591950416565, 0.18275731801986694, 0.18403297662734985, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.04016833007335663, 0.03071952983736992, 0.0073937661945819855, 0.044594794511795044, 0.005693770945072174, 0.007929249666631222, 0.19023852050304413, 0.12198647856712341, 0.00967123731970787, 0.05747445672750473, 0.006795276887714863, 0.006636326666921377, 
0.014849998988211155, 0.02297961339354515, 0.1823122203350067, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.08359953761100769, 0.14515268802642822, 0.009139984846115112, 0.10055579245090485, 0.007817201316356659, 0.06191832944750786, 0.24591712653636932, 0.26670339703559875, 0.008127851411700249, 0.05132465437054634, 0.011226493865251541, 0.020721180364489555, 0.025672290474176407, 0.06137499585747719, 0.19538666307926178, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.004038439132273197, 0.01158715970814228, 0.012492671608924866, 0.008604439906775951, 0.0044732466340065, 0.001471644383855164, 0.003622728632763028, 0.005392232909798622, 0.024040954187512398, 0.002572751836851239, 0.011896335519850254, 0.00655994052067399, 0.004419950768351555, 0.0023605322930961847, 0.2578853368759155, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03995227441191673, 0.02612248808145523, 0.09039098769426346, 0.04685363546013832, 0.14171013236045837, 0.3046724796295166, 0.08713044226169586, 0.11726538836956024, 0.3945818245410919, 0.03867875412106514, 0.060879118740558624, 0.3211958110332489, 0.1562168449163437, 0.1954476237297058, 0.12928469479084015, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.138319730758667, 0.1925395429134369, 0.06914161890745163, 0.1830926090478897, 0.22252067923545837, 0.24239645898342133, 0.2738734483718872, 0.3115195333957672, 0.287569522857666, 0.12556934356689453, 0.047479670494794846, 0.1859251707792282, 0.015966184437274933, 0.050888173282146454, 0.04287213087081909, 0.04818185046315193, 0.30147239565849304, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.059622667729854584, 0.19761067628860474, 0.019807182252407074, 0.02911451645195484, 0.11472073942422867, 0.03754669055342674, 0.08183436095714569, 0.09122617542743683, 0.10595303028821945, 0.094895139336586, 0.022252719849348068, 0.087751105427742, 0.015402892604470253, 0.02668953314423561, 0.15029701590538025, 0.000490668579004705, 0.5364181399345398, 0.0016803600592538714, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.4440009295940399, 0.5055950880050659, 0.14072291553020477, 0.20776981115341187, 0.24339812994003296, 0.01946749910712242, 0.1477651447057724, 0.24892206490039825, 0.13990418612957, 0.5277839303016663, 0.22113053500652313, 0.7815175652503967, 0.04741470143198967, 0.31336119771003723, 0.318754643201828, 0.17249688506126404, 0.003960400819778442, 1.1815190191555303e-05, 0.00205309153534472, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.003975332248955965, 0.09357346594333649, 0.000580776366405189, 0.001556370290927589, 0.0040078358724713326, 0.00020105167641304433, 0.005314813926815987, 0.0463886484503746, 0.0025405578780919313, 0.008098164573311806, 0.0004367573419585824, 0.0955028310418129, 0.0013312119990587234, 0.008472515270113945, 0.16612127423286438, 0.08659190684556961, 0.2260276973247528, 0.018877657130360603, 0.019257033243775368, 0.9179584980010986, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00713347876444459, 
0.11304348707199097, 0.007166451308876276, 0.017305465415120125, 0.01892760582268238, 0.004294875077903271, 0.013284130021929741, 0.05641845986247063, 0.006293897051364183, 0.008091668598353863, 0.004229044076055288, 0.03852742537856102, 0.036073870956897736, 0.030675750225782394, 0.1423715502023697, 2.1155383365112357e-05, 0.00016346832853741944, 0.0004644138098228723, 9.852640505414456e-05, 0.009302367456257343, 0.8758521676063538, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.112990602850914, 0.20299020409584045, 0.29141831398010254, 0.1917479783296585, 0.25626659393310547, 0.40023526549339294, 0.045914653688669205, 0.05403761938214302, 0.3577503561973572, 0.11164049804210663, 0.20054538547992706, 0.23382915556430817, 0.3541012704372406, 0.39880213141441345, 0.05442150682210922, 0.0038963633123785257, 0.11578002572059631, 0.06833135336637497, 0.2930091321468353, 0.06728219240903854, 0.588379442691803, 0.190787211060524, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11769542098045349, 0.22490660846233368, 0.16446754336357117, 0.17726869881153107, 0.24409359693527222, 0.16966795921325684, 0.06426751613616943, 0.1868649125099182, 0.17593497037887573, 0.10732528567314148, 0.1210716962814331, 0.18835949897766113, 0.07820838689804077, 0.12172650545835495, 0.0815061554312706, 0.04113525524735451, 0.03917931765317917, 0.013817446306347847, 0.06874216347932816, 0.027753230184316635, 0.04752122610807419, 0.17637789249420166, 0.2964049279689789, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08801974356174469, 0.2964327037334442, 0.17140379548072815, 0.1086457222700119, 0.1790848970413208, 0.042561717331409454, 0.02568918652832508, 0.12736740708351135, 0.4644424617290497, 0.09952269494533539, 0.1403166949748993, 0.12085206061601639, 0.2499331831932068, 0.14905890822410583, 0.04691213369369507, 0.006397286430001259, 0.008155078627169132, 0.02385183423757553, 0.08218340575695038, 0.09733399748802185, 0.7216709852218628, 0.11420661956071854, 0.028804002329707146, 0.49512770771980286, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.28339406847953796, 0.25363603234291077, 0.49371209740638733, 0.28714650869369507, 0.42171764373779297, 0.03586414083838463, 0.140908345580101, 0.27345338463783264, 0.06897412985563278, 0.24740128219127655, 0.5061832070350647, 0.4192107915878296, 0.43851029872894287, 0.29079654812812805, 0.10071542859077454, 0.007080267183482647, 0.010165071114897728, 0.007166726514697075, 0.04547898843884468, 0.014898931607604027, 0.06153866648674011, 0.05960511788725853, 0.025653565302491188, 0.05574938654899597, 0.5054050087928772, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.049345988780260086, 0.1473262906074524, 0.10952533781528473, 0.16707968711853027, 0.25493475794792175, 0.03866606950759888, 0.046480532735586166, 0.16288119554519653, 0.06614720076322556, 0.0629507377743721, 0.07218940556049347, 0.3448391556739807, 0.06943795084953308, 0.058807674795389175, 0.135455921292305, 0.12821261584758759, 0.09823491424322128, 0.2407415509223938, 0.03722868487238884, 0.07500484585762024, 0.23719841241836548, 0.08696958422660828, 0.10033686459064484, 0.08637046813964844, 0.05946339666843414, 0.17889682948589325, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05557708069682121, 0.024377070367336273, 0.171014666557312, 0.1548214852809906, 0.21205416321754456, 
0.29049578309059143, 0.08155391365289688, 0.2053205668926239, 0.09979691356420517, 0.11640740185976028, 0.23155182600021362, 0.4772811830043793, 0.2134055644273758, 0.3209300637245178, 0.0739695355296135, 0.018611561506986618, 0.530681848526001, 0.37442806363105774, 0.09326046705245972, 0.039934538304805756, 0.607749342918396, 0.1011725440621376, 0.041957128793001175, 0.061673425137996674, 0.012941170483827591, 0.012897199019789696, 0.02531522512435913, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.046621087938547134, 0.02855776995420456, 0.11975010484457016, 0.2049850970506668, 0.16244490444660187, 0.14614170789718628, 0.03785347566008568, 0.2537410259246826, 0.3719625771045685, 0.1159287542104721, 0.23734091222286224, 0.26474830508232117, 0.04938332363963127, 0.17566856741905212, 0.034675102680921555, 0.025258230045437813, 0.013820141553878784, 0.020238902419805527, 0.20186173915863037, 0.008764497935771942, 0.044081512838602066, 0.11685895919799805, 0.12131167203187943, 0.03466574102640152, 0.0033257410395890474, 0.009427645243704319, 0.00932170171290636, 0.6215367317199707, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08535599708557129, 0.01230260543525219, 0.28460273146629333, 0.3323705196380615, 0.13364574313163757, 0.14216013252735138, 0.16550986468791962, 0.36634352803230286, 0.3233327269554138, 0.13755354285240173, 0.6341029405593872, 0.1276889443397522, 0.0818048045039177, 0.2633805274963379, 0.10007897019386292, 0.0027034373488277197, 0.008653531782329082, 0.0021412167698144913, 0.02395743690431118, 0.06537352502346039, 0.05110874027013779, 0.050060901790857315, 0.023448945954442024, 0.0059632728807628155, 0.0016337132547050714, 0.0060929651372134686, 0.00957516860216856, 0.05008334666490555, 0.696637749671936, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.014263293705880642, 0.07173046469688416, 0.01932992786169052, 0.01909404993057251, 0.16755935549736023, 0.2271488904953003, 0.1093294620513916, 0.14342457056045532, 0.0580194853246212, 0.01671113632619381, 0.03395597264170647, 0.0692841187119484, 0.07175575196743011, 0.04972841590642929, 0.12856654822826385, 5.63129390229733e-07, 0.00027805642457678914, 1.7160025890916586e-05, 5.958595011179568e-06, 0.00078710971865803, 1.2566613349918043e-06, 9.03528507478768e-06, 2.1993335394654423e-05, 4.528845238382928e-06, 1.0594538935038145e-06, 2.375837993895402e-06, 1.0765622391772922e-05, 0.00012861557479482144, 0.000270194374024868, 0.4203896224498749, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06590985506772995, 0.1636172980070114, 0.09935098141431808, 0.20126965641975403, 0.4101002812385559, 0.21936923265457153, 0.26084569096565247, 0.3593950569629669, 0.014820259064435959, 0.05201014503836632, 0.03426084294915199, 0.38774317502975464, 0.1401163786649704, 0.3782513439655304, 0.13036324083805084, 0.19651824235916138, 0.009276115335524082, 0.0007576652569696307, 0.02043321169912815, 0.000937489268835634, 0.0014158851699903607, 0.02691410481929779, 0.025149332359433174, 0.015754513442516327, 0.002638434525579214, 0.03568584471940994, 0.28478676080703735, 0.08937329053878784, 0.04057440906763077, 0.41798362135887146, 0.02812151424586773, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05128908529877663, 0.11090300232172012, 0.24501535296440125, 0.07115167379379272, 0.3950805068016052, 0.2010982632637024, 0.08927696198225021, 0.2923780679702759, 0.11195118725299835, 0.05971711874008179, 0.14540457725524902, 0.4000069797039032, 0.2374461144208908, 0.47139719128608704, 
0.10731440782546997, 0.0009883381426334381, 0.005475975573062897, 0.017872320488095284, 0.0038598645478487015, 0.01383217889815569, 0.1060260757803917, 0.010558119975030422, 0.0004280287539586425, 0.011488020420074463, 0.004323506727814674, 0.015877770259976387, 0.025533713400363922, 0.06758329272270203, 0.005362953990697861, 0.03033292666077614, 0.3987913429737091, 0.22715723514556885, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.014083221554756165, 0.029302498325705528, 0.019839908927679062, 0.019802037626504898, 0.11310776323080063, 0.014347831718623638, 0.013065088540315628, 0.0404186025261879, 0.14103254675865173, 0.01056672353297472, 0.02028844505548477, 0.4335528016090393, 0.019943613559007645, 0.08491621166467667, 0.15365199744701385, 0.025437461212277412, 0.027387555688619614, 0.0211916733533144, 0.0013409400125965476, 0.0016278955154120922, 0.0205780491232872, 0.006606978829950094, 0.005105526186525822, 0.008417481556534767, 0.008475488983094692, 0.016475802287459373, 0.021865585818886757, 0.04041945934295654, 0.001965513452887535, 0.030297037214040756, 0.018051480874419212, 0.2940014600753784, 0.09546513855457306, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04251990094780922, 0.025738505646586418, 0.19788101315498352, 0.08900192379951477, 0.20504283905029297, 0.36725619435310364, 0.05852765589952469, 0.12635937333106995, 0.07596885412931442, 0.055006030946969986, 0.1975020170211792, 0.39253395795822144, 0.2602497935295105, 0.3791850209236145, 0.11310473829507828, 0.014116446487605572, 0.6685785055160522, 0.40577325224876404, 0.09365412592887878, 0.008716625161468983, 0.504762589931488, 0.11037815362215042, 0.03693895787000656, 0.066362664103508, 0.025546396151185036, 0.030971869826316833, 0.07333581149578094, 0.21910515427589417, 0.03128749132156372, 0.013437384739518166, 0.06674141436815262, 0.055549826472997665, 0.02615067921578884, 0.05289305001497269, NaN, NaN, NaN, NaN, NaN, NaN], [0.06150972843170166, 0.049163203686475754, 0.14174170792102814, 0.13322500884532928, 0.16170991957187653, 0.21354396641254425, 0.04667104035615921, 0.26311540603637695, 0.32218027114868164, 0.0809161439538002, 0.18361496925354004, 0.23948682844638824, 0.09133663028478622, 0.25973111391067505, 0.07212682068347931, 0.01752244122326374, 0.013681006617844105, 0.015325021930038929, 0.15400148928165436, 0.0017620606813579798, 0.03783759847283363, 0.07285356521606445, 0.042190372943878174, 0.019725583493709564, 0.004497688263654709, 0.010335608385503292, 0.023485884070396423, 0.5969190001487732, 0.22785267233848572, 0.05655405670404434, 0.05765213817358017, 0.006416310556232929, 0.029401889070868492, 0.022928474470973015, 0.6468356251716614, NaN, NaN, NaN, NaN, NaN], [0.12382826954126358, 0.035204268991947174, 0.3469122052192688, 0.27821084856987, 0.12485836446285248, 0.1130678728222847, 0.12963837385177612, 0.3451126217842102, 0.16417652368545532, 0.12570835649967194, 0.5000419616699219, 0.09880878776311874, 0.042446259409189224, 0.2635292708873749, 0.16834798455238342, 0.003705248236656189, 0.09392052888870239, 0.0011726000811904669, 0.042238909751176834, 0.07787514477968216, 0.11800158768892288, 0.09318403154611588, 0.018972182646393776, 0.022339271381497383, 0.02290215529501438, 0.009648749604821205, 0.020298194140195847, 0.09632600843906403, 0.6665039658546448, 0.01913357712328434, 0.016501925885677338, 0.01550414226949215, 0.014767719432711601, 0.035943012684583664, 0.1298983097076416, 0.7307590246200562, NaN, NaN, NaN, NaN], [0.010800065472722054, 0.04851265624165535, 0.01629789173603058, 
0.013155121356248856, 0.14412836730480194, 0.10944324731826782, 0.08000180870294571, 0.10409139841794968, 0.054843056946992874, 0.011575616896152496, 0.02017728053033352, 0.044063322246074677, 0.04816943034529686, 0.03936787694692612, 0.1280953288078308, 3.2450822118335054e-07, 0.0001958437787834555, 1.195628647110425e-05, 3.192948497598991e-06, 0.00034392892848700285, 1.3818779507346335e-06, 6.319523890851997e-06, 9.25252061279025e-06, 3.2897685287025524e-06, 1.041492623699014e-06, 2.450263082209858e-06, 1.1291336704744026e-05, 9.216016042046249e-05, 0.00025747373001649976, 0.3770022690296173, 7.494814053643495e-05, 0.00011931787594221532, 5.454379424918443e-05, 3.481862586340867e-05, 0.0001493972522439435, 6.532184488605708e-05, 0.4379080533981323, NaN, NaN, NaN], [0.03501533716917038, 0.12365423142910004, 0.058643028140068054, 0.026187611743807793, 0.2106953263282776, 0.09627192467451096, 0.1373300403356552, 0.209503173828125, 0.00544273667037487, 0.010177833028137684, 0.00795654021203518, 0.17826952040195465, 0.06280092895030975, 0.2785777747631073, 0.15446779131889343, 0.11172444373369217, 0.00812594499439001, 0.000803561822976917, 0.011673782020807266, 0.00013412271800916642, 0.002435607835650444, 0.021002406254410744, 0.009926681406795979, 0.014218374155461788, 0.0044799866154789925, 0.03462693840265274, 0.49634605646133423, 0.1610735058784485, 0.03537029027938843, 0.3717024624347687, 0.0470024012029171, 0.0025306264869868755, 0.08426976948976517, 0.5137573480606079, 0.047759927809238434, 0.008752438239753246, 0.5270217657089233, 0.020567137748003006, NaN, NaN], [0.055331505835056305, 0.14680130779743195, 0.22850985825061798, 0.040600359439849854, 0.2299574315547943, 0.21366852521896362, 0.10291176289319992, 0.2649042010307312, 0.07482050359249115, 0.04207760840654373, 0.11352740973234177, 0.22353075444698334, 0.2551318407058716, 0.4900997579097748, 0.11985023319721222, 0.00039373920299112797, 0.00142151047475636, 0.016346368938684464, 0.0038184949662536383, 0.00426360173150897, 0.10012070834636688, 0.007060237228870392, 0.00022489627008326352, 0.006389277055859566, 0.0014407823327928782, 0.01344740204513073, 0.019176417961716652, 0.04953484237194061, 0.003102741902694106, 0.017501499503850937, 0.25968801975250244, 0.12805432081222534, 0.03450275957584381, 0.03214799612760544, 0.06495527178049088, 0.007038496434688568, 0.018200475722551346, 0.2228115350008011, 0.24082934856414795, NaN], [0.04223596677184105, 0.14613933861255646, 0.08112313598394394, 0.04192597419023514, 0.11981905251741409, 0.18680673837661743, 0.07695262134075165, 0.14058402180671692, 0.1875196099281311, 0.05864474177360535, 0.0581248439848423, 0.23554684221744537, 0.21983209252357483, 0.1619952768087387, 0.12595340609550476, 0.004585978575050831, 0.008592751808464527, 0.20804427564144135, 0.003501898143440485, 0.01809401623904705, 0.0088487658649683, 0.01839679665863514, 0.009930659085512161, 0.019693726673722267, 0.015943868085741997, 0.06719032675027847, 0.03678698092699051, 0.03292753919959068, 0.02313893660902977, 0.023240724578499794, 0.03294161707162857, 0.24390928447246552, 0.10472099483013153, 0.0623757429420948, 0.06489475816488266, 0.03424002602696419, 0.03615953400731087, 0.05666068568825722, 0.29077935218811035, 0.20903274416923523]], [[0.020951254293322563, 0.19576001167297363, 0.05422525107860565, 0.000516751199029386, 0.0576050765812397, 0.039616964757442474, 0.0011584623716771603, 0.06260760873556137, 0.05524995177984238, 5.760174462920986e-05, 0.0005486492882482708, 0.01856253668665886, 
0.008022493682801723, 0.0032547120936214924, 0.1980074942111969, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.15878187119960785, 0.5755441188812256, 0.073322594165802, 0.006848999299108982, 0.04221894592046738, 0.057610929012298584, 0.01498481910675764, 0.15564584732055664, 0.02557745948433876, 0.010493909008800983, 0.04444737732410431, 0.10564734041690826, 0.04703369736671448, 0.007807346060872078, 0.10371111333370209, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0667557343840599, 0.5756934881210327, 0.02783285267651081, 0.001271323417313397, 0.13096383213996887, 0.007863562554121017, 0.0004880728665739298, 0.00786207988858223, 0.030193913727998734, 0.0004458925104700029, 0.0008183285826817155, 0.003005507169291377, 0.008833326399326324, 0.014566708356142044, 0.09050195664167404, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.006902126595377922, 0.22582471370697021, 0.027240794152021408, 0.000252248632023111, 0.08146748691797256, 0.008376134559512138, 0.0017193618696182966, 0.010283069685101509, 0.09191752970218658, 1.873078872449696e-05, 0.0001427968527423218, 0.0006295929779298604, 0.016630304977297783, 0.005029548890888691, 0.17517179250717163, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.46813952922821045, 0.7474208474159241, 0.04419572278857231, 0.039987821131944656, 0.07900705188512802, 0.010286353528499603, 0.008277984336018562, 0.21022778749465942, 0.018339863047003746, 0.003122991183772683, 0.0047759185545146465, 0.0031952662393450737, 0.0037801233120262623, 0.005526377819478512, 0.11187370121479034, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.08057912439107895, 0.09254536032676697, 0.26037144660949707, 0.04459136351943016, 0.19053104519844055, 0.18187369406223297, 0.04494835063815117, 0.08866222947835922, 0.05515718460083008, 0.011219717562198639, 0.041749756783246994, 0.13417255878448486, 0.43527963757514954, 0.4240920841693878, 0.05903848633170128, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.005677447654306889, 0.1104632169008255, 0.17886187136173248, 0.06816153228282928, 0.31320425868034363, 0.08580746501684189, 0.044242095202207565, 0.4031389355659485, 0.13310441374778748, 8.991359209176153e-05, 0.00051962147699669, 0.017516016960144043, 0.02517649158835411, 0.02827705629169941, 0.13873830437660217, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.009441166184842587, 0.04568161070346832, 0.08503290265798569, 0.055850934237241745, 0.15800173580646515, 0.09921947866678238, 0.2719998359680176, 0.7131122350692749, 0.12690743803977966, 0.0015569856623187661, 0.019959524273872375, 0.06398878246545792, 0.1124982088804245, 0.07506788522005081, 0.06075114384293556, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1778930425643921, 0.41812169551849365, 0.05459700897336006, 0.015388981439173222, 0.296997606754303, 0.041353121399879456, 0.1696915328502655, 0.1226804181933403, 0.3453136682510376, 0.006036087870597839, 
0.008416525088250637, 0.004891113843768835, 0.003974124789237976, 0.0023401544895023108, 0.04184575751423836, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0018550200620666146, 0.2628808617591858, 0.0018376001389697194, 9.925621998263523e-05, 0.008250601589679718, 0.11965687572956085, 0.011913565918803215, 0.3649533987045288, 0.12527383863925934, 0.0011617891723290086, 0.002173396060243249, 0.011088940314948559, 0.02579125389456749, 0.004398738034069538, 0.18079015612602234, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0033212341368198395, 0.4786561131477356, 0.00019389556837268174, 4.100392834516242e-05, 0.03255903348326683, 0.004482456482946873, 0.0018638258334249258, 0.04032744839787483, 0.151435986161232, 0.0011174781247973442, 0.0008650964009575546, 0.049343932420015335, 0.013284855522215366, 0.009702197276055813, 0.17111515998840332, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.015286837704479694, 0.17760051786899567, 0.012107143178582191, 0.004069492220878601, 0.40114596486091614, 0.005856915842741728, 0.025313973426818848, 0.23595470190048218, 0.5599475502967834, 0.019674712792038918, 0.01789786107838154, 0.0449712835252285, 0.024323459714651108, 0.008310162462294102, 0.10516723990440369, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.013816175982356071, 0.10832668840885162, 0.014126134105026722, 0.0044770012609660625, 0.18972823023796082, 0.04144473373889923, 0.013167506083846092, 0.0398833267390728, 0.08117146790027618, 0.03379456326365471, 0.04336484149098396, 0.6766878366470337, 0.6025072932243347, 0.24042664468288422, 0.05677386373281479, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.010657100938260555, 0.1729527860879898, 0.006031150463968515, 0.006062258500605822, 0.10042858123779297, 0.007653414737433195, 0.0031583579257130623, 0.014785557985305786, 0.13275322318077087, 0.05689838156104088, 0.04302775487303734, 0.36964303255081177, 0.3870774507522583, 0.31299954652786255, 0.07590257376432419, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.014769526198506355, 0.05199434980750084, 0.11582475155591965, 0.14804258942604065, 0.05702318996191025, 0.3275434374809265, 0.3759170472621918, 0.3329218327999115, 0.027774346992373466, 0.12548163533210754, 0.13219930231571198, 0.029332099482417107, 0.2028164267539978, 0.518939197063446, 4.3280975660309196e-05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.24939602613449097, 0.0921018123626709, 0.20195554196834564, 0.25931593775749207, 0.24976609647274017, 0.08025927096605301, 0.10602997988462448, 0.08455296605825424, 0.038250602781772614, 0.34039628505706787, 0.2528480887413025, 0.17168891429901123, 0.12038858979940414, 0.16591216623783112, 0.05973837152123451, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04881530627608299, 0.07757209986448288, 0.080610491335392, 0.047049663960933685, 0.2744564712047577, 0.18291208148002625, 0.11781244724988937, 0.130965456366539, 0.16412131488323212, 
0.049904536455869675, 0.10192018002271652, 0.46385079622268677, 0.23078110814094543, 0.23192283511161804, 0.17445482313632965, 0.15880486369132996, 0.04734092205762863, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11153621971607208, 0.27696484327316284, 0.0350787453353405, 0.011731116101145744, 0.08945441246032715, 0.2750371992588043, 0.07341955602169037, 0.12011690437793732, 0.026965567842125893, 0.023494159802794456, 0.015654105693101883, 0.05704642832279205, 0.11022293567657471, 0.0463077574968338, 0.1307818740606308, 0.22883240878582, 0.015307039953768253, 0.023610780015587807, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06216026097536087, 0.123567596077919, 0.044055916368961334, 0.012494971975684166, 0.045035671442747116, 0.18137943744659424, 0.1501520872116089, 0.0996006652712822, 0.05310875549912453, 0.11289763450622559, 0.05045852065086365, 0.055306825786828995, 0.3424266576766968, 0.1600506752729416, 0.04121629521250725, 0.15376803278923035, 0.17623378336429596, 0.16427822411060333, 0.018553992733359337, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03470996022224426, 0.38486456871032715, 0.007671448867768049, 0.014272118918597698, 0.01295357197523117, 0.001353065250441432, 0.035229261964559555, 0.10929086059331894, 0.03641098737716675, 0.08741087466478348, 0.01870635710656643, 0.10011491179466248, 0.03142678365111351, 0.12343490868806839, 0.15971165895462036, 0.12576976418495178, 0.44071146845817566, 0.38860467076301575, 0.12043511122465134, 0.027116619050502777, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03053746558725834, 0.24113330245018005, 0.009466315619647503, 0.01980357989668846, 0.04114365205168724, 0.05523357167840004, 0.027042368426918983, 0.10979101061820984, 0.004461985547095537, 0.04689180105924606, 0.04529552906751633, 0.1364448219537735, 0.054305437952280045, 0.06579019129276276, 0.13895106315612793, 0.03928220644593239, 0.42239660024642944, 0.2546820342540741, 0.22367709875106812, 0.1215892881155014, 0.001983387628570199, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.3289671242237091, 0.3443813920021057, 0.38217487931251526, 0.32642021775245667, 0.12515123188495636, 0.04144418612122536, 0.06740343570709229, 0.024584289640188217, 0.007359183859080076, 0.39375364780426025, 0.38123685121536255, 0.3035361170768738, 0.18788036704063416, 0.13260427117347717, 0.09976762533187866, 0.17152060568332672, 0.49365419149398804, 0.08085957914590836, 0.02207508496940136, 0.19231174886226654, 0.008304901421070099, 0.03878962993621826, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1711268573999405, 0.1900682896375656, 0.20778892934322357, 0.08847668021917343, 0.39589688181877136, 0.3955995440483093, 0.3348483741283417, 0.11133389919996262, 0.10861264914274216, 0.14033687114715576, 0.26926568150520325, 0.4846358299255371, 0.23405344784259796, 0.4343181252479553, 0.08998383581638336, 0.13843253254890442, 0.07047099620103836, 0.2525072991847992, 0.13487939536571503, 0.27911728620529175, 0.11727599054574966, 0.022392159327864647, 0.1764850914478302, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.4154844284057617, 0.4073733687400818, 0.5541329383850098, 0.43809109926223755, 
0.11503908038139343, 0.02849700301885605, 0.025097709149122238, 0.014711813069880009, 0.006424109451472759, 0.39197838306427, 0.4694826304912567, 0.17039237916469574, 0.16142874956130981, 0.19919125735759735, 0.054951149970293045, 0.10915631055831909, 0.30942168831825256, 0.19657404720783234, 0.031007295474410057, 0.23716343939304352, 0.05435822904109955, 0.08149112015962601, 0.6613667011260986, 0.11670006066560745, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.24498042464256287, 0.277620404958725, 0.060333866626024246, 0.030503980815410614, 0.04090564325451851, 0.4659561812877655, 0.2110646367073059, 0.11101182550191879, 0.028219982981681824, 0.10508411377668381, 0.025386929512023926, 0.0648839995265007, 0.13676653802394867, 0.07622335106134415, 0.09164498746395111, 0.0640818402171135, 0.41535088419914246, 0.29784247279167175, 0.05657188221812248, 0.036311421543359756, 0.08192699402570724, 0.16688455641269684, 0.10144203901290894, 0.346017450094223, 0.15466110408306122, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.4220424294471741, 0.21296784281730652, 0.10483475774526596, 0.11319100856781006, 0.14396990835666656, 0.1309618502855301, 0.13656088709831238, 0.2097199261188507, 0.1397993415594101, 0.263439804315567, 0.10735370218753815, 0.27457332611083984, 0.26051631569862366, 0.18891198933124542, 0.10100831091403961, 0.04877842590212822, 0.16450235247612, 0.23761717975139618, 0.0720985159277916, 0.12954245507717133, 0.08035153150558472, 0.18124118447303772, 0.05973014980554581, 0.26483285427093506, 0.39028850197792053, 0.05098416656255722, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12607140839099884, 0.08847615122795105, 0.09191321581602097, 0.06030821427702904, 0.21649383008480072, 0.10438336431980133, 0.07331530004739761, 0.1330888420343399, 0.04176999628543854, 0.06727378815412521, 0.06257567554712296, 0.21110908687114716, 0.09018781781196594, 0.09389244765043259, 0.13621515035629272, 0.11044558137655258, 0.08550350368022919, 0.2513507902622223, 0.28401821851730347, 0.12441904842853546, 0.05029991641640663, 0.42405593395233154, 0.08374682813882828, 0.43869927525520325, 0.14253327250480652, 0.10876792669296265, 0.09369473904371262, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.062066610902547836, 0.07845254987478256, 0.24838510155677795, 0.16541223227977753, 0.16867581009864807, 0.019677892327308655, 0.021460779011249542, 0.018530650064349174, 0.023010587319731712, 0.10349667817354202, 0.16099916398525238, 0.3089703619480133, 0.08426959812641144, 0.16459643840789795, 0.06073381006717682, 0.08764015138149261, 0.46941375732421875, 0.23278135061264038, 0.11763583868741989, 0.0354606918990612, 0.16624747216701508, 0.2793619632720947, 0.1965668648481369, 0.23052528500556946, 0.3914787769317627, 0.08669382333755493, 0.10678009688854218, 0.08708767592906952, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11642084270715714, 0.11190053075551987, 0.12368596345186234, 0.04549993947148323, 0.3567850887775421, 0.06569506227970123, 0.07286660373210907, 0.03259556367993355, 0.09530685096979141, 0.19273261725902557, 0.06463074684143066, 0.7640278339385986, 0.06371455639600754, 0.1593337506055832, 0.2193848341703415, 0.2116944044828415, 0.06720030307769775, 0.29984304308891296, 0.010844358243048191, 0.051072586327791214, 0.15023349225521088, 0.04554526135325432, 0.1560167670249939, 0.03609438240528107, 0.026584016159176826, 0.14512087404727936, 0.05890262499451637, 
0.015816861763596535, 0.07422769069671631, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11034999042749405, 0.03210863843560219, 0.010996339842677116, 0.026450032368302345, 0.051475513726472855, 0.02743532694876194, 0.3610350787639618, 0.20538736879825592, 0.017281753942370415, 0.05300014466047287, 0.012052728794515133, 0.08001075685024261, 0.0069017065688967705, 0.010893179103732109, 0.13085691630840302, 0.056502565741539, 0.15541820228099823, 0.07158821076154709, 0.00490804947912693, 0.015012365765869617, 0.06302572786808014, 0.01116714347153902, 0.22065599262714386, 0.021468764171004295, 0.01365464273840189, 0.022816751152276993, 0.019708380103111267, 0.0059420084580779076, 0.0700121819972992, 0.287899911403656, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07615644484758377, 0.1536630541086197, 0.1253354847431183, 0.048576656728982925, 0.05276811867952347, 0.1611642986536026, 0.12317243963479996, 0.32385867834091187, 0.012925365939736366, 0.0864856168627739, 0.08918802440166473, 0.23886144161224365, 0.20351386070251465, 0.20744860172271729, 0.13318131864070892, 0.058403778821229935, 0.0693131536245346, 0.04999461770057678, 0.004054869059473276, 0.0624610111117363, 0.018093721941113472, 0.07961009442806244, 0.1545858234167099, 0.3008257746696472, 0.14455094933509827, 0.09800520539283752, 0.09531621634960175, 0.27401015162467957, 0.4782770574092865, 0.11211755871772766, 0.01358953770250082, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.051417503505945206, 0.1600690335035324, 0.08639511466026306, 0.02997625432908535, 0.08503448963165283, 0.32695260643959045, 0.06822863221168518, 0.16364485025405884, 0.06138167902827263, 0.07786902785301208, 0.04443247988820076, 0.0585777647793293, 0.1263807862997055, 0.10769001394510269, 0.13808733224868774, 0.1399688720703125, 0.5559014678001404, 0.20350231230258942, 0.042011573910713196, 0.020507201552391052, 0.03915366902947426, 0.4243565797805786, 0.11376935243606567, 0.31140708923339844, 0.051479678601026535, 0.07416504621505737, 0.2654426097869873, 0.3960915207862854, 0.5790604948997498, 0.18063338100910187, 0.1939544379711151, 0.04191381484270096, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1321558654308319, 0.24967153370380402, 0.0761917233467102, 0.044561922550201416, 0.12028387933969498, 0.19908402860164642, 0.04708404839038849, 0.10076720267534256, 0.09921064227819443, 0.18345412611961365, 0.09404058009386063, 0.21650025248527527, 0.11625839024782181, 0.1530369222164154, 0.12011245638132095, 0.027515297755599022, 0.0486784465610981, 0.06845460832118988, 0.023408811539411545, 0.008863206952810287, 0.008533195592463017, 0.24178741872310638, 0.01229054294526577, 0.25817692279815674, 0.6869812607765198, 0.049950506538152695, 0.12178820371627808, 0.0564231351017952, 0.02026011236011982, 0.004908477421849966, 0.03562311828136444, 0.12746450304985046, 0.0016219470417127013, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10757170617580414, 0.1042957603931427, 0.13590699434280396, 0.06331591308116913, 0.24158470332622528, 0.09161848574876785, 0.0633605495095253, 0.13977625966072083, 0.03925082087516785, 0.07121878862380981, 0.1023484393954277, 0.26378345489501953, 0.10990181565284729, 0.12030858546495438, 0.1261080652475357, 0.11620164662599564, 0.09937138110399246, 0.17538107931613922, 0.40406307578086853, 0.043817292898893356, 0.05759625509381294, 0.49306368827819824, 0.09120260924100876, 0.36450278759002686, 0.08042807132005692, 0.1856311559677124, 0.1376025527715683, 0.1998283714056015, 0.3654005527496338, 0.15910619497299194, 
0.4969707429409027, 0.08565060794353485, 0.02514367550611496, 0.090617336332798, NaN, NaN, NaN, NaN, NaN, NaN], [0.06512168049812317, 0.13837532699108124, 0.3250073194503784, 0.16753129661083221, 0.21647527813911438, 0.04118574038147926, 0.03336784988641739, 0.029927842319011688, 0.03334499150514603, 0.08782976865768433, 0.17631417512893677, 0.3171449303627014, 0.10520178824663162, 0.15139654278755188, 0.0914224162697792, 0.0739481970667839, 0.5182103514671326, 0.19721719622612, 0.21118015050888062, 0.015751224011182785, 0.12249443680047989, 0.5174803733825684, 0.17075838148593903, 0.30025264620780945, 0.29246312379837036, 0.0875946432352066, 0.2326347827911377, 0.13986286520957947, 0.511695921421051, 0.12602318823337555, 0.03662485629320145, 0.1263200044631958, 0.0166145209223032, 0.19702456891536713, 0.09621746093034744, NaN, NaN, NaN, NaN, NaN], [0.06382797658443451, 0.2566763758659363, 0.11056842654943466, 0.028001734986901283, 0.2813059389591217, 0.24806144833564758, 0.07807287573814392, 0.05373501405119896, 0.21183612942695618, 0.09658068418502808, 0.05084875971078873, 0.501965343952179, 0.06208595260977745, 0.10913741588592529, 0.26912179589271545, 0.3052336871623993, 0.37224864959716797, 0.45515015721321106, 0.04986808821558952, 0.05332064628601074, 0.13846120238304138, 0.15990367531776428, 0.20659208297729492, 0.06640873104333878, 0.035323526710271835, 0.30340465903282166, 0.10174556821584702, 0.02102985605597496, 0.11508277803659439, 0.09203195571899414, 0.0029288395307958126, 0.023838462308049202, 0.004605103749781847, 0.052648112177848816, 0.006431906949728727, 0.026736242696642876, NaN, NaN, NaN, NaN], [0.08548272401094437, 0.017544403672218323, 0.011271107010543346, 0.022962557151913643, 0.05241750180721283, 0.02648325450718403, 0.3057800531387329, 0.19772306084632874, 0.025625178590416908, 0.03652432560920715, 0.006945622619241476, 0.05576859414577484, 0.00584550853818655, 0.008180957287549973, 0.12917736172676086, 0.047024402767419815, 0.1257133185863495, 0.052377521991729736, 0.009844984859228134, 0.015597687102854252, 0.06965665519237518, 0.01849394477903843, 0.1603521853685379, 0.02587857097387314, 0.00957732368260622, 0.023523790761828423, 0.020081259310245514, 0.008425970561802387, 0.10955916345119476, 0.35300737619400024, 0.023505402728915215, 0.00786643661558628, 0.007557017263025045, 0.013908758759498596, 0.004675114993005991, 0.035296451300382614, 0.3261549174785614, NaN, NaN, NaN], [0.03209112584590912, 0.1926622986793518, 0.09989916533231735, 0.02044818177819252, 0.04127199947834015, 0.22930434346199036, 0.09912838786840439, 0.3779822289943695, 0.007566491607576609, 0.046152934432029724, 0.04734500125050545, 0.35250937938690186, 0.10047939419746399, 0.16575956344604492, 0.13635975122451782, 0.11014947295188904, 0.08461853116750717, 0.02981843426823616, 0.004099451471120119, 0.009237504564225674, 0.011130756698548794, 0.132149338722229, 0.11619938164949417, 0.22203940153121948, 0.02292616292834282, 0.06793706119060516, 0.07227552682161331, 0.3262397348880768, 0.40601006150245667, 0.08270477503538132, 0.013506797142326832, 0.03135772421956062, 0.07034049183130264, 0.09623772650957108, 0.20842698216438293, 0.2752794623374939, 0.1234828308224678, 0.04129752516746521, NaN, NaN], [0.05301084369421005, 0.1661737710237503, 0.08216799795627594, 0.025789698585867882, 0.07900767773389816, 0.3054123520851135, 0.08738221228122711, 0.17720931768417358, 0.06289011240005493, 0.06967967748641968, 0.05491774156689644, 0.02886299602687359, 0.10253670811653137, 0.09415244311094284, 
0.129754438996315, 0.1182219609618187, 0.7384620308876038, 0.11492461711168289, 0.09884578734636307, 0.012010940350592136, 0.038200050592422485, 0.4905328154563904, 0.23439669609069824, 0.2528713345527649, 0.015177865512669086, 0.07817362248897552, 0.33532261848449707, 0.4971323609352112, 0.7384514212608337, 0.2383432686328888, 0.2306600660085678, 0.025716517120599747, 0.023198120296001434, 0.3352215886116028, 0.4797173738479614, 0.5688640475273132, 0.2555003762245178, 0.1890360713005066, 0.06237812712788582, NaN], [0.1895110011100769, 0.09308972954750061, 0.1887637972831726, 0.14927715063095093, 0.3653167188167572, 0.1686658412218094, 0.1126369759440422, 0.17013703286647797, 0.0685301423072815, 0.15278968214988708, 0.19327588379383087, 0.18825437128543854, 0.143904447555542, 0.143670454621315, 0.1203024610877037, 0.13153354823589325, 0.5476850867271423, 0.27465543150901794, 0.27658137679100037, 0.5121651291847229, 0.3939417600631714, 0.2527337968349457, 0.41937416791915894, 0.2437492311000824, 0.1485103964805603, 0.10651403665542603, 0.241710364818573, 0.34289923310279846, 0.3691290616989136, 0.108230821788311, 0.32214298844337463, 0.08876177668571472, 0.03369928151369095, 0.23942533135414124, 0.302080899477005, 0.3531237244606018, 0.09724070131778717, 0.19267186522483826, 0.06874143332242966, 0.052875734865665436]], [[0.5917359590530396, 0.12410512566566467, 0.24872945249080658, 0.20040015876293182, 0.21720361709594727, 0.11561702191829681, 0.58521568775177, 0.41413450241088867, 0.22558750212192535, 0.117314413189888, 0.3378458619117737, 0.10710897296667099, 0.0625920221209526, 0.24034489691257477, 0.0060951621271669865, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03933318331837654, 0.17479471862316132, 0.1999012678861618, 0.1507989913225174, 0.2344110906124115, 0.41628938913345337, 0.19733835756778717, 0.42009472846984863, 0.32125937938690186, 0.09302358329296112, 0.29758843779563904, 0.2500022351741791, 0.15192696452140808, 0.19621950387954712, 0.06078135594725609, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03998054191470146, 0.02165106125175953, 0.5779209733009338, 0.4094802737236023, 0.3219829499721527, 0.23359909653663635, 0.15223096311092377, 0.0776560828089714, 0.11850404739379883, 0.1752316802740097, 0.7765606641769409, 0.15624035894870758, 0.19448350369930267, 0.3389243483543396, 0.015656093135476112, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.2606712579727173, 0.23122362792491913, 0.33188652992248535, 0.327752023935318, 0.0930425301194191, 0.13157396018505096, 0.5079332590103149, 0.15524731576442719, 0.2039693295955658, 0.336448073387146, 0.7406277656555176, 0.11173539608716965, 0.03980698063969612, 0.2757716476917267, 0.009055807255208492, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.03992704302072525, 0.03562299162149429, 0.05761631205677986, 0.04593607783317566, 0.747100830078125, 0.13848423957824707, 0.25807130336761475, 0.11098858714103699, 0.025020861998200417, 0.027831630781292915, 0.07712040096521378, 0.5344594120979309, 0.28488224744796753, 0.37143638730049133, 0.060307834297418594, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.146702840924263, 
0.5779150128364563, 0.04704871401190758, 0.12512727081775665, 0.05839477851986885, 0.5817644596099854, 0.2541782557964325, 0.167904794216156, 0.020014837384223938, 0.0557471327483654, 0.1778557300567627, 0.29983726143836975, 0.34978994727134705, 0.3759990334510803, 0.07532685250043869, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.14372284710407257, 0.20398879051208496, 0.060162752866744995, 0.022449441254138947, 0.15882903337478638, 0.12907396256923676, 0.7781419157981873, 0.20689332485198975, 0.023098474368453026, 0.02567201852798462, 0.04225016012787819, 0.05647281929850578, 0.5644452571868896, 0.8062969446182251, 0.0037398021668195724, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.09274263679981232, 0.19406189024448395, 0.18035270273685455, 0.18292436003684998, 0.2674761116504669, 0.1057504341006279, 0.5214765071868896, 0.1765710562467575, 0.15375129878520966, 0.08563723415136337, 0.35003283619880676, 0.12250327318906784, 0.4574505388736725, 0.6043637990951538, 0.046846963465213776, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.3136129081249237, 0.10648278146982193, 0.02492944709956646, 0.07937752455472946, 0.16382691264152527, 0.40212482213974, 0.2148500233888626, 0.5046796798706055, 0.25625455379486084, 0.10382789373397827, 0.027611082419753075, 0.07138189673423767, 0.1265101283788681, 0.05298655480146408, 0.01642199046909809, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.7252353429794312, 0.23862500488758087, 0.17466871440410614, 0.2584758698940277, 0.15821219980716705, 0.41019105911254883, 0.4795793294906616, 0.2558479905128479, 0.061036378145217896, 0.5831483006477356, 0.23237691819667816, 0.36767491698265076, 0.07294586300849915, 0.0734395682811737, 0.006080146878957748, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.18402060866355896, 0.2199273407459259, 0.10670217871665955, 0.36498934030532837, 0.37264159321784973, 0.5975290536880493, 0.641157865524292, 0.4798426032066345, 0.07047704607248306, 0.30389490723609924, 0.6835307478904724, 0.29959914088249207, 0.32009243965148926, 0.2076108753681183, 0.015385132282972336, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.18547095358371735, 0.1046445369720459, 0.17664410173892975, 0.031107882037758827, 0.4872691333293915, 0.6876094937324524, 0.29805243015289307, 0.2697339355945587, 0.03289056569337845, 0.04577193781733513, 0.2390383929014206, 0.650258481502533, 0.6253164410591125, 0.2719551920890808, 0.042574722319841385, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.06026101112365723, 0.4596063494682312, 0.11362233757972717, 0.050736263394355774, 0.47900232672691345, 0.8146356344223022, 0.23428170382976532, 0.5258204936981201, 0.07407079637050629, 0.24087238311767578, 0.04631686583161354, 0.04097185283899307, 0.24002470076084137, 0.051092784851789474, 0.10185284167528152, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.05915316566824913, 0.3385859429836273, 0.23845957219600677, 
0.13520635664463043, 0.49372056126594543, 0.8321547508239746, 0.47351959347724915, 0.4942004382610321, 0.11661165207624435, 0.273796945810318, 0.09639480710029602, 0.07113680988550186, 0.3545372784137726, 0.3069557547569275, 0.026768943294882774, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.6326229572296143, 0.28129494190216064, 0.2424720972776413, 0.23961131274700165, 0.1532977670431137, 0.03248026221990585, 0.07237446308135986, 0.03991716355085373, 0.058106135576963425, 0.6791825294494629, 0.4868316352367401, 0.4841252863407135, 0.1838759332895279, 0.16229771077632904, 0.03779346123337746, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.20045556128025055, 0.06346653401851654, 0.1246497705578804, 0.132145956158638, 0.18068760633468628, 0.0611145943403244, 0.3011611998081207, 0.09648064523935318, 0.3848741054534912, 0.20776434242725372, 0.09024091809988022, 0.10095226764678955, 0.05726093426346779, 0.17784324288368225, 0.06983170658349991, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06639314442873001, 0.03837187588214874, 0.306266725063324, 0.09758531302213669, 0.10875808447599411, 0.20901371538639069, 0.0894559919834137, 0.21620051562786102, 0.13805773854255676, 0.07912127673625946, 0.3521624505519867, 0.036526914685964584, 0.1551785171031952, 0.14622288942337036, 0.19236178696155548, 0.03290099650621414, 0.3365767002105713, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03379146009683609, 0.11666905134916306, 0.02791847102344036, 0.04754703491926193, 0.02039634808897972, 0.23185299336910248, 0.07985613495111465, 0.3240954875946045, 0.04561735317111015, 0.061520081013441086, 0.18156962096691132, 0.10860903561115265, 0.3409081995487213, 0.3218340575695038, 0.13103368878364563, 0.003547579748556018, 0.004082763101905584, 0.4616691768169403, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06278766691684723, 0.001863734913058579, 0.30563783645629883, 0.056017640978097916, 0.245498925447464, 0.11060530692338943, 0.09064232558012009, 0.004372697789222002, 0.007118886336684227, 0.06251134723424911, 0.17941752076148987, 0.004394095856696367, 0.11450538039207458, 0.046043287962675095, 0.021101655438542366, 0.03595791012048721, 0.1313885897397995, 0.007101066876202822, 0.42131781578063965, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11553236097097397, 0.0885467380285263, 0.2750205993652344, 0.21104735136032104, 0.3459762930870056, 0.07976578176021576, 0.218110129237175, 0.05760955810546875, 0.09680842608213425, 0.2662138342857361, 0.21090076863765717, 0.41520535945892334, 0.21548694372177124, 0.2248467653989792, 0.10481394827365875, 0.007601147051900625, 0.014137630350887775, 0.01938864029943943, 0.2572920322418213, 0.0011994435917586088, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03112325258553028, 0.08175794035196304, 0.035110849887132645, 0.038375336676836014, 0.2468937784433365, 0.060934457927942276, 0.0843387246131897, 0.03423367813229561, 0.02026834897696972, 0.07970783859491348, 0.08959806710481644, 0.1693299561738968, 0.16057033836841583, 0.21660663187503815, 0.13329552114009857, 
0.00011468974116723984, 0.0032473355531692505, 0.00037737423554062843, 0.2793608605861664, 0.003465541172772646, 5.061212868895382e-05, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.09539461880922318, 0.058681365102529526, 0.01674766093492508, 0.02866855263710022, 0.012030106969177723, 0.21465063095092773, 0.034089475870132446, 0.04479566961526871, 0.014019637368619442, 0.035355255007743835, 0.1569557934999466, 0.01038492750376463, 0.06631091982126236, 0.1547483503818512, 0.19284123182296753, 0.21311266720294952, 0.10434294492006302, 0.011484598740935326, 0.0013334749964997172, 0.03845251351594925, 0.028238367289304733, 0.05654546618461609, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04954487085342407, 0.07065968960523605, 0.07275094836950302, 0.040997497737407684, 0.07946129143238068, 0.17300859093666077, 0.03222974017262459, 0.02469809167087078, 0.18557047843933105, 0.13542628288269043, 0.26776814460754395, 0.056715987622737885, 0.15973475575447083, 0.19029632210731506, 0.17610958218574524, 0.052184704691171646, 0.499632865190506, 0.005138374865055084, 0.10169705748558044, 0.09997230768203735, 0.036990027874708176, 0.07566682249307632, 0.32418423891067505, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.047577280551195145, 0.02606579288840294, 0.0165295097976923, 0.04137043654918671, 0.013305035419762135, 0.32835593819618225, 0.026565413922071457, 0.06772360950708389, 0.010228256694972515, 0.041277337819337845, 0.1336892545223236, 0.008326719515025616, 0.10322394222021103, 0.1976388841867447, 0.21077491343021393, 0.23645982146263123, 0.016864946112036705, 0.013305210508406162, 0.0007752762176096439, 0.017555342987179756, 0.03100133314728737, 0.04085567593574524, 0.029846351593732834, 0.010373883880674839, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.043893925845623016, 0.021177353337407112, 0.028366681188344955, 0.07016126066446304, 0.07573862373828888, 0.22699910402297974, 0.055615294724702835, 0.07980518788099289, 0.009269739501178265, 0.09460800141096115, 0.16427507996559143, 0.20832805335521698, 0.1427353024482727, 0.2680304944515228, 0.13907650113105774, 0.18805328011512756, 0.046367619186639786, 0.10314629226922989, 0.018223291262984276, 0.27720585465431213, 0.3798944056034088, 0.09291481226682663, 0.09293034672737122, 0.04290880635380745, 0.03370373696088791, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03411688283085823, 0.056632235646247864, 0.07365043461322784, 0.10934542864561081, 0.09185239672660828, 0.5077250003814697, 0.05141168087720871, 0.047258101403713226, 0.053326722234487534, 0.13365329802036285, 0.28296661376953125, 0.041020717471838, 0.08861301094293594, 0.13371184468269348, 0.11519401520490646, 0.028641005977988243, 0.03295213729143143, 0.0065453751012682915, 0.16686026751995087, 0.028714975342154503, 0.015397193841636181, 0.02003423683345318, 0.019093815237283707, 0.020523719489574432, 0.016172079369425774, 0.3490104377269745, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04096442833542824, 0.07374820858240128, 0.07300861179828644, 0.10121195018291473, 0.051522452384233475, 0.3508135676383972, 0.03948133811354637, 0.047985587269067764, 0.06340529769659042, 0.06765846908092499, 0.281475692987442, 0.05536516010761261, 0.1822110116481781, 0.22272904217243195, 0.13150985538959503, 0.10839971899986267, 0.004465002100914717, 
0.016082070767879486, 0.035488102585077286, 0.015600458718836308, 0.012030484154820442, 0.015872180461883545, 0.01552913524210453, 0.03533920273184776, 0.11401902139186859, 0.31523072719573975, 0.20448055863380432, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07982534170150757, 0.06016559898853302, 0.03820561617612839, 0.02410227432847023, 0.006901262793689966, 0.42442968487739563, 0.02364957146346569, 0.07835549116134644, 0.027230771258473396, 0.12123586237430573, 0.15446297824382782, 0.018115278333425522, 0.21087171137332916, 0.29417684674263, 0.08362340182065964, 0.18776558339595795, 0.0060520414263010025, 0.017473671585321426, 0.005528539884835482, 0.0027145782951265574, 0.012176988646388054, 0.0031525399535894394, 0.004637573380023241, 0.011988476850092411, 0.06979440897703171, 0.38327983021736145, 0.020156072452664375, 0.010166948661208153, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05696694925427437, 0.014171368442475796, 0.06200120970606804, 0.021368764340877533, 0.012162269093096256, 0.0841592326760292, 0.03827953711152077, 0.07895056158304214, 0.01159723848104477, 0.05937046930193901, 0.023348387330770493, 0.008824712596833706, 0.13521961867809296, 0.23698511719703674, 0.03196632117033005, 0.3064975440502167, 0.004262991715222597, 0.009997943416237831, 0.00034317225799895823, 0.013912403024733067, 0.02852706052362919, 0.004078225698322058, 0.001928618410602212, 0.006367305759340525, 0.035507142543792725, 0.050674788653850555, 0.007057875394821167, 0.0049485149793326855, 0.0049379738047719, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11678174138069153, 0.8205142617225647, 0.01038320455700159, 0.023903295397758484, 0.21764065325260162, 0.2580764889717102, 0.20165181159973145, 0.2900886535644531, 0.03504627197980881, 0.10256802290678024, 0.03713424876332283, 0.7063723206520081, 0.8779962062835693, 0.8367014527320862, 0.0919082760810852, 0.14988604187965393, 0.015584584325551987, 0.137997567653656, 0.0031439096201211214, 0.5546696782112122, 0.01658078096807003, 0.0025873971171677113, 0.0010246702004224062, 0.019667595624923706, 0.012580120004713535, 0.015491531230509281, 0.029023459181189537, 0.021588340401649475, 0.25595030188560486, 0.02325037308037281, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.038494985550642014, 0.05109047889709473, 0.07501792907714844, 0.04001014679670334, 0.021166233345866203, 0.03079657442867756, 0.01494709774851799, 0.010983827523887157, 0.0029027159325778484, 0.0995086133480072, 0.350593626499176, 0.02021479234099388, 0.34575650095939636, 0.21952421963214874, 0.05450797453522682, 0.07357528805732727, 0.007756352424621582, 0.002724927617236972, 0.001402079127728939, 0.0004431438574101776, 0.00010925461538136005, 0.0029409730341285467, 0.005563507787883282, 0.012139370664954185, 0.03890732303261757, 0.05558362230658531, 0.03318313509225845, 0.4270496368408203, 0.07112571597099304, 0.15036046504974365, 0.020786603912711143, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.028108511120080948, 0.08174566179513931, 0.03328564018011093, 0.03230520337820053, 0.012646276503801346, 0.1872790902853012, 0.025206655263900757, 0.06737280637025833, 0.033121660351753235, 0.08641302585601807, 0.2848047614097595, 0.059273794293403625, 0.18425194919109344, 0.15244826674461365, 0.1352420449256897, 0.012120572850108147, 0.0003307444858364761, 0.009640182368457317, 0.00017808230768423527, 0.0021490382496267557, 0.0008148089982569218, 0.0008481521508656442, 0.0019973982125520706, 0.005024890415370464, 
0.01719486527144909, 0.044799502938985825, 0.006444229744374752, 0.018026985228061676, 0.0067391968332231045, 0.061299871653318405, 0.01281613577157259, 0.3084925711154938, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07509021461009979, 0.05027765780687332, 0.23718997836112976, 0.11438266932964325, 0.11051909625530243, 0.431958943605423, 0.046987809240818024, 0.021854011341929436, 0.15366314351558685, 0.1928708851337433, 0.2900879681110382, 0.052021902054548264, 0.11538787186145782, 0.25173547863960266, 0.10233873873949051, 0.011204708367586136, 0.0033799665980041027, 0.008117830380797386, 0.1567971557378769, 0.012545537203550339, 0.002854604972526431, 0.0037395430263131857, 0.0003391341888345778, 0.002928558737039566, 0.004266565665602684, 0.28180748224258423, 0.005543314386159182, 0.0059068226255476475, 0.004401014186441898, 0.09436267614364624, 0.003524675266817212, 0.09697568416595459, 0.3818984925746918, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03257948160171509, 0.08023553341627121, 0.06238585337996483, 0.06856023520231247, 0.02927098423242569, 0.2968010902404785, 0.03317389637231827, 0.04758336395025253, 0.07943073660135269, 0.053982626646757126, 0.21416282653808594, 0.05025764927268028, 0.14347779750823975, 0.19969123601913452, 0.13921964168548584, 0.1085091158747673, 0.0013132937019690871, 0.011304548010230064, 0.014309195801615715, 0.009265521541237831, 0.00682368129491806, 0.01179590355604887, 0.005223054438829422, 0.01697726733982563, 0.05782441794872284, 0.2522926330566406, 0.16053971648216248, 0.020927468314766884, 0.02051178365945816, 0.1114674061536789, 0.014847181737422943, 0.40623563528060913, 0.12017090618610382, 0.2281613051891327, NaN, NaN, NaN, NaN, NaN, NaN], [0.07817428559064865, 0.11046875268220901, 0.040724072605371475, 0.024797527119517326, 0.004808576311916113, 0.5141928791999817, 0.024754824116826057, 0.080713652074337, 0.03179122135043144, 0.12244449555873871, 0.22665926814079285, 0.013305582106113434, 0.23485711216926575, 0.323343425989151, 0.10171245783567429, 0.23926517367362976, 0.007461922243237495, 0.015478387475013733, 0.02120528556406498, 0.0046339076943695545, 0.01287792343646288, 0.005305645987391472, 0.0037130024284124374, 0.011430526152253151, 0.10132863372564316, 0.42019084095954895, 0.03134358674287796, 0.006659360136836767, 0.0015345009742304683, 0.05340040102601051, 0.0021821516565978527, 0.15366847813129425, 0.09343723207712173, 0.04055917635560036, 0.009410854429006577, NaN, NaN, NaN, NaN, NaN], [0.03765244409441948, 0.0463164821267128, 0.06456112116575241, 0.05319739878177643, 0.010156691074371338, 0.1155625581741333, 0.02458079345524311, 0.07648347318172455, 0.019683409482240677, 0.06488858163356781, 0.09342794120311737, 0.059032924473285675, 0.15581923723220825, 0.2894386053085327, 0.04157077521085739, 0.3882482349872589, 0.012203006073832512, 0.008404962718486786, 0.0008633172838017344, 0.07213836163282394, 0.03903299570083618, 0.006879106629639864, 0.0025245456490665674, 0.011604986153542995, 0.1302306056022644, 0.05970751494169235, 0.005057368893176317, 0.0025832061655819416, 0.003548768814653158, 0.03821956738829613, 0.0041786422953009605, 0.029319334775209427, 0.009258194826543331, 0.010013489983975887, 0.0024901984725147486, 0.009316755458712578, NaN, NaN, NaN, NaN], [0.14924734830856323, 0.8862696886062622, 0.013125438243150711, 0.033269379287958145, 0.22599543631076813, 0.33975404500961304, 0.25561264157295227, 0.36481109261512756, 0.05327271297574043, 0.09902165085077286, 0.03598061203956604, 0.754990816116333, 0.9104278087615967, 
0.8631682395935059, 0.10125402361154556, 0.08333727717399597, 0.009125825949013233, 0.12352871894836426, 0.0034849271178245544, 0.49194949865341187, 0.008760062977671623, 0.002427457133308053, 0.0004761714953929186, 0.014378424733877182, 0.007653949782252312, 0.010163314640522003, 0.018072640523314476, 0.014914281666278839, 0.33540958166122437, 0.012212751433253288, 0.050671979784965515, 0.08942927420139313, 0.0058481828309595585, 0.02088618278503418, 0.013520943000912666, 0.3026564419269562, 0.011637967079877853, NaN, NaN, NaN], [0.03672042489051819, 0.12888115644454956, 0.1578092873096466, 0.056865133345127106, 0.03288109228014946, 0.1379515379667282, 0.021150214597582817, 0.013284055516123772, 0.003249341854825616, 0.08646353334188461, 0.5471532940864563, 0.0361909456551075, 0.5093809366226196, 0.39931434392929077, 0.07520455867052078, 0.019913960248231888, 0.003490668721497059, 0.00020567848696373403, 0.00036819992237724364, 0.00019341551524121314, 3.8652269722661003e-05, 0.0008544524316675961, 0.002890991745516658, 0.001110991695895791, 0.005157719366252422, 0.008338885381817818, 0.0030357406940311193, 0.14557099342346191, 0.021602485328912735, 0.04367346689105034, 0.0015647107502445579, 0.009655454196035862, 0.14827704429626465, 0.008163533173501492, 0.49237948656082153, 0.06938102096319199, 0.08394628763198853, 0.049248531460762024, NaN, NaN], [0.03492635861039162, 0.09938696771860123, 0.028945090249180794, 0.03084651380777359, 0.012707062065601349, 0.15071596205234528, 0.029011720791459084, 0.05455483868718147, 0.03256314992904663, 0.07100401073694229, 0.2587825059890747, 0.05546442046761513, 0.17298617959022522, 0.15517692267894745, 0.13362783193588257, 0.010580360889434814, 0.00023049254377838224, 0.00745873898267746, 0.00016025979130063206, 0.002226235345005989, 0.0004258991975802928, 0.000578688399400562, 0.0014760587364435196, 0.002039685845375061, 0.0048048608005046844, 0.019996320828795433, 0.0029125709552317858, 0.006709430366754532, 0.0017099445685744286, 0.02097223326563835, 0.0024284888058900833, 0.10361000150442123, 0.022238893434405327, 0.009704988449811935, 0.017071064561605453, 0.011506098322570324, 0.0406200997531414, 0.0063119689002633095, 0.36112311482429504, NaN], [0.050736088305711746, 0.10139954090118408, 0.08949553966522217, 0.0938185378909111, 0.06053004041314125, 0.18139560520648956, 0.0767659917473793, 0.11340610682964325, 0.19499026238918304, 0.11419404298067093, 0.23666803538799286, 0.05730360746383667, 0.07293370366096497, 0.11558260023593903, 0.12613430619239807, 0.07011571526527405, 0.029766615480184555, 0.05616272985935211, 0.02569880336523056, 0.02553572878241539, 0.010698755271732807, 0.02022577077150345, 0.01824677176773548, 0.03918607532978058, 0.034657131880521774, 0.11515442281961441, 0.05569382756948471, 0.035370998084545135, 0.047812946140766144, 0.1140216588973999, 0.018943075090646744, 0.09709078818559647, 0.08172454684972763, 0.04602199047803879, 0.02941049635410309, 0.031383853405714035, 0.10708537697792053, 0.012693268246948719, 0.07050468772649765, 0.25427982211112976]], [[0.04456469416618347, 0.016716457903385162, 0.08688971400260925, 0.23432573676109314, 0.12769784033298492, 0.0498066172003746, 0.10501405596733093, 0.14398211240768433, 0.3055479824542999, 0.0823235884308815, 0.23467087745666504, 0.6305257678031921, 0.08790664374828339, 0.14063040912151337, 0.13028757274150848, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.04107241332530975, 
0.03620494529604912, 0.07322828471660614, 0.1027759537100792, 0.08743055909872055, 0.016458408907055855, 0.09779228270053864, 0.014780157245695591, 0.09821301698684692, 0.025402111932635307, 0.0808086097240448, 0.08257035166025162, 0.07231960445642471, 0.0895148441195488, 0.19708459079265594, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1263897716999054, 0.01533158216625452, 0.08717449009418488, 0.22571881115436554, 0.06928549706935883, 0.16778334975242615, 0.06136450543999672, 0.07180161774158478, 0.2525678873062134, 0.32249853014945984, 0.08566119521856308, 0.48726531863212585, 0.2929263114929199, 0.21127133071422577, 0.12448348850011826, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1481804996728897, 0.04817945510149002, 0.03058626689016819, 0.13171793520450592, 0.10783855617046356, 0.24912205338478088, 0.1342363804578781, 0.28650397062301636, 0.25943103432655334, 0.2756144404411316, 0.08422903716564178, 0.7444766163825989, 0.7611673474311829, 0.5739472508430481, 0.11213001608848572, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1744699776172638, 0.050404343754053116, 0.018338145688176155, 0.11463086307048798, 0.02370826154947281, 0.09417468309402466, 0.04503462836146355, 0.0389062762260437, 0.1780962496995926, 0.7825090885162354, 0.15977078676223755, 0.2598268687725067, 0.05674973130226135, 0.2742767333984375, 0.15589554607868195, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.26428407430648804, 0.0871720165014267, 0.015494171530008316, 0.31054598093032837, 0.31179672479629517, 0.05687993764877319, 0.05327969416975975, 0.14049863815307617, 0.03721972927451134, 0.33735793828964233, 0.06669215857982635, 0.44665512442588806, 0.1105320155620575, 0.07633788883686066, 0.13637836277484894, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.27871736884117126, 0.07987862080335617, 0.06999076902866364, 0.3873903453350067, 0.3669894337654114, 0.0245819091796875, 0.02483827993273735, 0.08571609854698181, 0.04856930300593376, 0.2826782464981079, 0.10519464313983917, 0.8515737056732178, 0.24991582334041595, 0.08752243965864182, 0.1076057106256485, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.18780259788036346, 0.02093103528022766, 0.1730981320142746, 0.27918383479118347, 0.32355740666389465, 0.05090703070163727, 0.030107326805591583, 0.015694553032517433, 0.08293543756008148, 0.11989035457372665, 0.1594303995370865, 0.6402391195297241, 0.08334839344024658, 0.13423335552215576, 0.16886292397975922, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.23048973083496094, 0.05534357205033302, 0.15910016000270844, 0.5473513603210449, 0.11114095151424408, 0.060548413544893265, 0.23547381162643433, 0.0231330469250679, 0.22654443979263306, 0.16574865579605103, 0.03383632004261017, 0.05167527496814728, 0.026772163808345795, 0.028301218524575233, 0.08144620060920715, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.126570925116539, 0.0055835917592048645, 
0.7687394022941589, 0.6136845350265503, 0.7887718677520752, 0.24027548730373383, 0.25543272495269775, 0.017155619338154793, 0.01121050026267767, 0.02180907502770424, 0.06387564539909363, 0.04227403923869133, 0.004662328865379095, 0.0204116590321064, 0.16526305675506592, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.3619309663772583, 0.022692076861858368, 0.8739812970161438, 0.5600091814994812, 0.4330839216709137, 0.27864721417427063, 0.1654776781797409, 0.02327956072986126, 0.003977042157202959, 0.0664801374077797, 0.12084753066301346, 0.16815124452114105, 0.07773539423942566, 0.17824198305606842, 0.05263833701610565, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.29354482889175415, 0.16078433394432068, 0.705570638179779, 0.44417092204093933, 0.02176845259964466, 0.15997210144996643, 0.4057019054889679, 0.11617531627416611, 0.010741903446614742, 0.06882698833942413, 0.07046788930892944, 0.041601523756980896, 0.011864392086863518, 0.06714706867933273, 0.14988133311271667, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.5400083065032959, 0.2319646179676056, 0.6198285818099976, 0.2858767509460449, 0.1694929450750351, 0.06001640111207962, 0.26940232515335083, 0.06411167979240417, 0.02847147174179554, 0.18856319785118103, 0.05879069119691849, 0.03795049339532852, 0.009596540592610836, 0.023393897339701653, 0.14663995802402496, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.6488012075424194, 0.15997910499572754, 0.6486002802848816, 0.4859846830368042, 0.34752336144447327, 0.028076842427253723, 0.12281371653079987, 0.019826101139187813, 0.023531395941972733, 0.15743687748908997, 0.059922393411397934, 0.08707788586616516, 0.005486410576850176, 0.025385212153196335, 0.15706156194210052, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.037294961512088776, 0.2018004208803177, 0.33537882566452026, 0.19571122527122498, 0.0998593419790268, 0.48263466358184814, 0.11429780721664429, 0.20324908196926117, 0.7053001523017883, 0.01905757561326027, 0.1765546351671219, 0.10779165476560593, 0.18456625938415527, 0.16855330765247345, 0.014784654602408409, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.1489560306072235, 0.2212677150964737, 0.055408962070941925, 0.03110104240477085, 0.02513720653951168, 0.07830048352479935, 0.05067736655473709, 0.06611648201942444, 0.02238955721259117, 0.03719142824411392, 0.025896798819303513, 0.04350690543651581, 0.11618120968341827, 0.08714473247528076, 0.15466241538524628, 0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.002932992298156023, 0.307859867811203, 0.008187332190573215, 0.003677746979519725, 0.0005738585605286062, 0.0008406178676523268, 0.0005446207360364497, 0.00039283244404941797, 0.0009221792570315301, 0.000758469570428133, 0.003933709114789963, 0.0009352274937555194, 0.001059120986610651, 0.0020118390675634146, 0.010183396749198437, 0.1627129465341568, 0.03836298733949661, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], 
[0.37297555804252625, 0.09208715707063675, 0.16802547872066498, 0.11860792338848114, 0.08042033761739731, 0.18612971901893616, 0.45423436164855957, 0.07133221626281738, 0.13892753422260284, 0.3810507357120514, 0.291797935962677, 0.16154640913009644, 0.050885219126939774, 0.10468144714832306, 0.10335776954889297, 0.23664157092571259, 0.02332315407693386, 0.0017523575806990266, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.028274476528167725, 0.018124615773558617, 0.13954800367355347, 0.03560209274291992, 0.08428613841533661, 0.17491763830184937, 0.13035845756530762, 0.0214189775288105, 0.009060325101017952, 0.012400318868458271, 0.031279344111680984, 0.011209131218492985, 0.19533281028270721, 0.012452301569283009, 0.020085560157895088, 0.14284735918045044, 0.19342879951000214, 0.5212197303771973, 0.028613613918423653, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11180772632360458, 0.012462746351957321, 0.04844700172543526, 0.06198285147547722, 0.06685204058885574, 0.44600817561149597, 0.30352795124053955, 0.1519387811422348, 0.003835479263216257, 0.08384031802415848, 0.027865614742040634, 0.159846231341362, 0.46423590183258057, 0.09249147027730942, 0.09178084880113602, 0.022152410820126534, 0.06252314150333405, 0.005122532602399588, 0.24202540516853333, 0.0027534610126167536, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04840230569243431, 0.026793736964464188, 0.1120820939540863, 0.09037120640277863, 0.2328549474477768, 0.1063276007771492, 0.14073747396469116, 0.19612964987754822, 0.1904316544532776, 0.10354755818843842, 0.10268037766218185, 0.13820117712020874, 0.3374333083629608, 0.15443934500217438, 0.12536528706550598, 0.04657726734876633, 0.23517371714115143, 0.03296450525522232, 0.2014523595571518, 0.06359406560659409, 0.0884864553809166, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.36786824464797974, 0.056283749639987946, 0.03846094757318497, 0.07181648164987564, 0.03666122257709503, 0.04024837538599968, 0.5659748911857605, 0.2338860183954239, 0.11518415063619614, 0.3659259080886841, 0.04107162728905678, 0.012827688828110695, 0.0609581284224987, 0.02837788313627243, 0.060403015464544296, 0.05186963453888893, 0.02286554127931595, 0.21517929434776306, 0.12055587023496628, 0.1711670458316803, 0.27492430806159973, 0.27398592233657837, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0033490851055830717, 0.001678164815530181, 0.02563566155731678, 0.028815647587180138, 0.007257265504449606, 0.04370535537600517, 0.026118090376257896, 0.435838907957077, 0.005564961116760969, 0.014266176149249077, 0.018343305215239525, 0.0009297388605773449, 0.03809681162238121, 0.020595146343111992, 0.03566184639930725, 0.020278872922062874, 0.02308776043355465, 0.022820638492703438, 0.18259893357753754, 0.3133871257305145, 0.08183155953884125, 0.35655686259269714, 0.17295894026756287, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.34718528389930725, 0.028826624155044556, 0.05378839746117592, 0.0680842474102974, 0.0254778191447258, 0.1994519978761673, 0.7739751935005188, 0.28213825821876526, 0.24756361544132233, 0.3363908529281616, 0.08445209264755249, 0.0067241075448691845, 0.09118638187646866, 0.04656682163476944, 0.0331079363822937, 0.057175230234861374, 0.2799927890300751, 
[Placeholder for a large block of serialized attention-visualization data: nested per-layer, per-head attention matrices under the key "att" (query × key probabilities, with NaN marking masked future positions), together with the token lists "top_text" and "bot_text". The example sentence is "The animal didn't cross the street because it was too tired", with the decoded German output "Das Tier überquerte die Straße nicht , weil es zu müde war , weil es zu müde war .". Only this numeric dump is summarized here; no prose content was present in the omitted span.]
0.02226697839796543, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.44942334294319153, 0.3777551054954529, 0.7612449526786804, 0.7021526098251343, 0.30080679059028625, 0.4424319267272949, 0.22922295331954956, 0.04627525433897972, 0.055941756814718246, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.47138965129852295, 0.18856076896190643, 0.6503154039382935, 0.9041082859039307, 0.2803841233253479, 0.4006999135017395, 0.5757170915603638, 0.295682817697525, 0.04142303764820099, 0.006079117301851511, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.24097655713558197, 0.15950126945972443, 0.6649572849273682, 0.6751598119735718, 0.46790093183517456, 0.6438081860542297, 0.3765251934528351, 0.2975021302700043, 0.10267924517393112, 0.060453154146671295, 0.03869982063770294, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.39086097478866577, 0.6666929125785828, 0.5642580389976501, 0.557075023651123, 0.25761184096336365, 0.3620971143245697, 0.656988263130188, 0.301082581281662, 0.3758563995361328, 0.026163028553128242, 0.024990877136588097, 0.0074356794357299805, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.7909376621246338, 0.3817039430141449, 0.6133569478988647, 0.41290101408958435, 0.30558884143829346, 0.6049348711967468, 0.5688384175300598, 0.4680134057998657, 0.6550416946411133, 0.42371857166290283, 0.10508850961923599, 0.021316751837730408, 0.05294431000947952, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17973686754703522, 0.17233335971832275, 0.334688276052475, 0.4481850564479828, 0.04172942414879799, 0.10337609797716141, 0.5107487440109253, 0.7207926511764526, 0.1405051052570343, 0.0654703825712204, 0.41273486614227295, 0.17914383113384247, 0.042542651295661926, 0.010745447129011154, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.5207539200782776, 0.308788537979126, 0.08189663290977478, 0.5850351452827454, 0.3457651734352112, 0.15844188630580902, 0.2948668897151947, 0.4065589904785156, 0.12084604799747467, 0.29343682527542114, 0.49164822697639465, 0.07233413308858871, 0.0535273477435112, 0.014947501011192799, 0.008541097864508629, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2949400544166565, 0.03748409450054169, 0.14473117887973785, 0.0705113336443901, 0.013025683350861073, 0.005298166535794735, 0.21091029047966003, 0.014800299890339375, 0.2805088758468628, 0.000897476973477751, 0.0938984826207161, 0.004705057479441166, 0.04936474934220314, 0.011992034502327442, 0.18721424043178558, 0.00230285432189703, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.44276589155197144, 0.06478449702262878, 0.543609619140625, 0.8444110155105591, 0.13468694686889648, 0.4405028522014618, 0.6528593897819519, 0.5737791061401367, 0.6313535571098328, 0.8501816987991333, 0.4486657381057739, 0.06076665595173836, 0.7409859299659729, 0.15147589147090912, 0.20801351964473724, 0.027446726337075233, 0.036936238408088684, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.5445577502250671, 0.2876933515071869, 0.7013069987297058, 0.627236008644104, 0.37061285972595215, 0.6206991076469421, 0.38252583146095276, 0.4230470061302185, 0.31842562556266785, 0.28603002429008484, 0.015331648290157318, 0.14692452549934387, 0.8622261881828308, 0.049388445913791656, 0.37183380126953125, 0.17907747626304626, 0.05781394988298416, 0.020684318616986275, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.4656296670436859, 0.6725881099700928, 0.6199259161949158, 
0.6479836702346802, 0.24076998233795166, 0.34658652544021606, 0.5947279930114746, 0.37259459495544434, 0.5521662831306458, 0.14718003571033478, 0.19626900553703308, 0.024240192025899887, 0.27736979722976685, 0.05565635487437248, 0.3618892729282379, 0.44332295656204224, 0.027751203626394272, 0.0260067880153656, 0.010717106983065605, NaN, NaN, NaN, NaN, NaN, NaN], [0.830940842628479, 0.42077580094337463, 0.7156820893287659, 0.57599937915802, 0.5493759512901306, 0.7128159999847412, 0.5476810932159424, 0.527928352355957, 0.8053308725357056, 0.8646240234375, 0.542984127998352, 0.2950981855392456, 0.3170693516731262, 0.5610483884811401, 0.26465174555778503, 0.45835256576538086, 0.22733505070209503, 0.10187508910894394, 0.03538959100842476, 0.07069608569145203, NaN, NaN, NaN, NaN, NaN], [0.09599269181489944, 0.08247342705726624, 0.25253206491470337, 0.4357891380786896, 0.039192523807287216, 0.0719948410987854, 0.3563676178455353, 0.5300538539886475, 0.06311739236116409, 0.037909455597400665, 0.5032193064689636, 0.39894816279411316, 0.3283153772354126, 0.21619060635566711, 0.017918655648827553, 0.2577371895313263, 0.14531975984573364, 0.346793532371521, 0.2014700472354889, 0.0539211668074131, 0.0146569162607193, NaN, NaN, NaN, NaN], [0.6422337889671326, 0.3740711212158203, 0.10689651221036911, 0.6858291029930115, 0.4494076073169708, 0.2826421856880188, 0.3886936604976654, 0.475405216217041, 0.13226336240768433, 0.3073323965072632, 0.7139697670936584, 0.17356495559215546, 0.25040003657341003, 0.23144030570983887, 0.024455448612570763, 0.4280460476875305, 0.048713963478803635, 0.3974619209766388, 0.06130422651767731, 0.05969162657856941, 0.015271119773387909, 0.00685582309961319, NaN, NaN, NaN], [0.5218734741210938, 0.03395698964595795, 0.2861349880695343, 0.13773199915885925, 0.02211177349090576, 0.014614011161029339, 0.43378758430480957, 0.02492188662290573, 0.26067787408828735, 0.0009113854030147195, 0.1411941796541214, 0.009023642167448997, 0.14982649683952332, 0.15959703922271729, 0.7153633832931519, 0.014257365837693214, 0.06102409213781357, 0.12158294767141342, 0.006897313520312309, 0.06130388379096985, 0.012951835058629513, 0.16874605417251587, 0.002189028775319457, NaN, NaN], [0.45293620228767395, 0.05202305316925049, 0.4803192913532257, 0.8224762082099915, 0.10338833183050156, 0.2861584722995758, 0.8321961760520935, 0.7622299790382385, 0.5323314070701599, 0.8633370995521545, 0.5219312310218811, 0.07432084530591965, 0.7646023631095886, 0.4150907099246979, 0.4998815357685089, 0.606073796749115, 0.2854492664337158, 0.6639280319213867, 0.09482558071613312, 0.806840717792511, 0.19665148854255676, 0.18194931745529175, 0.01953776553273201, 0.037144362926483154, NaN], [0.8357685804367065, 0.6023411154747009, 0.16389556229114532, 0.4697819948196411, 0.05014880374073982, 0.3185025751590729, 0.2618474066257477, 0.7044641375541687, 0.16675803065299988, 0.7323283553123474, 0.14429442584514618, 0.2621355652809143, 0.041847843676805496, 0.3185603618621826, 0.04513467848300934, 0.49906620383262634, 0.611339807510376, 0.21515053510665894, 0.3302164673805237, 0.04920952767133713, 0.2760073244571686, 0.0218669306486845, 0.25043201446533203, 0.13627314567565918, 0.01334126852452755]]], [[[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13569742441177368, 0.0376364141702652, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05053132027387619, 
0.5417848825454712, 0.07814626395702362, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03762863576412201, 0.4749486744403839, 0.013701170682907104, 0.053301598876714706, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10598134994506836, 0.16776065528392792, 0.11929589509963989, 0.16846179962158203, 0.40715572237968445, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05147748813033104, 0.203742116689682, 0.11462464928627014, 0.46246808767318726, 0.01836300455033779, 0.02458924613893032, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17594558000564575, 0.17753779888153076, 0.024665912613272667, 0.19817322492599487, 0.008797828108072281, 0.022263213992118835, 0.29173722863197327, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.016114797443151474, 0.0061007170006632805, 0.028504224494099617, 0.017245782539248466, 0.08753485232591629, 0.11264273524284363, 0.6154332160949707, 0.029144972562789917, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.027042992413043976, 0.032212790101766586, 0.019619816914200783, 0.014702342450618744, 0.06721275299787521, 0.2560867667198181, 0.5545244216918945, 0.40561506152153015, 0.037922732532024384, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1654873937368393, 0.013622531667351723, 0.0656571239233017, 0.09179358184337616, 0.03440919890999794, 0.08533406257629395, 0.16269220411777496, 0.1151970624923706, 0.09265416115522385, 0.028269361704587936, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2598540484905243, 0.010173649527132511, 0.004170349799096584, 0.003479698905721307, 0.0014636714477092028, 0.0011101020500063896, 0.001677120802924037, 0.034040722995996475, 0.0041177538223564625, 0.024958845227956772, 0.016315795481204987, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17492477595806122, 0.010013026185333729, 0.005800239276140928, 0.0069971769116818905, 0.0036480696871876717, 0.001016399241052568, 0.0060493675991892815, 0.0034581662621349096, 0.00659980857744813, 0.0047594537027180195, 0.3941299021244049, 0.2407994568347931, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06559828668832779, 0.005602334160357714, 0.0005807551206089556, 0.0005322807701304555, 0.004617360420525074, 0.00354054500348866, 0.005599506665021181, 0.011434626765549183, 0.006905066315084696, 0.009602343663573265, 0.11027393490076065, 0.36931946873664856, 0.06368503719568253, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.015983520075678825, 0.012168757617473602, 0.0015684146201238036, 0.0005484889261424541, 0.00233695306815207, 0.0038106110878288746, 0.005947766825556755, 0.04194773733615875, 0.014443459920585155, 0.06465759128332138, 0.14989611506462097, 0.5095774531364441, 0.1882752925157547, 0.02387852594256401, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11159919947385788, 0.06036144495010376, 0.06681493669748306, 0.0798669382929802, 0.03668922558426857, 0.018710536882281303, 0.029976846650242805, 0.0675768032670021, 0.03372039645910263, 0.057603828608989716, 0.14515243470668793, 0.25060775876045227, 0.23181115090847015, 0.14262832701206207, 0.33286023139953613, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], 
[0.018035059794783592, 0.02341379225254059, 0.0019442361081019044, 0.004369894042611122, 0.00136191223282367, 0.00017434914479963481, 0.0011034610215574503, 0.06787250190973282, 0.060198791325092316, 0.12004764378070831, 0.11878902465105057, 0.2063554972410202, 0.28332868218421936, 0.35319504141807556, 0.008158767595887184, 0.26057863235473633, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17278411984443665, 0.007028562016785145, 0.010641193017363548, 0.013809186406433582, 0.0005732428980991244, 0.001056239241734147, 0.0005258666351437569, 0.03639528155326843, 0.02256075292825699, 0.01660884916782379, 0.1527748554944992, 0.1477358043193817, 0.2577149271965027, 0.03867224231362343, 0.04304511100053787, 0.11759469658136368, 0.0762997567653656, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.38573285937309265, 0.0028330886270850897, 0.0014278099406510592, 0.0009824484586715698, 9.371336636831984e-05, 0.00015483389142900705, 6.760591350030154e-05, 0.0035791138652712107, 0.0002520910056773573, 0.0005180046427994967, 0.00024238335026893765, 0.011901103891432285, 0.011019378900527954, 0.006276060827076435, 0.0026990415062755346, 0.016820058226585388, 0.03330027312040329, 0.047877803444862366, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.21399648487567902, 0.008264300413429737, 0.0051351506263017654, 0.005111425183713436, 0.0020249083172529936, 0.00047485672985203564, 0.0018332998733967543, 0.0008904117858037353, 0.0017731828847900033, 0.000539442349690944, 0.03944296017289162, 0.039767228066921234, 0.00580678740516305, 0.004312179517000914, 0.003937484696507454, 0.00913114845752716, 0.006211036816239357, 0.3553882837295532, 0.3024981617927551, NaN, NaN, NaN, NaN, NaN, NaN], [0.05261809378862381, 0.004144520964473486, 0.00047606538282707334, 0.0003396419051568955, 0.002880769083276391, 0.0015178520698100328, 0.0018901955336332321, 0.0029504895210266113, 0.0017174717504531145, 0.0006908842478878796, 0.0046035549603402615, 0.09042679518461227, 0.0032755613792687654, 0.007712012622505426, 0.032594844698905945, 0.02268057130277157, 0.033856723457574844, 0.07955116033554077, 0.4074561595916748, 0.07153668999671936, NaN, NaN, NaN, NaN, NaN], [0.019381573423743248, 0.012705344706773758, 0.0019882190972566605, 0.0005741973291151226, 0.0020475401543080807, 0.0023934554774314165, 0.004172713495790958, 0.021013854071497917, 0.005879250820726156, 0.006729640066623688, 0.00632414361461997, 0.09735815972089767, 0.01909361220896244, 0.00100265524815768, 0.003452989971265197, 0.008203250356018543, 0.05971603840589523, 0.11904174834489822, 0.5188009142875671, 0.2541559338569641, 0.029506316408514977, NaN, NaN, NaN, NaN], [0.10572486370801926, 0.04525948688387871, 0.055838145315647125, 0.050681136548519135, 0.027844024822115898, 0.014026278629899025, 0.025656970217823982, 0.0361209474503994, 0.017075760290026665, 0.01003955863416195, 0.016965145245194435, 0.04991300031542778, 0.01522271428257227, 0.007584442384541035, 0.03757705166935921, 0.03609456866979599, 0.10922907292842865, 0.19329114258289337, 0.2903786897659302, 0.29551932215690613, 0.1564989984035492, 0.3518115282058716, NaN, NaN, NaN], [0.017342884093523026, 0.024629754945635796, 0.0017386168474331498, 0.003977979999035597, 0.0011948446044698358, 0.0001711023651296273, 0.0019097719341516495, 0.050265345722436905, 0.048485398292541504, 0.025773482397198677, 0.011941587552428246, 0.02582539990544319, 0.014500979334115982, 0.011088544502854347, 0.0004536270862445235, 0.001346826204098761, 0.09912228584289551, 0.03899921476840973, 0.19399496912956238, 
0.33165985345840454, 0.3351045250892639, 0.007158405613154173, 0.26822295784950256, NaN, NaN], [0.15815527737140656, 0.009173951111733913, 0.012453499250113964, 0.01756284572184086, 0.0007500716019421816, 0.0020462200045585632, 0.00166225153952837, 0.05335438624024391, 0.037105023860931396, 0.009711050428450108, 0.05516523867845535, 0.04893142729997635, 0.03887411952018738, 0.002221355913206935, 0.004346344619989395, 0.004376854281872511, 0.001785764587111771, 0.09844812005758286, 0.14674220979213715, 0.34636548161506653, 0.04763580113649368, 0.057022612541913986, 0.12166893482208252, 0.13556897640228271, NaN], [0.16895240545272827, 0.0006144722574390471, 0.0027162963524460793, 0.0007400937611237168, 0.0007253509247675538, 0.0007097159395925701, 0.000199983871425502, 0.0005034026107750833, 0.0002540702698752284, 0.0002154638059437275, 0.0004817947919946164, 0.0019994170870631933, 0.0003459753352217376, 6.575404404429719e-05, 0.004540599416941404, 0.00010029276745626703, 0.0005050064064562321, 0.003569946391507983, 0.008527955040335655, 0.003213587449863553, 0.0022120880894362926, 0.11142478138208389, 0.01313241571187973, 0.055687084794044495, 0.21235007047653198]], [[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13440807163715363, 0.048166193068027496, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14904144406318665, 0.03273539990186691, 0.03615117073059082, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17614386975765228, 0.0854690745472908, 0.038236960768699646, 0.12011754512786865, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14069411158561707, 0.1466522365808487, 0.07941046357154846, 0.06070372834801674, 0.045592159032821655, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15778480470180511, 0.11167039722204208, 0.20017755031585693, 0.10082826018333435, 0.013994856737554073, 0.07346371561288834, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15305520594120026, 0.26692208647727966, 0.1222626119852066, 0.14178596436977386, 0.012799645774066448, 0.019025815650820732, 0.14782781898975372, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.050227321684360504, 0.49922510981559753, 0.2564227879047394, 0.37594476342201233, 0.05222875997424126, 0.019398091360926628, 0.07475102692842484, 0.13636687397956848, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1278427243232727, 0.4489462971687317, 0.09382158517837524, 0.09914611279964447, 0.11451858282089233, 0.14035384356975555, 0.0858180820941925, 0.1395546793937683, 0.05027398467063904, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06907324492931366, 0.44302117824554443, 0.21607427299022675, 0.21861647069454193, 0.14559195935726166, 0.12854896485805511, 0.21420170366764069, 0.5056769251823425, 0.05036870762705803, 0.14160890877246857, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08832916617393494, 0.4917650520801544, 0.16961733996868134, 0.21240676939487457, 0.17275941371917725, 0.13381528854370117, 0.1763075888156891, 0.3443826735019684, 0.022638684138655663, 0.14659351110458374, 0.05034468695521355, NaN, 
NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10765255987644196, 0.1569133847951889, 0.14696621894836426, 0.12414205074310303, 0.1321374922990799, 0.32589367032051086, 0.09939466416835785, 0.15668180584907532, 0.035531532019376755, 0.18526552617549896, 0.100669264793396, 0.1766001582145691, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0920143872499466, 0.03631591796875, 0.10338561236858368, 0.13865944743156433, 0.14365890622138977, 0.19164490699768066, 0.08302215486764908, 0.17053648829460144, 0.20418454706668854, 0.4243081212043762, 0.23730118572711945, 0.11353020370006561, 0.062482837587594986, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14247462153434753, 0.10275112092494965, 0.08782284706830978, 0.07633533328771591, 0.09427531808614731, 0.2382509559392929, 0.11237408220767975, 0.1274290829896927, 0.09234490990638733, 0.29983192682266235, 0.19681134819984436, 0.09119200706481934, 0.1394888311624527, 0.02876400761306286, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14126147329807281, 0.06271495670080185, 0.09029032289981842, 0.10313913226127625, 0.08530516922473907, 0.05194256827235222, 0.09853952378034592, 0.05407971888780594, 0.10021005570888519, 0.14394013583660126, 0.19472479820251465, 0.17138735949993134, 0.055624835193157196, 0.022259291261434555, 0.010825252160429955, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15579406917095184, 0.5571659207344055, 0.09220181405544281, 0.09424383193254471, 0.2893342971801758, 0.14449337124824524, 0.08881417661905289, 0.09621196240186691, 0.05768556892871857, 0.34467604756355286, 0.16894927620887756, 0.32070621848106384, 0.32385867834091187, 0.08616255223751068, 0.0030245021916925907, 0.011462957598268986, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06543286889791489, 0.3303832709789276, 0.1981877088546753, 0.17906354367733002, 0.08578304201364517, 0.12075137346982956, 0.09918820112943649, 0.14948950707912445, 0.0696079283952713, 0.2870473861694336, 0.2037079930305481, 0.20505982637405396, 0.415317177772522, 0.18504147231578827, 0.05944397673010826, 0.03780561313033104, 0.06350213289260864, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08806300163269043, 0.5073549151420593, 0.15216797590255737, 0.1779468059539795, 0.08599209040403366, 0.038353316485881805, 0.05095306783914566, 0.13815101981163025, 0.05531492829322815, 0.3680262565612793, 0.045964885503053665, 0.5803228616714478, 0.2365681380033493, 0.10053237527608871, 0.016326427459716797, 0.011199035681784153, 0.02849578857421875, 0.09785498678684235, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10047968477010727, 0.17735490202903748, 0.1303417980670929, 0.1233980730175972, 0.11124629527330399, 0.27208706736564636, 0.09057758748531342, 0.20949512720108032, 0.0595981664955616, 0.32820063829421997, 0.19304482638835907, 0.3008245825767517, 0.24370267987251282, 0.0977335274219513, 0.0604717954993248, 0.08826017379760742, 0.05976974964141846, 0.11658596247434616, 0.26095637679100037, NaN, NaN, NaN, NaN, NaN, NaN], [0.08956606686115265, 0.03296149522066116, 0.07127847522497177, 0.10275094956159592, 0.12852256000041962, 0.15250688791275024, 0.05763629823923111, 0.13953621685504913, 0.2147330343723297, 0.3297017514705658, 0.25630685687065125, 0.3529660999774933, 0.05266188457608223, 0.19866161048412323, 0.08034973591566086, 0.16050152480602264, 0.12120798975229263, 0.21796129643917084, 0.13665789365768433, 0.05867582932114601, NaN, NaN, NaN, NaN, NaN], [0.16931524872779846, 0.06866136193275452, 0.058377113193273544, 
0.054153572767972946, 0.06997817754745483, 0.17294903099536896, 0.06504172086715698, 0.09800923615694046, 0.07601338624954224, 0.22323867678642273, 0.17471107840538025, 0.20914696156978607, 0.32561469078063965, 0.04201642796397209, 0.014874166809022427, 0.043757203966379166, 0.11901038885116577, 0.15924809873104095, 0.08216992020606995, 0.13305248320102692, 0.031323518604040146, NaN, NaN, NaN, NaN], [0.14597494900226593, 0.05063166096806526, 0.07245789468288422, 0.08537694066762924, 0.07253167033195496, 0.03945168852806091, 0.07488631457090378, 0.04114159941673279, 0.09447583556175232, 0.11984950304031372, 0.21245841681957245, 0.24130037426948547, 0.053050536662340164, 0.036372195929288864, 0.012788524851202965, 0.05413965508341789, 0.17548364400863647, 0.18113258481025696, 0.17045176029205322, 0.056165628135204315, 0.023532675579190254, 0.007599800359457731, NaN, NaN, NaN], [0.20880575478076935, 0.4742221236228943, 0.0684090405702591, 0.07499475032091141, 0.22897963225841522, 0.11411925405263901, 0.06380540132522583, 0.06602712720632553, 0.04886250197887421, 0.25098055601119995, 0.16695836186408997, 0.41882073879241943, 0.45364588499069214, 0.19780457019805908, 0.004864717833697796, 0.007611281704157591, 0.23698794841766357, 0.08390159159898758, 0.28844529390335083, 0.28151822090148926, 0.0680297240614891, 0.0018790157046169043, 0.008693840354681015, NaN, NaN], [0.06649312376976013, 0.2272576093673706, 0.15548978745937347, 0.13675269484519958, 0.06747769564390182, 0.09888236224651337, 0.07679145783185959, 0.09811051189899445, 0.059132058173418045, 0.16564641892910004, 0.1534833461046219, 0.21299242973327637, 0.46317315101623535, 0.18783308565616608, 0.06707606464624405, 0.07066023349761963, 0.038238298147916794, 0.13390158116817474, 0.1738123893737793, 0.3894510865211487, 0.199345201253891, 0.05267143249511719, 0.03450411930680275, 0.0674150139093399, NaN], [0.13068987429141998, 0.5177554488182068, 0.21822108328342438, 0.17411521077156067, 0.11371950805187225, 0.10282127559185028, 0.14754493534564972, 0.10529720038175583, 0.04059072583913803, 0.1422514021396637, 0.16688787937164307, 0.3468432128429413, 0.07328897714614868, 0.033892080187797546, 0.005811289418488741, 0.006848806049674749, 0.033459149301052094, 0.08608346432447433, 0.29348817467689514, 0.07146795839071274, 0.05563248693943024, 0.008248405531048775, 0.00942459236830473, 0.03898181766271591, 0.13983668386936188]], [[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13037645816802979, 0.08109150826931, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14859925210475922, 0.02925589494407177, 0.0505123995244503, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.21387919783592224, 0.03206360712647438, 0.012896520085632801, 0.06630519032478333, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15968731045722961, 0.046736959367990494, 0.014681101776659489, 0.01418250147253275, 0.011044399812817574, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.22570300102233887, 0.051045093685388565, 0.020206425338983536, 0.021926334127783775, 0.008406145498156548, 0.0702541247010231, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.28555917739868164, 0.03329295665025711, 
0.036049578338861465, 0.038853298872709274, 0.007190736476331949, 0.006643606815487146, 0.08228380233049393, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2511760890483856, 0.07463249564170837, 0.04988643527030945, 0.0701586976647377, 0.028143733739852905, 0.007391677238047123, 0.02261284738779068, 0.0737045407295227, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15217745304107666, 0.19177564978599548, 0.125013530254364, 0.1473270058631897, 0.20325084030628204, 0.10669662803411484, 0.07946557551622391, 0.027662983164191246, 0.09494684636592865, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13806378841400146, 0.2514709234237671, 0.17176732420921326, 0.21858137845993042, 0.17882317304611206, 0.16198168694972992, 0.20351995527744293, 0.07158615440130234, 0.0266498401761055, 0.23213928937911987, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17152094841003418, 0.15314172208309174, 0.15820659697055817, 0.19208288192749023, 0.19640566408634186, 0.061033159494400024, 0.12321671098470688, 0.07748300582170486, 0.07906179875135422, 0.032524362206459045, 0.08073069155216217, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11935991793870926, 0.25889015197753906, 0.181893989443779, 0.2521744966506958, 0.2510518431663513, 0.1320696324110031, 0.17421388626098633, 0.10352174937725067, 0.13144756853580475, 0.06071629375219345, 0.07381404936313629, 0.11898738145828247, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11384479701519012, 0.12307179719209671, 0.17695116996765137, 0.21105043590068817, 0.2652710974216461, 0.1994313895702362, 0.5530626177787781, 0.33474239706993103, 0.11353342235088348, 0.20157715678215027, 0.12058570981025696, 0.02405776083469391, 0.20302970707416534, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1661912202835083, 0.3088836967945099, 0.3049609959125519, 0.34614017605781555, 0.3287224769592285, 0.19484750926494598, 0.49978625774383545, 0.2471936047077179, 0.14924246072769165, 0.2264283001422882, 0.11719675362110138, 0.028577886521816254, 0.03125511854887009, 0.04683076590299606, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1382068395614624, 0.14312644302845, 0.15027517080307007, 0.2806132137775421, 0.10704077035188675, 0.15715429186820984, 0.3545873463153839, 0.2772214114665985, 0.11900671571493149, 0.16433128714561462, 0.08395379036664963, 0.0337035246193409, 0.08286106586456299, 0.029390821233391762, 0.07092607021331787, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.31265145540237427, 0.17018769681453705, 0.42172688245773315, 0.3373875319957733, 0.26503118872642517, 0.3668123483657837, 0.6080453991889954, 0.3421963155269623, 0.29850897192955017, 0.22005639970302582, 0.08626232296228409, 0.05660916119813919, 0.04967416450381279, 0.020023291930556297, 0.01626538299024105, 0.03365384787321091, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11847452819347382, 0.5065410137176514, 0.4161456227302551, 0.44356557726860046, 0.358999639749527, 0.34202155470848083, 0.6410406231880188, 0.5693260431289673, 0.3344528377056122, 0.3382241725921631, 0.16963228583335876, 0.12081613391637802, 0.09492655098438263, 0.06781262904405594, 0.059771545231342316, 0.013083304278552532, 0.15846344828605652, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14143924415111542, 0.33810776472091675, 0.4273369610309601, 0.4442084729671478, 0.4867575168609619, 
0.40271657705307007, 0.7919159531593323, 0.5796146988868713, 0.41502290964126587, 0.19611117243766785, 0.2659074366092682, 0.0590454526245594, 0.09533000737428665, 0.06579555571079254, 0.049002423882484436, 0.011413656175136566, 0.05989237129688263, 0.0694013461470604, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06363721936941147, 0.3402014374732971, 0.30108359456062317, 0.3598821461200714, 0.356340229511261, 0.2955020070075989, 0.3913557827472687, 0.34592464566230774, 0.3881937265396118, 0.23078370094299316, 0.49122318625450134, 0.3432621657848358, 0.1563359946012497, 0.12668228149414062, 0.1534397453069687, 0.06296171993017197, 0.07472987473011017, 0.07419107109308243, 0.08810260146856308, NaN, NaN, NaN, NaN, NaN, NaN], [0.06025628373026848, 0.1445734202861786, 0.2208743691444397, 0.22917300462722778, 0.34805941581726074, 0.30598515272140503, 0.6932811141014099, 0.6030279994010925, 0.2491629421710968, 0.46458470821380615, 0.5228609442710876, 0.2136632800102234, 0.610046923160553, 0.25265923142433167, 0.14038830995559692, 0.07342293113470078, 0.22653138637542725, 0.10003089159727097, 0.02225746400654316, 0.14559555053710938, NaN, NaN, NaN, NaN, NaN], [0.0902293398976326, 0.5066702961921692, 0.45472872257232666, 0.45485398173332214, 0.5058757662773132, 0.3594079613685608, 0.7028806209564209, 0.5180745720863342, 0.25713953375816345, 0.5372852683067322, 0.6213670372962952, 0.2659974694252014, 0.3181111812591553, 0.5259383916854858, 0.33730512857437134, 0.13441412150859833, 0.36266574263572693, 0.10496268421411514, 0.02362431399524212, 0.020191077142953873, 0.04590708762407303, NaN, NaN, NaN, NaN], [0.1059701219201088, 0.2303982675075531, 0.21762119233608246, 0.3580361306667328, 0.17096057534217834, 0.24843183159828186, 0.5131583213806152, 0.47260501980781555, 0.21650557219982147, 0.38561707735061646, 0.416827529668808, 0.1716565638780594, 0.3172723054885864, 0.29216328263282776, 0.47280052304267883, 0.38235870003700256, 0.1798420399427414, 0.1762932986021042, 0.04000748321413994, 0.08066289126873016, 0.03975420445203781, 0.08505715429782867, NaN, NaN, NaN], [0.2317487895488739, 0.2560827136039734, 0.5102789998054504, 0.4199059009552002, 0.44283756613731384, 0.5258800983428955, 0.732390284538269, 0.4491574466228485, 0.4244932234287262, 0.5298821926116943, 0.43037980794906616, 0.2800268232822418, 0.3093121647834778, 0.4250229299068451, 0.19317308068275452, 0.2640416920185089, 0.38813653588294983, 0.11181202530860901, 0.054203763604164124, 0.037284549325704575, 0.018739882856607437, 0.014264266937971115, 0.035236652940511703, NaN, NaN], [0.08032029122114182, 0.6358892321586609, 0.5042787194252014, 0.5074477195739746, 0.5223307013511658, 0.5343775749206543, 0.703619122505188, 0.6657658815383911, 0.45647403597831726, 0.602655827999115, 0.5387927889823914, 0.39006462693214417, 0.39567169547080994, 0.43596506118774414, 0.41000646352767944, 0.269907683134079, 0.5412885546684265, 0.2038634866476059, 0.10306636989116669, 0.05501747503876686, 0.04515310004353523, 0.04695969074964523, 0.008877278305590153, 0.09985174983739853, NaN], [0.03129265457391739, 0.2636677324771881, 0.3672870099544525, 0.438161164522171, 0.7497870922088623, 0.43876102566719055, 0.6747432947158813, 0.5918557643890381, 0.5535795092582703, 0.7133825421333313, 0.7440239787101746, 0.3780657947063446, 0.4423457384109497, 0.6450315713882446, 0.5939705967903137, 0.7279283404350281, 0.4253756105899811, 0.4950290024280548, 0.13756991922855377, 0.08432447165250778, 0.11775307357311249, 0.12791647017002106, 0.07922011613845825, 
0.04417572543025017, 0.3473970592021942]], [[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13398022949695587, 0.051660239696502686, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14254364371299744, 0.023038247600197792, 0.14531654119491577, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17795929312705994, 0.024941343814134598, 0.06730933487415314, 0.21388311684131622, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.09399491548538208, 0.3603954315185547, 0.2704434394836426, 0.1475897580385208, 0.18568314611911774, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14775781333446503, 0.19919507205486298, 0.14170727133750916, 0.05924544855952263, 0.05067846551537514, 0.45942243933677673, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14211317896842957, 0.055850330740213394, 0.31645503640174866, 0.16900919377803802, 0.038168299943208694, 0.07897188514471054, 0.2625669240951538, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08848852664232254, 0.1616290658712387, 0.37575462460517883, 0.24721546471118927, 0.16591095924377441, 0.06889674067497253, 0.052010323852300644, 0.12634019553661346, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0747382640838623, 0.14914710819721222, 0.6135430335998535, 0.5929751992225647, 0.35069379210472107, 0.2108047604560852, 0.11502823978662491, 0.02365955151617527, 0.17759312689304352, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.02855301834642887, 0.21659326553344727, 0.4310435652732849, 0.40604472160339355, 0.3670090436935425, 0.48140615224838257, 0.27167943120002747, 0.09097199141979218, 0.1627163589000702, 0.1288144737482071, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03365316241979599, 0.14809295535087585, 0.3644290566444397, 0.4046455919742584, 0.26744210720062256, 0.32108214497566223, 0.1678413599729538, 0.190241739153862, 0.22121649980545044, 0.03444775566458702, 0.46765974164009094, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.038216885179281235, 0.2552680969238281, 0.4071650505065918, 0.3936895430088043, 0.4416206479072571, 0.38015541434288025, 0.1657901555299759, 0.15260477364063263, 0.22771137952804565, 0.10614379495382309, 0.0724361315369606, 0.1760038137435913, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07068492472171783, 0.07818713039159775, 0.3302493095397949, 0.299561083316803, 0.46339741349220276, 0.48102065920829773, 0.15714748203754425, 0.27301517128944397, 0.38065311312675476, 0.19789563119411469, 0.11113718152046204, 0.05171056091785431, 0.13386131823062897, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05115865543484688, 0.44867002964019775, 0.49208834767341614, 0.477664977312088, 0.4642978608608246, 0.46059542894363403, 0.25649622082710266, 0.406831830739975, 0.27858051657676697, 0.2405669242143631, 0.11958811432123184, 0.1450459510087967, 0.0628136694431305, 0.09898709505796432, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04031704366207123, 0.6707005500793457, 0.529548704624176, 0.4586588144302368, 
0.3106471002101898, 0.6713098287582397, 0.4458201229572296, 0.5507155060768127, 0.6255134344100952, 0.5032600164413452, 0.18919125199317932, 0.2968505918979645, 0.3902440667152405, 0.16804949939250946, 0.088200144469738, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13188821077346802, 0.1971314549446106, 0.3902590274810791, 0.4961083233356476, 0.37017205357551575, 0.46889960765838623, 0.2874276340007782, 0.1815745085477829, 0.39618349075317383, 0.17909032106399536, 0.26052209734916687, 0.13463276624679565, 0.11223814636468887, 0.05094114691019058, 0.030694767832756042, 0.23131275177001953, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.029627619311213493, 0.0727827325463295, 0.2382729947566986, 0.16726669669151306, 0.3644602298736572, 0.47072863578796387, 0.2034798413515091, 0.1723088026046753, 0.43477845191955566, 0.18565386533737183, 0.3540991544723511, 0.2379947453737259, 0.07713616639375687, 0.19858470559120178, 0.17015229165554047, 0.0891638696193695, 0.22899208962917328, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.01839388906955719, 0.10223808884620667, 0.244280606508255, 0.22035017609596252, 0.2828108072280884, 0.41914066672325134, 0.09010869264602661, 0.14338640868663788, 0.35142722725868225, 0.12073972821235657, 0.6723650693893433, 0.17433631420135498, 0.20010362565517426, 0.17566151916980743, 0.17214345932006836, 0.06743419170379639, 0.08234895765781403, 0.4274884760379791, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.02117752842605114, 0.17625343799591064, 0.2448491007089615, 0.23410049080848694, 0.3357784152030945, 0.2992798388004303, 0.09099920094013214, 0.1110134869813919, 0.20308172702789307, 0.1763213574886322, 0.1646280288696289, 0.23259523510932922, 0.3615821301937103, 0.32664546370506287, 0.296549916267395, 0.2726198732852936, 0.07387500256299973, 0.07587912678718567, 0.14093360304832458, NaN, NaN, NaN, NaN, NaN, NaN], [0.05486638844013214, 0.06597498804330826, 0.2194771021604538, 0.1927901804447174, 0.37433308362960815, 0.412477970123291, 0.07100911438465118, 0.1499587744474411, 0.3056679368019104, 0.16932857036590576, 0.15193165838718414, 0.19111526012420654, 0.291239857673645, 0.37710845470428467, 0.510109543800354, 0.47089657187461853, 0.17204606533050537, 0.09759342670440674, 0.05198577418923378, 0.1557197868824005, NaN, NaN, NaN, NaN, NaN], [0.03942986950278282, 0.2940163016319275, 0.3192412853240967, 0.3550935387611389, 0.28974649310112, 0.35144588351249695, 0.111830934882164, 0.2212614268064499, 0.1942923218011856, 0.16557106375694275, 0.12293191254138947, 0.3516637980937958, 0.22679129242897034, 0.3504909574985504, 0.4427362084388733, 0.6422855854034424, 0.29741936922073364, 0.17250965535640717, 0.13341550529003143, 0.05469499155879021, 0.0792233869433403, NaN, NaN, NaN, NaN], [0.03949292004108429, 0.6095755696296692, 0.4376317858695984, 0.4024345874786377, 0.24819140136241913, 0.555855929851532, 0.2881583273410797, 0.40402302145957947, 0.5775710940361023, 0.42070186138153076, 0.22824901342391968, 0.4547353982925415, 0.567461371421814, 0.5762937664985657, 0.33163049817085266, 0.41951635479927063, 0.37286072969436646, 0.25620296597480774, 0.25266289710998535, 0.3395143151283264, 0.13239842653274536, 0.07333662360906601, NaN, NaN, NaN], [0.11607979983091354, 0.18507249653339386, 0.30528268218040466, 0.41669708490371704, 0.22673273086547852, 0.3321194052696228, 0.17922396957874298, 0.1181870847940445, 0.299829363822937, 0.11785572022199631, 0.23005077242851257, 0.1731709986925125, 0.17971253395080566, 0.2448451966047287, 0.15796169638633728, 0.701153576374054, 
0.1659945547580719, 0.4861533045768738, 0.20215842127799988, 0.13506482541561127, 0.058445703238248825, 0.03114200383424759, 0.21790345013141632, NaN, NaN], [0.017429474741220474, 0.04190561920404434, 0.14842365682125092, 0.09654705971479416, 0.16489917039871216, 0.24686570465564728, 0.09686223417520523, 0.09368213266134262, 0.2918589413166046, 0.08991989493370056, 0.18521137535572052, 0.19666530191898346, 0.06316249072551727, 0.222347229719162, 0.3215444087982178, 0.3288835287094116, 0.38603323698043823, 0.4142700135707855, 0.25910744071006775, 0.0714699923992157, 0.2130158245563507, 0.1895158588886261, 0.07420682162046432, 0.2235250473022461, NaN], [0.011625233106315136, 0.13701221346855164, 0.3079974055290222, 0.17742200195789337, 0.10538481175899506, 0.17213597893714905, 0.08605048805475235, 0.13507568836212158, 0.2275547832250595, 0.07923908531665802, 0.07705283164978027, 0.2479921281337738, 0.3453103303909302, 0.2883259654045105, 0.36409828066825867, 0.18068012595176697, 0.4896908700466156, 0.399289608001709, 0.5261627435684204, 0.6339481472969055, 0.6382991671562195, 0.5417840480804443, 0.2542280852794647, 0.330732524394989, 0.21995915472507477]], [[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04915444552898407, 0.7444152235984802, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10270431637763977, 0.20103313028812408, 0.23083212971687317, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1558120846748352, 0.09243088960647583, 0.02280065417289734, 0.32627996802330017, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1265193670988083, 0.1639627069234848, 0.12297425419092178, 0.08557231724262238, 0.1833999902009964, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11118379235267639, 0.23907560110092163, 0.16732671856880188, 0.1982172429561615, 0.02825341187417507, 0.15412425994873047, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06564534455537796, 0.4107542335987091, 0.09891282767057419, 0.3507450222969055, 0.0021941487211734056, 0.004341787192970514, 0.11288701742887497, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.09254656732082367, 0.17870496213436127, 0.11882538348436356, 0.2565489113330841, 0.06709786504507065, 0.020701991394162178, 0.05621851608157158, 0.571487307548523, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12130707502365112, 0.06869146227836609, 0.052872415632009506, 0.07373122870922089, 0.03967232629656792, 0.019552208483219147, 0.024196362122893333, 0.1570335328578949, 0.3329051434993744, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12370187789201736, 0.027735348790884018, 0.007442266680300236, 0.018701551482081413, 0.04923407360911369, 0.022976329550147057, 0.06834850460290909, 0.13354788720607758, 0.13089321553707123, 0.41554775834083557, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08012630045413971, 0.020899765193462372, 0.032236725091934204, 0.011631320230662823, 0.1322554349899292, 0.13739252090454102, 0.3272823691368103, 0.10228703171014786, 0.16136890649795532, 0.12631160020828247, 0.3315902352333069, NaN, NaN, NaN, 
NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07002493739128113, 0.03239390626549721, 0.05209453031420708, 0.033656563609838486, 0.10301846265792847, 0.08080227673053741, 0.10908480733633041, 0.10694557428359985, 0.2992934286594391, 0.26628223061561584, 0.1579413264989853, 0.18216297030448914, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.23901967704296112, 0.02059122547507286, 0.03393668681383133, 0.04736512154340744, 0.05927135422825813, 0.02361929975450039, 0.006761881057173014, 0.05556455999612808, 0.1379650980234146, 0.12424714863300323, 0.191926509141922, 0.01547694206237793, 0.05743350088596344, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0662187710404396, 0.02669837884604931, 0.008789082989096642, 0.004751283209770918, 0.0528719425201416, 0.011242655105888844, 0.018989307805895805, 0.07620660215616226, 0.012969521805644035, 0.039284493774175644, 0.22954939305782318, 0.04563957825303078, 0.029234008863568306, 0.7488549947738647, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10826153308153152, 0.014460555277764797, 0.0725417360663414, 0.03217141702771187, 0.06698039174079895, 0.08051858842372894, 0.05872708931565285, 0.022866755723953247, 0.06705553829669952, 0.07034263759851456, 0.3507814407348633, 0.05356235057115555, 0.08709309250116348, 0.23604632914066315, 0.324868768453598, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13878783583641052, 0.02536645717918873, 0.06943535804748535, 0.05891912057995796, 0.006977759767323732, 0.003910682164132595, 0.004916978534311056, 0.04463541880249977, 0.07985055446624756, 0.07872368395328522, 0.291103333234787, 0.21302121877670288, 0.16995804011821747, 0.19893744587898254, 0.01890285685658455, 0.3838881254196167, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04579493775963783, 0.04550570994615555, 0.013287660665810108, 0.023886512964963913, 0.024052713066339493, 0.017023656517267227, 0.04836693033576012, 0.030526861548423767, 0.017645621672272682, 0.03170713782310486, 0.09266000241041183, 0.23106807470321655, 0.03557471185922623, 0.12432269752025604, 0.10334902256727219, 0.3233395516872406, 0.3770029842853546, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0394071489572525, 0.011173942126333714, 0.019201254472136497, 0.012027204036712646, 0.1043756976723671, 0.09629304707050323, 0.044260744005441666, 0.010774374939501286, 0.027033720165491104, 0.01529898401349783, 0.004158060997724533, 0.03471178933978081, 0.3574643135070801, 0.04469288885593414, 0.27014297246932983, 0.10925178974866867, 0.34427598118782043, 0.2875407040119171, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08343059569597244, 0.043180350214242935, 0.0767669752240181, 0.06360654532909393, 0.1271795630455017, 0.0800960585474968, 0.06889919936656952, 0.05648425221443176, 0.1521727591753006, 0.09240606427192688, 0.03566697984933853, 0.03560119867324829, 0.1492718607187271, 0.18653850257396698, 0.3474813401699066, 0.3278762698173523, 0.10706853121519089, 0.127774178981781, 0.1299499273300171, NaN, NaN, NaN, NaN, NaN, NaN], [0.23721955716609955, 0.02343675307929516, 0.03610215708613396, 0.05973569303750992, 0.07488072663545609, 0.026813305914402008, 0.0050082337111234665, 0.03149579092860222, 0.06251367926597595, 0.02305557392537594, 0.025774041190743446, 0.007636546157300472, 0.004965651780366898, 0.09922869503498077, 0.133448526263237, 0.1956746131181717, 0.04676169902086258, 0.27956491708755493, 0.021136147901415825, 0.057313986122608185, NaN, NaN, NaN, NaN, NaN], [0.0697786882519722, 0.028010839596390724, 
[embedded notebook output: nested lists of floating-point weight values, organized as 25×25 lower-triangular matrices with NaN above the diagonal (one matrix per head/layer); raw values retained in the notebook file, summarized here as a placeholder]
NaN, NaN, NaN, NaN, NaN, NaN], [0.15828359127044678, 0.26215362548828125, 0.1828027367591858, 0.3383132517337799, 0.14976613223552704, 0.17187725007534027, 0.16098640859127045, 0.10713529586791992, 0.2253616452217102, 0.27887699007987976, 0.0991593673825264, 0.1987481713294983, 0.2010713517665863, 0.24892166256904602, 0.09143882989883423, 0.028894133865833282, 0.0226773452013731, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08621957898139954, 0.39239373803138733, 0.32060059905052185, 0.6169360876083374, 0.04211895540356636, 0.07954877614974976, 0.28241875767707825, 0.1073535904288292, 0.10431969910860062, 0.28138864040374756, 0.05428503826260567, 0.29005417227745056, 0.2829020619392395, 0.1771886944770813, 0.12728992104530334, 0.029228007420897484, 0.09527892619371414, 0.030012397095561028, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10387677699327469, 0.28899070620536804, 0.34778735041618347, 0.5978891849517822, 0.08856049180030823, 0.11093756556510925, 0.2773001492023468, 0.1387036144733429, 0.05535874143242836, 0.040542375296354294, 0.057020239531993866, 0.08593740314245224, 0.3575255870819092, 0.1780063509941101, 0.03115975111722946, 0.05683879926800728, 0.20087137818336487, 0.022991398349404335, 0.024780578911304474, NaN, NaN, NaN, NaN, NaN, NaN], [0.027872784063220024, 0.11975038051605225, 0.8484699726104736, 0.9221431016921997, 0.010032964870333672, 0.05817321315407753, 0.14408904314041138, 0.03149182349443436, 0.0027255630120635033, 0.003546576714143157, 0.054592132568359375, 0.03846639767289162, 0.0179138146340847, 0.04004756733775139, 0.0025625908747315407, 0.006073353346437216, 0.017890095710754395, 0.006128084380179644, 0.0035659971181303263, 0.005842072889208794, NaN, NaN, NaN, NaN, NaN], [0.21095024049282074, 0.16082847118377686, 0.2551726996898651, 0.40046265721321106, 0.07841236889362335, 0.05558479577302933, 0.20925307273864746, 0.4381427764892578, 0.47918838262557983, 0.07096414268016815, 0.11106863617897034, 0.09138666838407516, 0.1393880993127823, 0.1506565660238266, 0.07743309438228607, 0.06943798065185547, 0.09801105409860611, 0.017720624804496765, 0.015859564766287804, 0.029157793149352074, 0.0392736941576004, NaN, NaN, NaN, NaN], [0.17935752868652344, 0.014263968914747238, 0.0022281131241470575, 0.011617614887654781, 0.022433524951338768, 0.0047986325807869434, 0.013686214573681355, 0.007696506567299366, 0.004939754959195852, 0.012488129548728466, 0.002878576284274459, 0.013457567431032658, 0.23303280770778656, 0.030022362247109413, 0.013181640766561031, 0.027029545977711678, 0.010247751139104366, 0.0006795030203647912, 0.0032072996255010366, 0.1104368045926094, 0.006663828622549772, 0.003364446572959423, NaN, NaN, NaN], [0.3113161623477936, 0.29550519585609436, 0.2834082841873169, 0.292662650346756, 0.1380799263715744, 0.055221766233444214, 0.0487985797226429, 0.10219268500804901, 0.25612032413482666, 0.2569950222969055, 0.10279092192649841, 0.16084249317646027, 0.5340818166732788, 0.10305190831422806, 0.16831228137016296, 0.03310799598693848, 0.10521702468395233, 0.008185362443327904, 0.02029210887849331, 0.2447529286146164, 0.0189062412828207, 0.051586367189884186, 0.011271311901509762, NaN, NaN], [0.21913117170333862, 0.2667233347892761, 0.15068072080612183, 0.2934513986110687, 0.11010763049125671, 0.11770202964544296, 0.1548316478729248, 0.10880382359027863, 0.19848009943962097, 0.2926469147205353, 0.17939361929893494, 0.38748762011528015, 0.38622626662254333, 0.4369211196899414, 0.14473943412303925, 0.11290202289819717, 0.11878126114606857, 0.013051117770373821, 
0.18458649516105652, 0.15622372925281525, 0.14840805530548096, 0.06742489337921143, 0.01624887064099312, 0.028317920863628387, NaN], [0.13670727610588074, 0.11102687567472458, 0.008893890306353569, 0.008979070000350475, 0.01785319298505783, 0.008134939707815647, 0.02043774165213108, 0.030145585536956787, 0.014907605946063995, 0.021436721086502075, 0.020207075402140617, 0.10284662246704102, 0.06823904067277908, 0.04208305850625038, 0.03810393810272217, 0.04656955599784851, 0.025087369605898857, 0.005296032875776291, 0.07358870655298233, 0.057817310094833374, 0.033472564071416855, 0.02220221422612667, 0.01758744567632675, 0.012124869041144848, 0.052647966891527176]], [[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1301431953907013, 0.0347244068980217, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.19350707530975342, 0.0006586865638382733, 0.008110460825264454, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07742509245872498, 0.025898784399032593, 0.46813124418258667, 0.21566073596477509, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15508510172367096, 0.002848779782652855, 0.006727630738168955, 0.01290579792112112, 0.0019038956379517913, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1506490558385849, 0.0018329949816688895, 0.0011812039883807302, 0.010563074611127377, 0.0007367127691395581, 0.0007524989196099341, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0463392436504364, 0.0861721858382225, 0.5342088341712952, 0.5262086987495422, 0.252642959356308, 0.014757110737264156, 0.02778990939259529, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08082517981529236, 0.10121051222085953, 0.3481808602809906, 0.41374534368515015, 0.38359278440475464, 0.07890304177999496, 0.1096968874335289, 0.1685827672481537, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1433362513780594, 0.13670213520526886, 0.10138670355081558, 0.1093992069363594, 0.236768901348114, 0.09415888041257858, 0.011134332977235317, 0.019298367202281952, 0.5348934531211853, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.024931270629167557, 0.02871265634894371, 0.20136752724647522, 0.1457405984401703, 0.13753218948841095, 0.13171687722206116, 0.07031083852052689, 0.04771474376320839, 0.5403124690055847, 0.04482616111636162, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.026511939242482185, 0.12058579176664352, 0.09381356090307236, 0.09726550430059433, 0.13490843772888184, 0.36408668756484985, 0.19949088990688324, 0.09435784071683884, 0.45831772685050964, 0.1274537742137909, 0.014095090329647064, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12624163925647736, 0.03293433412909508, 0.07055910676717758, 0.06304988265037537, 0.23899653553962708, 0.15645378828048706, 0.07000429183244705, 0.02516351453959942, 0.06797400116920471, 0.07094329595565796, 0.1311238706111908, 0.21208471059799194, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1118171289563179, 0.015469676814973354, 0.08768722414970398, 0.046650953590869904, 0.23542486131191254, 
0.09032069146633148, 0.05012429133057594, 0.004171812906861305, 0.15006321668624878, 0.017805932089686394, 0.049085501581430435, 0.035517167299985886, 0.6428134441375732, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.09301143884658813, 0.13257478177547455, 0.1489255279302597, 0.18642880022525787, 0.318376362323761, 0.31357452273368835, 0.1382697969675064, 0.07457731664180756, 0.17392435669898987, 0.00920780934393406, 0.020603884011507034, 0.049020376056432724, 0.322329580783844, 0.3050764203071594, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17444664239883423, 0.0007958812057040632, 5.6854176364140585e-05, 0.0004179355164524168, 0.00013179269444663078, 0.00024977640714496374, 0.0001107741700252518, 7.639485556865111e-05, 0.0008396806661039591, 0.00030287212575785816, 0.00023763117496855557, 0.003834246192127466, 0.003433886216953397, 0.00015348535089287907, 0.00014843019016552716, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00841783918440342, 0.03505324944853783, 0.02469123899936676, 0.026689309626817703, 0.1500382125377655, 0.08861804753541946, 0.006530162878334522, 0.060150377452373505, 0.04669034481048584, 0.007807246409356594, 0.02131708152592182, 0.012364925816655159, 0.041818197816610336, 0.02841370552778244, 0.6981374621391296, 0.06836962699890137, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0009672276792116463, 0.0037913541309535503, 0.00524782482534647, 0.006044968497008085, 0.07807419449090958, 0.026950905099511147, 0.0024354930501431227, 0.005482541862875223, 0.013836389407515526, 0.002816400956362486, 0.0006559633184224367, 0.002845867071300745, 0.018497759476304054, 0.19704575836658478, 0.41393977403640747, 0.4024144113063812, 0.00308317132294178, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0023347423411905766, 0.018236415460705757, 0.011423468589782715, 0.014267664402723312, 0.06272618472576141, 0.09006785601377487, 0.023437032476067543, 0.008957883343100548, 0.03532397374510765, 0.006200278177857399, 0.0002018583327298984, 0.016960909590125084, 0.04933774098753929, 0.1362536996603012, 0.47770828008651733, 0.5670948624610901, 0.06992122530937195, 0.03068283386528492, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0730348452925682, 0.024321116507053375, 0.06646358221769333, 0.0630527138710022, 0.23201428353786469, 0.1378810703754425, 0.04738042131066322, 0.010255109518766403, 0.0316733755171299, 0.07226394861936569, 0.06345586478710175, 0.13366159796714783, 0.1651405692100525, 0.1875276118516922, 0.475235253572464, 0.34701114892959595, 0.106105737388134, 0.17074023187160492, 0.14835108816623688, NaN, NaN, NaN, NaN, NaN, NaN], [0.1317213624715805, 0.02603350207209587, 0.05892709270119667, 0.02498493157327175, 0.2902502715587616, 0.11121267080307007, 0.057563167065382004, 0.004654969088733196, 0.12363925576210022, 0.02343585342168808, 0.03682887554168701, 0.054189957678318024, 0.5043657422065735, 0.23388440907001495, 0.46154457330703735, 0.32561513781547546, 0.055846668779850006, 0.06476935744285583, 0.026345595717430115, 0.5623452067375183, NaN, NaN, NaN, NaN, NaN], [0.037178635597229004, 0.08259578794240952, 0.0920928493142128, 0.09107104688882828, 0.19359135627746582, 0.17535823583602905, 0.06819135695695877, 0.03716395050287247, 0.07458745688199997, 0.0064619481563568115, 0.009060872718691826, 0.02094256319105625, 0.1461041122674942, 0.11104261875152588, 0.6685899496078491, 0.4500047266483307, 0.029085516929626465, 0.03437849134206772, 0.03590574488043785, 0.20188003778457642, 0.23542997241020203, NaN, NaN, NaN, NaN], 
[0.18516498804092407, 0.0009336460498161614, 7.266629108926281e-05, 0.00041225351742468774, 0.00023152375069912523, 0.0002865330025088042, 0.00012637366307899356, 8.909442112781107e-05, 0.0006568549433723092, 0.0003727772564161569, 0.00021836791711393744, 0.0030449857003986835, 0.002062517451122403, 0.0001740154402796179, 0.00019746039470192045, 0.0010639599058777094, 3.738106170203537e-05, 0.00018948569777421653, 0.0017019548686221242, 0.0021623496431857347, 7.414143328787759e-05, 0.00010166682477574795, NaN, NaN, NaN], [0.014717604033648968, 0.07327108085155487, 0.049021750688552856, 0.04824157431721687, 0.2509053647518158, 0.1518847495317459, 0.011399514973163605, 0.08240412920713425, 0.052963949739933014, 0.012185328640043736, 0.03166860342025757, 0.029948236420750618, 0.0332757867872715, 0.026646502315998077, 0.6691258549690247, 0.05157328397035599, 0.010373775847256184, 0.027277877554297447, 0.022091276943683624, 0.06386284530162811, 0.02213944122195244, 0.7486419677734375, 0.1026511937379837, NaN, NaN], [0.0010381464380770922, 0.0033105257898569107, 0.005275417119264603, 0.005129440221935511, 0.05292869359254837, 0.018404772505164146, 0.0016328096389770508, 0.0039754449389874935, 0.007563540246337652, 0.0015294092008844018, 0.00038045260589569807, 0.0016144785331562161, 0.00974529329687357, 0.09415796399116516, 0.176291361451149, 0.35064396262168884, 0.0026081653777509928, 0.0026635529939085245, 0.004589376971125603, 0.028667066246271133, 0.20089752972126007, 0.45412325859069824, 0.4352543354034424, 0.005037708207964897, NaN], [0.1408424973487854, 0.01142195239663124, 0.027654578909277916, 0.018255943432450294, 0.00871819257736206, 0.007302883546799421, 0.002508251927793026, 0.0010894191218540072, 0.002539109904319048, 0.0016572934109717607, 0.002274427330121398, 0.00915378425270319, 0.004932411015033722, 0.000505969044752419, 0.0064278775826096535, 0.013472460210323334, 0.0009905033512040973, 0.004150861874222755, 0.015419019386172295, 0.013300818391144276, 0.00147106999065727, 0.01399929728358984, 0.03311459720134735, 0.0035406623501330614, 0.008275571279227734]], [[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10530310869216919, 0.47072935104370117, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07470229268074036, 0.01594272069633007, 0.3473423421382904, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.19784890115261078, 0.02982909232378006, 0.008884507231414318, 0.026416730135679245, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15099161863327026, 0.004257611930370331, 0.06880252063274384, 0.03778434172272682, 0.016005711629986763, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14908726513385773, 0.01576131209731102, 0.006129090208560228, 0.013888919726014137, 0.006888655014336109, 0.007033796049654484, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1207430437207222, 0.0697125568985939, 0.0065151299349963665, 0.0038357542362064123, 0.04419673979282379, 0.16196060180664062, 0.49751368165016174, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.02684849314391613, 0.03953110799193382, 0.00281998747959733, 0.001733462675474584, 0.08529012650251389, 
0.6486974358558655, 0.306731641292572, 0.07198647409677505, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.012395885773003101, 0.009238478727638721, 0.0003186498652212322, 0.0010813054395839572, 0.008392964489758015, 0.2777543067932129, 0.44055092334747314, 0.0011997584952041507, 0.00246741552837193, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.034838397055864334, 0.015937600284814835, 0.002090656431391835, 0.002794815693050623, 0.008703295141458511, 0.10732896625995636, 0.4454900026321411, 0.001775766140781343, 0.0009654808673076332, 0.016644174233078957, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.293722003698349, 0.0148458918556571, 0.02856721729040146, 0.006315621547400951, 0.005582483485341072, 0.0013911855639889836, 0.004092940129339695, 0.0036679452750831842, 0.0010494120651856065, 0.016411608085036278, 0.023008037358522415, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13037414848804474, 0.020949387922883034, 0.03831411898136139, 0.007462172769010067, 0.02548721246421337, 0.006367610301822424, 0.008434200659394264, 0.010317808948457241, 0.003713584039360285, 0.00402417778968811, 0.19032441079616547, 0.26746228337287903, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.041874390095472336, 0.024160701781511307, 0.00029624058515764773, 0.00016299582784995437, 0.00014630405348725617, 0.0004776908899657428, 0.0010664566652849317, 0.005874973721802235, 0.000636687153019011, 0.0013240330154076219, 0.0912160873413086, 0.35286882519721985, 0.01772063784301281, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11822566390037537, 0.015047432854771614, 0.019423136487603188, 0.00686526857316494, 0.0036870460025966167, 0.00022719512344338, 0.002930518239736557, 0.025171050801873207, 0.005165010690689087, 0.05391281098127365, 0.11512911319732666, 0.07776232063770294, 0.2967449426651001, 0.09380093216896057, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.09375648200511932, 0.01475021056830883, 0.012638024985790253, 0.0046005831100046635, 0.051909249275922775, 0.0036223391070961952, 0.004371740389615297, 0.009388775564730167, 0.01159447617828846, 0.023305783048272133, 0.046531662344932556, 0.058873143047094345, 0.07503876090049744, 0.0337555818259716, 0.30213212966918945, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.060409948229789734, 0.03445665165781975, 0.000381257850676775, 0.0036348046269267797, 0.0002713070425670594, 0.0011815812904387712, 0.03030458651483059, 0.03435760363936424, 0.0019682012498378754, 0.00901943538337946, 0.2363511621952057, 0.7836493253707886, 0.05375572293996811, 0.0010517562041059136, 0.002096510259434581, 0.017742546275258064, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.19913224875926971, 0.17475517094135284, 0.0022224360145628452, 0.015882516279816628, 0.001058473251760006, 0.0005846276762895286, 0.02601638250052929, 0.037341512739658356, 0.002062901621684432, 0.01394632738083601, 0.062121838331222534, 0.09270716458559036, 0.13391432166099548, 0.011137665249407291, 0.003502808278426528, 0.007463122718036175, 0.4640289545059204, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.33059969544410706, 0.017222048714756966, 0.029873082414269447, 0.008054245263338089, 0.002331576542928815, 0.0006345488945953548, 0.011296147480607033, 0.005269323009997606, 0.0004991231253370643, 0.01808379590511322, 0.0023433570750057697, 0.0409514382481575, 0.01219080574810505, 0.010968736372888088, 
0.004035044461488724, 0.000618473335634917, 0.01301309373229742, 0.04461785778403282, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11787470430135727, 0.013379373587667942, 0.03657921776175499, 0.007838133722543716, 0.006328434217721224, 0.0013346761697903275, 0.005374525673687458, 0.005563441663980484, 0.0013783610193058848, 0.003622437361627817, 0.10895299166440964, 0.17491653561592102, 0.013411260209977627, 0.006658618804067373, 0.013080593198537827, 0.0013389869127422571, 0.03540230169892311, 0.3923792839050293, 0.2429211437702179, NaN, NaN, NaN, NaN, NaN, NaN], [0.03099578432738781, 0.01363852247595787, 8.312943100463599e-05, 4.0873743273550645e-05, 3.1056373700266704e-05, 8.971957868197933e-05, 0.0004970009904354811, 0.0021136843133717775, 0.00015606316446792334, 0.0008045462891459465, 0.029241982847452164, 0.24120952188968658, 0.011327153071761131, 0.006169632077217102, 0.004105421248823404, 0.0017298789462074637, 0.09891722351312637, 0.13539430499076843, 0.3545337915420532, 0.03266340494155884, NaN, NaN, NaN, NaN, NaN], [0.05892227217555046, 0.006390280555933714, 0.00726453959941864, 0.002730957930907607, 0.0007821861072443426, 5.8160956541541964e-05, 0.0015625637024641037, 0.007388831116259098, 0.0016573512693867087, 0.027249574661254883, 0.062049947679042816, 0.056622181087732315, 0.2355845421552658, 0.04601869359612465, 0.006218506023287773, 0.00966239720582962, 0.07739637047052383, 0.4012998342514038, 0.09626632183790207, 0.38049787282943726, 0.10569068044424057, NaN, NaN, NaN, NaN], [0.09179559350013733, 0.00951253343373537, 0.010748236440122128, 0.0033872865606099367, 0.04677930101752281, 0.0018132117111235857, 0.0035809800028800964, 0.005968866869807243, 0.0062707834877073765, 0.02606387436389923, 0.033457815647125244, 0.03605461120605469, 0.04817588999867439, 0.03754975646734238, 0.2781437933444977, 0.015551367774605751, 0.2560427486896515, 0.08298799395561218, 0.06865174323320389, 0.12361031025648117, 0.04344068095088005, 0.28463616967201233, NaN, NaN, NaN], [0.02905191108584404, 0.012088212184607983, 0.00011298860044917092, 0.0012518719304352999, 4.317293132771738e-05, 0.0001948956778505817, 0.008923283778131008, 0.008874665014445782, 0.00048750368296168745, 0.0041984752751886845, 0.08557221293449402, 0.46109655499458313, 0.018593793734908104, 0.0004841866611968726, 0.0006005582981742918, 0.004410868044942617, 0.1617877185344696, 0.2815479040145874, 0.7414005398750305, 0.06452517956495285, 0.0009642028599046171, 0.0012653517769649625, 0.012943175621330738, NaN, NaN], [0.1381005197763443, 0.0952477678656578, 0.0011117071844637394, 0.007693122606724501, 0.0001761779421940446, 8.233776316046715e-05, 0.0067709037102758884, 0.015442474745213985, 0.0005836034542880952, 0.005857429001480341, 0.020792629569768906, 0.02682901732623577, 0.05164036154747009, 0.0043857707642018795, 0.0008507486782036722, 0.004215322434902191, 0.19233396649360657, 0.21357974410057068, 0.14138071238994598, 0.12764914333820343, 0.011541306972503662, 0.001996394479647279, 0.004979089833796024, 0.4768531322479248, NaN], [0.14079369604587555, 0.0077750058844685555, 0.008707624860107899, 0.002215370535850525, 0.0003697987995110452, 8.685041393619031e-05, 6.568676326423883e-05, 0.0005928067839704454, 0.00018151948461309075, 0.0013713521184399724, 0.003134837606921792, 0.004530616104602814, 0.0021016064565628767, 0.0014590725768357515, 0.01743447594344616, 0.0004639088874682784, 0.00557903666049242, 0.015868593007326126, 0.012156624346971512, 0.006375743541866541, 0.004486390855163336, 0.037133798003196716, 
0.0008373309392482042, 0.015209782868623734, 0.053904592990875244]]], [[[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12363631278276443, 0.14845161139965057, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14363405108451843, 0.021847352385520935, 0.10135873407125473, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13959342241287231, 0.059129536151885986, 0.04632453992962837, 0.0506979376077652, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1401052325963974, 0.20328059792518616, 0.08711162209510803, 0.021569250151515007, 0.06437158584594727, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14849096536636353, 0.24162742495536804, 0.13733072578907013, 0.023916935548186302, 0.4261094033718109, 0.034874048084020615, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1122843325138092, 0.27548718452453613, 0.3164171576499939, 0.11597670614719391, 0.521038293838501, 0.1305568367242813, 0.04802507162094116, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13016629219055176, 0.2326299250125885, 0.3132029175758362, 0.32591310143470764, 0.1516764611005783, 0.09795279055833817, 0.02053435519337654, 0.1865263283252716, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.121080182492733, 0.4840172827243805, 0.47487083077430725, 0.3000609576702118, 0.5299880504608154, 0.09183567762374878, 0.057097259908914566, 0.12967270612716675, 0.04215369373559952, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08035996556282043, 0.5049515962600708, 0.21779249608516693, 0.22551923990249634, 0.48642098903656006, 0.17451445758342743, 0.14853931963443756, 0.2973877787590027, 0.02990546263754368, 0.12922555208206177, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15412510931491852, 0.24815845489501953, 0.21706829965114594, 0.15909965336322784, 0.3919820487499237, 0.2097313106060028, 0.05961627885699272, 0.10788830369710922, 0.04644578695297241, 0.008778278715908527, 0.1666601300239563, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1319347769021988, 0.07332690805196762, 0.3709748387336731, 0.10343886911869049, 0.2416648119688034, 0.273651659488678, 0.142499178647995, 0.032821010798215866, 0.08169299364089966, 0.04221141338348389, 0.04960552975535393, 0.14849121868610382, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15117543935775757, 0.09085448831319809, 0.23665060102939606, 0.09974268078804016, 0.5293540358543396, 0.2969721853733063, 0.0923411101102829, 0.04701923578977585, 0.47750627994537354, 0.31436240673065186, 0.11817371100187302, 0.08098391443490982, 0.05702001228928566, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2022491842508316, 0.0666579008102417, 0.032761361449956894, 0.03407268971204758, 0.3113752603530884, 0.5905517935752869, 0.21839523315429688, 0.043745849281549454, 0.02789805829524994, 0.042396336793899536, 0.08724991232156754, 0.07408890873193741, 0.010044119320809841, 0.12108539044857025, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14857184886932373, 0.38842764496803284, 
0.16100677847862244, 0.1839173436164856, 0.03719957172870636, 0.5251989364624023, 0.25831982493400574, 0.06345110386610031, 0.01966739259660244, 0.013820506632328033, 0.10135386884212494, 0.06285497546195984, 0.037499457597732544, 0.09235794097185135, 0.06518241763114929, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15810954570770264, 0.08897967636585236, 0.2754043936729431, 0.11542505025863647, 0.7166418433189392, 0.6856120824813843, 0.15602687001228333, 0.03588242083787918, 0.10233978182077408, 0.06907100230455399, 0.13906386494636536, 0.06064911186695099, 0.02474391460418701, 0.09316151589155197, 0.5409220457077026, 0.18577302992343903, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07972963899374008, 0.06995329260826111, 0.2565014958381653, 0.11985079944133759, 0.5429201126098633, 0.3072132468223572, 0.04467121511697769, 0.06233014911413193, 0.06391221284866333, 0.06306523084640503, 0.04008801653981209, 0.16940940916538239, 0.21208623051643372, 0.3237960636615753, 0.4987465739250183, 0.14530567824840546, 0.42085787653923035, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.057688161730766296, 0.05957844480872154, 0.09227755665779114, 0.06308872997760773, 0.6051628589630127, 0.41719216108322144, 0.06513097882270813, 0.11441777646541595, 0.2576654255390167, 0.039566945284605026, 0.04989808052778244, 0.41204503178596497, 0.6269510388374329, 0.0653882622718811, 0.2309982180595398, 0.05030554160475731, 0.12162061780691147, 0.2016562819480896, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.08513950556516647, 0.05776134505867958, 0.44855204224586487, 0.15441171824932098, 0.37962910532951355, 0.43142464756965637, 0.21386101841926575, 0.07478547096252441, 0.22071515023708344, 0.1727379858493805, 0.06471506506204605, 0.1414414495229721, 0.20356127619743347, 0.23849359154701233, 0.28116941452026367, 0.22387196123600006, 0.24124523997306824, 0.10411572456359863, 0.14086224138736725, NaN, NaN, NaN, NaN, NaN, NaN], [0.09857918322086334, 0.08268877118825912, 0.17155912518501282, 0.08326277136802673, 0.3910389840602875, 0.23102693259716034, 0.0706368237733841, 0.04062340036034584, 0.34264665842056274, 0.40400993824005127, 0.14310938119888306, 0.07597656548023224, 0.059025220572948456, 0.46083009243011475, 0.6441643834114075, 0.8002472519874573, 0.34466618299484253, 0.10859531164169312, 0.04317509010434151, 0.042760394513607025, NaN, NaN, NaN, NaN, NaN], [0.07982634007930756, 0.027687683701515198, 0.01305405143648386, 0.01568622700870037, 0.15395750105381012, 0.36470726132392883, 0.09429053217172623, 0.02618592418730259, 0.00988653302192688, 0.03718657046556473, 0.057223062962293625, 0.036843542009592056, 0.008861655369400978, 0.039983998984098434, 0.5628355145454407, 0.5858935713768005, 0.11540589481592178, 0.07112369686365128, 0.022479010745882988, 0.0049066911451518536, 0.07443748414516449, NaN, NaN, NaN, NaN], [0.13230623304843903, 0.39635705947875977, 0.12619565427303314, 0.23844560980796814, 0.04749276116490364, 0.5552228093147278, 0.304650217294693, 0.16151569783687592, 0.05923860892653465, 0.03940735384821892, 0.37161606550216675, 0.13852664828300476, 0.1098584458231926, 0.421970933675766, 0.059641290456056595, 0.35413044691085815, 0.2336989790201187, 0.21869167685508728, 0.04408164322376251, 0.03093402087688446, 0.08392708003520966, 0.038801465183496475, NaN, NaN, NaN], [0.06938444077968597, 0.08034616708755493, 0.1555827558040619, 0.07347460091114044, 0.4763748347759247, 0.40589335560798645, 0.07265187799930573, 0.022002995014190674, 0.0527057945728302, 0.07314148545265198, 0.11090734601020813, 
0.03504399210214615, 0.0172868762165308, 0.14030121266841888, 0.3467526137828827, 0.21038202941417694, 0.6312639117240906, 0.1208876520395279, 0.020520374178886414, 0.014591614715754986, 0.03736459091305733, 0.22129306197166443, 0.05682671070098877, NaN, NaN], [0.08218587934970856, 0.08353152126073837, 0.244074746966362, 0.15340235829353333, 0.5709766745567322, 0.4268343448638916, 0.06391507387161255, 0.13458560407161713, 0.14046461880207062, 0.13024689257144928, 0.043825987726449966, 0.1802380084991455, 0.2593124508857727, 0.4235299825668335, 0.23401854932308197, 0.23376718163490295, 0.4458163380622864, 0.1644086241722107, 0.22351105511188507, 0.25077733397483826, 0.28149890899658203, 0.3320602774620056, 0.05098887160420418, 0.4388013482093811, NaN], [0.13887250423431396, 0.1972966492176056, 0.3352757692337036, 0.30585116147994995, 0.6380553841590881, 0.5158089995384216, 0.3850407004356384, 0.3912012279033661, 0.2877788245677948, 0.30187875032424927, 0.20025724172592163, 0.34020906686782837, 0.47167572379112244, 0.3815076947212219, 0.5385518074035645, 0.20663535594940186, 0.37741178274154663, 0.29376763105392456, 0.3577961027622223, 0.21765607595443726, 0.14290691912174225, 0.3544510304927826, 0.07646653801202774, 0.1391337811946869, 0.019570577889680862]], [[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10658828914165497, 0.44162610173225403, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14346696436405182, 0.1105659008026123, 0.04705679044127464, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14569434523582458, 0.006359750870615244, 0.06321832537651062, 0.009962446056306362, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14614860713481903, 0.0770370289683342, 0.14572308957576752, 0.11918944120407104, 0.003047030884772539, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.16211360692977905, 0.1199408695101738, 0.008137544617056847, 0.026895001530647278, 0.022997038438916206, 0.0004772362008225173, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1276824176311493, 0.05415544658899307, 0.008876973763108253, 0.006533092353492975, 0.16286829113960266, 0.4191088378429413, 0.11241274327039719, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1310766041278839, 0.09720440953969955, 0.005617472343146801, 0.018550021573901176, 0.07474999874830246, 0.03211009502410889, 0.01561786886304617, 0.5897646546363831, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07142644375562668, 0.019657818600535393, 0.044225241988897324, 0.006672952324151993, 0.015112369321286678, 0.03715437650680542, 0.012035970576107502, 0.08684496581554413, 0.5578015446662903, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06384367495775223, 0.009399783797562122, 0.06692944467067719, 0.013825987465679646, 0.01438650768250227, 0.11814092099666595, 0.025182364508509636, 0.04756484180688858, 0.4922580420970917, 0.010614832863211632, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.21570175886154175, 0.004600263200700283, 0.0039491499774158, 0.0010213260538876057, 0.00511409854516387, 
0.00780195789411664, 0.0035460677463561296, 0.06005942076444626, 0.002209970960393548, 0.0011990047059953213, 0.010184505954384804, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15804870426654816, 0.10358668118715286, 0.018792977556586266, 0.0036350360605865717, 0.02226737141609192, 0.007843486964702606, 0.002713214373216033, 0.3624168336391449, 0.00397031893953681, 0.013842551037669182, 0.05391863361001015, 0.040338534861803055, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0703621581196785, 0.01676221750676632, 0.03283774480223656, 0.005265639629215002, 0.016811830922961235, 0.008307189680635929, 0.0008217993890866637, 0.06662888079881668, 0.006444453727453947, 0.0015952866524457932, 0.03341786190867424, 0.28674793243408203, 0.09830270707607269, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.00274313404224813, 0.01220498327165842, 0.001565106911584735, 0.014617281965911388, 0.0015394951915368438, 0.00014163085143081844, 0.0032730719540268183, 0.04253724217414856, 0.01929563470184803, 0.0011092370841652155, 0.008900013752281666, 0.14250728487968445, 0.44352540373802185, 0.012739983387291431, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12441921979188919, 0.09727630764245987, 0.031539320945739746, 0.0390433706343174, 0.004017204977571964, 0.003718326799571514, 0.06902258098125458, 0.21229486167430878, 0.1692674309015274, 0.507585346698761, 0.24224399030208588, 0.4713107943534851, 0.22175242006778717, 0.1071210727095604, 0.001354279462248087, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11131177842617035, 0.045754965394735336, 0.13187335431575775, 0.021390099078416824, 0.2008819729089737, 0.1753949522972107, 0.029810786247253418, 0.1191062182188034, 0.0330519825220108, 0.021209293976426125, 0.007793682627379894, 0.004569755867123604, 0.21031485497951508, 0.08390634506940842, 0.11696453392505646, 0.2920413017272949, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.28942060470581055, 0.004874760750681162, 0.02575746178627014, 0.03629674017429352, 0.0339069589972496, 0.06067432835698128, 0.06949229538440704, 0.17600718140602112, 0.04042575880885124, 0.0021073101088404655, 0.002125136088579893, 0.0013297069817781448, 0.013164625503122807, 0.019647862762212753, 0.0625171884894371, 0.003036472015082836, 0.15673543512821198, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.29843398928642273, 0.006499151699244976, 0.002175502711907029, 0.00474061444401741, 0.012194045819342136, 0.024305779486894608, 0.05332900583744049, 0.20892387628555298, 0.06725459545850754, 0.0056669809855520725, 0.023831704631447792, 0.0038352743722498417, 0.008001168258488178, 0.00692057004198432, 0.006051996257156134, 0.0008782879449427128, 0.0244371946901083, 0.05294432491064072, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.19362471997737885, 0.05030333995819092, 0.012831996195018291, 0.0028119448106735945, 0.011659904383122921, 0.0070129260420799255, 0.002673238283023238, 0.1857692450284958, 0.0015845311572775245, 0.003893241984769702, 0.009055504575371742, 0.013083641417324543, 0.009338575415313244, 0.007860029116272926, 0.009482803754508495, 0.019751103594899178, 0.03845033049583435, 0.03947525471448898, 0.03009573556482792, NaN, NaN, NaN, NaN, NaN, NaN], [0.08181142061948776, 0.013090993277728558, 0.025600923225283623, 0.0045991819351911545, 0.007844633422791958, 0.0066622160375118256, 0.0006054755649529397, 0.01805841363966465, 0.0025927021633833647, 0.0006796378293074667, 0.012531430460512638, 0.18806973099708557, 0.04688132554292679, 
0.005460845306515694, 0.053047653287649155, 0.013497358188033104, 0.040136244148015976, 0.022071214392781258, 0.31691932678222656, 0.07654344290494919, NaN, NaN, NaN, NaN, NaN], [0.003571689361706376, 0.007330529857426882, 0.0009176949388347566, 0.011351491324603558, 0.0005700239562429488, 0.0001114286933443509, 0.0023790227714926004, 0.011217805556952953, 0.004490875173360109, 0.00038650527130812407, 0.0025467458181083202, 0.048559535294771194, 0.22723886370658875, 0.0019670024048537016, 0.0002542402071412653, 0.027445662766695023, 0.015111691318452358, 0.029036840423941612, 0.2144545316696167, 0.4208240211009979, 0.013829981908202171, NaN, NaN, NaN, NaN], [0.11162849515676498, 0.06633912026882172, 0.017337389290332794, 0.030477523803710938, 0.0024834000505506992, 0.001867939718067646, 0.03932232782244682, 0.1628599613904953, 0.14192035794258118, 0.2944621741771698, 0.21811458468437195, 0.42557209730148315, 0.2638176381587982, 0.14630424976348877, 0.0005040403339080513, 0.32521945238113403, 0.2411627173423767, 0.28287336230278015, 0.40539565682411194, 0.1682160645723343, 0.08244442939758301, 0.001218001707457006, NaN, NaN, NaN], [0.20973265171051025, 0.07712213695049286, 0.20427735149860382, 0.025535617023706436, 0.4053865373134613, 0.41131824254989624, 0.030548784881830215, 0.060146916657686234, 0.012079673819243908, 0.01592317223548889, 0.0048461491242051125, 0.0021770852617919445, 0.09957096725702286, 0.1170588806271553, 0.13386258482933044, 0.16141492128372192, 0.004613581579178572, 0.015190798789262772, 0.003683852730318904, 0.1389266699552536, 0.07006954401731491, 0.1815212517976761, 0.17825333774089813, NaN, NaN], [0.3360293209552765, 0.0046190484426915646, 0.024437543004751205, 0.03736568242311478, 0.023848971351981163, 0.05927197262644768, 0.0542423352599144, 0.09209144860506058, 0.023972967639565468, 0.000766670098528266, 0.0006589474505744874, 0.0007115502958185971, 0.00637162895873189, 0.012912634760141373, 0.014624576084315777, 0.0019432539120316505, 0.05897590517997742, 0.0038116518408060074, 0.0016802565660327673, 0.011611220426857471, 0.025170182809233665, 0.04455949738621712, 0.0020357028115540743, 0.14134161174297333, NaN], [0.187117338180542, 0.005916869733482599, 0.020901108160614967, 0.0559980571269989, 0.0324174202978611, 0.008547084406018257, 0.044511571526527405, 0.04880741238594055, 0.05289075896143913, 0.038245368748903275, 0.003611604683101177, 0.002279189880937338, 0.01790045015513897, 0.008863909170031548, 0.01127588003873825, 0.005861865822225809, 0.17173975706100464, 0.009364882484078407, 0.005221609957516193, 0.012455414980649948, 0.007264893501996994, 0.016177698969841003, 0.008824422955513, 0.18642237782478333, 0.0006185321253724396]], [[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12484697252511978, 0.1276315450668335, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15841424465179443, 0.03031034581363201, 0.02654799446463585, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13769303262233734, 0.09575259685516357, 0.025977646932005882, 0.052591271698474884, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15085087716579437, 0.15096567571163177, 0.09222358465194702, 0.028469638898968697, 0.0012114758137613535, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 
NaN, NaN, NaN, NaN, NaN, NaN], [0.16431185603141785, 0.07204771786928177, 0.05053501948714256, 0.012478960677981377, 0.05114812031388283, 0.00039714027661830187, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1666734665632248, 0.06891340762376785, 0.013632094487547874, 0.018171580508351326, 0.002599227475002408, 0.0009873181115835905, 0.0006481229793280363, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14423918724060059, 0.12251336872577667, 0.10176724940538406, 0.33380815386772156, 0.1583750993013382, 0.023372141644358635, 0.026839546859264374, 0.06730155646800995, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2790219187736511, 0.15446610748767853, 0.015893638134002686, 0.03619629144668579, 0.003051391802728176, 0.00038247412885539234, 0.0007123185787349939, 0.010222047567367554, 0.0010863485513255, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.26870372891426086, 0.10405707359313965, 0.00916238222271204, 0.058617573231458664, 0.0049601029604673386, 0.0005682760966010392, 0.004407011903822422, 0.03309918940067291, 0.0036104319151490927, 0.12174393236637115, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05985519662499428, 0.14893494546413422, 0.09544339030981064, 0.18974637985229492, 0.1120084673166275, 0.28269606828689575, 0.4275827407836914, 0.12184610962867737, 0.40095797181129456, 0.08120625466108322, 0.27448615431785583, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06809581816196442, 0.09586934000253677, 0.10229554027318954, 0.057183876633644104, 0.25635847449302673, 0.19582371413707733, 0.4237477481365204, 0.37648820877075195, 0.48733898997306824, 0.20777222514152527, 0.24944597482681274, 0.45371755957603455, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05513762682676315, 0.16880887746810913, 0.02300925739109516, 0.03029457852244377, 0.032050080597400665, 0.0745139941573143, 0.08332593739032745, 0.5048279166221619, 0.051856089383363724, 0.16889351606369019, 0.22218117117881775, 0.29087209701538086, 0.03443009778857231, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07503295689821243, 0.22708888351917267, 0.011672623455524445, 0.03240634873509407, 0.051372844725847244, 0.0555996336042881, 0.1055832952260971, 0.27455389499664307, 0.019383858889341354, 0.29115474224090576, 0.25329896807670593, 0.3762655258178711, 0.06596359610557556, 0.027243560180068016, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15851522982120514, 0.22386471927165985, 0.13473065197467804, 0.10273782163858414, 0.539568305015564, 0.23089595139026642, 0.2947250008583069, 0.2566256523132324, 0.08758009225130081, 0.04963833838701248, 0.026406293734908104, 0.02359875850379467, 0.06999926269054413, 0.014701825566589832, 0.008440684527158737, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1888987272977829, 0.22277534008026123, 0.06621028482913971, 0.04940320923924446, 0.013609242625534534, 0.012980671599507332, 0.0275713000446558, 0.5000426769256592, 0.025658253580331802, 0.28077542781829834, 0.21061377227306366, 0.1005047932267189, 0.0123829934746027, 0.005874408408999443, 0.04495157673954964, 0.007559731602668762, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.10630622506141663, 0.1130438968539238, 0.04711592569947243, 0.14829613268375397, 0.0012987125664949417, 0.0009870391804724932, 0.002409427659586072, 
0.10731083154678345, 0.010861101560294628, 0.02266101725399494, 0.22295407950878143, 0.37738272547721863, 0.21324896812438965, 0.09625840187072754, 0.01478838175535202, 0.004724964965134859, 0.13376930356025696, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0042772903107106686, 0.006450775545090437, 0.00791113544255495, 0.01871791109442711, 0.02349945716559887, 0.036059893667697906, 0.09560179710388184, 0.01157363597303629, 0.020316841080784798, 0.002858342370018363, 0.0015840751584619284, 0.03869258984923363, 0.04008479043841362, 0.0456826388835907, 0.061234306544065475, 0.32812535762786865, 0.4548730254173279, 0.048923686146736145, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.034464891999959946, 0.04304976761341095, 0.0730237364768982, 0.07959159463644028, 0.156441330909729, 0.14927342534065247, 0.37836754322052, 0.2500280439853668, 0.265838086605072, 0.038285933434963226, 0.0458042174577713, 0.2175784856081009, 0.055615901947021484, 0.32925114035606384, 0.23017114400863647, 0.5254709720611572, 0.3807608187198639, 0.4477500319480896, 0.3941081464290619, NaN, NaN, NaN, NaN, NaN, NaN], [0.024431752040982246, 0.057854264974594116, 0.009785568341612816, 0.015689833089709282, 0.010099711827933788, 0.022971261292696, 0.026158222928643227, 0.08270542323589325, 0.00771379703655839, 0.023359954357147217, 0.06216609850525856, 0.1452798992395401, 0.010090651921927929, 0.13497084379196167, 0.023736534640192986, 0.06422590464353561, 0.2799428105354309, 0.34307411313056946, 0.27198341488838196, 0.018816450610756874, NaN, NaN, NaN, NaN, NaN], [0.032250434160232544, 0.07008427381515503, 0.003495490411296487, 0.011726448312401772, 0.013232100754976273, 0.021211393177509308, 0.02240551821887493, 0.050749149173498154, 0.0020511853508651257, 0.034987252205610275, 0.05167752131819725, 0.10231753438711166, 0.017492327839136124, 0.0036121474113315344, 0.0030979528091847897, 0.14347726106643677, 0.4107814431190491, 0.18759746849536896, 0.28042495250701904, 0.02327493391931057, 0.023935986682772636, NaN, NaN, NaN, NaN], [0.17385193705558777, 0.24280618131160736, 0.0901411697268486, 0.1509939581155777, 0.5964542627334595, 0.18189039826393127, 0.25377142429351807, 0.39126867055892944, 0.11990400403738022, 0.04869762808084488, 0.06967514008283615, 0.0491257943212986, 0.1536286324262619, 0.04553663358092308, 0.006321897264569998, 0.008409527130424976, 0.01950901933014393, 0.028066763654351234, 0.039955586194992065, 0.08575458079576492, 0.02489100769162178, 0.0107131227850914, NaN, NaN, NaN], [0.18693126738071442, 0.25040745735168457, 0.07803116738796234, 0.06071358174085617, 0.018153348937630653, 0.012512190267443657, 0.012858238071203232, 0.18478038907051086, 0.008756724186241627, 0.14063727855682373, 0.16963867843151093, 0.06472224742174149, 0.008233368396759033, 0.010625114664435387, 0.04533438757061958, 0.004584541078656912, 0.04685693234205246, 0.3269248306751251, 0.13935554027557373, 0.022706659510731697, 0.015514994971454144, 0.09856907278299332, 0.009564985521137714, NaN, NaN], [0.10220125317573547, 0.06584151834249496, 0.046970706433057785, 0.16499453783035278, 0.0008504274883307517, 0.000721337681170553, 0.0015187861863523722, 0.050142802298069, 0.005332621280103922, 0.005509581416845322, 0.0572623535990715, 0.172898530960083, 0.12213093042373657, 0.0640687644481659, 0.004657925106585026, 0.002522988012060523, 0.028443191200494766, 0.29674383997917175, 0.3544806241989136, 0.20916549861431122, 0.09151047468185425, 0.014975211583077908, 0.0019209993770346045, 0.07398010790348053, NaN], [0.014319260604679585, 
[Raw numeric output omitted: a long series of lower-triangular 25×25 matrices of values in [0, 1], with NaN above the diagonal — consistent with per-layer, per-head masked self-attention weights dumped from a notebook/visualization cell. No prose or captions are recoverable from this span.]
NaN], [0.25873932242393494, 0.5196211338043213, 0.3300914764404297, 0.5837901830673218, 0.4101006090641022, 0.7175306677818298, 0.6572118401527405, 0.6919461488723755, 0.6594171524047852, 0.7066829204559326, 0.46555259823799133, 0.3380126953125, 0.05317035689949989, 0.053740378469228745, 0.031323984265327454, 0.30507126450538635, 0.1422475129365921, 0.03319966048002243, 0.08714800328016281, 0.01252773217856884, 0.006611488293856382, 0.007115270011126995, NaN, NaN, NaN], [0.011579165235161781, 0.05381239950656891, 0.044945720583200455, 0.035533830523490906, 0.6624263525009155, 0.8997865319252014, 0.9679857492446899, 0.17051655054092407, 0.940772533416748, 0.6132625341415405, 0.01721411757171154, 0.04632151871919632, 0.010550450533628464, 0.08354383707046509, 0.12839946150779724, 0.02755529060959816, 0.44050073623657227, 0.04286862909793854, 0.01342833787202835, 0.003870438551530242, 0.026607532054185867, 0.02663758397102356, 0.005111980251967907, NaN, NaN], [0.13300661742687225, 0.5851269960403442, 0.20284885168075562, 0.5700805187225342, 0.7479174137115479, 0.39722636342048645, 0.004733124747872353, 0.0698152482509613, 0.6515945196151733, 0.5409151315689087, 0.25820717215538025, 0.4583084285259247, 0.6744768619537354, 0.3421478569507599, 0.9633424878120422, 0.1852269172668457, 0.04996338114142418, 0.5482219457626343, 0.296283096075058, 0.48366567492485046, 0.06441208720207214, 0.9149421453475952, 0.02780383825302124, 0.0073219588957726955, NaN], [0.14593175053596497, 0.2687321603298187, 0.04604685679078102, 0.30660173296928406, 0.3806478679180145, 0.38105660676956177, 0.15303322672843933, 0.014211257919669151, 0.05383581668138504, 0.20604565739631653, 0.2462100237607956, 0.5718756914138794, 0.5113963484764099, 0.21981710195541382, 0.4276719391345978, 0.5577609539031982, 0.4118191599845886, 0.31598320603370667, 0.5468451976776123, 0.4359907805919647, 0.2059280127286911, 0.3916337192058563, 0.2548142671585083, 0.2198532670736313, 0.026425611227750778]], [[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1684475541114807, 0.01643766649067402, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.20323613286018372, 0.02236698381602764, 0.0030780781526118517, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.15523119270801544, 0.029148569330573082, 0.04869325831532478, 0.027081435546278954, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.20906439423561096, 0.016835892572999, 0.005647255107760429, 0.004844226874411106, 0.00019458922906778753, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.19736447930335999, 0.01826038584113121, 0.012854915112257004, 0.09684289991855621, 0.0006958578014746308, 4.3345058656996116e-05, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.16369424760341644, 0.023256592452526093, 0.01855486072599888, 0.06154748797416687, 0.06098903343081474, 0.10795246064662933, 0.023746412247419357, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.19143380224704742, 0.11398851871490479, 0.03716170787811279, 0.07628969103097916, 0.38886839151382446, 0.24263328313827515, 0.13712459802627563, 0.02201412245631218, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 
NaN, NaN, NaN, NaN, NaN], [0.2130274772644043, 0.007986752316355705, 0.02235114760696888, 0.0019427334191277623, 0.005593507084995508, 0.012699572369456291, 0.006745419930666685, 0.06126464158296585, 0.14077326655387878, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.22579564154148102, 0.013292824849486351, 0.10215212404727936, 0.005943832919001579, 0.013894540257751942, 0.01404587086290121, 0.02319374494254589, 0.10344905406236649, 0.1325504034757614, 0.008661924861371517, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1733061671257019, 0.07715445756912231, 0.2302267998456955, 0.05804288014769554, 0.07560069113969803, 0.23177897930145264, 0.2901765704154968, 0.042333029210567474, 0.08450006693601608, 0.04456959664821625, 0.015471314080059528, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.16428759694099426, 0.01361166127026081, 0.2167942076921463, 0.03707392141222954, 0.09917350113391876, 0.2872558534145355, 0.08793877810239792, 0.03127053380012512, 0.051127880811691284, 0.02603980340063572, 0.12251178920269012, 0.06466985493898392, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2214493751525879, 0.0034381633158773184, 0.025536755099892616, 0.005642351228743792, 0.0024517737329006195, 0.00733930105343461, 0.0003064426709897816, 0.024970028549432755, 0.0009503457695245743, 0.0013023557839915156, 0.012362079694867134, 0.002213133964687586, 0.0037243058905005455, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.21803884208202362, 0.044672977179288864, 0.15033316612243652, 0.24480289220809937, 0.0010314357932657003, 0.006885815411806107, 0.017953861504793167, 0.09280995279550552, 0.09214792400598526, 0.01309943851083517, 0.026278402656316757, 0.029330603778362274, 0.10137840360403061, 0.0009828503243625164, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.28474918007850647, 0.005827821791172028, 0.0010850036051124334, 0.005180059466511011, 0.00018831032502930611, 0.002925402717664838, 0.0029562395066022873, 0.005281978752464056, 0.002952893264591694, 0.013548285700380802, 0.01663871854543686, 0.02234998345375061, 0.001472283387556672, 0.00024227210087701678, 9.911999950418249e-05, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11472342163324356, 0.017006950452923775, 0.03429265320301056, 0.05351921543478966, 0.010289198718965054, 0.02545105293393135, 0.002036151010543108, 0.08590202778577805, 0.007977829314768314, 0.008050770498812199, 0.02079172432422638, 0.07815419882535934, 0.25072064995765686, 0.11726108938455582, 0.04080193489789963, 0.020839283242821693, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.25351014733314514, 0.018978603184223175, 0.013279697857797146, 0.14657457172870636, 0.0005683518829755485, 0.003044809214770794, 0.0003673452010843903, 0.0009085922501981258, 0.00026260188315063715, 6.703466351609677e-05, 0.00393629027530551, 0.0411190427839756, 0.014572926796972752, 0.0009043514728546143, 0.001453216653317213, 0.001335341832600534, 0.0036634530406445265, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2249869406223297, 0.0773954764008522, 0.10561174154281616, 0.3267342746257782, 0.011780736967921257, 0.03227663040161133, 0.09185110032558441, 0.03840579837560654, 0.01289159432053566, 0.002641883445903659, 0.03386297821998596, 0.16820214688777924, 0.06345225125551224, 0.027306171134114265, 0.007737002335488796, 0.018253128975629807, 0.0508209764957428, 0.015562118031084538, NaN, NaN, NaN, NaN, NaN, NaN, NaN], 
[0.17073971033096313, 0.01119090337306261, 0.07090220600366592, 0.026190776377916336, 0.04357914999127388, 0.10384812206029892, 0.05681576952338219, 0.008270802907645702, 0.011212479323148727, 0.016114890575408936, 0.1306251734495163, 0.04437248408794403, 0.022720789536833763, 0.0017881430685520172, 0.005742507986724377, 0.03271590173244476, 0.12170897424221039, 0.18442584574222565, 0.07238933444023132, NaN, NaN, NaN, NaN, NaN, NaN], [0.2460513859987259, 0.004599481821060181, 0.030415518209338188, 0.006707339081913233, 0.001940727117471397, 0.0018293699249625206, 0.0002438600640743971, 0.021702459082007408, 0.00019114103633910418, 0.0004616644873749465, 0.02795419655740261, 0.007376548834145069, 0.009364028461277485, 0.0008695388678461313, 0.027626920491456985, 0.002984545426443219, 0.0021758046932518482, 0.005276597570627928, 0.0015223525697365403, 0.0046029179356992245, NaN, NaN, NaN, NaN, NaN], [0.1682240217924118, 0.15532228350639343, 0.17499232292175293, 0.31528380513191223, 0.0016938054468482733, 0.0013859918108209968, 0.0071086762472987175, 0.08609996736049652, 0.02145048975944519, 0.00334079097956419, 0.08546027541160583, 0.16909679770469666, 0.5000762343406677, 0.012536582536995411, 0.0033327846322208643, 0.01681024581193924, 0.01291667390614748, 0.11205089092254639, 0.06917328387498856, 0.24062496423721313, 0.003104837378486991, NaN, NaN, NaN, NaN], [0.30163663625717163, 0.008585775271058083, 0.0018221536884084344, 0.004949942696839571, 0.0002661931503098458, 0.0017199779395014048, 0.00286088977009058, 0.004591777920722961, 0.0013412131229415536, 0.009152509272098541, 0.029603971168398857, 0.059182800352573395, 0.004352512303739786, 0.0009281163802370429, 0.00013420419418253005, 0.0015637356555089355, 0.004895435180515051, 0.0020298720337450504, 0.016267914324998856, 0.0014363413210958242, 0.00015049855574034154, 4.989441003999673e-05, NaN, NaN, NaN], [0.1420876681804657, 0.030559053644537926, 0.035777460783720016, 0.0549585185945034, 0.010907668620347977, 0.018195953220129013, 0.005288956221193075, 0.07946551591157913, 0.003352995030581951, 0.00945360492914915, 0.03057919070124626, 0.20277532935142517, 0.5438944697380066, 0.2487112432718277, 0.11027072370052338, 0.03672702983021736, 0.009589559398591518, 0.03681262582540512, 0.12653782963752747, 0.3100517988204956, 0.04488144814968109, 0.07299992442131042, 0.024292031303048134, NaN, NaN], [0.2571920156478882, 0.012253361754119396, 0.00982633139938116, 0.09085621684789658, 0.00026428516139276326, 0.001174133620224893, 0.00010905979434028268, 0.0006958161829970777, 9.435929678147659e-05, 1.889842314994894e-05, 0.0019355103140696883, 0.03233037516474724, 0.014144179411232471, 0.0034062752965837717, 0.0014896523207426071, 0.0032966958824545145, 0.0043079969473183155, 0.002425077836960554, 0.0237245112657547, 0.017915409058332443, 0.0004631538176909089, 0.0033925946336239576, 0.0019653798080980778, 0.0010656031081452966, NaN], [0.25252944231033325, 0.012149164453148842, 0.019892947748303413, 0.013666713610291481, 0.05940697342157364, 0.04882493242621422, 0.025430571287870407, 0.00045668394886888564, 0.0054928152821958065, 0.005623141769319773, 0.004253733437508345, 0.014798035845160484, 0.012909402139484882, 0.011927488259971142, 0.007018915377557278, 0.021986471489071846, 0.016502689570188522, 0.002887164242565632, 0.006932961288839579, 0.007926056161522865, 0.015145027078688145, 0.005945136770606041, 0.016453862190246582, 0.011257275938987732, 0.0009747393196448684]], [[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 
NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.14568212628364563, 0.073321633040905, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.07740449905395508, 0.019538799300789833, 0.31676185131073, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11254165321588516, 0.04977253079414368, 0.12113941460847855, 0.18998825550079346, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.09693466126918793, 0.12094055861234665, 0.48810020089149475, 0.07605772465467453, 0.10663138329982758, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.002718105213716626, 0.037000641226768494, 0.1506986916065216, 0.012303436174988747, 0.09212689101696014, 0.5217995047569275, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17887507379055023, 0.10589989274740219, 0.004075651057064533, 0.0014342612121254206, 0.00521382549777627, 0.031908128410577774, 0.003124895039945841, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.23519471287727356, 0.3653021454811096, 0.05512593686580658, 0.10675911605358124, 0.0014886436983942986, 0.001230676076374948, 0.003634560154750943, 0.00975269265472889, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.19171930849552155, 0.3204987347126007, 0.0060858046635985374, 0.010409774258732796, 0.003722283523529768, 0.0010954621247947216, 0.0028676562942564487, 0.35306307673454285, 0.01622932404279709, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.25555557012557983, 0.13076956570148468, 0.003832729533314705, 0.0447237528860569, 0.014599477872252464, 0.0024878191761672497, 0.0016443775966763496, 0.20187559723854065, 0.0005508072790689766, 0.0029457835480570793, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13948844373226166, 0.2463626265525818, 0.09502393007278442, 0.197096586227417, 0.47678983211517334, 0.3142886161804199, 0.09103813022375107, 0.10499368607997894, 0.07698603719472885, 0.026083102449774742, 0.3110981583595276, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1511228382587433, 0.027682308107614517, 0.014322453178465366, 0.0030328254215419292, 0.04723867028951645, 0.30981165170669556, 0.025852922350168228, 0.018514074385166168, 0.01515920553356409, 0.009253463707864285, 0.10175863653421402, 0.16996310651302338, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1847103387117386, 0.05052594095468521, 0.005765186157077551, 0.018545929342508316, 0.00881477165967226, 0.0375242680311203, 0.027162199839949608, 0.09025334566831589, 0.0028228689916431904, 0.0033718899358063936, 0.1103500947356224, 0.0837099552154541, 0.0044236015528440475, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.27341794967651367, 0.03427007421851158, 0.008004172705113888, 0.009254892356693745, 0.005621441174298525, 0.00972525030374527, 0.005248658824712038, 0.02184745855629444, 0.0006181569187901914, 0.0005494534852914512, 0.06994801014661789, 0.02213645726442337, 0.004287416115403175, 0.0008399627404287457, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.008804291486740112, 0.07617928832769394, 0.47516930103302, 0.07513945549726486, 0.5241973400115967, 0.4384346902370453, 
0.06213618069887161, 0.06345370411872864, 0.0682281106710434, 0.15877418220043182, 0.023486817255616188, 0.026526909321546555, 0.0028373831883072853, 0.001617963775061071, 0.37629759311676025, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.26533833146095276, 0.10994716733694077, 0.010266831144690514, 0.037150826305150986, 0.009969023987650871, 0.00030588259687647223, 8.988264016807079e-05, 0.07940464466810226, 0.00027601365582086146, 0.0013282618019729853, 0.009904097765684128, 0.03278518095612526, 0.0630892813205719, 0.10911130160093307, 0.016624033451080322, 0.011541539803147316, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2451263964176178, 0.014867580495774746, 0.0005470102187246084, 0.0054298522882163525, 0.0004450916312634945, 0.0006575370789505541, 3.8741818570997566e-05, 0.0010275153908878565, 0.0013172366889193654, 0.0019110681023448706, 0.13600468635559082, 0.29138538241386414, 0.011091821826994419, 0.0002334356977371499, 0.0002162840828532353, 0.0001727231137920171, 0.004782650154083967, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.18341027200222015, 0.31211209297180176, 0.08544175326824188, 0.17215219140052795, 0.07786234468221664, 0.033002957701683044, 0.028957894071936607, 0.08467604964971542, 0.018818018957972527, 0.0016417433507740498, 0.15075404942035675, 0.1522863805294037, 0.03350237384438515, 0.006119633559137583, 0.022573737427592278, 0.03810621052980423, 0.13675758242607117, 0.1992093175649643, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1540856957435608, 0.05453011393547058, 0.023697303608059883, 0.003979950677603483, 0.014029269106686115, 0.1104540005326271, 0.019629694521427155, 0.011429534293711185, 0.010672842152416706, 0.00807265006005764, 0.1843080371618271, 0.19234825670719147, 0.0017768212128430605, 0.006891301833093166, 0.08265318721532822, 0.014878016896545887, 0.09550431370735168, 0.1691773235797882, 0.20674942433834076, NaN, NaN, NaN, NaN, NaN, NaN], [0.21139073371887207, 0.06409671157598495, 0.007977590896189213, 0.017582383006811142, 0.004139575641602278, 0.008497070521116257, 0.024324562400579453, 0.12332659959793091, 0.0006915424601174891, 0.0006991134723648429, 0.09821731597185135, 0.18821127712726593, 0.009975801222026348, 0.024784373119473457, 0.009686794131994247, 0.0016004297649487853, 0.006526788230985403, 0.04246864095330238, 0.05479469522833824, 0.004482009913772345, NaN, NaN, NaN, NaN, NaN], [0.33224669098854065, 0.07294216006994247, 0.01592269167304039, 0.006994656287133694, 0.003661615075543523, 0.0007586313877254725, 0.0006907262722961605, 0.022764746099710464, 0.000276167003903538, 9.849678463069722e-05, 0.08613532781600952, 0.07070992141962051, 0.03258151933550835, 0.002256957348436117, 0.00035050295991823077, 0.002809839555993676, 0.005992868449538946, 0.14088936150074005, 0.024111032485961914, 0.015468394383788109, 0.000736193498596549, NaN, NaN, NaN, NaN], [0.00368693470954895, 0.0603332445025444, 0.389295369386673, 0.03955860063433647, 0.26089394092559814, 0.125760018825531, 0.029167605563998222, 0.03710402920842171, 0.03377004712820053, 0.08135493099689484, 0.01946301944553852, 0.033920928835868835, 0.00409010099247098, 0.0020981510169804096, 0.4028157889842987, 0.01821253076195717, 0.03254074230790138, 0.005954912398010492, 0.016414301469922066, 0.0033934058155864477, 0.0012025205651298165, 0.37666910886764526, NaN, NaN, NaN], [0.30478137731552124, 0.23805196583271027, 0.009743728674948215, 0.02953244559466839, 0.005627358797937632, 0.00013927526015322655, 0.00016958850028458983, 0.09182754158973694, 0.00019882968626916409, 
0.0018803260754793882, 0.01743759773671627, 0.09691343456506729, 0.09625609964132309, 0.0949849784374237, 0.057061683386564255, 0.028116967529058456, 0.00013736996334046125, 0.022905906662344933, 0.02515738271176815, 0.029101604595780373, 0.01233749371021986, 0.027021989226341248, 0.012159456498920918, NaN, NaN], [0.2508227825164795, 0.013127491809427738, 0.0004774215049110353, 0.005875048227608204, 0.00014762053615413606, 0.0003128673997707665, 1.7799626220948994e-05, 0.0017815351020544767, 0.0009225650574080646, 0.0009481729357503355, 0.09391504526138306, 0.24316561222076416, 0.008820290677249432, 0.0015348505694419146, 0.0002856143401004374, 0.00038499117363244295, 0.010248353704810143, 0.0923430323600769, 0.1539699137210846, 0.0089821582660079, 0.00013843990745954216, 0.0004539538058452308, 6.709429726470262e-05, 0.0014084051363170147, NaN], [0.06230561435222626, 0.051613274961709976, 0.02077883668243885, 0.04204944148659706, 0.07247611880302429, 0.11675790697336197, 0.004215644672513008, 0.00555834174156189, 0.008976897224783897, 0.017200933769345284, 0.007355507928878069, 0.06492317467927933, 0.04215962812304497, 0.02968345396220684, 0.23223130404949188, 0.03253115341067314, 0.08794146776199341, 0.025323374196887016, 0.08459514379501343, 0.05644838511943817, 0.04970480501651764, 0.3588789105415344, 0.028869707137346268, 0.11940079927444458, 0.27181047201156616]], [[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.04884753376245499, 0.31528204679489136, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [7.444373295584228e-06, 4.17321571148932e-05, 0.5221405029296875, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.09023705869913101, 0.59262615442276, 0.038057319819927216, 0.1896824985742569, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0001943353418027982, 0.004992108792066574, 0.35714879631996155, 0.028785984963178635, 0.7041940689086914, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [1.0879062756430358e-05, 5.022298137191683e-05, 0.0836932584643364, 0.0041815838776528835, 0.7177854776382446, 0.4451410174369812, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.003986984025686979, 0.03902542591094971, 0.00027279910864308476, 0.00016326647892128676, 0.09999275952577591, 0.23601794242858887, 0.8888784646987915, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0004483810334932059, 0.01581367664039135, 0.00053547159768641, 0.005416989792138338, 0.0004931549192406237, 1.743426764733158e-06, 0.0002464183489792049, 0.38669928908348083, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0014915558276697993, 0.0036082565784454346, 0.0005674233543686569, 0.0010717788245528936, 0.04321836307644844, 0.5446166396141052, 0.38359156250953674, 0.006869717035442591, 0.0028910271357744932, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [8.035104838199914e-05, 0.005924052093178034, 0.005847892723977566, 0.020417997613549232, 0.11436353623867035, 0.6555760502815247, 0.4247216582298279, 0.04553407058119774, 0.00039129320066422224, 0.013846640475094318, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 
NaN, NaN, NaN], [0.0012459981953725219, 0.12171746790409088, 0.022806251421570778, 0.021380947902798653, 0.018195364624261856, 0.08835338801145554, 0.20732422173023224, 0.30439698696136475, 0.09951408952474594, 0.2512991428375244, 0.4290468692779541, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.007976139895617962, 0.03435874730348587, 0.026849543675780296, 0.002102706115692854, 0.13315419852733612, 0.1177494078874588, 0.08904305100440979, 0.576798677444458, 0.140389084815979, 0.6266443729400635, 0.32779327034950256, 0.5110495090484619, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0015641784993931651, 0.09294694662094116, 0.006881145294755697, 0.0020365919917821884, 0.4301930069923401, 0.06383264064788818, 0.0045266724191606045, 0.17422647774219513, 0.00404678238555789, 0.006469257641583681, 0.052995309233665466, 0.1725381463766098, 0.668171763420105, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.004304439760744572, 0.05993141233921051, 0.054169829934835434, 0.025809768587350845, 0.7262899279594421, 0.2466905415058136, 0.15344326198101044, 0.33606013655662537, 0.02952432446181774, 0.07010773569345474, 0.008777104318141937, 0.03394261747598648, 0.032566726207733154, 0.6152393221855164, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [1.0540320545260329e-05, 0.0013190202880650759, 0.20101842284202576, 0.004686327185481787, 0.13271625339984894, 0.04526880756020546, 0.0007031870190985501, 0.0011485026916489005, 0.002882149303331971, 0.0005991549696773291, 0.0030197217129170895, 0.004800362046808004, 0.004403174854815006, 0.002436757553368807, 0.4002683460712433, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0003210107679478824, 0.5876501798629761, 0.16318874061107635, 0.7096263766288757, 0.11595475673675537, 0.007003267295658588, 0.001205803593620658, 0.1902448534965515, 0.011727835983037949, 0.44888344407081604, 0.8117052912712097, 0.45698752999305725, 0.023960944265127182, 0.010929742828011513, 0.005293603055179119, 0.00987145397812128, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.020372437313199043, 0.3410835862159729, 0.6929088234901428, 0.04383905977010727, 0.1458517462015152, 0.4223538339138031, 0.9439106583595276, 0.9473816156387329, 0.15120889246463776, 0.7730743288993835, 0.5082507133483887, 0.0460858978331089, 0.032336097210645676, 0.011211436241865158, 0.009573124349117279, 0.0003536108124535531, 0.06564418971538544, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.020423829555511475, 0.09150233864784241, 0.593336284160614, 0.050333935767412186, 0.04262891411781311, 0.44151586294174194, 0.7098277807235718, 0.36869171261787415, 0.7183430194854736, 0.3146522641181946, 0.5934929251670837, 0.08962199836969376, 0.01141325756907463, 0.0268073882907629, 0.008290876634418964, 0.022364463657140732, 0.0520397312939167, 0.3134966492652893, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.008604546077549458, 0.07562410086393356, 0.10463645309209824, 0.003217896446585655, 0.1296835094690323, 0.21162182092666626, 0.30799001455307007, 0.7962209582328796, 0.27782267332077026, 0.5974112749099731, 0.3643631041049957, 0.5975222587585449, 0.032379183918237686, 0.8344925045967102, 0.5903766751289368, 0.1521190106868744, 0.10492946952581406, 0.10503242909908295, 0.5022279620170593, NaN, NaN, NaN, NaN, NaN, NaN], [0.0010157334618270397, 0.08574047684669495, 0.010654903016984463, 0.003869200125336647, 0.15051355957984924, 0.02434478886425495, 0.005829520523548126, 0.10341739654541016, 0.0023463659454137087, 0.00469975033774972, 
0.1621563881635666, 0.27765417098999023, 0.6246147155761719, 0.44377410411834717, 0.0757245346903801, 0.08620554953813553, 0.08146335929632187, 0.32109129428863525, 0.1958039551973343, 0.5327519178390503, NaN, NaN, NaN, NaN, NaN], [0.0009064326295629144, 0.04867112636566162, 0.09537991136312485, 0.12993541359901428, 0.38632717728614807, 0.056282784789800644, 0.13602504134178162, 0.18383464217185974, 0.024170320481061935, 0.09972675889730453, 0.022063996642827988, 0.042059145867824554, 0.01842264086008072, 0.8592916131019592, 0.1306053251028061, 0.06485681235790253, 0.048735883086919785, 0.037178389728069305, 0.017466288059949875, 0.006924192421138287, 0.8764364123344421, NaN, NaN, NaN, NaN], [1.2418378219081205e-06, 0.0003037750138901174, 0.10264009237289429, 0.0010840333998203278, 0.03004724159836769, 0.00720690144225955, 0.00017297905287705362, 0.00021026108879595995, 0.0005732537247240543, 0.00013229742762632668, 0.0014890850288793445, 0.0027206502854824066, 0.0022100789938122034, 0.0018764312844723463, 0.22427155077457428, 0.0012303950497880578, 0.0001426686649210751, 0.0015814924845471978, 0.00487141590565443, 0.0029599322006106377, 0.003610847517848015, 0.41901907324790955, NaN, NaN, NaN], [0.00015546051145065576, 0.5271192193031311, 0.2684091329574585, 0.7487277388572693, 0.0846778005361557, 0.003557654097676277, 0.0064069912768900394, 0.16770148277282715, 0.008421340025961399, 0.27412623167037964, 0.8534677624702454, 0.5243650078773499, 0.02665238454937935, 0.01776440255343914, 0.013793676160275936, 0.00868560466915369, 0.08064579218626022, 0.69512540102005, 0.49261555075645447, 0.010526523925364017, 0.0028473760467022657, 0.008281596936285496, 0.007198471110314131, NaN, NaN], [0.03285643830895424, 0.3327244818210602, 0.7442528605461121, 0.049526505172252655, 0.13722854852676392, 0.37294694781303406, 0.9746374487876892, 0.9050161242485046, 0.144730344414711, 0.44314900040626526, 0.6168692708015442, 0.18840178847312927, 0.12898683547973633, 0.1250022053718567, 0.01759251020848751, 0.0030696040485054255, 0.6704888939857483, 0.3205258250236511, 0.28675025701522827, 0.09770815074443817, 0.0085873082280159, 0.028106005862355232, 0.0015327840810641646, 0.12156207114458084, NaN], [0.027913866564631462, 0.6360336542129517, 0.8947576880455017, 0.5603421926498413, 0.3501611351966858, 0.3494046926498413, 0.7655782103538513, 0.9696423411369324, 0.8922762274742126, 0.42980051040649414, 0.4555767774581909, 0.17016178369522095, 0.1410100758075714, 0.652664303779602, 0.2781027853488922, 0.07839874923229218, 0.11400053650140762, 0.10023999214172363, 0.04957454651594162, 0.07193805277347565, 0.5185664892196655, 0.15356925129890442, 0.02747632935643196, 0.046240244060754776, 0.017650051042437553]], [[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.02477514185011387, 0.37543168663978577, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.02274254709482193, 0.6458237767219543, 0.013541627675294876, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03146426007151604, 0.019330549985170364, 0.019686071202158928, 0.5363749265670776, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.05261930450797081, 0.12757715582847595, 0.003555318573489785, 0.48483166098594666, 0.00033596818684600294, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 
NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.09825422614812851, 0.08890903741121292, 0.0022953739389777184, 0.3788372278213501, 6.525879871333018e-05, 3.547202504705638e-05, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1839720457792282, 0.005392392631620169, 0.0012601928319782019, 0.000860364583786577, 0.0008281354093924165, 0.0005760629428550601, 0.002849774667993188, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.005911883432418108, 0.0029267233330756426, 0.007144090253859758, 0.001919957809150219, 0.004637785721570253, 0.004848909098654985, 0.006189228966832161, 0.3764636814594269, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2256152480840683, 0.0020181250292807817, 0.0012439934071153402, 0.00031968209077604115, 0.0029859780333936214, 0.017534615471959114, 0.0004058087943121791, 0.00034323628642596304, 0.029154805466532707, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.03960844501852989, 0.0036635666619986296, 0.00109457119833678, 0.0017422186210751534, 0.022469639778137207, 0.004235065542161465, 0.007348764222115278, 0.00280297570861876, 0.030011437833309174, 0.576508641242981, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0628783106803894, 0.014568633399903774, 0.003403500886633992, 0.005917230620980263, 0.009509358555078506, 0.0019911406561732292, 0.005211993586272001, 0.01603839360177517, 0.00502167409285903, 0.3301290273666382, 0.10268117487430573, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.178706556558609, 0.5124386548995972, 0.028256116434931755, 0.011254883371293545, 0.03223628178238869, 0.0004171380714979023, 0.004843876231461763, 0.09010603278875351, 0.0025540743954479694, 0.016201328486204147, 0.029397757723927498, 0.010837158188223839, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.18362975120544434, 0.10373001545667648, 0.006869313772767782, 0.010921900160610676, 0.01820673979818821, 0.0017379705095663667, 0.002349345711991191, 0.03729201853275299, 5.792165029561147e-05, 0.0013579311780631542, 0.0025659396778792143, 0.008523254655301571, 0.1568114459514618, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.060853905975818634, 0.016029829159379005, 0.001439533894881606, 0.017260756343603134, 0.0007974627078510821, 0.0012342276750132442, 0.028226196765899658, 0.0047790613025426865, 0.0015612602001056075, 0.004867547657340765, 0.039023980498313904, 0.05208572745323181, 0.33480554819107056, 0.17332881689071655, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.043774526566267014, 0.2669547498226166, 0.035314492881298065, 0.1941595822572708, 0.006638282909989357, 0.005091785918921232, 0.2628510892391205, 0.2860943675041199, 0.06445851922035217, 0.34950578212738037, 0.6430334448814392, 0.5673049688339233, 0.6101463437080383, 0.29372307658195496, 0.0028161092195659876, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.018545497208833694, 0.059764593839645386, 0.0026272537652403116, 0.020267995074391365, 0.009687644429504871, 0.00033462722785770893, 0.0024671528954058886, 0.054633729159832, 5.4464391723740846e-05, 0.00043273900519125164, 0.0019224031129851937, 0.21117039024829865, 0.3183750510215759, 0.03866858780384064, 0.011778384447097778, 0.1297062188386917, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0004199208051431924, 4.603992783813737e-05, 8.09443406524224e-07, 
2.029701317951549e-05, 3.386533080629306e-06, 2.203315261795069e-06, 4.220597020321293e-06, 8.901660294213798e-06, 0.00016298270202241838, 0.000983458710834384, 0.0005640776362270117, 0.0008154786773957312, 0.001651398022659123, 2.400618996034609e-06, 3.3168395020766184e-05, 6.549440058734035e-06, 0.8699775338172913, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.06053417548537254, 0.012584012933075428, 0.0010002547642216086, 0.0027718576602637768, 0.006610550452023745, 0.0029896856285631657, 0.008355176076292992, 0.048459943383932114, 0.002307809190824628, 0.65205979347229, 0.1651758849620819, 0.011300449259579182, 0.029586348682641983, 0.014456091448664665, 0.0007872084970586002, 0.0008902085828594863, 0.029332326725125313, 0.16636918485164642, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.19553376734256744, 0.2426333725452423, 0.004519153386354446, 0.00883245188742876, 0.006844275165349245, 0.00014635240950156003, 0.00260242260992527, 0.03859727829694748, 0.0011520206462591887, 0.014703472144901752, 0.016579829156398773, 0.003783928230404854, 0.01771795004606247, 0.0035672299563884735, 0.000677697011269629, 0.002100451150909066, 0.023971345275640488, 0.03231354430317879, 0.011524699628353119, NaN, NaN, NaN, NaN, NaN, NaN], [0.17035169899463654, 0.07290639728307724, 0.0013864204520359635, 0.008776376023888588, 0.010795027948915958, 0.0008890280150808394, 0.00375909055583179, 0.03264426812529564, 2.1074760297778994e-05, 0.0009656226029619575, 0.004805654752999544, 0.015095297247171402, 0.19429266452789307, 0.060086220502853394, 0.013300183229148388, 0.019145654514431953, 0.08634541183710098, 0.018065713346004486, 0.012390428222715855, 0.3474832773208618, NaN, NaN, NaN, NaN, NaN], [0.002681915881112218, 0.0020622191950678825, 1.740588413667865e-05, 0.001647116499952972, 2.462047996232286e-05, 1.4256034774007276e-05, 0.0023770714178681374, 0.0007797144935466349, 6.146806117612869e-05, 0.00019536878971848637, 0.023629816249012947, 0.022664623335003853, 0.058040015399456024, 0.02328144572675228, 0.00014305225340649486, 0.1791975051164627, 0.7950490117073059, 0.40287262201309204, 0.05916967615485191, 0.11726692318916321, 0.045271970331668854, NaN, NaN, NaN, NaN], [0.017539121210575104, 0.07800457626581192, 0.013338283635675907, 0.07843150943517685, 0.003389358287677169, 0.0011982140131294727, 0.07936429977416992, 0.08406823873519897, 0.016710255295038223, 0.13201765716075897, 0.339507520198822, 0.3268124461174011, 0.4709261357784271, 0.24707961082458496, 0.0009133804705925286, 0.27326905727386475, 0.539431095123291, 0.8842423558235168, 0.5773340463638306, 0.643308699131012, 0.15606866776943207, 0.0011033734772354364, NaN, NaN, NaN], [0.0009739195229485631, 0.0011780881322920322, 3.265493069193326e-05, 0.0005334040033631027, 0.0007281061843968928, 3.2774634746601805e-05, 0.0004276044783182442, 0.00342408730648458, 2.9227990125946235e-06, 5.522280844161287e-05, 0.00012372780474834144, 0.011400841176509857, 0.008755120448768139, 0.0017365129897370934, 0.0007705622701905668, 0.0024924452882260084, 0.4634210169315338, 0.010356471873819828, 0.06587640196084976, 0.03498200699687004, 0.005118835251778364, 0.0019369632937014103, 0.023791478946805, NaN, NaN], [0.00023119446996133775, 9.065014637599234e-06, 3.0932378081161005e-07, 7.128239758458221e-06, 2.417179757685517e-06, 1.9917408735636855e-06, 1.0686825362427044e-06, 3.5747166293731425e-06, 3.038432441826444e-05, 0.00024045849568210542, 0.00012102597975172102, 0.0003720777458511293, 0.0005474414792843163, 4.2138731259910855e-06, 8.004362825886346e-06, 
4.010584234492853e-06, 0.22906039655208588, 0.00024706448311917484, 0.003541025100275874, 0.0035716970451176167, 1.1338630656609894e-06, 4.888530747848563e-05, 2.00755093828775e-05, 0.8455927968025208, NaN], [0.023575956001877785, 0.001566409133374691, 0.0004935376346111298, 0.015205318108201027, 0.0005761805805377662, 0.00026375881861895323, 0.0017682479228824377, 0.00015503005124628544, 0.011253873817622662, 0.321735680103302, 0.05970581993460655, 0.008942467160522938, 0.051820773631334305, 0.009087985381484032, 0.002068085130304098, 0.00584985688328743, 0.01019755844026804, 0.16441591084003448, 0.021173937246203423, 0.09159599989652634, 0.004452125634998083, 0.0037374526727944613, 0.01578103005886078, 0.01742226630449295, 0.3373567461967468]]], [[[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1729947179555893, 0.014742943458259106, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11518532782793045, 0.28854820132255554, 0.0005498379468917847, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.12768876552581787, 0.007979520596563816, 0.05741023272275925, 0.14377589523792267, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.25598737597465515, 0.03471918776631355, 0.08263758569955826, 0.03616967797279358, 0.0012629067059606314, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.29742351174354553, 0.10481993854045868, 0.07552393525838852, 0.008401650935411453, 0.3407011330127716, 0.028353586792945862, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.17861823737621307, 0.07256677001714706, 0.1795390099287033, 0.04586997628211975, 0.27750420570373535, 0.0032322825863957405, 0.09472999721765518, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1281835287809372, 0.008169662207365036, 0.10209551453590393, 0.22781534492969513, 0.13339588046073914, 0.022249281406402588, 0.2580547630786896, 0.0071509419940412045, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.19490991532802582, 0.0105251120403409, 0.07082764059305191, 0.07746586948633194, 0.10047772526741028, 0.007984980009496212, 0.045915842056274414, 0.030714787542819977, 0.09154831618070602, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.2116595059633255, 0.006228659767657518, 0.09237925708293915, 0.33000993728637695, 0.06037600710988045, 0.06468494236469269, 0.028822004795074463, 0.015993207693099976, 0.023504862561821938, 0.014777855016291142, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11546289920806885, 0.0627092570066452, 0.1015198826789856, 0.17440570890903473, 0.11644574254751205, 0.15138378739356995, 0.17151175439357758, 0.07174428552389145, 0.1994275599718094, 0.20994937419891357, 0.08254047483205795, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13584046065807343, 0.09117304533720016, 0.15590398013591766, 0.10968183726072311, 0.5585501790046692, 0.07535546272993088, 0.2762793302536011, 0.32588398456573486, 0.3246583938598633, 0.41251155734062195, 0.043567951768636703, 0.0185235645622015, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1674133688211441, 
0.12648360431194305, 0.27492284774780273, 0.24355122447013855, 0.8769406676292419, 0.6096609234809875, 0.4704851806163788, 0.055198147892951965, 0.6140321493148804, 0.2705269455909729, 0.07450747489929199, 0.04471021145582199, 0.05369797348976135, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.035074394196271896, 0.012203776277601719, 0.2713678479194641, 0.27628132700920105, 0.5399907231330872, 0.3242804706096649, 0.5765586495399475, 0.02925838902592659, 0.3159044086933136, 0.11935708671808243, 0.16010764241218567, 0.31936678290367126, 0.22831447422504425, 0.09149928390979767, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1354324370622635, 0.08839684724807739, 0.010535157285630703, 0.3809414505958557, 0.006101538427174091, 0.04204240441322327, 0.6714356541633606, 0.02054513990879059, 0.44751474261283875, 0.5217893123626709, 0.16833685338497162, 0.4138224124908447, 0.5945862531661987, 0.14406909048557281, 0.000551112403627485, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.26645413041114807, 0.038747917860746384, 0.15441381931304932, 0.6166976094245911, 0.04416924715042114, 0.07849516719579697, 0.41569313406944275, 0.018940549343824387, 0.18770581483840942, 0.11268321424722672, 0.0962471142411232, 0.028718965128064156, 0.019747000187635422, 0.011864973232150078, 0.07090434432029724, 0.02976600080728531, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.26584282517433167, 0.03641113266348839, 0.24681606888771057, 0.03326011076569557, 0.5612249970436096, 0.11044078320264816, 0.038705065846443176, 0.07638699561357498, 0.20042885839939117, 0.41367095708847046, 0.16446417570114136, 0.05500950291752815, 0.0458536334335804, 0.038293108344078064, 0.05886702984571457, 0.005421455018222332, 0.03447017818689346, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.052208781242370605, 0.10399425774812698, 0.2661847770214081, 0.06582632660865784, 0.5218088626861572, 0.41107869148254395, 0.18652401864528656, 0.10915308445692062, 0.2499890774488449, 0.21385571360588074, 0.11996328830718994, 0.2169666439294815, 0.17541900277137756, 0.34852319955825806, 0.29904353618621826, 0.3583068549633026, 0.0660485103726387, 0.0772518739104271, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.1452419012784958, 0.08285138756036758, 0.20162978768348694, 0.10332676023244858, 0.7324197292327881, 0.1815183311700821, 0.27558720111846924, 0.41944485902786255, 0.4614993929862976, 0.7035390734672546, 0.14779764413833618, 0.07484183460474014, 0.09274464100599289, 0.1956741362810135, 0.4027537703514099, 0.17018413543701172, 0.15845544636249542, 0.03217604011297226, 0.027846908196806908, NaN, NaN, NaN, NaN, NaN, NaN], [0.06803880631923676, 0.0777740478515625, 0.3149954080581665, 0.17862020432949066, 0.9274848103523254, 0.6797788739204407, 0.28538215160369873, 0.04841757193207741, 0.524702250957489, 0.33268001675605774, 0.06556227803230286, 0.08207366615533829, 0.08443650603294373, 0.19301387667655945, 0.68314129114151, 0.7843886613845825, 0.24039600789546967, 0.0983721911907196, 0.035574402660131454, 0.04086223617196083, NaN, NaN, NaN, NaN, NaN], [0.004222579766064882, 0.012189013883471489, 0.38177239894866943, 0.23501808941364288, 0.3822557032108307, 0.273560494184494, 0.28252631425857544, 0.039307549595832825, 0.41269388794898987, 0.3037600517272949, 0.1617780327796936, 0.33094146847724915, 0.37525615096092224, 0.1388353556394577, 0.8142803907394409, 0.5916069149971008, 0.18943282961845398, 0.08566068857908249, 0.11778654158115387, 0.1818830519914627, 0.04465563967823982, NaN, NaN, NaN, NaN], 
[0.0780838280916214, 0.07355974614620209, 0.01093215774744749, 0.22770193219184875, 0.008550305850803852, 0.06503485888242722, 0.5060688257217407, 0.02145100012421608, 0.43843212723731995, 0.6872871518135071, 0.1969044953584671, 0.45010682940483093, 0.7415768504142761, 0.3103433847427368, 0.001054091495461762, 0.20113487541675568, 0.21400661766529083, 0.41673052310943604, 0.3260871469974518, 0.620118260383606, 0.12724098563194275, 0.0004952864837832749, NaN, NaN, NaN], [0.3314567506313324, 0.06341477483510971, 0.5618032217025757, 0.642646074295044, 0.27415919303894043, 0.23788774013519287, 0.38833677768707275, 0.08984735608100891, 0.42147237062454224, 0.6564009785652161, 0.2928015887737274, 0.1047874391078949, 0.1023104265332222, 0.06365151703357697, 0.39097070693969727, 0.14560170471668243, 0.23420175909996033, 0.08592629432678223, 0.02493405155837536, 0.011453422717750072, 0.006046658381819725, 0.1451905518770218, 0.005812718998640776, NaN, NaN], [0.21756824851036072, 0.03937938064336777, 0.3266570568084717, 0.05877631530165672, 0.5281912088394165, 0.11102446913719177, 0.03890432044863701, 0.10487684607505798, 0.2815292179584503, 0.4750865697860718, 0.3058159351348877, 0.11602579057216644, 0.12021853774785995, 0.06692790240049362, 0.1190272718667984, 0.019106050953269005, 0.21307361125946045, 0.15337608754634857, 0.06824280321598053, 0.040861621499061584, 0.032932352274656296, 0.052440475672483444, 0.005818615201860666, 0.0524408333003521, NaN], [0.21100056171417236, 0.13406150043010712, 0.10563220083713531, 0.15389345586299896, 0.10192565619945526, 0.07836726307868958, 0.22881029546260834, 0.05055452138185501, 0.24765580892562866, 0.48160815238952637, 0.2201593518257141, 0.1761431246995926, 0.21236160397529602, 0.20979638397693634, 0.10962515324354172, 0.09009265154600143, 0.0623038187623024, 0.17415094375610352, 0.13285446166992188, 0.11576873064041138, 0.10801524668931961, 0.0743527039885521, 0.03413216769695282, 0.027520645409822464, 0.06626196205615997]], [[0.125, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0702696219086647, 0.2507307231426239, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.028418319299817085, 0.003963488154113293, 0.4144974946975708, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.13786309957504272, 0.03506092354655266, 0.02415982447564602, 0.10726116597652435, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.011229841969907284, 0.008138949982821941, 0.04613415151834488, 0.2518063187599182, 0.013397655449807644, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.0016812672838568687, 0.012760624289512634, 0.002261990448459983, 0.2769384980201721, 0.03090759925544262, 0.0014064738061279058, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.11822758615016937, 0.07095540314912796, 0.030966516584157944, 0.03516996279358864, 0.2070395052433014, 0.02684318646788597, 0.2317354679107666, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN], [0.23311708867549896, 0.026411496102809906, 0.011159970425069332, 0.03808103874325752, 0.017219573259353638, 0.006694006733596325, 0.001702688867226243, 0.009211051277816296, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 
[Elided: serialized attention-visualization data (notebook output). It contains per-layer, per-head attention weight matrices for the example translation "The animal didn't cross the street because it was too tired." The source tokens appear under "inp_inp"/"top_text" ("The_", "animal_", "didn_", "'_", "t_", "cross_", "the_", "street_", "because_", "it_", "was_", "too_", "tire", "d_") and the German output tokens under "bot_text" ("Das_", "Tier", "_", "über", "quer", "te_", "die_", "Straße_", "nicht_", ", _", "weil_", "es_", "zu_", "mü", "de_", "war_", ..., "._"). Decoder-side matrices are lower-triangular, with NaN entries marking masked future positions; encoder self-attention matrices are full 14x14 per head.]
0.17093990743160248, 0.055545274168252945, 0.23774300515651703, 0.047642335295677185, 0.2396271675825119, 0.07084424793720245, 0.05071293190121651, 0.15200014412403107, 0.17973174154758453, 0.16349640488624573, 0.16329222917556763], [0.08155515789985657, 0.04415197670459747, 0.09395420551300049, 0.06736686080694199, 0.009449290111660957, 0.007789341267198324, 0.08313233405351639, 0.018231436610221863, 0.2736586928367615, 0.12516330182552338, 0.14283257722854614, 0.03993181511759758, 0.11735112965106964, 0.037545330822467804, 0.095799021422863], [0.07989984005689621, 0.019307896494865417, 0.05061032995581627, 0.29983657598495483, 0.009587445296347141, 0.23453857004642487, 0.06259765475988388, 0.014452173374593258, 0.026213111355900764, 0.03952796012163162, 0.12968890368938446, 0.019515926018357277, 0.23016268014907837, 0.18980233371257782, 0.14884653687477112], [0.042069002985954285, 0.007410319056361914, 0.027750220149755478, 0.14348776638507843, 0.190275177359581, 0.0696464255452156, 0.09576459228992462, 0.08924749493598938, 0.16830699145793915, 0.14098002016544342, 0.2945949137210846, 0.08460760116577148, 0.11812892556190491, 0.2108343094587326, 0.28860458731651306], [0.509858250617981, 0.07021021842956543, 0.044154465198516846, 0.005825423635542393, 0.5241404175758362, 0.030089300125837326, 0.19222509860992432, 0.02549084462225437, 0.1939508020877838, 0.09437919408082962, 0.10883274674415588, 0.13631868362426758, 0.08004569262266159, 0.04784407094120979, 0.14005501568317413], [0.029798628762364388, 0.0011461747344583273, 0.00650657806545496, 0.02902117185294628, 0.007348767947405577, 0.012432223185896873, 0.018553903326392174, 0.006125486921519041, 0.008405826054513454, 0.057926055043935776, 0.04542696848511696, 0.21123111248016357, 0.05352021008729935, 0.2931033968925476, 0.1833699345588684], [0.01627730205655098, 0.0057758791372179985, 0.013731835409998894, 0.6289489269256592, 0.011782719753682613, 0.006108477246016264, 0.005309773609042168, 0.023312430828809738, 0.012817217037081718, 0.00939176045358181, 0.04320970177650452, 0.012798959389328957, 0.1585281491279602, 0.11795029044151306, 0.13285225629806519], [0.39748579263687134, 0.10528232902288437, 0.006042438093572855, 0.07306646555662155, 0.020484283566474915, 0.09288878738880157, 0.6331413388252258, 0.03478514030575752, 0.016230005770921707, 0.039869412779808044, 0.10224607586860657, 0.005181388463824987, 0.007975003682076931, 0.01008305512368679, 0.026732152327895164]], [[0.2484879046678543, 0.12593188881874084, 0.11472177505493164, 0.6318025588989258, 0.009745504707098007, 0.030495919287204742, 0.054615989327430725, 0.004801109898835421, 0.23875823616981506, 0.011562658473849297, 0.02087206020951271, 0.059635717421770096, 0.011483770795166492, 0.07716090232133865, 0.041850361973047256], [0.3294946551322937, 0.17723912000656128, 0.041080135852098465, 0.30134642124176025, 0.0073102316819131374, 0.049291279166936874, 0.0495959147810936, 0.0037847748026251793, 0.014987694099545479, 0.07676513493061066, 0.039059415459632874, 0.006041571032255888, 0.011380840092897415, 0.011979957111179829, 0.02782473713159561], [0.008675806224346161, 0.016726570203900337, 0.19906938076019287, 0.3167073726654053, 0.022006884217262268, 0.014510865323245525, 0.00237266905605793, 0.00938868336379528, 0.004848333541303873, 0.00305117666721344, 0.042285457253456116, 0.0026737553998827934, 0.017337674275040627, 0.0016427191440016031, 0.0027906473260372877], [0.06292864680290222, 0.010060630738735199, 0.07846219092607498, 0.3009726405143738, 
0.09911586344242096, 0.3769649565219879, 0.290684312582016, 0.048859626054763794, 0.015964722260832787, 0.02972962148487568, 0.25837212800979614, 0.050403933972120285, 0.052831199020147324, 0.44793814420700073, 0.12096201628446579], [0.0647541731595993, 0.06744952499866486, 0.010754776187241077, 0.15598785877227783, 0.08916914463043213, 0.4045051634311676, 0.5958212018013, 0.10594789683818817, 0.12025819718837738, 0.04822946712374687, 0.02913811057806015, 0.014846491627395153, 0.17111137509346008, 0.049513354897499084, 0.14188753068447113], [0.07069405168294907, 0.0006015333347022533, 0.0017680496675893664, 0.0010985832195729017, 0.0012869784841313958, 0.22278346121311188, 0.4465882480144501, 0.06128238886594772, 0.02642727456986904, 0.03756114840507507, 0.002607540925964713, 0.0018699204083532095, 0.0059012919664382935, 0.020283877849578857, 0.03355809301137924], [0.0861939862370491, 0.03346291184425354, 0.009915103204548359, 0.35010838508605957, 0.03437130153179169, 0.18394741415977478, 0.5006390810012817, 0.0633198693394661, 0.36160194873809814, 0.07578127831220627, 0.038500167429447174, 0.08213403075933456, 0.026455186307430267, 0.12013117223978043, 0.1146865040063858], [0.2484544962644577, 0.00790119543671608, 0.004407763481140137, 0.02700735628604889, 0.015422074124217033, 0.015295883640646935, 0.40846768021583557, 0.10706920176744461, 0.06367217004299164, 0.22094424068927765, 0.21221157908439636, 0.006999517325311899, 0.054566796869039536, 0.124799944460392, 0.09114839136600494], [0.1237153485417366, 0.029043834656476974, 0.07521974295377731, 0.04068650305271149, 0.002623512176796794, 0.008706655353307724, 0.03832445293664932, 0.14616532623767853, 0.1701044738292694, 0.20599642395973206, 0.11677426844835281, 0.2341107875108719, 0.06235762685537338, 0.003964806441217661, 0.15731573104858398], [0.034962959587574005, 0.023077068850398064, 0.034600574523210526, 0.14041800796985626, 0.0021679585333913565, 0.009290770627558231, 0.07274696230888367, 0.014187950640916824, 0.1371506154537201, 0.39440277218818665, 0.2198760211467743, 0.19940708577632904, 0.11203428357839584, 0.08552268147468567, 0.11737436801195145], [0.015330069698393345, 0.007386082783341408, 0.017500948160886765, 0.01906486414372921, 0.010120063088834286, 0.05364372953772545, 0.043298348784446716, 0.12658876180648804, 0.06039673835039139, 0.02238147333264351, 0.16429400444030762, 0.06984445452690125, 0.3043651580810547, 0.055543575435876846, 0.11423089355230331], [0.09644094854593277, 0.0058854687958955765, 0.03721459209918976, 0.0025620406959205866, 0.062300242483615875, 0.003563062520697713, 0.07219880819320679, 0.03924282267689705, 0.025451356545090675, 0.06598387658596039, 0.026776403188705444, 0.07250863313674927, 0.45021528005599976, 0.08199745416641235, 0.4220075309276581], [0.01460834126919508, 0.0005662022740580142, 0.0013911814894527197, 0.05315173417329788, 0.008028149604797363, 0.016604119911789894, 0.011740745045244694, 0.008678588084876537, 0.0025609249714761972, 0.01638207584619522, 0.018210044130682945, 0.014119945466518402, 0.06550943106412888, 0.34254926443099976, 0.04794229939579964], [0.05372002348303795, 0.14061135053634644, 0.018787089735269547, 0.0958278551697731, 0.0019092779839411378, 0.03348369151353836, 0.13957257568836212, 0.031220966950058937, 0.19735871255397797, 0.017847368493676186, 0.0589337982237339, 0.01900595612823963, 0.1276925951242447, 0.04769464209675789, 0.4384888708591461], [0.08416850119829178, 0.1088641807436943, 0.0573052242398262, 0.27551695704460144, 0.030813831835985184, 
0.18022866547107697, 0.10468263924121857, 0.09972096234560013, 0.31189021468162537, 0.3315774202346802, 0.2321816384792328, 0.034622836858034134, 0.14143656194210052, 0.04640315845608711, 0.09621720016002655]], [[0.130781888961792, 0.31469303369522095, 0.10550640523433685, 0.05234318599104881, 0.073336161673069, 0.022349786013364792, 0.04807984083890915, 0.1931842416524887, 0.06399697810411453, 0.042083337903022766, 0.026750531047582626, 0.11997608095407486, 0.008983415551483631, 0.03431839123368263, 0.019280044361948967], [0.1582711637020111, 0.14862558245658875, 0.20016248524188995, 0.08876624703407288, 0.11006557196378708, 0.14632253348827362, 0.04025046527385712, 0.010204354301095009, 0.017868297174572945, 0.059372395277023315, 0.02111685276031494, 0.04181571304798126, 0.025184988975524902, 0.09681157767772675, 0.11611668020486832], [0.23875439167022705, 0.3084685802459717, 0.14188633859157562, 0.026331612840294838, 0.0149313323199749, 0.09176106750965118, 0.03131069242954254, 0.10051372647285461, 0.03149634972214699, 0.11085867136716843, 0.014410188421607018, 0.02796255424618721, 0.034816499799489975, 0.025807565078139305, 0.01846306212246418], [0.3404518961906433, 0.24260303378105164, 0.15383434295654297, 0.17020593583583832, 0.011800014413893223, 0.014385397545993328, 0.09441643208265305, 0.12204645574092865, 0.13843503594398499, 0.045293405652046204, 0.010667533613741398, 0.19693949818611145, 0.10281307995319366, 0.01422606036067009, 0.06984427571296692], [0.002873742487281561, 0.008706165477633476, 0.35573768615722656, 0.0015586970839649439, 0.015496796928346157, 0.003392455168068409, 0.01149011217057705, 0.01891980692744255, 0.016394488513469696, 0.003960000351071358, 0.0035995631478726864, 0.008501716889441013, 0.018164046108722687, 0.004727588500827551, 0.013562880456447601], [0.044807154685258865, 0.02788197249174118, 0.03947468474507332, 0.1271299421787262, 0.17640650272369385, 0.25110092759132385, 0.08349309861660004, 0.02069718949496746, 0.45751577615737915, 0.039922621101140976, 0.1781769096851349, 0.002931024879217148, 0.16567888855934143, 0.1177627220749855, 0.5156693458557129], [0.005990047473460436, 0.04782475531101227, 0.01399919856339693, 0.010489771142601967, 0.06132129579782486, 0.030459748581051826, 0.010153756476938725, 0.3387801945209503, 0.06446883827447891, 0.007243711035698652, 0.00693717272952199, 0.020023254677653313, 0.007285784464329481, 0.009139767847955227, 0.0044054011814296246], [0.020405659452080727, 0.00729386368766427, 0.06661678105592728, 0.08295443654060364, 0.20373474061489105, 0.3448184132575989, 0.04295210912823677, 0.20947468280792236, 0.03081577830016613, 0.010805373080074787, 0.17521467804908752, 0.06567652523517609, 0.012400656938552856, 0.10652147233486176, 0.07385163754224777], [0.21573591232299805, 0.13175059854984283, 0.04085814207792282, 0.04119405150413513, 0.03551999852061272, 0.023009058088064194, 0.2751774191856384, 0.047030266374349594, 0.14272502064704895, 0.20153193175792694, 0.09575672447681427, 0.11327007412910461, 0.008532780222594738, 0.053245026618242264, 0.08952803909778595], [0.2778390347957611, 0.11423225700855255, 0.3034791946411133, 0.34643107652664185, 0.5395972728729248, 0.06785042583942413, 0.13029156625270844, 0.18737749755382538, 0.029348008334636688, 0.16667678952217102, 0.021040884777903557, 0.008728248998522758, 0.037633832544088364, 0.02033349499106407, 0.03947347402572632], [0.4898838996887207, 0.08082167059183121, 0.07362432777881622, 0.02171795442700386, 0.1333591789007187, 0.09000474214553833, 
0.13501934707164764, 0.03979193791747093, 0.19113953411579132, 0.13522492349147797, 0.16557832062244415, 0.16255514323711395, 0.07687958329916, 0.15948235988616943, 0.09843874722719193], [0.045906297862529755, 0.18602333962917328, 0.4082620143890381, 0.010370302945375443, 0.04507172852754593, 0.19693265855312347, 0.04021843150258064, 0.027866821736097336, 0.1546991914510727, 0.33766424655914307, 0.09260500222444534, 0.05066358670592308, 0.05655887722969055, 0.13157807290554047, 0.06850539147853851], [0.020344020798802376, 0.0030158585868775845, 0.004445259924978018, 0.022628312930464745, 0.030150510370731354, 0.027700912207365036, 0.026311388239264488, 0.012862108647823334, 0.07009940594434738, 0.24656175076961517, 0.10596039146184921, 0.1143152266740799, 0.3679012656211853, 0.0068145813420414925, 0.04171491786837578], [0.004749340936541557, 0.00182742765173316, 0.0021293568424880505, 0.00394084258005023, 0.004750867374241352, 5.3125138947507367e-05, 0.0026011874433606863, 0.000718552153557539, 0.002356230979785323, 0.00125187449157238, 0.0021339249797165394, 0.00044074622564949095, 0.2141493707895279, 0.0029175111558288336, 0.00477015832439065], [0.12991508841514587, 0.06724811345338821, 0.06397818773984909, 0.15923364460468292, 0.2566852867603302, 0.07963784784078598, 0.09182894974946976, 0.040824584662914276, 0.21298912167549133, 0.2517295181751251, 0.2285410314798355, 0.11115844547748566, 0.1010512113571167, 0.3968040943145752, 0.1870165765285492]], [[0.06147387623786926, 0.0657946914434433, 0.22564710676670074, 0.1299343705177307, 0.021580645814538002, 0.08992400765419006, 0.025479430332779884, 0.04823821783065796, 0.05891237407922745, 0.016958819702267647, 0.0021926285699009895, 0.017513686791062355, 0.09859969466924667, 0.16368542611598969, 0.038398925215005875], [0.029852252453565598, 0.26626214385032654, 0.14803646504878998, 0.038784727454185486, 0.07803148031234741, 0.006210723891854286, 0.0026457132771611214, 0.006018034182488918, 0.05453306809067726, 0.002730109030380845, 0.015730326995253563, 0.0017557059181854129, 0.034912969917058945, 0.03208531066775322, 0.03983413055539131], [0.01053018867969513, 0.02744918502867222, 0.2530466914176941, 0.05846027657389641, 0.1744728684425354, 0.011957419104874134, 0.003304906887933612, 0.00205883732996881, 0.00874510407447815, 0.0014524421421810985, 0.0009729861048981547, 0.0026561047416180372, 0.0023208027705550194, 0.0038251704536378384, 0.005045189522206783], [0.016039762645959854, 0.05755838379263878, 0.10756286233663559, 0.03799062967300415, 0.5738711953163147, 0.061907339841127396, 0.128611221909523, 0.01847657933831215, 0.06501789391040802, 0.015564735978841782, 0.0016139671206474304, 0.014343881979584694, 0.020734043791890144, 0.14008449018001556, 0.13515408337116241], [0.005847899243235588, 0.11914067715406418, 0.01715121790766716, 0.3517457842826843, 0.0661543607711792, 0.07493122667074203, 0.012425812892615795, 0.11745280772447586, 0.08440648764371872, 0.020029406994581223, 0.05165768414735794, 0.04094480350613594, 0.024548601359128952, 0.005826729815453291, 0.13841456174850464], [0.015926362946629524, 0.007578620687127113, 0.1226087138056755, 0.030128292739391327, 0.03851892054080963, 0.3367418944835663, 0.01694057136774063, 0.09829536825418472, 0.0361555740237236, 0.10537439584732056, 0.007450005039572716, 0.029753634706139565, 0.22920416295528412, 0.01793695241212845, 0.05258304625749588], [0.01326388493180275, 0.05337870866060257, 0.047661036252975464, 0.08615607023239136, 0.12425915151834488, 0.4180251955986023, 
0.04702466353774071, 0.0717325434088707, 0.05138256773352623, 0.06877672672271729, 0.0152205191552639, 0.0719875767827034, 0.1666427105665207, 0.13322126865386963, 0.053655143827199936], [0.026802292093634605, 0.003955241292715073, 0.0206829272210598, 0.02742936834692955, 0.06016179919242859, 0.15127348899841309, 0.06774158030748367, 0.2981398105621338, 0.05239749699831009, 0.09365928173065186, 0.035629644989967346, 0.020771589130163193, 0.13655303418636322, 0.012941722758114338, 0.05640798062086105], [0.06469012051820755, 0.1851334124803543, 0.08788572251796722, 0.19977343082427979, 0.00846380740404129, 0.03702360764145851, 0.0876760184764862, 0.046302031725645065, 0.11564433574676514, 0.05180440843105316, 0.49518024921417236, 0.1649368405342102, 0.030481798574328423, 0.10461966693401337, 0.07739346474409103], [0.020106524229049683, 0.01925482228398323, 0.006043681409209967, 0.01652396097779274, 0.001572003006003797, 0.005779887083917856, 0.015335858799517155, 0.03537710756063461, 0.009967570193111897, 0.09144406765699387, 0.43651703000068665, 0.2613205015659332, 0.0483890138566494, 0.06553913652896881, 0.055434126406908035], [0.07980967313051224, 0.14815203845500946, 0.09271827340126038, 0.004086778499186039, 0.010790406726300716, 0.0747552439570427, 0.10995902121067047, 0.04728228971362114, 0.1809520274400711, 0.025821411982178688, 0.06657237559556961, 0.1431768387556076, 0.19449584186077118, 0.20780201256275177, 0.10148976743221283], [0.05537823587656021, 0.008725662715733051, 0.0058344281278550625, 0.029011448845267296, 0.048424966633319855, 0.047911662608385086, 0.16901308298110962, 0.17019973695278168, 0.011648884043097496, 0.08953043073415756, 0.5360274910926819, 0.10330803692340851, 0.078437939286232, 0.12202966213226318, 0.11905822902917862], [0.01546903420239687, 0.0005347061669453979, 0.0015839362749829888, 0.053056132048368454, 0.23614321649074554, 0.013318118639290333, 0.051473915576934814, 0.011966699734330177, 0.007302975282073021, 0.09275621920824051, 0.06646261364221573, 0.010813506320118904, 0.13289499282836914, 0.22826357185840607, 0.04386172071099281], [0.009458722546696663, 0.0058342707343399525, 0.012789146974682808, 0.005895438138395548, 0.026010286062955856, 0.057482823729515076, 0.005663284566253424, 0.005727604031562805, 0.0033144087065011263, 0.011671853251755238, 0.00424896739423275, 0.056589994579553604, 0.20401620864868164, 0.03777612745761871, 0.03114682249724865], [0.0012354525970295072, 0.034024473279714584, 0.10020612925291061, 0.02267461270093918, 0.08676987141370773, 0.14216794073581696, 0.0033775768242776394, 0.07320579141378403, 0.07390473037958145, 0.0168889332562685, 0.00386308366432786, 0.02569040097296238, 0.24664165079593658, 0.2674221694469452, 0.014589445665478706]], [[0.2643359303474426, 0.2943609654903412, 0.10517127066850662, 0.013473477214574814, 0.17808614671230316, 0.05031028389930725, 0.0477585569024086, 0.13444076478481293, 0.0626431554555893, 0.05089121311903, 0.025438696146011353, 0.12666909396648407, 0.015911895781755447, 0.08822031319141388, 0.09637932479381561], [0.02893858775496483, 0.3286381959915161, 0.024464154615998268, 0.015645690262317657, 0.07065004110336304, 0.03320073336362839, 0.0035833900328725576, 0.002133443485945463, 0.0077736834064126015, 0.0014096481027081609, 0.006704544182866812, 0.0034484381321817636, 0.010553284548223019, 0.029550330713391304, 0.0064092278480529785], [0.0403970405459404, 0.029290249571204185, 0.2564694881439209, 0.03103366494178772, 0.01930038072168827, 0.0007984130643308163, 
0.0024861868005245924, 0.013074777089059353, 0.025626862421631813, 0.0022637112997472286, 0.010511897504329681, 0.03038576804101467, 0.00803295336663723, 0.000980974524281919, 0.040744345635175705], [0.23322375118732452, 0.23003342747688293, 0.24563531577587128, 0.07496963441371918, 0.029645830392837524, 0.0015733843902125955, 0.048427432775497437, 0.07474764436483383, 0.005064227152615786, 0.006064139772206545, 0.00639030896127224, 0.0023683567997068167, 0.0201968252658844, 0.0057837339118123055, 0.030518243089318275], [0.009382463060319424, 0.004108777269721031, 0.355550616979599, 0.0026344929356127977, 0.036474164575338364, 0.0013674235669896007, 0.010420771315693855, 0.008167937397956848, 0.005904712714254856, 0.0164882093667984, 0.0014915319625288248, 0.00666471105068922, 0.007061991840600967, 0.006146776955574751, 0.03842667490243912], [0.340854674577713, 0.027831802144646645, 0.11495380103588104, 0.4507772624492645, 0.33573275804519653, 0.07158998399972916, 0.3054116368293762, 0.09558256715536118, 0.008191889151930809, 0.08007357269525528, 0.08199689537286758, 0.011630101129412651, 0.016172919422388077, 0.020448284223675728, 0.05253906920552254], [0.0825798362493515, 0.09406770020723343, 0.044158000499010086, 0.06245531886816025, 0.15669509768486023, 0.1018981784582138, 0.17849969863891602, 0.1823071539402008, 0.1725231111049652, 0.14688736200332642, 0.027769910171628, 0.1729786992073059, 0.04907526820898056, 0.09640378504991531, 0.07928813993930817], [0.04138464853167534, 0.0045098732225596905, 0.098704032599926, 0.034942083060741425, 0.1842936873435974, 0.1567782759666443, 0.14141200482845306, 0.1953822374343872, 0.09936889261007309, 0.281032919883728, 0.13522183895111084, 0.012650868855416775, 0.02501768246293068, 0.2133605033159256, 0.14542686939239502], [0.05831298604607582, 0.07845572382211685, 0.00935202743858099, 0.09348727762699127, 0.2554629147052765, 0.026818757876753807, 0.15820558369159698, 0.09712891280651093, 0.18406683206558228, 0.297629177570343, 0.011888068169355392, 0.04674078896641731, 0.01729435659945011, 0.04945852607488632, 0.08047669380903244], [0.030211733654141426, 0.004252443555742502, 0.044400423765182495, 0.0032993308268487453, 0.029341043904423714, 0.14371474087238312, 0.17894455790519714, 0.12369092553853989, 0.48359414935112, 0.06321088969707489, 0.05475561320781708, 0.3139732778072357, 0.086760014295578, 0.13208359479904175, 0.2905256450176239], [0.06285266578197479, 0.0062216646037995815, 0.016913438215851784, 0.007285475265234709, 0.01629750058054924, 0.004617355298250914, 0.06147269159555435, 0.21831700205802917, 0.11657348275184631, 0.39258062839508057, 0.17390909790992737, 0.3519352376461029, 0.014494672417640686, 0.04437657818198204, 0.04845427721738815], [0.014810703694820404, 0.027867808938026428, 0.00787208043038845, 0.003661711234599352, 0.06816401332616806, 0.014048570767045021, 0.04280591011047363, 0.04519394412636757, 0.07874996215105057, 0.2074531614780426, 0.12078044563531876, 0.53052818775177, 0.035032909363508224, 0.1398327797651291, 0.02986292913556099], [0.011430865153670311, 0.002694258699193597, 0.03896895423531532, 0.04504057392477989, 0.00808126013725996, 0.01048098411411047, 0.012571780942380428, 0.0054772221483290195, 0.07419075071811676, 0.02193005569279194, 0.3994891941547394, 0.15694338083267212, 0.3065741956233978, 0.022703034803271294, 0.07852455973625183], [0.0007813395350240171, 4.470362910069525e-06, 0.0010683261789381504, 0.022204171866178513, 0.0022952572908252478, 4.198186070425436e-05, 0.0009061718010343611, 
0.0006557627930305898, 0.0009219115017913282, 0.0006920882733538747, 0.005404994357377291, 0.012070748023688793, 0.21383939683437347, 0.0026518681552261114, 0.0011399114737287164], [0.03732156753540039, 0.14082211256027222, 0.08218222856521606, 0.02148711122572422, 0.037640467286109924, 0.011636778712272644, 0.01611051708459854, 0.06724098324775696, 0.20042963325977325, 0.035641491413116455, 0.045655738562345505, 0.041121501475572586, 0.23917138576507568, 0.01630677469074726, 0.2854580283164978]]], [[[0.00028402332100085914, 1.9304454923485537e-08, 1.5483598847509938e-09, 7.885660006923256e-12, 2.7246130684943637e-08, 2.9440096113830805e-05, 4.3406546978985716e-07, 3.7434634236888087e-07, 3.9264233464564313e-07, 1.911867819615054e-08, 6.894639170695882e-08, 1.9322192201798316e-06, 1.594805780769093e-06, 1.097217136702966e-06, 0.25163131952285767], [0.8221166729927063, 0.0031213052570819855, 7.842657214496285e-05, 5.977510153520882e-10, 6.043178735204435e-10, 7.336016096815001e-07, 0.0001510237343609333, 0.000765863514970988, 0.0003504687047097832, 5.704807790607447e-07, 3.8402351520971933e-08, 3.7901799032624695e-07, 1.534954208182171e-05, 4.934078606311232e-05, 0.00023439944197889417], [0.0023944040294736624, 0.796754002571106, 0.004422985017299652, 9.068900226338883e-07, 5.795331436964091e-10, 1.0343059742012883e-08, 4.4964113499190717e-07, 0.0014743957435712218, 0.00028717826353386045, 7.994436600711197e-05, 3.3569827451174206e-07, 1.215876466176269e-07, 7.940250839055807e-07, 4.835407253267476e-06, 2.585098854979151e-07], [4.3931080995207594e-11, 0.0005229745293036103, 0.5791732668876648, 0.0002632129180710763, 3.316774765949049e-08, 1.7754019825469425e-12, 1.4596207272357664e-14, 1.5350217763554497e-09, 1.2882580335826788e-07, 7.457471838279162e-06, 1.2410231420290074e-06, 2.736720361440348e-08, 3.621486097116211e-11, 3.919724787804224e-12, 2.306477925317907e-12], [3.994035801418473e-14, 1.3595737036187217e-10, 5.270875135465758e-06, 0.5513067841529846, 0.00020578903786372393, 1.9226330039145978e-07, 1.181193272532799e-12, 2.80986930771554e-13, 9.120337812881449e-14, 1.37843805814164e-10, 7.154308718781976e-07, 1.5133276747292257e-06, 7.425698944629744e-10, 2.2010659354171347e-13, 1.8997327582565005e-12], [2.3444651168352815e-12, 2.1774425253313912e-13, 1.857566878094019e-09, 0.00030468025943264365, 0.9472002983093262, 0.00010681805724743754, 2.00606624645161e-08, 5.2167251502746245e-14, 1.354494091723496e-15, 5.737065011425513e-13, 8.729777456473187e-10, 3.2425006793346256e-05, 7.676636641917867e-07, 1.870739785303499e-09, 2.3914221713994266e-09], [3.644098217625569e-11, 3.867062572937563e-11, 4.1057553190615437e-11, 1.5412249254609378e-09, 0.018834512680768967, 0.505605936050415, 0.0010763276368379593, 5.434728933551014e-08, 2.6194791127864825e-11, 6.074670846504876e-15, 3.814499497517554e-12, 1.2291486939375318e-07, 9.572526323609054e-06, 4.437842653715052e-05, 7.18067713023629e-06], [5.002242687623948e-05, 2.445471238843311e-07, 7.217475506138271e-09, 2.943958878759423e-12, 1.391844648424012e-07, 0.0035048718564212322, 0.755942702293396, 0.0011242764303460717, 1.4866960555082187e-05, 9.753278740198823e-11, 3.792431321238132e-13, 1.6398679289486573e-11, 1.3850768709744443e-07, 0.0002873632765840739, 2.565975592005998e-05], [7.748224284398475e-09, 3.667011867491965e-07, 1.7906526261768363e-09, 1.001209222569038e-16, 4.707358499311462e-15, 2.921879960204876e-10, 4.77575849799905e-06, 0.9355171918869019, 1.7088919776142575e-05, 1.5246609308405823e-08, 1.546373502880373e-14, 
1.9256968477537417e-16, 2.8356877952137637e-15, 6.199032398512827e-10, 3.679770266273863e-09], [6.04271771509346e-11, 2.349539499846287e-06, 6.254656170767703e-08, 2.0915530592191534e-12, 3.303753013789688e-16, 1.0466700578893717e-14, 7.288482968201282e-13, 0.0006303040427155793, 0.47335511445999146, 8.928982424549758e-05, 1.5872458902776998e-08, 1.3611594998645584e-14, 1.3777586457132233e-16, 1.589055302510104e-15, 8.100658338561217e-11], [3.812023474658588e-10, 1.421315573679749e-06, 2.2867025109007955e-06, 2.6682736020688935e-08, 3.632111755455525e-12, 1.6831340872913367e-14, 3.240909670081289e-14, 1.4920277635610546e-07, 0.0005182845052331686, 0.39297640323638916, 0.0007259719423018396, 1.2580667174688642e-08, 3.7229049595736974e-13, 2.157145159519631e-15, 1.0612778433838344e-09], [6.84109713322556e-10, 1.9775532322796607e-08, 5.041609938416514e-07, 0.00017906920402310789, 1.631619738873269e-06, 2.0158734681530177e-09, 9.65507530290054e-15, 4.2181228128435055e-12, 8.564649545128589e-10, 0.00023218656133394688, 0.6439363956451416, 0.000818322179839015, 1.3831699163802114e-07, 2.1358659198916774e-12, 5.4572883101400294e-08], [1.4084274191361601e-08, 2.1930364191291574e-09, 7.004614666072939e-09, 2.0828078959311824e-06, 6.64705439703539e-05, 3.6118690331932157e-06, 4.0857584676645686e-11, 1.0090924406833124e-12, 5.430448080009356e-15, 6.815135122906213e-09, 0.0007384128402918577, 0.9033229351043701, 0.0037223652470856905, 5.428325380307797e-07, 5.097080588711833e-07], [3.370899046006848e-11, 1.5044922772877722e-12, 1.903236411786996e-13, 5.2399131041103164e-12, 5.3600892613303586e-09, 3.287689196440624e-07, 1.293990137263279e-09, 3.2395277866498207e-13, 8.98320316581696e-19, 7.591717251043266e-18, 2.4333673097343134e-12, 7.08575316821225e-05, 0.3025490641593933, 0.00011370918218744919, 1.7842703314840946e-08], [0.0009491983219049871, 3.734114216058515e-05, 0.00010643315181368962, 4.299266220186837e-05, 0.0019948105327785015, 0.012520392425358295, 0.0005770812276750803, 0.00013455892622005194, 0.0002518744731787592, 0.0005399127840064466, 0.0017743584467098117, 0.004756112117320299, 0.00398082984611392, 0.002925803419202566, 0.1746407300233841]], [[0.1577264666557312, 0.03251823037862778, 0.4939506947994232, 0.8334789872169495, 0.6927971243858337, 0.3147047460079193, 0.7604361176490784, 0.11822030693292618, 0.7022377848625183, 0.6516091823577881, 0.14691989123821259, 0.2232232689857483, 0.14339210093021393, 0.3761228322982788, 0.014605461619794369], [0.028655482456088066, 0.14083503186702728, 0.08485368639230728, 0.8299343585968018, 0.8304422497749329, 0.5664599537849426, 0.834579586982727, 0.7438958287239075, 0.8452481031417847, 0.8614712953567505, 0.3640905022621155, 0.805733323097229, 0.3481642007827759, 0.795884370803833, 0.05269646272063255], [0.02106422185897827, 0.010846637189388275, 0.073356993496418, 0.017661061137914658, 0.8741048574447632, 0.5687165856361389, 0.5249210000038147, 0.5693489909172058, 0.5103186368942261, 0.5253384709358215, 0.6472406387329102, 0.4561024308204651, 0.1524587720632553, 0.45141565799713135, 0.034538887441158295], [0.2203565090894699, 0.02154199220240116, 0.007279311306774616, 0.003464027540758252, 0.18461424112319946, 0.07773485034704208, 0.7297388315200806, 0.2260110229253769, 0.6848539113998413, 0.2328294813632965, 0.22646839916706085, 0.3173597455024719, 0.10388152301311493, 0.06158056855201721, 0.11330780386924744], [0.1574045568704605, 0.12516136467456818, 0.04707150533795357, 0.0032313871197402477, 0.19444315135478973, 0.046962298452854156, 
0.48863229155540466, 0.8290899991989136, 0.892469584941864, 0.6836395859718323, 0.83636474609375, 0.47956424951553345, 0.034452617168426514, 0.38761135935783386, 0.055785421282052994], [0.4389230012893677, 0.6133158802986145, 0.4783843159675598, 0.11230780929327011, 0.006951127201318741, 0.0644199401140213, 0.03406795859336853, 0.33251792192459106, 0.9552598595619202, 0.8827710747718811, 0.9276224970817566, 0.8325800895690918, 0.737617552280426, 0.745059609413147, 0.05149900168180466], [0.3395847976207733, 0.09897124767303467, 0.16763220727443695, 0.1671983003616333, 0.049412358552217484, 0.007114487700164318, 0.3340696394443512, 0.018166696652770042, 0.7235669493675232, 0.9639523029327393, 0.851059079170227, 0.7306914925575256, 0.5801126956939697, 0.8017169237136841, 0.08099871873855591], [0.44394704699516296, 0.6082286238670349, 0.37166181206703186, 0.3715074956417084, 0.35315781831741333, 0.10853563994169235, 0.013190319761633873, 0.07092351466417313, 0.03435605764389038, 0.25131845474243164, 0.921750545501709, 0.8745512366294861, 0.7473158240318298, 0.834020733833313, 0.1216883435845375], [0.18251584470272064, 0.8759727478027344, 0.1439245641231537, 0.06640342622995377, 0.060579828917980194, 0.2710072100162506, 0.011089610867202282, 0.034396518021821976, 0.1700025051832199, 0.043876904994249344, 0.14450228214263916, 0.9449294805526733, 0.9689385294914246, 0.939329981803894, 0.07954179495573044], [0.32071176171302795, 0.7452729344367981, 0.11999625712633133, 0.08053360879421234, 0.3748469650745392, 0.31863275170326233, 0.028054066002368927, 0.2197551280260086, 0.01771731488406658, 0.23943577706813812, 0.01906767673790455, 0.8113164901733398, 0.9739595055580139, 0.9691897630691528, 0.21732129156589508], [0.6261264085769653, 0.6649302244186401, 0.5194191336631775, 0.6324451565742493, 0.6771988272666931, 0.7814968228340149, 0.4118405878543854, 0.3728334903717041, 0.03296521306037903, 0.008678224869072437, 0.6047253012657166, 0.11251461505889893, 0.21560458838939667, 0.9244948625564575, 0.10127653181552887], [0.3176693320274353, 0.5172579884529114, 0.1793123036623001, 0.37762320041656494, 0.23678036034107208, 0.5621929168701172, 0.08773050457239151, 0.24525783956050873, 0.010828782804310322, 0.025829488411545753, 0.0057976157404482365, 0.08708162605762482, 0.04166324809193611, 0.5714256167411804, 0.16898052394390106], [0.6460146307945251, 0.8194199800491333, 0.48921409249305725, 0.6910595297813416, 0.5259124636650085, 0.6389046311378479, 0.3241840600967407, 0.7817367911338806, 0.17853572964668274, 0.1606196016073227, 0.06383053213357925, 0.007355134002864361, 0.02128707617521286, 0.02206379547715187, 0.23354344069957733], [0.5992116332054138, 0.6358246803283691, 0.47243836522102356, 0.5617506504058838, 0.6971379518508911, 0.6431114673614502, 0.39991113543510437, 0.8182389140129089, 0.2704472243785858, 0.20400457084178925, 0.059529319405555725, 0.06732083112001419, 0.008503233082592487, 0.06121496111154556, 0.2071741670370102], [0.2342938333749771, 0.5683650374412537, 0.6037701964378357, 0.7331977486610413, 0.7349027395248413, 0.6651985049247742, 0.23853524029254913, 0.2293619066476822, 0.48426058888435364, 0.7077944874763489, 0.5918195843696594, 0.8169012665748596, 0.7005065679550171, 0.4784330725669861, 0.015931207686662674]], [[0.04383472725749016, 0.02773081697523594, 0.016415273770689964, 0.024880478158593178, 0.005487722344696522, 0.14834517240524292, 0.010061212815344334, 0.013310510665178299, 0.03559315577149391, 0.022788431495428085, 0.016539618372917175, 0.022621937096118927, 
0.3853665292263031, 0.02895752713084221, 0.21785423159599304], [0.02212689444422722, 0.0360226184129715, 0.0007962794625200331, 0.005733562167733908, 0.0017349227564409375, 0.011109595187008381, 0.02015179581940174, 0.048344310373067856, 0.003794114338234067, 0.016348786652088165, 0.0018908409401774406, 0.010183308273553848, 0.04822028428316116, 0.011540568433701992, 0.21287554502487183], [0.19621919095516205, 0.02568935602903366, 0.012553256005048752, 0.05958101898431778, 0.0049527534283697605, 0.009129918180406094, 0.035662900656461716, 0.006033026147633791, 0.01979534700512886, 0.016174430027604103, 0.025959551334381104, 0.017891131341457367, 0.21532145142555237, 0.010915487073361874, 0.2776879370212555], [0.22681212425231934, 0.26364389061927795, 0.1368870735168457, 0.07472710311412811, 0.004966794513165951, 0.17209400236606598, 0.07595591247081757, 0.10330677032470703, 0.009879215620458126, 0.30214887857437134, 0.027453631162643433, 0.07928238064050674, 0.6068928837776184, 0.0009245484252460301, 0.41711828112602234], [0.03220081329345703, 0.07110226154327393, 0.19687172770500183, 0.32465922832489014, 0.06123804301023483, 0.009123058058321476, 0.008925903588533401, 0.001694322214461863, 0.009767607785761356, 0.012425252236425877, 0.021234901621937752, 0.006749649532139301, 0.022427640855312347, 0.00419656652957201, 0.11337225884199142], [0.1499132513999939, 0.1588381826877594, 0.006192722357809544, 0.06905046850442886, 0.021936854347586632, 0.04223879054188728, 0.01654554158449173, 0.012800824828445911, 0.001194898271933198, 0.011350413784384727, 0.0011690479004755616, 0.03650015965104103, 0.0330234132707119, 0.032408226281404495, 0.30060991644859314], [0.10197536647319794, 0.32784661650657654, 0.22266407310962677, 0.37194594740867615, 0.4840903878211975, 0.2562866806983948, 0.20682689547538757, 0.01685171388089657, 0.02662164717912674, 0.01744299754500389, 0.07043293118476868, 0.06053447723388672, 0.13449640572071075, 0.0437617152929306, 0.15905345976352692], [0.04155902937054634, 0.02725875750184059, 0.06621034443378448, 0.15740959346294403, 0.22226983308792114, 0.11737026274204254, 0.021176597103476524, 0.037896860390901566, 0.001983239781111479, 0.07737525552511215, 0.040612466633319855, 0.036445699632167816, 0.04206009954214096, 0.005294053349643946, 0.22695806622505188], [0.3731417655944824, 0.020610323175787926, 0.04687204957008362, 0.19942151010036469, 0.0219199787825346, 0.023319954052567482, 0.607546865940094, 0.0038317576982080936, 0.05746426433324814, 0.0039819530211389065, 0.0020286834333091974, 0.023514816537499428, 0.0007224131841212511, 0.0017132725333794951, 0.31377115845680237], [0.007707278709858656, 0.04994801804423332, 0.0602150596678257, 0.1843070536851883, 0.023052150383591652, 0.00867108628153801, 0.0030793596524745226, 0.008175634779036045, 0.3707427382469177, 0.032583341002464294, 0.030614105984568596, 0.003414844162762165, 0.0027733321767300367, 0.00039667857345193624, 0.06665757298469543], [0.06275568902492523, 0.15385569632053375, 0.07121506333351135, 0.04657430946826935, 0.08974524587392807, 0.017753345891833305, 0.09537442773580551, 0.08409535884857178, 0.4617481529712677, 0.05371565744280815, 0.051210206001996994, 0.014556556940078735, 0.0261379461735487, 0.0015151489060372114, 0.25993233919143677], [0.037524934858083725, 0.08964382112026215, 0.11503562331199646, 0.2385229468345642, 0.14595970511436462, 0.01507873460650444, 0.07354842126369476, 0.014194677583873272, 0.01029899064451456, 0.3145633935928345, 0.08443433046340942, 0.02799280546605587, 
0.006364578381180763, 0.0011598452692851424, 0.25597554445266724], [0.03498825803399086, 0.003427299438044429, 0.012860815972089767, 0.00960747804492712, 0.0073430403135716915, 0.002194140339270234, 0.020218953490257263, 0.04016692563891411, 0.0035721054300665855, 0.11439335346221924, 0.03179614990949631, 0.0055262502282857895, 0.08811097592115402, 0.0019241927657276392, 0.31578439474105835], [0.0003122057532891631, 0.0005657155998051167, 0.0003099576279055327, 0.018182117491960526, 8.608390635345131e-05, 0.00029685357003472745, 0.00030423246789723635, 0.0039575002156198025, 0.00041145391878671944, 0.0009832053910940886, 0.0007515411707572639, 0.006357411853969097, 0.3007054328918457, 0.00010537439811741933, 0.00161165336612612], [0.052370160818099976, 0.019386928528547287, 0.0404941625893116, 0.16087706387043, 0.14014431834220886, 0.0561581589281559, 0.1907973736524582, 0.027806226164102554, 0.022970959544181824, 0.05846026912331581, 0.09902504831552505, 0.038958851248025894, 0.016928229480981827, 0.04114920645952225, 0.14461401104927063]], [[0.1774463951587677, 0.26868411898612976, 0.03527391701936722, 0.01705012284219265, 0.00047759010340087116, 0.006241941824555397, 0.0031507122330367565, 0.2944689095020294, 0.038735195994377136, 0.003944840747863054, 0.004385389853268862, 0.004225992131978273, 0.03986744210124016, 0.00549504067748785, 0.07870971411466599], [0.00027908835909329355, 0.005506355315446854, 0.001626787707209587, 0.13775548338890076, 0.0008261757320724428, 0.00028156363987363875, 0.0002459189563523978, 0.0025131029542535543, 0.0009445812902413309, 0.001017659087665379, 0.002250042976811528, 0.0015115974238142371, 0.0017954352078959346, 0.0006745054270140827, 0.21780018508434296], [0.021244889125227928, 0.1178143173456192, 0.008956437930464745, 0.14321640133857727, 0.023635229095816612, 0.3068733811378479, 0.15845780074596405, 0.3092327415943146, 0.0024783278349786997, 0.06481246650218964, 0.008965774439275265, 0.019083118066191673, 0.04005150496959686, 0.01112168189138174, 0.19139143824577332], [0.00042023108107969165, 0.0008873279439285398, 0.0019056870369240642, 0.007766622584313154, 0.23140135407447815, 0.5036463141441345, 0.015440672636032104, 0.008361338637769222, 0.001879698014818132, 0.0006688520661555231, 0.01133010908961296, 0.09722423553466797, 0.03314661607146263, 0.006971372757107019, 0.02285030484199524], [0.002678314223885536, 0.004764833487570286, 0.0003137744788546115, 0.0006636036559939384, 0.07552827149629593, 0.36051952838897705, 0.21059149503707886, 0.11911091953516006, 0.00013829045929014683, 0.00018005385936703533, 0.00021675217431038618, 0.007453517522662878, 0.004449300933629274, 0.03708551451563835, 0.13281597197055817], [0.008487393148243427, 0.014329447411000729, 0.005103611387312412, 0.0017902699764817953, 0.00018748251022771, 0.07080603390932083, 0.1865091174840927, 0.03389747440814972, 0.0026728338561952114, 0.00012369015894364566, 0.0001717496052151546, 0.0016556874616071582, 0.0035823825746774673, 0.018341869115829468, 0.2051384449005127], [0.0016413311241194606, 0.0038119314704090357, 0.0005628983490169048, 6.117233715485781e-05, 0.00011399950017221272, 0.0007454796577803791, 0.054881561547517776, 0.30246245861053467, 0.15667226910591125, 0.0004453254514373839, 0.0002609542279969901, 0.0001120980887208134, 0.0006856885738670826, 0.00573006272315979, 0.011146760545670986], [0.001007524086162448, 0.0022212164476513863, 0.00036003260174766183, 2.8946307793376036e-05, 1.0167077562073246e-05, 0.00012231878645252436, 0.00022786400222685188, 
0.03619853034615517, 0.005354967433959246, 0.003357505425810814, 0.0005030903848819435, 5.3131421736907214e-05, 4.2532476072665304e-05, 0.00010396525613032281, 0.2518664300441742], [0.004948427900671959, 0.0037361346185207367, 0.0040338728576898575, 0.0015943445032462478, 3.9753424061927944e-05, 0.00016846440848894417, 0.00017597683472558856, 0.003258961718529463, 0.06328149139881134, 0.43567389249801636, 0.03252503648400307, 0.006277996581047773, 3.634384847828187e-05, 2.672040500328876e-05, 0.030029548332095146], [0.00322673749178648, 0.017767680808901787, 0.0033617434091866016, 0.029219835996627808, 0.0009114073473028839, 0.002889687195420265, 0.00012576105655170977, 0.01574547402560711, 0.0018639388727024198, 0.6032934188842773, 0.1301620751619339, 0.04121570661664009, 0.0035096178762614727, 0.00032833084696903825, 0.3004224896430969], [0.033899419009685516, 0.07324357330799103, 0.00985381193459034, 0.017461512237787247, 0.019165849313139915, 0.07006029784679413, 0.01799222268164158, 0.013579626567661762, 0.00021177329472266138, 0.026033537462353706, 0.13102787733078003, 0.2077469676733017, 0.7029638886451721, 0.029135672375559807, 0.05414650961756706], [0.0015424743760377169, 0.007544125430285931, 0.010602829977869987, 0.0016127177514135838, 0.006006686482578516, 0.08514653891324997, 0.003129118587821722, 0.0036380700767040253, 1.298951519856928e-05, 6.919799488969147e-05, 0.0003367147874087095, 0.031529009342193604, 0.36636054515838623, 0.21289798617362976, 0.04463290795683861], [0.005653384607285261, 0.005221519153565168, 0.010438429191708565, 0.0023121859412640333, 0.0034771040081977844, 0.01156994141638279, 0.006321457680314779, 0.006196276750415564, 2.671167931111995e-05, 0.00012823205906897783, 0.00023895784397609532, 0.0015353390481323004, 0.06888392567634583, 0.3010466396808624, 0.05789510905742645], [0.0025978884659707546, 0.0011408268474042416, 0.0005907863960601389, 0.0073682027868926525, 5.514698841579957e-06, 0.0001586068101460114, 0.0016139426734298468, 0.002635698765516281, 2.2516995159094222e-05, 7.803570952091832e-06, 4.170422926108586e-06, 4.799172893399373e-05, 8.148160122800618e-05, 0.006126015912741423, 0.363029420375824], [0.018444720655679703, 0.036891017109155655, 0.08301377296447754, 0.04485299810767174, 0.0371856652200222, 0.0472157783806324, 0.022677546367049217, 0.017107300460338593, 0.03217196837067604, 0.03369837626814842, 0.021089907735586166, 0.018274538218975067, 0.020997297018766403, 0.034321803599596024, 0.1648317128419876]], [[0.2133164256811142, 0.025492815300822258, 0.20653849840164185, 0.07043907791376114, 0.10411863774061203, 0.3043566346168518, 0.06760577112436295, 0.5064103603363037, 0.08081910014152527, 0.27507925033569336, 0.5432406663894653, 0.27881479263305664, 0.16320040822029114, 0.2653813064098358, 0.11116068065166473], [0.015402763150632381, 0.2444494515657425, 0.0030522451270371675, 0.00048490799963474274, 0.0026600188575685024, 0.06905494630336761, 0.012269481085240841, 0.014592616818845272, 0.004205085337162018, 0.0039128707721829414, 0.0037959537003189325, 0.012499181553721428, 0.02713301219046116, 0.00563135975971818, 0.19437076151371002], [0.04805738478899002, 0.007929358631372452, 0.4969516396522522, 0.08109094947576523, 0.008613435551524162, 0.06128339096903801, 0.020970679819583893, 0.014624540694057941, 0.001800250494852662, 0.04372387006878853, 0.036881472915410995, 0.022519467398524284, 0.032134752720594406, 0.17586740851402283, 0.15428785979747772], [0.021660206839442253, 0.06483402103185654, 0.07990853488445282, 
0.8655576705932617, 0.10770212858915329, 0.042777951806783676, 0.004243527539074421, 0.04141073673963547, 0.0011197980493307114, 0.0010354480473324656, 0.007620980031788349, 0.009411019273102283, 0.023886993527412415, 0.8532692193984985, 0.009252375923097134], [0.03802541270852089, 0.5626884698867798, 0.3869370222091675, 0.012873617932200432, 0.11968709528446198, 0.014900745823979378, 0.02957817167043686, 0.018288375809788704, 0.005979553796350956, 0.03379013389348984, 0.016338851302862167, 0.01766209304332733, 0.8086205720901489, 0.08052025735378265, 0.13067808747291565], [0.0663566142320633, 0.02082742564380169, 0.009716741740703583, 0.003548208624124527, 0.0008020728128030896, 0.4547119140625, 0.03523911535739899, 0.0031006578356027603, 0.006736437324434519, 0.0009184986702166498, 0.0011584048625081778, 0.04212343320250511, 0.019468490034341812, 0.001240313402377069, 0.20631356537342072], [0.004470710642635822, 0.02006937935948372, 0.020011691376566887, 0.019766854122281075, 0.12330501526594162, 0.15558527410030365, 0.04160740226507187, 0.1780312955379486, 0.014384130015969276, 0.005233153235167265, 0.004123131278902292, 0.05227937176823616, 0.013469746336340904, 0.022578507661819458, 0.07922197878360748], [0.17898443341255188, 0.006772744003683329, 0.041487641632556915, 0.009575014933943748, 0.016729410737752914, 0.2668032944202423, 0.12321095168590546, 0.6781973838806152, 0.0025635806377977133, 0.01087682880461216, 0.002732365159317851, 0.020299792289733887, 0.0031363710295408964, 0.0008204782498069108, 0.05180227383971214], [0.12461799383163452, 0.013122161850333214, 0.02311752177774906, 0.0762406587600708, 0.09383975714445114, 0.007501720450818539, 0.07133012264966965, 0.008159258402884007, 0.13900579512119293, 0.006521029397845268, 0.021471921354532242, 0.012502939440310001, 0.0014349960256367922, 0.011674328707158566, 0.3848530650138855], [0.014992507174611092, 0.010756749659776688, 0.10129547864198685, 0.15213072299957275, 0.1363232582807541, 0.16603931784629822, 0.0040587568655610085, 0.505429208278656, 0.0025213102344423532, 0.05678342655301094, 0.20746274292469025, 0.04314066469669342, 0.0019582516979426146, 0.01985819824039936, 0.18090446293354034], [0.11427638679742813, 0.0123747568577528, 0.020808644592761993, 0.1336503028869629, 0.008563186042010784, 0.09643486887216568, 0.15193390846252441, 0.050255559384822845, 0.0023536821827292442, 0.3208443820476532, 0.021319447085261345, 0.003293143818154931, 0.027340535074472427, 0.01197835523635149, 0.09007034450769424], [0.15923485159873962, 0.11477550864219666, 0.21969333291053772, 0.09681756794452667, 0.07061057537794113, 0.1670638769865036, 0.1398637294769287, 0.059452954679727554, 0.00850652251392603, 0.062244825065135956, 0.03212086483836174, 0.10482167452573776, 0.05658517777919769, 0.03675027936697006, 0.24718202650547028], [0.004966236650943756, 0.001515651005320251, 0.002549123717471957, 0.006106496322900057, 0.00036676786839962006, 0.0014838402858003974, 0.008350875228643417, 0.003760475432500243, 9.004020830616355e-05, 0.003012964967638254, 0.000879374798387289, 0.0023141989950090647, 0.5349817276000977, 0.00013737898552790284, 0.18041089177131653], [3.0577066354453564e-05, 0.00011073229688918218, 0.0002722943318076432, 0.00012968607188668102, 3.925479541067034e-05, 9.284611587645486e-05, 1.1375399481039494e-05, 0.00013649655738845468, 2.160583608201705e-05, 3.872126853821101e-06, 4.776401965500554e-06, 5.892393892281689e-05, 0.3018791675567627, 0.0016873051645234227, 0.00020723984926007688], [0.0053407615050673485, 
0.002270790981128812, 0.015077341347932816, 0.008943013846874237, 0.01947944425046444, 0.013856526464223862, 0.021029049530625343, 0.011522401124238968, 0.019980257377028465, 0.021877266466617584, 0.03018842823803425, 0.06539047509431839, 0.04945596680045128, 0.008784771896898746, 0.1688213050365448]], [[0.09667091816663742, 0.08969368785619736, 0.16646768152713776, 0.01428181305527687, 0.1262292116880417, 0.03015410713851452, 0.00857650488615036, 0.013287652283906937, 0.013465571217238903, 0.009945754893124104, 0.03584994748234749, 0.07976501435041428, 0.013894102536141872, 0.07191513478755951, 0.16682514548301697], [0.00307486648671329, 0.2169581949710846, 0.015313946641981602, 0.005070009268820286, 0.13766343891620636, 0.036365993320941925, 0.013734312728047371, 0.012890451587736607, 0.00037508379318751395, 0.002069024136289954, 0.0038654597010463476, 0.007793853525072336, 0.006365353707224131, 0.02897111512720585, 0.19472798705101013], [0.013033762574195862, 0.0016745100729167461, 0.09789733588695526, 0.11557573825120926, 0.070904940366745, 0.039959780871868134, 0.06112189590930939, 0.005926545709371567, 0.05931684747338295, 0.06562750041484833, 0.015556245110929012, 0.2949027419090271, 0.09280899167060852, 0.18960142135620117, 0.2321171909570694], [0.0009253448224626482, 0.0011463494738563895, 0.0022407870274037123, 0.022192178294062614, 0.18083734810352325, 0.18906380236148834, 0.06340676546096802, 0.5556718111038208, 0.008876022882759571, 0.00195835973136127, 0.009641225449740887, 0.13488754630088806, 0.03692271187901497, 0.0069083282724022865, 0.19416382908821106], [0.020195724442601204, 0.0026999269612133503, 0.0047158133238554, 0.017117822542786598, 0.22690622508525848, 0.009801734238862991, 0.18513473868370056, 0.000916039280127734, 0.006044555455446243, 0.006021710112690926, 0.010346228256821632, 0.04500352963805199, 0.008295656181871891, 0.1122727021574974, 0.4271945357322693], [0.02983868308365345, 0.03651329129934311, 0.005064305383712053, 0.00043434457620605826, 0.001774297677911818, 0.10316617041826248, 0.10274261981248856, 0.570116400718689, 0.0018607155652716756, 0.004884766880422831, 0.0001192242925753817, 0.01004798710346222, 0.011760696768760681, 0.020220324397087097, 0.036799319088459015], [0.020830435678362846, 0.04066089913249016, 0.01340602245181799, 0.0007146665593609214, 0.05329689383506775, 0.010700137354433537, 0.06310626864433289, 0.1416247934103012, 0.059007443487644196, 0.009734428487718105, 0.023192377761006355, 0.030464952811598778, 0.011454294435679913, 0.06458231806755066, 0.29838618636131287], [0.04047420993447304, 0.05575861781835556, 0.0035385461524128914, 0.00047053993330337107, 0.010776028037071228, 0.0002634078555274755, 0.006466362159699202, 0.09768779575824738, 0.011305907741189003, 0.6455902457237244, 0.005685864482074976, 0.009437574073672295, 0.0014128481270745397, 0.0036261524073779583, 0.1994941532611847], [0.001968077849596739, 0.00013096239126753062, 0.014192181639373302, 0.0025808673817664385, 1.1752749742299784e-05, 7.090794679243118e-05, 8.489128958899528e-05, 7.501097570639104e-05, 0.005588378757238388, 0.00024033378576859832, 0.7911840081214905, 0.0006417080294340849, 0.00012212486763019115, 0.0026151463389396667, 0.024830428883433342], [0.007711799815297127, 0.006852409336715937, 0.005409319419413805, 0.029324712231755257, 0.0012151957489550114, 0.0014427780406549573, 0.0002848623844329268, 0.0011284908978268504, 0.00042831210885196924, 0.0035933239851146936, 0.2853389084339142, 0.04352247342467308, 0.0011324246879667044, 
0.0015205255476757884, 0.05924868583679199], [0.06333743035793304, 0.004831443540751934, 0.017261236906051636, 0.05893971398472786, 0.005950291641056538, 0.002105317311361432, 0.003185122972354293, 0.0028415010310709476, 0.004572128411382437, 0.007815520279109478, 0.07613655924797058, 0.10669270157814026, 0.027066918089985847, 0.03207901865243912, 0.4743220806121826], [0.10327208787202835, 0.004544916562736034, 0.05445469170808792, 0.010814311914145947, 0.026858847588300705, 0.011217474937438965, 0.07071709632873535, 0.05960191786289215, 0.0010665962472558022, 0.025403864681720734, 0.006131312809884548, 0.5720618963241577, 0.029676837846636772, 0.17520834505558014, 0.23297326266765594], [0.011414228938519955, 0.002735550981014967, 0.015156290493905544, 0.0027777000796049833, 0.009832575917243958, 0.015552453696727753, 0.017305195331573486, 0.004722784738987684, 4.7792200348339975e-05, 0.0034479873720556498, 0.0004017044266220182, 0.0011886333813890815, 0.18307994306087494, 0.2786843478679657, 0.04159880056977272], [0.0032662157900631428, 0.004168938845396042, 0.0016457620076835155, 0.0005059303948655725, 0.0003206630062777549, 0.000853654695674777, 0.010604765266180038, 0.005784912034869194, 0.00014833646127954125, 0.0001704594906186685, 5.580573997576721e-05, 0.0004662217397708446, 0.0009024841128848493, 0.025914611294865608, 0.3543371260166168], [0.057395875453948975, 0.01834016665816307, 0.017516011372208595, 0.011936328373849392, 0.010095582343637943, 0.018046732991933823, 0.24530914425849915, 0.01257838774472475, 0.014466731809079647, 0.027552323415875435, 0.054997242987155914, 0.013960911892354488, 0.0074861980974674225, 0.03251070901751518, 0.14566579461097717]], [[0.3107149600982666, 0.049285680055618286, 0.08128133416175842, 0.03986956924200058, 0.07088969647884369, 0.1961679309606552, 0.15016919374465942, 0.05429982393980026, 0.1291487067937851, 0.03663256764411926, 0.25306442379951477, 0.3913470208644867, 0.2542778253555298, 0.252127081155777, 0.15921251475811005], [0.10834414511919022, 0.3508348762989044, 0.02124197781085968, 0.019397908821702003, 0.026673240587115288, 0.3167271912097931, 0.11886779963970184, 0.17699773609638214, 0.14507175981998444, 0.115145742893219, 0.6241064667701721, 0.1622784435749054, 0.5683063268661499, 0.15724869072437286, 0.12728430330753326], [0.6979861855506897, 0.039286430925130844, 0.3014020621776581, 0.003208757843822241, 0.01772892102599144, 0.014036925509572029, 0.19886529445648193, 0.09335973858833313, 0.4060034155845642, 0.28424081206321716, 0.26539483666419983, 0.1895008385181427, 0.4672236740589142, 0.16107353568077087, 0.10992881655693054], [0.5298255681991577, 0.6474234461784363, 0.19260530173778534, 0.026028962805867195, 0.013013242743909359, 0.01466711051762104, 0.11121421307325363, 0.06523838639259338, 0.29339125752449036, 0.46135157346725464, 0.7174844145774841, 0.3618351221084595, 0.19526919722557068, 0.0703459233045578, 0.24330592155456543], [0.7494951486587524, 0.23358309268951416, 0.3640848398208618, 0.09014757722616196, 0.32190942764282227, 0.0021980239544063807, 0.07713330537080765, 0.030900368466973305, 0.08560045808553696, 0.26394325494766235, 0.11549779027700424, 0.44356539845466614, 0.12175428122282028, 0.3783136308193207, 0.14015373587608337], [0.3064809739589691, 0.15617568790912628, 0.4955383241176605, 0.8125641942024231, 0.02114781178534031, 0.2633197009563446, 0.014569958671927452, 0.04754461348056793, 0.03227522596716881, 0.09995166957378387, 0.0697590634226799, 0.0770602896809578, 0.19454655051231384, 
0.18272873759269714, 0.19963966310024261], [0.5314973592758179, 0.5086395144462585, 0.5757231116294861, 0.44031307101249695, 0.2709468603134155, 0.0639616996049881, 0.2984015941619873, 0.0039451331831514835, 0.0197422094643116, 0.0031917106825858355, 0.05093149095773697, 0.12591752409934998, 0.25977155566215515, 0.0615861676633358, 0.3711840510368347], [0.2939777970314026, 0.2997593581676483, 0.5167340040206909, 0.46100836992263794, 0.39705657958984375, 0.5034002065658569, 0.07978513836860657, 0.0779491513967514, 0.012053987942636013, 0.01132633350789547, 0.028715649619698524, 0.059212565422058105, 0.20603224635124207, 0.15584728121757507, 0.14816488325595856], [0.3128078877925873, 0.0864272266626358, 0.7678588032722473, 0.6537591814994812, 0.8236088752746582, 0.6979317665100098, 0.30976778268814087, 0.014760972931981087, 0.5645584464073181, 0.004590533208101988, 0.008271697908639908, 0.012132997624576092, 0.028745530173182487, 0.04464057460427284, 0.1669740080833435], [0.6456499099731445, 0.1693999022245407, 0.7097220420837402, 0.5244839191436768, 0.46365103125572205, 0.5023244023323059, 0.9643971920013428, 0.24913577735424042, 0.13337120413780212, 0.06419410556554794, 0.012416149489581585, 0.0573885552585125, 0.016666844487190247, 0.008706454187631607, 0.1754455268383026], [0.09960467368364334, 0.0907629206776619, 0.36143985390663147, 0.11092879623174667, 0.19937658309936523, 0.03214935213327408, 0.3196737766265869, 0.4763943552970886, 0.497630774974823, 0.1899363249540329, 0.1145005002617836, 0.004749455489218235, 0.0008605146431364119, 0.0007969819707795978, 0.02025206945836544], [0.3807562589645386, 0.26623356342315674, 0.4209006428718567, 0.27443018555641174, 0.5137820839881897, 0.1592678278684616, 0.6250110864639282, 0.6178545951843262, 0.9692861437797546, 0.5716569423675537, 0.22724294662475586, 0.17567582428455353, 0.008769324980676174, 0.002557128667831421, 0.05025441572070122], [0.2969632148742676, 0.16767999529838562, 0.46978121995925903, 0.28813451528549194, 0.45300158858299255, 0.33029136061668396, 0.6236194968223572, 0.1634167730808258, 0.8177276253700256, 0.718397855758667, 0.9021148681640625, 0.07875741273164749, 0.09992827475070953, 0.004932410083711147, 0.1707668900489807], [0.3945808410644531, 0.3581867516040802, 0.5247420072555542, 0.4120633900165558, 0.3024104833602905, 0.35548633337020874, 0.5872392654418945, 0.15815261006355286, 0.7289484143257141, 0.7948301434516907, 0.9396543502807617, 0.9256777167320251, 0.08537369966506958, 0.03166399896144867, 0.03224433213472366], [0.004588960204273462, 0.041907694190740585, 0.17755450308322906, 0.039724841713905334, 0.047663237899541855, 0.09274838864803314, 0.010110240429639816, 0.014862497337162495, 0.11161036789417267, 0.0490046888589859, 0.18517035245895386, 0.029471391811966896, 0.05094437301158905, 0.002971563721075654, 0.16300250589847565]], [[6.113462859502761e-06, 0.5065946578979492, 7.261813152581453e-05, 5.1066386498122354e-14, 1.0490246824277965e-15, 1.4956003015903496e-12, 2.5734427609724886e-13, 2.1143946469237562e-06, 9.544867651811728e-08, 4.2543565892394497e-10, 6.215519418595328e-12, 1.687761909396901e-11, 1.6993320528513323e-08, 1.0583119935958507e-09, 9.857150189418462e-07], [4.727198188447801e-08, 0.002272214274853468, 0.8730366826057434, 0.0016238681273534894, 9.849362297975617e-11, 6.310171162720105e-14, 1.3311845115798748e-12, 1.350557283785747e-07, 1.07800769910682e-05, 3.4101576602552086e-05, 7.529693561991735e-07, 3.7022258592145363e-09, 3.1551092294357375e-10, 8.851498527195911e-12, 
1.024629546009237e-05], [6.003397223786067e-10, 5.335852165444521e-06, 0.00445933174341917, 0.5796651840209961, 5.976808097329922e-05, 2.377180230439535e-09, 1.7792844021063958e-12, 1.2140626282075573e-09, 6.417224529542409e-09, 2.601910637167748e-06, 1.1842810181406094e-06, 1.8266834445057611e-07, 1.3081095096012518e-09, 1.5776791765370612e-12, 4.7676843678345904e-05], [2.4071971206038626e-15, 2.3560551770727793e-14, 9.98394700246763e-11, 1.7167060661904543e-07, 0.2774648666381836, 1.6012703781598248e-05, 9.760837530760607e-15, 4.654387315338889e-18, 8.039692137064508e-20, 2.1508527635127157e-16, 1.789740057545064e-11, 2.4233797191186568e-08, 2.7592322870972907e-10, 4.956549239646573e-15, 1.5411848153235042e-06], [1.9919477308935618e-13, 5.266535346254387e-16, 1.2917133013982517e-14, 7.221083175856791e-10, 8.195231930585578e-05, 0.5564944744110107, 4.117699063499458e-06, 5.438900198273533e-13, 2.4172004338169554e-20, 9.57835365503234e-22, 9.376302678036402e-17, 3.235451073724249e-10, 6.101883442966027e-09, 9.971044129253315e-11, 1.6162671201414014e-08], [9.771466125130246e-08, 3.17872256294649e-11, 3.1429036890379125e-13, 5.901367481980172e-16, 4.2342058748090494e-09, 0.0012305855052545667, 0.6103256940841675, 2.2161180822877213e-05, 7.972257402844019e-12, 6.481494664823834e-19, 5.35928561114305e-19, 7.863773244772346e-14, 1.1593314752644801e-07, 8.808668212623161e-07, 1.1730364235518209e-07], [2.6939844799400703e-10, 3.892770337188267e-07, 2.2438891023046637e-10, 2.095593632707407e-18, 1.8655412772298346e-14, 2.206185598652155e-07, 3.0316745323943906e-05, 0.33891788125038147, 5.437008439912461e-06, 1.3213468337612382e-14, 2.5347562276209975e-18, 1.0659246862729562e-18, 2.6392999114346893e-13, 9.868956762915104e-10, 1.6170986327779246e-06], [1.3015508670832787e-09, 4.1474245904282725e-07, 7.619819371029735e-06, 9.079691751061325e-13, 5.725895077835787e-16, 1.0568446176517903e-14, 8.978999488373773e-11, 2.253716047562193e-05, 0.9323674440383911, 0.0001553743495605886, 1.1094852814252931e-10, 4.251380123255501e-17, 3.4548606558270072e-18, 1.563022274271835e-14, 1.7832363141678798e-07], [1.2218349942916262e-10, 4.9370779464652514e-08, 1.0212672805209877e-06, 3.802215486903293e-11, 4.1323817879847246e-16, 3.8503187577578586e-16, 6.2032051316354e-15, 3.2203126920649083e-07, 8.202762546716258e-05, 0.5051153898239136, 1.6483796571264975e-05, 2.317061202194298e-13, 9.134085045449695e-19, 4.959048342554486e-21, 1.9839136555788173e-08], [3.5615963439117673e-14, 6.311461336200308e-12, 7.572167781688677e-09, 7.864790063649707e-08, 5.871175941252194e-13, 4.399392566282849e-15, 3.6105855357745724e-20, 8.408651243829376e-14, 2.915925279012299e-09, 2.7294316168990918e-05, 0.31493836641311646, 1.4271394093157141e-06, 7.57530499374999e-14, 1.0444343699767344e-21, 5.65783730976932e-09], [1.619628042792698e-10, 6.862534152052291e-11, 7.238428190170509e-10, 5.1994692995549485e-08, 8.193378420173758e-08, 6.734891755399985e-09, 1.47457238341411e-14, 5.793711288450045e-15, 1.5065480465795492e-14, 1.167909147170576e-08, 0.0003541565383784473, 0.5504465699195862, 2.5677532903500833e-05, 4.9321430864142715e-14, 1.3459792569392448e-07], [8.003913504195381e-11, 5.626729984720136e-12, 4.9737857062137625e-12, 1.4365373474101162e-11, 1.165467935493325e-07, 3.263785401941277e-05, 9.4434834951862e-11, 2.6144878938953817e-15, 6.540743544149476e-19, 2.5930401594030658e-17, 1.8366722587259687e-09, 1.8794700736179948e-05, 0.49058014154434204, 8.066950840657228e-07, 1.3585024589701788e-06], [1.0801989728040362e-12, 
2.2359935084037552e-13, 1.1691597126203823e-12, 1.0214807062303036e-16, 2.4270561688882752e-12, 4.4484740890915475e-10, 1.1468358207533669e-10, 1.5131759777478604e-13, 3.7208958865722007e-20, 6.888861115537483e-21, 1.5888746801787275e-18, 3.2241334168431335e-12, 5.685043561243219e-06, 0.3912107050418854, 3.0407140694244106e-10], [5.397048425948014e-07, 2.3629811494174646e-06, 8.614414923613367e-07, 8.006720286779512e-13, 4.92412575016192e-14, 2.066644277931573e-08, 0.00031528103863820434, 0.011093947105109692, 3.7555511767095595e-07, 1.151808547627739e-13, 5.505821095062543e-16, 1.6971218267519683e-12, 5.383023108151974e-06, 0.8731740117073059, 0.04139598086476326], [0.6266164779663086, 0.3128010928630829, 0.06246759742498398, 0.00042505442979745567, 0.008534153923392296, 0.09425555169582367, 0.2709643542766571, 0.686626672744751, 0.3142872750759125, 0.10107265412807465, 0.015935143455863, 0.012286541052162647, 0.14970052242279053, 0.3989029824733734, 0.022492708638310432]]], [[[0.1393769532442093, 0.0735321119427681, 0.701509952545166, 0.10650816559791565, 0.05110495164990425, 0.021589145064353943, 0.0033319133799523115, 0.0014166238252073526, 0.01486207265406847, 0.006584684830158949, 0.002582702785730362, 0.0004108685825485736, 0.010701421648263931, 0.009390643797814846, 0.06290604919195175], [0.0030957262497395277, 0.0237117987126112, 0.7945073246955872, 0.09792613238096237, 0.2614360749721527, 0.179405078291893, 0.011310527101159096, 0.009954328648746014, 0.009489532560110092, 0.0005609119543805718, 0.000751268700696528, 0.0001462608779547736, 0.004604416899383068, 0.004964352585375309, 0.019775664433836937], [0.002461136318743229, 0.024594180285930634, 0.009559455327689648, 0.055053047835826874, 0.30010533332824707, 0.4690517783164978, 0.03334644436836243, 0.0075769852846860886, 0.007821744307875633, 0.004109389614313841, 0.0022267017047852278, 0.000916018383577466, 0.0037954216822981834, 0.0007741246954537928, 0.004415341652929783], [0.0019876149017363787, 0.0012237336486577988, 0.00015556006110273302, 0.0003553472051862627, 0.4419420659542084, 0.6252713799476624, 0.02062046155333519, 0.0028509902767837048, 0.00548406969755888, 0.0003452444798313081, 0.0001962203241419047, 0.0008938669925555587, 0.0009214308229275048, 1.2216354662086815e-05, 0.0019377138232812285], [0.00020824302919209003, 0.00021322975226212293, 4.6913473852328025e-06, 0.00017657040734775364, 0.0005752452998422086, 0.5289100408554077, 0.1970362812280655, 0.12947966158390045, 0.0005265067447908223, 0.000227929005632177, 6.233566091395915e-05, 0.0001991882745642215, 0.00032238851417787373, 0.0003627484547905624, 0.0016414258861914277], [0.0010278578847646713, 0.0029486939311027527, 0.00014835220645181835, 0.00036925319000147283, 0.00742883887141943, 0.03272741660475731, 0.8576475977897644, 0.03500620648264885, 0.2982224225997925, 0.0003585784579627216, 5.663683623424731e-05, 0.0011889662127941847, 0.00576341338455677, 0.003998933359980583, 0.03130826726555824], [0.002113666385412216, 0.004151111003011465, 0.002428078791126609, 0.002119476906955242, 0.001100956811569631, 0.003687644377350807, 0.13543397188186646, 0.11922256648540497, 0.7567945718765259, 0.2570010721683502, 0.004903816152364016, 0.0001005519661703147, 0.000830159813631326, 0.001259618904441595, 0.14076685905456543], [0.0010344160255044699, 0.00660368800163269, 0.0025270660407841206, 0.00023567670723423362, 0.0004021638887934387, 0.0030120171140879393, 0.0016376315616071224, 0.0524386465549469, 0.7797302007675171, 0.1269131302833557, 
0.004214781802147627, 0.0002750723797362298, 0.002267329953610897, 0.001067862962372601, 0.16698867082595825], [0.0009750229655764997, 0.0120720649138093, 0.0038384809158742428, 0.0036232813727110624, 0.004431525245308876, 0.0007613649941049516, 5.662842158926651e-05, 0.01338160876184702, 0.041878536343574524, 0.7091978788375854, 0.2535402476787567, 0.13969287276268005, 0.026510832831263542, 0.0006678565987385809, 0.015569130890071392], [0.0002093962684739381, 0.00030164673808030784, 0.00010105424007633701, 5.030819465901004e-06, 0.001411793869920075, 0.003664590884000063, 0.00017403968377038836, 0.0011218853760510683, 0.011106000281870365, 0.003924186807125807, 0.07315385341644287, 0.3008219599723816, 0.36353737115859985, 0.025737306103110313, 0.0060785748064517975], [0.0001716838014544919, 0.0008840822265483439, 4.3183892557863146e-05, 3.6494086543825688e-06, 0.0005770743009634316, 0.010045445524156094, 0.00010205945727648214, 6.57988857710734e-05, 0.0006949909729883075, 0.004452799912542105, 0.009000658988952637, 0.49080607295036316, 0.17717383801937103, 0.11174798011779785, 0.021669577807188034], [0.019416164606809616, 0.0014941463014110923, 0.001027028076350689, 0.001502541359513998, 0.0085412273183465, 0.12493651360273361, 0.0035243057645857334, 0.0026196581311523914, 0.0008317703031934798, 0.0015569254755973816, 0.060888972133398056, 0.06929422169923782, 0.3396435081958771, 0.387500524520874, 0.017253199592232704], [0.04994890093803406, 0.15025374293327332, 0.024391163140535355, 0.00227133696898818, 0.012616162188351154, 0.2894521951675415, 0.4185648262500763, 0.19089959561824799, 0.027421748265624046, 0.001001756638288498, 0.0036985764745622873, 0.06802930682897568, 0.02484762854874134, 0.057649459689855576, 0.1606004238128662], [0.03736208751797676, 0.11793919652700424, 0.0180205088108778, 0.0001436693564755842, 0.0030756669584661722, 0.08228655159473419, 0.12110688537359238, 0.09650447964668274, 0.015347721055150032, 0.0004259537090547383, 0.00022625335259363055, 0.001013986300677061, 0.0784289613366127, 0.2240448147058487, 0.18707746267318726], [0.7529165148735046, 0.7075774073600769, 0.6068683862686157, 0.3852986991405487, 0.6197313666343689, 0.6735447645187378, 0.6598724722862244, 0.7226093411445618, 0.31395286321640015, 0.2518909275531769, 0.07010441273450851, 0.21793116629123688, 0.4325476884841919, 0.7029338479042053, 0.06848814338445663]], [[0.0006553527782671154, 0.5631614327430725, 0.0008777088369242847, 0.00020331511041149497, 0.0014234310947358608, 0.013944034464657307, 9.958680493582506e-06, 0.01898920349776745, 0.00014103656576480716, 1.4779416233068332e-06, 1.1701366275929104e-07, 1.195983372781484e-06, 0.00012817273091059178, 3.365538941579871e-05, 0.00028557839686982334], [0.00638999929651618, 0.7093943953514099, 0.004974186420440674, 0.06159398332238197, 0.003979360219091177, 0.06536109745502472, 0.005324128083884716, 0.02885170467197895, 0.0003847253101412207, 0.0002721542550716549, 4.3882369936909527e-05, 0.00024302180099766701, 0.00612376956269145, 0.006710950285196304, 0.0343138724565506], [0.109707772731781, 0.1680740863084793, 0.05170662701129913, 0.04158816486597061, 0.026700180023908615, 0.23248757421970367, 0.5156019330024719, 0.3799504041671753, 0.02909121848642826, 0.009008231572806835, 0.0013055672170594335, 0.0032788640819489956, 0.0791734829545021, 0.010587821714580059, 0.06850002706050873], [0.04004191607236862, 0.02257939800620079, 0.01325287576764822, 0.14834734797477722, 0.0700073167681694, 0.12831416726112366, 0.47980472445487976, 
0.3121630549430847, 0.05984592065215111, 0.015101294964551926, 0.002668763743713498, 0.0007187540177255869, 0.04004915803670883, 0.0007627750164829195, 0.05523831769824028], [0.0007188548916019499, 0.006864115130156279, 0.00033292395528405905, 0.000431404507253319, 0.0152564262971282, 0.2775210440158844, 0.03714991733431816, 0.7278205156326294, 0.004819776862859726, 0.00047404138604179025, 0.0003997469611931592, 0.0001266899926122278, 0.0201359074562788, 0.0027800032403320074, 0.042311206459999084], [0.00020999301341362298, 0.0025689874310046434, 3.502765650864603e-07, 6.610702985199168e-05, 0.00024143110204022378, 0.018905406817793846, 0.033397458493709564, 0.4650881290435791, 0.004783111158758402, 0.00013528004637919366, 5.751344360760413e-06, 7.93816871009767e-05, 0.0039043116848915815, 0.0005016719806008041, 0.07914639264345169], [0.00019393693946767598, 0.07456899434328079, 1.429513213224709e-05, 4.6383509470615536e-05, 6.820548151154071e-05, 0.004400796256959438, 0.0021800962276756763, 0.45963534712791443, 0.00143687822856009, 0.0008175616967491806, 6.983020284678787e-05, 3.49152869603131e-05, 0.0030698180198669434, 0.0006545006763190031, 0.001625033444724977], [0.004301158711314201, 0.013502174988389015, 4.788395017385483e-05, 0.00021532995742745697, 7.713190279901028e-05, 0.001439842046238482, 0.005622516851872206, 0.121849425137043, 0.006593172438442707, 0.006624745205044746, 0.0006814572843722999, 0.0002721978526096791, 0.0009267745190300047, 0.0016606011195108294, 0.2357456088066101], [0.0064394231885671616, 0.03409593552350998, 0.0025135872419923544, 0.0008376456098631024, 0.0004409599641803652, 0.0026055865455418825, 0.005634414032101631, 0.014003962278366089, 0.2343187928199768, 0.08099395036697388, 0.23927520215511322, 0.01715606264770031, 0.10332414507865906, 0.021894987672567368, 0.1941189020872116], [0.0004975660121999681, 0.0015548047376796603, 6.826691333117196e-06, 1.0557592986515374e-06, 2.731301538005937e-05, 0.0005447702133096755, 0.00042012380436062813, 0.0503113828599453, 0.0053693996742367744, 0.0012762928381562233, 0.0017790982965379953, 0.019809026271104813, 0.47653263807296753, 0.008869247511029243, 0.017010610550642014], [0.00012974163109902292, 0.005610004533082247, 2.3442629753844813e-05, 1.8520654521125834e-06, 3.9678394387010485e-05, 0.0016583451069891453, 0.00029088594601489604, 0.004530484322458506, 0.0021493860986083746, 0.00029196502873674035, 0.0005848451401107013, 0.0028240433894097805, 0.4590959846973419, 0.22978197038173676, 0.0020738127641379833], [0.00021855060185771435, 0.005491270218044519, 1.9927349057979882e-05, 7.633860150235705e-06, 0.0004071943403687328, 0.008836714550852776, 7.301902951439843e-05, 0.011723233386874199, 1.7278060113312677e-05, 0.0001269245840376243, 0.00022235361393541098, 0.016586007550358772, 0.41012606024742126, 0.37776312232017517, 0.0024871949572116137], [0.02619638666510582, 0.18392468988895416, 0.0003054745029658079, 0.00016413358389399946, 0.0015171386767178774, 0.004799532704055309, 0.004810427315533161, 0.058836404234170914, 0.0003794554795604199, 0.0017285931389778852, 0.000568193441722542, 0.003299211384728551, 0.6178385019302368, 0.5079926252365112, 0.05467592179775238], [0.03445081040263176, 0.14193737506866455, 0.0007241201237775385, 0.0002892682678066194, 0.0003202178922947496, 0.003702279180288315, 0.01134149543941021, 0.12129464000463486, 0.0006569268880411983, 0.0008894759230315685, 8.523569704266265e-05, 0.00030898841214366257, 0.7088924646377563, 0.10790188610553741, 0.05374660715460777], 
[0.04547691345214844, 0.010678221471607685, 0.0016328264027833939, 0.024403419345617294, 0.012795579619705677, 0.004323439672589302, 0.06414945423603058, 0.014008321799337864, 0.011475995182991028, 0.00871653389185667, 0.012156924232840538, 0.0147528275847435, 0.009472412057220936, 0.0331418551504612, 0.1366012692451477]], [[0.3143080472946167, 0.014564945362508297, 0.07743841409683228, 0.19665417075157166, 0.23130221664905548, 0.03274351730942726, 0.23599109053611755, 0.04763320833444595, 0.20168107748031616, 0.7521476149559021, 0.7922006249427795, 0.840878427028656, 0.6463541388511658, 0.6008138656616211, 0.0070990691892802715], [0.05880431830883026, 0.004086965229362249, 0.06557433307170868, 0.4476080536842346, 0.32179930806159973, 0.2046266496181488, 0.5952353477478027, 0.20483972132205963, 0.7834360599517822, 0.27592822909355164, 0.5900363922119141, 0.6986290812492371, 0.3548848032951355, 0.36629796028137207, 0.07452832907438278], [0.4484235942363739, 0.0712433010339737, 0.09740526974201202, 0.49982836842536926, 0.18807044625282288, 0.007537430617958307, 0.2073078453540802, 0.015238385647535324, 0.18028782308101654, 0.6095888018608093, 0.4225178062915802, 0.6769288778305054, 0.3957397937774658, 0.7102670669555664, 0.05611870437860489], [0.4341801106929779, 0.05481646955013275, 0.17834456264972687, 0.2579769194126129, 0.326920747756958, 0.0030261597130447626, 0.03147314488887787, 0.003279186552390456, 0.09941483289003372, 0.5679370760917664, 0.8480010032653809, 0.8133074045181274, 0.4710683822631836, 0.9189481139183044, 0.04321537911891937], [0.559230387210846, 0.08983521163463593, 0.16111011803150177, 0.14667965471744537, 0.32596829533576965, 0.008685072883963585, 0.1111784353852272, 0.02690659649670124, 0.06770152598619461, 0.18340016901493073, 0.4614297151565552, 0.502476155757904, 0.42325475811958313, 0.5992166996002197, 0.05437220633029938], [0.367906779050827, 0.21432256698608398, 0.3548191487789154, 0.2603428363800049, 0.22096140682697296, 0.0013341127196326852, 0.021726170554757118, 0.005543001927435398, 0.5389296412467957, 0.818263828754425, 0.919593095779419, 0.8187286257743835, 0.4823090434074402, 0.4897681474685669, 0.07018090784549713], [0.7116888761520386, 0.17206020653247833, 0.6874114871025085, 0.19288089871406555, 0.20990870893001556, 0.011273512616753578, 0.2026582807302475, 0.004371582996100187, 0.10976968705654144, 0.4432500898838043, 0.7022042274475098, 0.8704607486724854, 0.721519947052002, 0.7422701716423035, 0.025589054450392723], [0.7674684524536133, 0.20032620429992676, 0.42808812856674194, 0.11714937537908554, 0.32732346653938293, 0.009955272078514099, 0.05444686487317085, 0.0040375906974077225, 0.12078685313463211, 0.6266691088676453, 0.5163981914520264, 0.8307003378868103, 0.32096055150032043, 0.24524804949760437, 0.04717922583222389], [0.7549813389778137, 0.15439504384994507, 0.33331331610679626, 0.24930144846439362, 0.2927357852458954, 0.04936225712299347, 0.44933974742889404, 0.06466211378574371, 0.09519664198160172, 0.08716140687465668, 0.058296240866184235, 0.09990595281124115, 0.5117565989494324, 0.1508449912071228, 0.039490822702646255], [0.654628574848175, 0.3205694854259491, 0.5841068029403687, 0.21299651265144348, 0.365792840719223, 0.0401315838098526, 0.18686936795711517, 0.05883712321519852, 0.05069931596517563, 0.33667507767677307, 0.3354107439517975, 0.22027519345283508, 0.05277648940682411, 0.09031395614147186, 0.015531455166637897], [0.3366456627845764, 0.1530359387397766, 0.41866233944892883, 0.39775165915489197, 0.7769761681556702, 
0.06979230791330338, 0.41583842039108276, 0.02130916155874729, 0.14617334306240082, 0.25815388560295105, 0.1423572301864624, 0.18894770741462708, 0.041056301444768906, 0.026175418868660927, 0.03888533264398575], [0.24913249909877777, 0.0818726196885109, 0.5426726341247559, 0.1687711775302887, 0.8305720090866089, 0.26261457800865173, 0.39635857939720154, 0.1712585836648941, 0.1158638522028923, 0.17366157472133636, 0.12521226704120636, 0.5298976302146912, 0.041029125452041626, 0.02415779046714306, 0.1170416921377182], [0.3567614257335663, 0.035316068679094315, 0.3819185495376587, 0.10469090938568115, 0.3454773426055908, 0.09596268832683563, 0.3821227550506592, 0.17425164580345154, 0.40528857707977295, 0.1745157092809677, 0.10956539213657379, 0.5078453421592712, 0.0026470222510397434, 0.016186503693461418, 0.08932095021009445], [0.330766886472702, 0.039845019578933716, 0.6981685757637024, 0.09713104367256165, 0.8411048650741577, 0.16356231272220612, 0.3630223274230957, 0.1627381145954132, 0.6954487562179565, 0.17326875030994415, 0.1752558946609497, 0.24479816854000092, 0.026946308091282845, 0.016200177371501923, 0.06702017039060593], [0.07683827728033066, 0.07034450024366379, 0.21707428991794586, 0.2902449369430542, 0.1834353357553482, 0.01726321130990982, 0.13144701719284058, 0.005189047660678625, 0.150242418050766, 0.1182665303349495, 0.4041094183921814, 0.12062898278236389, 0.05959685891866684, 0.1186181977391243, 0.1283060759305954]], [[0.06827192008495331, 0.0036808219738304615, 0.005701950751245022, 0.005157816223800182, 0.003777393838390708, 0.024757172912359238, 0.0020165019668638706, 0.010267351754009724, 0.013163687661290169, 0.001690453034825623, 0.00837681908160448, 0.00522418599575758, 0.061038240790367126, 0.015438525006175041, 0.325132817029953], [0.7422951459884644, 0.028774140402674675, 0.06394203752279282, 0.00887901522219181, 0.04345611855387688, 0.027670713141560555, 0.0295904241502285, 0.01398912351578474, 0.025535697117447853, 0.02094031311571598, 0.022182827815413475, 0.009663421660661697, 0.049684178084135056, 0.026225639507174492, 0.13834334909915924], [0.20897099375724792, 0.21868035197257996, 0.23815643787384033, 0.005872054491192102, 0.0010661164997145534, 0.0017293300479650497, 0.00042713910806924105, 0.002609806600958109, 0.016046296805143356, 0.009100147522985935, 0.014420107938349247, 0.0022624030243605375, 0.010553905740380287, 0.007111164275556803, 0.25332581996917725], [0.2508500814437866, 0.20390872657299042, 0.7329782247543335, 0.07117453217506409, 0.016424261033535004, 0.021444672718644142, 0.001510130357928574, 0.004098558332771063, 0.0484151765704155, 0.02061472274363041, 0.001126835006289184, 0.0022107160184532404, 0.007578131277114153, 0.004504901356995106, 0.1403624713420868], [0.27370113134384155, 0.8174626231193542, 0.7193068861961365, 0.7076587677001953, 0.07771007716655731, 0.01620337925851345, 0.004001453518867493, 0.004182097036391497, 0.03681829199194908, 0.09453201293945312, 0.026799198240041733, 0.006044679321348667, 0.03725922852754593, 0.016391301527619362, 0.04474738612771034], [0.3889567255973816, 0.4487122893333435, 0.5870586037635803, 0.6609426140785217, 0.6319714188575745, 0.10676700621843338, 0.009257740341126919, 0.0017087672604247928, 0.027955975383520126, 0.07590407133102417, 0.006841681431978941, 0.08621303737163544, 0.05063363164663315, 0.016846608370542526, 0.05719457566738129], [0.00991373136639595, 0.0983041524887085, 0.15667210519313812, 0.19277995824813843, 0.5809133052825928, 0.7996482253074646, 0.06316149979829788, 
0.004939877428114414, 0.023352928459644318, 0.010926214046776295, 0.008795071393251419, 0.006998055148869753, 0.0765714943408966, 0.006783204153180122, 0.05886436253786087], [0.07887525111436844, 0.017153050750494003, 0.2216421663761139, 0.13068468868732452, 0.5295770764350891, 0.35302138328552246, 0.8493326902389526, 0.04265422001481056, 0.052519019693136215, 0.027357611805200577, 0.01357424259185791, 0.004279646556824446, 0.026089098304510117, 0.04089489206671715, 0.014124121516942978], [0.03465811163187027, 0.15351061522960663, 0.2825109362602234, 0.08174889534711838, 0.19755861163139343, 0.5825939774513245, 0.37084007263183594, 0.7892780900001526, 0.1287456750869751, 0.006381133571267128, 0.001940184272825718, 0.00047384126810356975, 0.011903955601155758, 0.003972942009568214, 0.06710142642259598], [0.013788340613245964, 0.006632686126977205, 0.02207767777144909, 0.0785517543554306, 0.014113685116171837, 0.048156753182411194, 0.1944313496351242, 0.22155866026878357, 0.49656373262405396, 0.009422117844223976, 0.004702835343778133, 0.0007582302205264568, 0.00014129001647233963, 0.00033574484405107796, 0.23994654417037964], [0.00469209672883153, 0.015491061843931675, 0.035103749483823776, 0.009631682187318802, 0.008573818951845169, 0.051444172859191895, 0.04315423220396042, 0.05495374649763107, 0.6859460473060608, 0.5370080471038818, 0.06784479320049286, 0.004556083586066961, 0.001035997993312776, 0.0006345660076476634, 0.13974453508853912], [0.02668480947613716, 0.016245348379015923, 0.01112398225814104, 0.008507933467626572, 0.02067524567246437, 0.17763113975524902, 0.05662769451737404, 0.04544723033905029, 0.7948054671287537, 0.7384940385818481, 0.5224500298500061, 0.1060851439833641, 0.014122114516794682, 0.0019289307529106736, 0.08371670544147491], [0.02394592948257923, 0.04371663182973862, 0.028385786339640617, 0.007640721742063761, 0.014576996676623821, 0.08887659758329391, 0.017377078533172607, 0.020801657810807228, 0.187345951795578, 0.5047414302825928, 0.6342922449111938, 0.3672487437725067, 0.04719087854027748, 0.10966072231531143, 0.08543073385953903], [0.009629062376916409, 0.020042795687913895, 0.006009343545883894, 0.001406975439749658, 0.0026742229238152504, 0.006072318647056818, 0.006495587062090635, 0.0032924923580139875, 0.034326668828725815, 0.5998041033744812, 0.7456773519515991, 0.7204623818397522, 0.012111457996070385, 0.018825965002179146, 0.008305574767291546], [0.08114123344421387, 0.05478224158287048, 0.11802507936954498, 0.1980995535850525, 0.15338915586471558, 0.11414031684398651, 0.06528255343437195, 0.04494854062795639, 0.26375874876976013, 0.30061599612236023, 0.26960447430610657, 0.5329554677009583, 0.4288364350795746, 0.12292250245809555, 0.12395624816417694]], [[0.09139528125524521, 0.1232069656252861, 0.06926427036523819, 0.03596228361129761, 0.08677947521209717, 0.3523865342140198, 0.17220446467399597, 0.3048216700553894, 0.24129998683929443, 0.008230631239712238, 0.012852879241108894, 0.0024019270204007626, 0.003931952640414238, 0.002576343482360244, 0.13348431885242462], [0.005495021585375071, 0.009821278043091297, 0.006606503389775753, 0.0009270968730561435, 0.022634856402873993, 0.02637101709842682, 0.03666122257709503, 0.003247066168114543, 0.03138025477528572, 0.0023785934317857027, 0.007012520916759968, 0.0027185468934476376, 0.001623710268177092, 0.009003029204905033, 0.24841202795505524], [0.004891206510365009, 0.01856830157339573, 0.01660238206386566, 0.05400720611214638, 0.2678459584712982, 0.21548990905284882, 0.0901486948132515, 
0.14165979623794556, 0.4387242794036865, 0.0060303402133286, 0.03774549812078476, 0.022296983748674393, 0.014843892306089401, 0.003844154067337513, 0.0701230987906456], [0.009136357344686985, 0.005524215288460255, 0.002000550739467144, 0.004360574297606945, 0.06230698525905609, 0.032116882503032684, 0.14447683095932007, 0.11250873655080795, 0.12456412613391876, 0.017903752624988556, 0.03641437739133835, 0.030236193910241127, 0.03817100450396538, 0.0020203718449920416, 0.24235397577285767], [0.011458649300038815, 0.0028747334145009518, 0.0048751854337751865, 0.0034302298445254564, 0.032581884413957596, 0.009492963552474976, 0.29646721482276917, 0.024549754336476326, 0.5199102163314819, 0.07497825473546982, 0.039336495101451874, 0.23366358876228333, 0.2855432629585266, 0.0047793262638151646, 0.131587415933609], [0.0048281243070960045, 0.014400148764252663, 0.00021499136346392334, 0.00015902110317256302, 0.0008502291166223586, 0.005816742777824402, 0.03721616789698601, 0.31765323877334595, 0.006985681131482124, 9.90723492577672e-05, 0.0015535155544057488, 0.002471775049343705, 0.00966054666787386, 0.002636645222082734, 0.15553238987922668], [0.01824354939162731, 0.02838711440563202, 0.0006440957658924162, 0.00040316785452887416, 0.00041587575105950236, 0.0021029487252235413, 0.07766012847423553, 0.3384210765361786, 0.005884509067982435, 0.02229108288884163, 0.02292727865278721, 0.00326070049777627, 0.002748187631368637, 0.004811563994735479, 0.08466839045286179], [0.0009052195237018168, 0.00028935770387761295, 0.00010135041520697996, 4.4237076508579776e-05, 9.765469440026209e-05, 0.0003226006228942424, 0.0006174442823976278, 0.003764552064239979, 0.001191335148178041, 0.0005841490346938372, 0.001988127361983061, 0.0019700597040355206, 0.0006354944198392332, 0.0011416736524552107, 0.25631290674209595], [0.007226317655295134, 0.015471585094928741, 0.027516253292560577, 0.0063530029729008675, 0.015222059562802315, 0.004327190574258566, 0.010739101096987724, 0.0023785619996488094, 0.053105201572179794, 0.0674574077129364, 0.31870341300964355, 0.4986713230609894, 0.027042971923947334, 0.0736011192202568, 0.116986483335495], [0.015794623643159866, 0.009404269978404045, 0.017993446439504623, 0.003823975333943963, 0.004969433881342411, 0.03679484874010086, 0.04242165759205818, 0.017222637310624123, 0.1201641708612442, 0.016131659969687462, 0.3518509864807129, 0.3061373829841614, 0.0458594486117363, 0.15943044424057007, 0.17968055605888367], [0.006380036938935518, 0.028477374464273453, 0.006851766724139452, 0.005024573765695095, 0.02579522877931595, 0.052536945790052414, 0.0111169358715415, 0.0038714397232979536, 0.008046599105000496, 0.008921324275434017, 0.011395278386771679, 0.10255969315767288, 0.21638940274715424, 0.44467252492904663, 0.05895284563302994], [0.010142950341105461, 0.001643709372729063, 0.002422438468784094, 0.0009472724632360041, 0.0033483330626040697, 0.003415578044950962, 0.03889569267630577, 0.005287462379783392, 0.00042015319922938943, 0.0010667687747627497, 0.00740370387211442, 0.00895014964044094, 0.0067735291086137295, 0.017782215029001236, 0.26753443479537964], [0.11724554747343063, 0.0023070531897246838, 0.004510094877332449, 0.0014967885799705982, 0.007825762964785099, 0.00018500315491110086, 0.013543304987251759, 0.0012864026939496398, 0.0007778326398693025, 0.00044295378029346466, 0.001640060218051076, 0.0014512997586280107, 0.002360806567594409, 0.2112705558538437, 0.19457924365997314], [0.09882069379091263, 0.014871560037136078, 0.005077258683741093, 
0.0014827846316620708, 0.005620975513011217, 0.0024449406191706657, 0.07368315756320953, 0.06950978189706802, 0.0017206794582307339, 0.00039900749106891453, 0.0006052122334949672, 0.0005968212499283254, 0.004762541502714157, 0.0232950821518898, 0.2500154376029968], [0.001020739320665598, 0.001402992638759315, 0.0006185534875839949, 0.0003395593084860593, 0.0013021298218518496, 0.0008022591937333345, 0.003452729433774948, 0.0026675688568502665, 0.0021077031269669533, 0.0008018113439902663, 0.0017594166565686464, 0.0005115982494316995, 0.0007778447470627725, 0.0008368113776668906, 0.13888627290725708]], [[0.04622220993041992, 0.12740419805049896, 0.05372706800699234, 0.5582705140113831, 0.030120277777314186, 0.3703221380710602, 0.020304178819060326, 0.3357560634613037, 0.11819478869438171, 0.0765489861369133, 0.09261158853769302, 0.03858334198594093, 0.13079233467578888, 0.0447748564183712, 0.11706516146659851], [0.0919138491153717, 0.05798470228910446, 0.02827676385641098, 0.34965166449546814, 0.05504997447133064, 0.1526506543159485, 0.09941896051168442, 0.4367760419845581, 0.061004042625427246, 0.5390062928199768, 0.28723591566085815, 0.15840129554271698, 0.2018149495124817, 0.11561664938926697, 0.1249081939458847], [0.032068803906440735, 0.0549696609377861, 0.018587671220302582, 0.2202640324831009, 0.0011182812741026282, 0.03810814768075943, 0.027008401229977608, 0.3763306438922882, 0.11146998405456543, 0.16719762980937958, 0.13283231854438782, 0.014421377331018448, 0.07254088670015335, 0.007401765324175358, 0.20662666857242584], [0.10753453522920609, 0.479284405708313, 0.009764611721038818, 0.0431443527340889, 0.0008862981921993196, 0.03188035264611244, 0.00600279588252306, 0.43093177676200867, 0.08460848033428192, 0.18502341210842133, 0.038902610540390015, 0.030237559229135513, 0.1820157915353775, 0.03367093205451965, 0.14427724480628967], [0.013928310945630074, 0.032752107828855515, 0.0024797581136226654, 0.10617181658744812, 0.0002726189268287271, 0.011333486996591091, 0.005626056343317032, 0.05421115458011627, 0.020341530442237854, 0.0548044852912426, 0.027503041550517082, 0.005752534605562687, 0.033552803099155426, 0.008454940281808376, 0.388910174369812], [0.15046736598014832, 0.296213299036026, 0.044096194207668304, 0.05168119817972183, 0.02727358601987362, 0.04717152938246727, 0.0016543868696317077, 0.035376399755477905, 0.027143586426973343, 0.0870317667722702, 0.05812281742691994, 0.06705813109874725, 0.3147181272506714, 0.39039844274520874, 0.23394177854061127], [0.14644725620746613, 0.5605929493904114, 0.11812092363834381, 0.5902084112167358, 0.021858595311641693, 0.10718227922916412, 0.007383488584309816, 0.019886687397956848, 0.06570647656917572, 0.10820640623569489, 0.1357717514038086, 0.025582531467080116, 0.077891044318676, 0.061965201050043106, 0.164744034409523], [0.049012791365385056, 0.35138410329818726, 0.26388463377952576, 0.7301797866821289, 0.014552393928170204, 0.24720129370689392, 0.0041521950624883175, 0.07795857638120651, 0.014070906676352024, 0.04667593538761139, 0.1480453461408615, 0.010990227572619915, 0.20039354264736176, 0.17517414689064026, 0.0717916414141655], [0.09980960935354233, 0.4834202826023102, 0.20237547159194946, 0.5161312222480774, 0.2011035680770874, 0.31254804134368896, 0.023049525916576385, 0.09284620732069016, 0.030714770779013634, 0.009841320104897022, 0.03625232353806496, 0.02249438874423504, 0.030981028452515602, 0.01249231118708849, 0.19809871912002563], [0.2242409735918045, 0.5898000001907349, 0.2996082305908203, 0.6961580514907837, 
0.3950251638889313, 0.824604332447052, 0.0551396869122982, 0.5436567068099976, 0.06683327257633209, 0.03568824753165245, 0.060814060270786285, 0.00592254800722003, 0.012778226286172867, 0.017990900203585625, 0.1082865446805954], [0.03427329286932945, 0.7018846869468689, 0.18350760638713837, 0.5559015274047852, 0.03810380771756172, 0.7226935029029846, 0.05184842646121979, 0.881024181842804, 0.06315085291862488, 0.03384441137313843, 0.014913397841155529, 0.002015632577240467, 0.008405282162129879, 0.0011906703002750874, 0.2768104076385498], [0.022437993437051773, 0.7336767315864563, 0.2893984615802765, 0.7315550446510315, 0.021726222708821297, 0.3247562646865845, 0.05117126554250717, 0.7097986340522766, 0.03149837628006935, 0.017582548782229424, 0.017906883731484413, 0.004864181391894817, 0.0014982494758442044, 0.0005988480988889933, 0.17147301137447357], [0.279982328414917, 0.427709698677063, 0.4798988997936249, 0.811837911605835, 0.5607104301452637, 0.3233453035354614, 0.03364620357751846, 0.48738226294517517, 0.20507316291332245, 0.2806957960128784, 0.20560167729854584, 0.021487781777977943, 0.0051806773990392685, 0.018182942643761635, 0.10378202050924301], [0.15081651508808136, 0.5779510736465454, 0.21354816854000092, 0.8126901984214783, 0.041816346347332, 0.5376638174057007, 0.02729017473757267, 0.45972490310668945, 0.1708957701921463, 0.17148789763450623, 0.06268936395645142, 0.0045938147231936455, 0.0036332160234451294, 0.0009066996863111854, 0.10311751067638397], [0.009540104307234287, 0.03889232128858566, 0.016071060672402382, 0.08366316556930542, 0.004574422258883715, 0.029401082545518875, 0.00834547821432352, 0.0893266350030899, 0.14732055366039276, 0.09065960347652435, 0.14173488318920135, 0.042114999145269394, 0.004022075328975916, 0.003513866104185581, 0.1347859650850296]], [[0.009570755064487457, 0.005546795669943094, 0.006825579330325127, 0.033384330570697784, 0.3769712448120117, 0.15916845202445984, 0.5290282368659973, 0.24695992469787598, 0.2377869039773941, 0.0913546234369278, 0.07570143043994904, 0.06522544473409653, 0.12397455424070358, 0.2645682692527771, 0.1787039041519165], [0.0061562443152070045, 0.040286894887685776, 0.0029807272367179394, 0.016133036464452744, 0.1151214987039566, 0.07519882172346115, 0.10128971189260483, 0.046498823910951614, 0.04111110791563988, 0.11845260113477707, 0.08915312588214874, 0.10556784272193909, 0.16933780908584595, 0.3531811535358429, 0.21578538417816162], [0.14712950587272644, 0.04435151070356369, 0.015454337000846863, 0.01427951455116272, 0.08342041075229645, 0.005383625626564026, 0.10468690097332001, 0.05861024558544159, 0.08666124939918518, 0.15304753184318542, 0.23543620109558105, 0.2374279797077179, 0.10751555860042572, 0.10399115085601807, 0.23440681397914886], [0.0859314426779747, 0.15731151401996613, 0.005385389551520348, 0.04620514437556267, 0.010708490386605263, 0.006711416877806187, 0.012445325031876564, 0.056288186460733414, 0.097142793238163, 0.07020799815654755, 0.02479076385498047, 0.0890590250492096, 0.22972674667835236, 0.034618109464645386, 0.28529092669487], [0.07441635429859161, 0.018118128180503845, 0.016377849504351616, 0.003080169903114438, 0.20936372876167297, 0.0007255859090946615, 0.03578657656908035, 0.00550744216889143, 0.1172742024064064, 0.5684130191802979, 0.3980042636394501, 0.15252694487571716, 0.10817506164312363, 0.23486874997615814, 0.2619861364364624], [0.05188249424099922, 0.0069924332201480865, 0.0009591103880666196, 0.0061192926950752735, 0.002253405749797821, 0.006572761107236147, 
0.004667140077799559, 0.11107926070690155, 0.03415685519576073, 0.010113962925970554, 0.006655086297541857, 0.010832482948899269, 0.03651394695043564, 0.040573474019765854, 0.2686486840248108], [0.08095332235097885, 0.02014574408531189, 0.011188640259206295, 0.0037319576367735863, 0.024485761299729347, 0.0018746056593954563, 0.04114176332950592, 0.034570205956697464, 0.009728988632559776, 0.07755846530199051, 0.09898480027914047, 0.0613434873521328, 0.09528356045484543, 0.1511603444814682, 0.2821846306324005], [0.04335615411400795, 0.026033984497189522, 0.03572213277220726, 0.017578190192580223, 0.05956277251243591, 0.01715734601020813, 0.011929154396057129, 0.28936532139778137, 0.0027683174703270197, 0.061091482639312744, 0.23734883964061737, 0.10397756844758987, 0.16337142884731293, 0.37352773547172546, 0.18409839272499084], [0.06077902019023895, 0.031166722998023033, 0.11759120225906372, 0.1409873068332672, 0.24215947091579437, 0.009796793572604656, 0.10265856236219406, 0.01014934666454792, 0.2757207751274109, 0.023714441806077957, 0.038815632462501526, 0.15303847193717957, 0.14991649985313416, 0.6824791431427002, 0.13190437853336334], [0.06505369395017624, 0.006089756730943918, 0.036541152745485306, 0.005829536356031895, 0.20233574509620667, 0.029401954263448715, 0.49993017315864563, 0.030510973185300827, 0.01976127363741398, 0.07993583381175995, 0.017815636470913887, 0.04079095646739006, 0.022992853075265884, 0.6425142288208008, 0.26567763090133667], [0.6054520010948181, 0.07051455229520798, 0.2702813744544983, 0.029061302542686462, 0.13962645828723907, 0.07908772677183151, 0.4563634395599365, 0.02414957620203495, 0.02722080610692501, 0.03215296193957329, 0.015534932725131512, 0.009437407366931438, 0.0218642745167017, 0.08506882190704346, 0.4000338017940521], [0.3943043351173401, 0.11258544027805328, 0.12088752537965775, 0.0732470229268074, 0.030587676912546158, 0.056065596640110016, 0.2533946633338928, 0.04020307958126068, 0.03702285513281822, 0.018525324761867523, 0.009753274731338024, 0.01584538072347641, 0.006842197384685278, 0.013304048217833042, 0.2415902465581894], [0.09087645262479782, 0.0733630359172821, 0.03259122744202614, 0.05433432757854462, 0.028730718418955803, 0.026890264824032784, 0.0992540791630745, 0.042951032519340515, 0.1659460812807083, 0.017093859612941742, 0.006921885069459677, 0.0007972968742251396, 0.010357401333749294, 0.037234287708997726, 0.1852690428495407], [0.2766205668449402, 0.06249983608722687, 0.03302843123674393, 0.08374682813882828, 0.07296875864267349, 0.016804786399006844, 0.2612326145172119, 0.06074067950248718, 0.06402052938938141, 0.021471360698342323, 0.00216249143704772, 0.001582604949362576, 0.0037338242400437593, 0.005314995069056749, 0.23526467382907867], [0.005338736344128847, 0.013486125506460667, 0.016210375353693962, 0.00714905746281147, 0.01115293800830841, 0.008639699779450893, 0.009605110622942448, 0.01017976924777031, 0.008433598093688488, 0.06244685873389244, 0.040223702788352966, 0.009117859415709972, 0.005228321999311447, 0.0028589563444256783, 0.13790398836135864]], [[0.3301994204521179, 0.08890271931886673, 0.08465498685836792, 0.06385943293571472, 0.21852104365825653, 0.02508896216750145, 0.03711355850100517, 0.034155964851379395, 0.1728704422712326, 0.06344152241945267, 0.01567375846207142, 0.047274719923734665, 0.023079151287674904, 0.06240373104810715, 0.17532315850257874], [0.08584976941347122, 0.12593986093997955, 0.03313801810145378, 0.017280908301472664, 0.17652282118797302, 0.268716037273407, 0.12116961926221848, 
0.2558431923389435, 0.04765854403376579, 0.04246087744832039, 0.0035840249620378017, 0.02463056705892086, 0.2119264155626297, 0.11800020188093185, 0.14393316209316254], [0.046346988528966904, 0.39951857924461365, 0.5525277853012085, 0.10910754650831223, 0.13167327642440796, 0.030212268233299255, 0.021472660824656487, 0.018023721873760223, 0.1298973113298416, 0.04191790521144867, 0.1535157859325409, 0.04246748238801956, 0.3158371150493622, 0.15602277219295502, 0.1064835637807846], [0.0703379437327385, 0.07535148411989212, 0.05811825022101402, 0.428435742855072, 0.07080380618572235, 0.15123498439788818, 0.3036666214466095, 0.07787945121526718, 0.48052453994750977, 0.12286645174026489, 0.04789941385388374, 0.033336445689201355, 0.030469346791505814, 0.005462532863020897, 0.08732402324676514], [0.0663379579782486, 0.03187985718250275, 0.09551261365413666, 0.0323714055120945, 0.33827176690101624, 0.1471284031867981, 0.3127540946006775, 0.02734280750155449, 0.23260797560214996, 0.02317011170089245, 0.046465177088975906, 0.0992102101445198, 0.09175661206245422, 0.13314616680145264, 0.07444406300783157], [0.034720633178949356, 0.01384154986590147, 0.012703170999884605, 0.020319687202572823, 0.10901976376771927, 0.7807050347328186, 0.03443336486816406, 0.028544975444674492, 0.061822760850191116, 0.00809338316321373, 0.007171421777456999, 0.01342758722603321, 0.09649696201086044, 0.05527613312005997, 0.10404697060585022], [0.030445659533143044, 0.041789710521698, 0.023520270362496376, 0.01782963052392006, 0.16124852001667023, 0.06983006745576859, 0.4703807234764099, 0.01895260065793991, 0.027326058596372604, 0.07994905114173889, 0.026343191042542458, 0.032219063490629196, 0.022085823118686676, 0.031095484271645546, 0.24155765771865845], [0.055046502500772476, 0.3847074508666992, 0.04798666015267372, 0.003912709187716246, 0.06840738654136658, 0.36789029836654663, 0.07226144522428513, 0.4079316258430481, 0.022340288385748863, 0.10408379882574081, 0.07774890959262848, 0.04753485694527626, 0.285355806350708, 0.16128498315811157, 0.02375940792262554], [0.03513112664222717, 0.11586778610944748, 0.03034079447388649, 0.001017131027765572, 0.04634808376431465, 0.03800477832555771, 0.03768199309706688, 0.013300161808729172, 0.14031966030597687, 0.015252463519573212, 0.053176701068878174, 0.06856708973646164, 0.13856393098831177, 0.054046642035245895, 0.2367301732301712], [0.025786809623241425, 0.06564735621213913, 0.039564721286296844, 0.0026341548655182123, 0.016324089840054512, 0.016701271757483482, 0.020613567903637886, 0.0767805427312851, 0.22950275242328644, 0.51694655418396, 0.1544727236032486, 0.1054847463965416, 0.025381706655025482, 0.05480813980102539, 0.1677880734205246], [0.012255452573299408, 0.02410232275724411, 0.08552651852369308, 0.002623841166496277, 0.010307574644684792, 0.0127415731549263, 0.021285703405737877, 0.010095748119056225, 0.06661782413721085, 0.12517453730106354, 0.7383688688278198, 0.19885332882404327, 0.07497892528772354, 0.10072800517082214, 0.06182975694537163], [0.2776626944541931, 0.046990759670734406, 0.032447993755340576, 0.015461347065865993, 0.08414210379123688, 0.04174359515309334, 0.19995476305484772, 0.013662091456353664, 0.019540153443813324, 0.048985805362463, 0.25616249442100525, 0.2484772503376007, 0.1799653023481369, 0.17696446180343628, 0.09890354424715042], [0.05504303798079491, 0.08340897411108017, 0.04799877479672432, 0.017563870176672935, 0.028545444831252098, 0.1704884171485901, 0.030681313946843147, 0.02359093725681305, 0.007767115719616413, 
0.019779905676841736, 0.03771185874938965, 0.029841119423508644, 0.28957709670066833, 0.04182300344109535, 0.12634176015853882], [0.06153338775038719, 0.02491314895451069, 0.02542346529662609, 0.0031092099379748106, 0.03241894021630287, 0.1874629557132721, 0.1358277052640915, 0.02619485929608345, 0.017582973465323448, 0.03225348889827728, 0.01329810544848442, 0.026643214747309685, 0.1614912450313568, 0.6035103797912598, 0.09545250982046127], [0.027727488428354263, 0.10283610969781876, 0.02349940501153469, 0.010801603086292744, 0.0136191351339221, 0.1518852412700653, 0.05784522369503975, 0.11107083410024643, 0.10270816832780838, 0.1666017472743988, 0.06030665338039398, 0.06198698654770851, 0.05951831862330437, 0.015173939988017082, 0.1310720145702362]]], [[[0.042950913310050964, 0.0007196685182861984, 0.027302199974656105, 0.006393556483089924, 0.09642192721366882, 0.01637418009340763, 0.0023990001063793898, 0.0024961719755083323, 0.0020593979861587286, 0.0015603104839101434, 0.03318732604384422, 0.35782966017723083, 0.0989728793501854, 0.061845745891332626, 0.203965961933136], [0.10955026745796204, 0.02388770505785942, 0.04351670667529106, 0.023162608966231346, 0.012142845429480076, 0.035775765776634216, 0.03457501530647278, 0.11992064118385315, 0.01240380760282278, 0.007506475783884525, 0.05337386205792427, 0.6535924673080444, 0.5536571145057678, 0.19680790603160858, 0.140446737408638], [0.005947283003479242, 0.0010204642312601209, 0.18009734153747559, 0.006447697523981333, 0.012463629245758057, 7.613956404384226e-05, 7.241032290039584e-05, 0.00011841111700050533, 0.0034185522235929966, 0.0034766956232488155, 0.002135018352419138, 0.005925178527832031, 0.003751354990527034, 0.0019247139571234584, 0.28479355573654175], [0.014483454637229443, 0.022866876795887947, 0.32726621627807617, 0.007662326563149691, 0.09431912004947662, 0.0004296264669392258, 0.0011131323408335447, 0.0014158609556034207, 0.018019702285528183, 0.01865016296505928, 0.0020740600302815437, 0.0029411758296191692, 0.0016890126280486584, 0.0063899424858391285, 0.12852828204631805], [0.030419446527957916, 0.058438073843717575, 0.3924228250980377, 0.035587672144174576, 0.08137891441583633, 0.010925069451332092, 0.001356365391984582, 0.0012006007600575686, 0.053269751369953156, 0.0027948038186877966, 0.04010261595249176, 0.01993635483086109, 0.004820133093744516, 0.004111820366233587, 0.21765674650669098], [0.07767480611801147, 0.006269918289035559, 0.09326869994401932, 0.6196063756942749, 0.11043263971805573, 0.052975643426179886, 0.02037718892097473, 0.0008919782703742385, 0.008360025472939014, 0.002104781800881028, 0.0179440937936306, 0.10498880594968796, 0.011864815838634968, 0.002359954407438636, 0.24602332711219788], [0.00026913435431197286, 8.159392746165395e-05, 0.007915529422461987, 0.05068095400929451, 0.6570689678192139, 0.32081079483032227, 0.05758208408951759, 0.0006442792946472764, 0.0015821922570466995, 6.469202344305813e-05, 0.003034515306353569, 0.0310077928006649, 0.025656316429376602, 0.0025228438898921013, 0.023106882348656654], [0.0005435149651020765, 0.0005490019102580845, 0.034476928412914276, 0.01287262886762619, 0.25229769945144653, 0.4536571502685547, 0.10281822830438614, 0.012222280725836754, 0.016108570620417595, 0.00031008716905489564, 0.0026372161228209734, 0.0034134499728679657, 0.0248859953135252, 0.017225822433829308, 0.02475895546376705], [0.000726195692550391, 0.00036735343746840954, 0.007114858832210302, 0.0026034389156848192, 0.01250846590846777, 0.009484091773629189, 0.0354158952832222, 
0.0016834242269396782, 0.19215336441993713, 0.007594457361847162, 0.003938279580324888, 2.8376112823025323e-05, 0.001137340790592134, 0.00011368053674232215, 0.29228782653808594], [0.0005387092242017388, 0.0003453432582318783, 0.015091696754097939, 0.06184916943311691, 0.003162123030051589, 0.014056581072509289, 0.012467358261346817, 0.009164737537503242, 0.05548334866762161, 0.008076494559645653, 0.005971547681838274, 0.001972777536138892, 0.006774900481104851, 0.001264052465558052, 0.2362799048423767], [0.0025044670328497887, 0.0023456772323697805, 0.07385681569576263, 0.006188494618982077, 0.021690815687179565, 0.0007893598522059619, 0.002135526854544878, 0.006048245821148157, 0.25190338492393494, 0.09442908316850662, 0.19532348215579987, 0.031008923426270485, 0.009561427868902683, 0.0021240306086838245, 0.21234139800071716], [0.015501828864216805, 0.0072255814447999, 0.006012998055666685, 0.008203291334211826, 0.0171041339635849, 0.001770812552422285, 0.00655776634812355, 0.002186145167797804, 0.15154685080051422, 0.5713958144187927, 0.05368567630648613, 0.051326390355825424, 0.01612916588783264, 0.0019418209558352828, 0.18746227025985718], [0.05876695737242699, 0.005032649263739586, 0.05515526235103607, 0.012789947912096977, 0.017388533800840378, 0.00580496434122324, 0.015462081879377365, 0.009339934214949608, 0.0222479198127985, 0.03960718587040901, 0.14906688034534454, 0.2817051410675049, 0.14850065112113953, 0.09505022317171097, 0.10619710385799408], [0.012425977736711502, 0.0006452641100622714, 0.00298808584921062, 0.001349467202089727, 0.014642779715359211, 0.0010115096811205149, 0.0033098396379500628, 0.00038259345456026495, 0.0035037249326705933, 0.008293021470308304, 0.03801131248474121, 0.8317341208457947, 0.018821584060788155, 0.057542454451322556, 0.011905365623533726], [0.04682805389165878, 0.01908799074590206, 0.10485747456550598, 0.060083843767642975, 0.15075230598449707, 0.029059063643217087, 0.04093548655509949, 0.03368941321969032, 0.017014725133776665, 0.011203174479305744, 0.0391479916870594, 0.24882012605667114, 0.37940239906311035, 0.12485622614622116, 0.12782400846481323]], [[0.010500228963792324, 0.7224081754684448, 0.030353030189871788, 0.00683749420568347, 0.007232841569930315, 0.018554184585809708, 0.0004432629211805761, 0.02719983458518982, 0.0006519495509564877, 0.0012597806053236127, 0.006804677192121744, 0.0011734187137335539, 0.003679303452372551, 0.010371293872594833, 0.019012004137039185], [0.0004097823693882674, 0.007568135391920805, 0.05432860180735588, 0.08570658415555954, 0.005480978172272444, 0.0009473124518990517, 0.000799189496319741, 0.0012391285272315145, 0.00044785221689380705, 0.0009745006100274622, 0.013956908136606216, 0.00011593959061428905, 0.004404959734529257, 0.0031790253706276417, 0.20507724583148956], [0.022728245705366135, 0.0194535069167614, 0.024020839482545853, 0.023168254643678665, 0.45748311281204224, 0.5855799913406372, 0.21754446625709534, 0.1001717820763588, 0.0221620611846447, 0.0033511894289404154, 0.03508710116147995, 0.20201759040355682, 0.2973189353942871, 0.04947788640856743, 0.0494859553873539], [0.010499863885343075, 0.004784405697137117, 0.0035181313287466764, 0.007238015066832304, 0.4155227243900299, 0.8333501219749451, 0.07475034892559052, 0.20445603132247925, 0.005854693241417408, 0.001852003508247435, 0.02841898612678051, 0.243921160697937, 0.10275343060493469, 0.13816815614700317, 0.07406751066446304], [0.00768234534189105, 0.012151399627327919, 0.0006104251369833946, 0.0018971813842654228, 
0.08389636874198914, 0.7291921973228455, 0.2573831081390381, 0.13359335064888, 0.0011000150116160512, 0.0005446228897199035, 0.036390628665685654, 0.06110000237822533, 0.1527252048254013, 0.14593005180358887, 0.05624886974692345], [0.0037335127126425505, 0.004452059045433998, 0.00018280810036230832, 0.016856878995895386, 0.0016014263965189457, 0.05306785926222801, 0.5318921208381653, 0.2889253497123718, 0.0004385874199215323, 0.007465890143066645, 0.0005691659171134233, 0.008836256340146065, 0.00793292187154293, 0.0033322598319500685, 0.1706118881702423], [0.00023320072796195745, 0.0486629419028759, 0.0005405444535426795, 0.005952970590442419, 0.0009982762858271599, 0.004001363180577755, 0.009125707671046257, 0.6945337057113647, 0.006549985148012638, 0.007807720452547073, 0.003924727905541658, 0.004149672109633684, 0.003537258366122842, 0.001676861196756363, 0.11541670560836792], [0.0021667596884071827, 0.0005287157837301493, 0.009149480611085892, 0.024324318394064903, 0.0018866003956645727, 0.0003624066011980176, 0.0004668526817113161, 0.0064473398961126804, 0.0217228215187788, 0.0031395854894071817, 0.0052951243706047535, 0.004629157949239016, 0.003511544084176421, 0.0017145106103271246, 0.2705381214618683], [0.0036477160174399614, 0.018601393327116966, 0.00400471780449152, 0.016223786398768425, 0.015442389994859695, 0.030637366697192192, 0.04816145822405815, 0.009263478219509125, 0.08580432087182999, 0.07024423778057098, 0.17587034404277802, 0.2670482397079468, 0.10741393268108368, 0.11723090708255768, 0.197556272149086], [0.0067135002464056015, 0.005400336813181639, 0.002429268090054393, 0.0005210567032918334, 0.0009090648964047432, 0.056922394782304764, 0.006305574905127287, 0.02051912061870098, 0.009087055921554565, 0.0029723523184657097, 0.5903128385543823, 0.4623943269252777, 0.5148944854736328, 0.10147220641374588, 0.10177940130233765], [0.016283290460705757, 0.004236595239490271, 0.00024049253261182457, 0.00013081195356789976, 0.004825976211577654, 0.03370611369609833, 0.030076656490564346, 0.006495397537946701, 0.015585500746965408, 0.0006116450531408191, 0.009124655276536942, 0.7220618724822998, 0.5160555839538574, 0.16948190331459045, 0.04205150157213211], [0.04056651145219803, 0.05449386313557625, 0.007923644036054611, 0.00034379694261588156, 0.0072999089024960995, 0.005707062315195799, 0.018278487026691437, 0.00924981851130724, 0.0004191468469798565, 0.0015566512010991573, 0.0019580996595323086, 0.06517467647790909, 0.4938390851020813, 0.1360015720129013, 0.14540629088878632], [0.02595147117972374, 0.0358305424451828, 0.021912503987550735, 0.01559682097285986, 0.0029425774700939655, 0.008820675313472748, 0.259022980928421, 0.24083182215690613, 0.0008326273527927697, 0.009937180206179619, 0.008380424231290817, 0.0008840225636959076, 0.11912944912910461, 0.5976794362068176, 0.17433230578899384], [0.024576334282755852, 0.01131413970142603, 0.0036256120074540377, 0.007047882303595543, 0.015460383147001266, 0.007877636700868607, 0.035456594079732895, 0.017273712903261185, 0.0020541276317089796, 0.005268692504614592, 0.003138576401397586, 0.0058868261985480785, 0.09279357641935349, 0.45485755801200867, 0.2460370808839798], [0.02016485668718815, 0.03839857131242752, 0.0345035195350647, 0.005700604524463415, 0.03111962042748928, 0.03698137030005455, 0.056010663509368896, 0.043163470923900604, 0.004449993837624788, 0.000997284660115838, 0.006035848520696163, 0.0027079761493951082, 0.009604639373719692, 0.02099894918501377, 0.13394789397716522]], [[0.11855445802211761, 
0.018203705549240112, 0.014699782244861126, 0.005997231230139732, 0.012317956425249577, 0.005482070613652468, 0.020501872524619102, 0.04173066467046738, 0.028033137321472168, 0.007907108403742313, 0.13633504509925842, 0.11779958009719849, 0.02402079664170742, 0.08686818182468414, 0.19919154047966003], [0.015789268538355827, 0.07802969217300415, 0.024552250280976295, 0.007203033193945885, 0.015197299420833588, 0.0086579704657197, 0.005928180180490017, 0.015956610441207886, 0.019966211169958115, 0.002508557867258787, 0.048071712255477905, 0.0452260747551918, 0.027286410331726074, 0.034357864409685135, 0.19209280610084534], [0.7560696601867676, 0.09646204113960266, 0.24264514446258545, 0.03150765225291252, 0.15196740627288818, 0.027980739250779152, 0.025865402072668076, 0.037002913653850555, 0.02429634891450405, 0.014392002485692501, 0.11331582069396973, 0.2883520722389221, 0.24113057553768158, 0.5529852509498596, 0.13967400789260864], [0.6593953371047974, 0.14735713601112366, 0.007992099039256573, 0.03938791900873184, 0.047611087560653687, 0.002478603972122073, 0.00756214139983058, 0.01120123453438282, 0.017771385610103607, 0.011085578240454197, 0.01766165718436241, 0.07185176759958267, 0.01590064913034439, 0.05699647217988968, 0.22524236142635345], [0.8214750289916992, 0.5506035089492798, 0.04117008298635483, 0.00517136137932539, 0.5628769993782043, 0.013714980334043503, 0.018153639510273933, 0.019494647160172462, 0.02796507254242897, 0.003693098435178399, 0.052905939519405365, 0.024033749476075172, 0.017759546637535095, 0.154443621635437, 0.2181331366300583], [0.47579920291900635, 0.4996025860309601, 0.02201933227479458, 0.032786499708890915, 0.003352785250172019, 0.402157723903656, 0.028392860665917397, 0.03425603359937668, 0.017302367836236954, 0.007774383760988712, 0.03628184646368027, 0.015436487272381783, 0.09682580828666687, 0.09163853526115417, 0.1807471215724945], [0.6324970722198486, 0.5132108926773071, 0.14723047614097595, 0.10531618446111679, 0.14770705997943878, 0.01965152472257614, 0.16446776688098907, 0.023718399927020073, 0.014144167304039001, 0.003392518265172839, 0.03989372402429581, 0.048702552914619446, 0.05385157838463783, 0.06003360450267792, 0.2021118402481079], [0.2804942727088928, 0.4447323679924011, 0.40719398856163025, 0.15280602872371674, 0.5485119223594666, 0.006256175693124533, 0.005905789323151112, 0.0894087627530098, 0.014159541577100754, 0.0037697115913033485, 0.08780182898044586, 0.04568948596715927, 0.08344046771526337, 0.08309336006641388, 0.1791403889656067], [0.38668709993362427, 0.3767029941082001, 0.5765653848648071, 0.14457443356513977, 0.830109715461731, 0.558448314666748, 0.2105703204870224, 0.015437009744346142, 0.0802588015794754, 0.0035789015237241983, 0.009509528055787086, 0.011719968169927597, 0.04601259157061577, 0.015442220494151115, 0.02989899180829525], [0.42374563217163086, 0.4557475447654724, 0.5995064973831177, 0.22240440547466278, 0.8298278450965881, 0.26192477345466614, 0.5618261694908142, 0.2755923569202423, 0.03321446478366852, 0.014314521104097366, 0.030895033851265907, 0.0061126528307795525, 0.0033166268840432167, 0.0021476708352565765, 0.12580153346061707], [0.4742293357849121, 0.32335561513900757, 0.5931060910224915, 0.0772920548915863, 0.3757626712322235, 0.211185023188591, 0.42018893361091614, 0.37329575419425964, 0.26276469230651855, 0.012583179399371147, 0.3317490220069885, 0.002885210793465376, 0.011435287073254585, 0.00757939275354147, 0.1435183733701706], [0.21439705789089203, 0.17853425443172455, 0.32548797130584717, 
0.06489395350217819, 0.64824378490448, 0.1159982681274414, 0.19616922736167908, 0.27417391538619995, 0.6047332286834717, 0.1810707151889801, 0.034782104194164276, 0.10310898721218109, 0.0316632017493248, 0.025309519842267036, 0.09833981841802597], [0.19860051572322845, 0.10174965113401413, 0.08606765419244766, 0.053267233073711395, 0.11251617968082428, 0.2378872036933899, 0.16651752591133118, 0.1490997076034546, 0.4605393707752228, 0.18029887974262238, 0.1883857697248459, 0.007075145840644836, 0.25310245156288147, 0.08171047270298004, 0.15088772773742676], [0.2976968586444855, 0.21286718547344208, 0.04716610535979271, 0.025928588584065437, 0.1317281424999237, 0.12927810847759247, 0.2939497232437134, 0.23276808857917786, 0.5986261367797852, 0.05386120826005936, 0.05668044835329056, 0.025143466889858246, 0.007965278811752796, 0.03647890314459801, 0.16275253891944885], [0.34472423791885376, 0.33325105905532837, 0.5841152667999268, 0.8456752300262451, 0.4377557933330536, 0.4159393310546875, 0.33224907517433167, 0.1488359123468399, 0.2203720510005951, 0.7425854206085205, 0.7086009383201599, 0.5293036699295044, 0.2777566909790039, 0.22530661523342133, 0.09936152398586273]], [[0.3582096993923187, 0.12323450297117233, 0.41414904594421387, 0.12697191536426544, 0.2567327618598938, 0.12921607494354248, 0.303745299577713, 0.26060354709625244, 0.2067556530237198, 0.0739586353302002, 0.038356974720954895, 0.018690073862671852, 0.019858568906784058, 0.03828525170683861, 0.09448481351137161], [0.034560851752758026, 0.06147807836532593, 0.09719342738389969, 0.03090484067797661, 0.05040246620774269, 0.10769589245319366, 0.28225648403167725, 0.03959896042943001, 0.04561477154493332, 0.015998149290680885, 0.010396423749625683, 0.0027313604950904846, 0.02088637463748455, 0.02540828473865986, 0.1729334592819214], [0.031599532812833786, 0.03154325857758522, 0.01938430592417717, 0.10300880670547485, 0.07719798386096954, 0.3211115002632141, 0.5488157868385315, 0.6110779047012329, 0.03511836752295494, 0.03874386474490166, 0.02549627609550953, 0.08684590458869934, 0.1071673184633255, 0.10855282843112946, 0.09071482717990875], [0.05947110056877136, 0.046990834176540375, 0.001917339744977653, 0.019972380250692368, 0.14856000244617462, 0.10937333106994629, 0.7613639235496521, 0.43800127506256104, 0.038890283554792404, 0.0702563002705574, 0.052807219326496124, 0.20175476372241974, 0.09827514737844467, 0.19838720560073853, 0.1799801141023636], [0.010548654943704605, 0.056933727115392685, 0.0004277318366803229, 0.0005220972234383225, 0.03427216783165932, 0.15697234869003296, 0.44382861256599426, 0.28639304637908936, 0.1278306096792221, 0.0589531809091568, 0.07240739464759827, 0.21584689617156982, 0.623681902885437, 0.39177897572517395, 0.053747572004795074], [0.012333033606410027, 0.11936485022306442, 0.0015480549773201346, 0.05167163908481598, 0.003915506415069103, 0.05033823475241661, 0.18770258128643036, 0.5247471332550049, 0.13492631912231445, 0.0999734029173851, 0.02801361307501793, 0.04943297058343887, 0.067798912525177, 0.02220618724822998, 0.04863249137997627], [0.023225123062729836, 0.03936318680644035, 0.0654693990945816, 0.0780135840177536, 0.03190883249044418, 0.007237496320158243, 0.3230750560760498, 0.11266676336526871, 0.3152024447917938, 0.12503208220005035, 0.08215073496103287, 0.20814812183380127, 0.054794978350400925, 0.014369799755513668, 0.31165388226509094], [0.021642545238137245, 0.05032852664589882, 0.10916808992624283, 0.14173567295074463, 0.025796422734856606, 0.002176823327317834, 
0.004212724044919014, 0.11230720579624176, 0.2761599123477936, 0.18545517325401306, 0.30032697319984436, 0.18456220626831055, 0.1202857494354248, 0.02383211813867092, 0.22383396327495575], [0.014165909960865974, 0.030938388779759407, 0.019327908754348755, 0.025021186098456383, 0.018685894086956978, 0.058899857103824615, 0.05705944076180458, 0.013411193154752254, 0.27564239501953125, 0.14192135632038116, 0.4484158754348755, 0.49174171686172485, 0.42328834533691406, 0.5148258805274963, 0.024227913469076157], [0.030343737453222275, 0.035576362162828445, 0.011198173277080059, 0.0029289661906659603, 0.004656192846596241, 0.19044476747512817, 0.14425727725028992, 0.14593322575092316, 0.02429576776921749, 0.03922351822257042, 0.03158531337976456, 0.3954472541809082, 0.18761666119098663, 0.829915463924408, 0.05755764618515968], [0.07378673553466797, 0.08269044756889343, 0.008506381884217262, 0.004565858747810125, 0.0033621611073613167, 0.47163471579551697, 0.3437289595603943, 0.16293375194072723, 0.0103234788402915, 0.006828381214290857, 0.025515833869576454, 0.13491219282150269, 0.23380780220031738, 0.7675665616989136, 0.06853343546390533], [0.19539110362529755, 0.20751968026161194, 0.012997383251786232, 0.004634191282093525, 0.004486567340791225, 0.10301963984966278, 0.2361651211977005, 0.10510270297527313, 0.007245894055813551, 0.02498149685561657, 0.005201807711273432, 0.12586773931980133, 0.2985144853591919, 0.741521954536438, 0.061252206563949585], [0.3654796779155731, 0.656768798828125, 0.02389511466026306, 0.057929087430238724, 0.025417884811758995, 0.2985052168369293, 0.29244741797447205, 0.15614598989486694, 0.02199239283800125, 0.027919312939047813, 0.024499662220478058, 0.0015409317566081882, 0.18344998359680176, 0.05587974563241005, 0.11099682748317719], [0.24996283650398254, 0.30432745814323425, 0.08651068061590195, 0.27794384956359863, 0.10948572307825089, 0.32318809628486633, 0.40224379301071167, 0.24700750410556793, 0.016620514914393425, 0.03902489319443703, 0.01563531532883644, 0.008603462018072605, 0.029363060370087624, 0.20380347967147827, 0.1635625809431076], [0.08184575289487839, 0.05559774115681648, 0.012900986708700657, 0.004766350146383047, 0.02465618960559368, 0.0658264234662056, 0.16982027888298035, 0.09995799511671066, 0.1946410834789276, 0.03345171734690666, 0.026332948356866837, 0.010880211368203163, 0.01684177853167057, 0.011932285502552986, 0.13059602677822113]], [[0.06378140300512314, 0.013955923728644848, 0.058693334460258484, 0.014864355325698853, 0.02882157638669014, 0.02533077634871006, 0.013877282850444317, 0.02919653430581093, 0.029733512550592422, 0.010929838754236698, 0.2184230536222458, 0.404588907957077, 0.5044611692428589, 0.4171900451183319, 0.18600669503211975], [0.09787620604038239, 0.3741878271102905, 0.1718531847000122, 0.22170154750347137, 0.11211875081062317, 0.06884550303220749, 0.023903023451566696, 0.00765330670401454, 0.043831951916217804, 0.04742401838302612, 0.08705892413854599, 0.19904442131519318, 0.1439688503742218, 0.08975595235824585, 0.124632827937603], [0.024405136704444885, 0.006321595516055822, 0.03571266308426857, 0.0050111510790884495, 0.01807553507387638, 6.11300565651618e-05, 0.0022184934932738543, 0.002461126074194908, 0.00987271312624216, 0.03944821655750275, 0.02587837167084217, 0.009154303930699825, 0.018459370359778404, 0.07083768397569656, 0.2838045060634613], [0.02829434722661972, 0.05303699150681496, 0.03342747688293457, 0.026768406853079796, 0.06776657700538635, 0.0015663451049476862, 0.0066550131887197495, 
0.028257621452212334, 0.02201445959508419, 0.024995435029268265, 0.014314326457679272, 0.019762825220823288, 0.019060753285884857, 0.09995586425065994, 0.2721303105354309], [0.011709636077284813, 0.13082386553287506, 0.3091292977333069, 0.012390679679811, 0.06598176062107086, 0.0025066242087632418, 0.008877930231392384, 0.03396160528063774, 0.01681593246757984, 0.01466491911560297, 0.12272557616233826, 0.010357965715229511, 0.009066522121429443, 0.12291242927312851, 0.3062548041343689], [0.05738264322280884, 0.12342102825641632, 0.7862259149551392, 0.20355252921581268, 0.007363088894635439, 0.0717976987361908, 0.032159313559532166, 0.018495721742510796, 0.0034321516286581755, 0.0013732254737988114, 0.006710591726005077, 0.0023603499867022038, 0.007563347462564707, 0.05948156490921974, 0.12037239223718643], [0.015277753584086895, 0.006394209805876017, 0.6686000227928162, 0.29117655754089355, 0.06745831668376923, 0.2462725043296814, 0.06154515966773033, 0.015117062255740166, 0.004134421236813068, 0.0023558081593364477, 0.08952713012695312, 0.04650713875889778, 0.023702487349510193, 0.01321239210665226, 0.09701406955718994], [0.028385812416672707, 0.012191490270197392, 0.27066752314567566, 0.18411272764205933, 0.040896836668252945, 0.48173367977142334, 0.02650352008640766, 0.07071101665496826, 0.007758310064673424, 0.001958101289346814, 0.01839292421936989, 0.023066602647304535, 0.03435399383306503, 0.03657263144850731, 0.029525745660066605], [0.04876675456762314, 0.422792911529541, 0.22041767835617065, 0.2559551000595093, 0.08884847164154053, 0.01230597123503685, 0.025672338902950287, 0.003895203350111842, 0.022659877315163612, 0.0043840305879712105, 0.007982935756444931, 0.010924039408564568, 0.06971067935228348, 0.0061518345028162, 0.21563398838043213], [0.015657104551792145, 0.02366352081298828, 0.07373688369989395, 0.10379613190889359, 0.013535204343497753, 0.07323776930570602, 0.048540983349084854, 0.008235346525907516, 0.01638718694448471, 0.012322558090090752, 0.073370561003685, 0.03809332847595215, 0.021602218970656395, 0.003090204205363989, 0.23272792994976044], [0.018198516219854355, 0.011175387538969517, 0.02189311571419239, 0.012938260100781918, 0.09454065561294556, 0.010837653651833534, 0.04214898869395256, 0.03231353685259819, 0.2788335978984833, 0.02807164192199707, 0.0381515808403492, 0.013884211890399456, 0.014051362872123718, 0.00934662390500307, 0.24102351069450378], [0.01114112138748169, 0.11382883787155151, 0.017900465056300163, 0.008639826439321041, 0.024639632552862167, 0.020821422338485718, 0.022935912013053894, 0.04321465268731117, 0.055257730185985565, 0.0561254657804966, 0.006350866984575987, 0.034159135073423386, 0.001170721254311502, 0.00040716465446166694, 0.2438717484474182], [0.01806582696735859, 0.014762195758521557, 0.02654433250427246, 0.025726040825247765, 0.03240499645471573, 0.020733002573251724, 0.04244884103536606, 0.02047092467546463, 0.13412125408649445, 0.512605607509613, 0.5156171321868896, 0.023306455463171005, 0.0489252470433712, 0.06594526767730713, 0.173824280500412], [0.018763704225420952, 0.010509289801120758, 0.06387435644865036, 0.02487548068165779, 0.10975509881973267, 0.01984621025621891, 0.06460897624492645, 0.03137337416410446, 0.1802622228860855, 0.7354047894477844, 0.7864400148391724, 0.1003832221031189, 0.007522855885326862, 0.14785504341125488, 0.08187610656023026], [0.02117479033768177, 0.061044495552778244, 0.02157888375222683, 0.021421663463115692, 0.04618487507104874, 0.05167240649461746, 0.01054168026894331, 
0.009977741166949272, 0.0295058935880661, 0.008349624462425709, 0.02268156036734581, 0.026699911803007126, 0.020697196945548058, 0.013632250018417835, 0.13365623354911804]], [[4.754594192490913e-05, 2.1380438752771624e-08, 2.918067565360616e-08, 2.8621201408896013e-08, 2.499384379461844e-07, 0.0002631827082950622, 5.21495513439163e-10, 2.490414274802788e-08, 1.4592379216082918e-07, 4.660217989282955e-09, 1.3478041793746343e-08, 1.530838318331007e-07, 4.6195887989597395e-05, 8.429636181972455e-06, 0.2157532423734665], [0.6645432114601135, 0.00044607618474401534, 8.70102576300269e-06, 1.056492124007491e-06, 4.43653931370136e-07, 3.5252294310339494e-06, 0.013106754049658775, 0.0008970960625447333, 5.719662112824153e-07, 3.2791810156140855e-08, 1.0544068729245737e-08, 3.57371057191358e-08, 0.00012361648259684443, 0.0008665899513289332, 0.00011794524471042678], [5.6636022236489225e-06, 0.771808385848999, 0.2603715658187866, 7.618767995154485e-05, 2.6443340175319463e-05, 1.448297037853763e-08, 1.7459943213449236e-10, 0.0005545829189941287, 1.3129211993145873e-06, 0.0003596498572733253, 1.3187416243454209e-06, 1.2532552773336647e-08, 5.7067543821176514e-05, 1.4676837054139469e-05, 8.822963764032465e-07], [7.866851170490463e-09, 0.0015575109282508492, 0.5911858677864075, 0.005255529191344976, 0.00012560673349071294, 1.2381517144888221e-08, 1.3975322635251253e-12, 4.631081083061872e-06, 1.8297629367225454e-06, 0.043241821229457855, 0.00025465109501965344, 1.6550380621538352e-07, 1.5873881693551084e-06, 1.3629888329091955e-08, 2.2046858560997862e-08], [1.6020940130090366e-10, 3.2446525892737554e-06, 0.1964423805475235, 0.9067507982254028, 4.244087540428154e-05, 3.027215825568419e-05, 6.154020626425449e-10, 3.570748958736658e-07, 2.493328743469192e-08, 1.327106815551815e-07, 5.116170723340474e-05, 7.67620722541551e-09, 6.538175512105227e-07, 1.6885725528936746e-07, 1.9495971503857845e-09], [4.057985947270026e-09, 1.6926858803500977e-09, 0.00014235911658033729, 0.0026504932902753353, 0.8634750843048096, 1.9555229300749488e-05, 1.294085109293519e-06, 2.6649362894204387e-07, 3.0507638082433175e-10, 5.069419550807197e-09, 1.108148239836737e-07, 1.7377595213474706e-05, 9.726352800498717e-06, 1.823265733946755e-06, 5.869507617717318e-07], [1.9094309466893833e-12, 2.4682887027685507e-13, 6.382604444965523e-10, 6.302604549368596e-10, 1.4692274817207363e-05, 0.3734012544155121, 3.483030241113738e-06, 1.1820202594492457e-08, 1.9522692351614523e-09, 1.394072303342181e-13, 1.7670450172535546e-11, 1.716609077107023e-09, 3.7749509829154704e-06, 2.593782255644328e-06, 3.855710133393586e-07], [8.508453674949124e-08, 1.863478038544031e-09, 1.257351167627263e-10, 5.331373190142763e-11, 3.337832410466035e-08, 1.777973557182122e-05, 0.8244234323501587, 8.755041926633567e-05, 1.7572835409040977e-09, 1.3142270258170718e-11, 7.735358035533546e-13, 4.927841815161038e-11, 5.296478775562719e-07, 0.000259329448454082, 1.8429471282388477e-08], [1.2582735964272729e-09, 2.3675827378610848e-06, 5.770066309196409e-07, 5.0431950282536775e-11, 2.6034334410507398e-11, 1.7287857190240175e-07, 9.084228622668888e-06, 0.8877476453781128, 0.0008898449596017599, 7.2106473680833e-08, 1.9634756043274137e-08, 4.930736808433922e-13, 3.217972377456135e-08, 1.2906410120194778e-05, 9.568290160189008e-09], [2.8039692789860737e-09, 1.3000158105569426e-06, 4.493769978353157e-08, 2.493898698663344e-10, 7.932443764346875e-12, 1.7288407150317653e-08, 2.642636942606913e-10, 3.576151357265189e-05, 0.8324669599533081, 5.240505197434686e-05, 
8.11301958947297e-07, 9.422521651814009e-10, 4.6924657937097436e-08, 2.8963553333483105e-08, 6.33739318800508e-08], [2.873091320410026e-09, 7.32139524188824e-05, 1.393846559949452e-05, 2.2707215663331226e-08, 3.602095333121724e-08, 7.893682235637911e-12, 1.2799745258921386e-13, 1.2971109697446082e-07, 4.534097752184607e-05, 0.7187873721122742, 0.0028858170844614506, 4.860597982769832e-06, 3.316463335067965e-06, 6.64895694058032e-08, 4.189383506769673e-09], [3.5802516507033033e-10, 3.3775189312024168e-09, 1.689890041234321e-06, 2.72409181434341e-07, 2.3650377656281307e-08, 3.1582386705863996e-10, 4.773196676235644e-14, 6.179980832632381e-11, 1.0790042637154329e-07, 0.00019566719129215926, 0.8666706681251526, 0.00033315850305370986, 7.101260734998505e-07, 3.226231015673875e-08, 6.780910499770698e-09], [7.800644574729176e-09, 1.700809604265885e-09, 9.215954577257435e-08, 4.046364665555302e-07, 0.00011374137102393433, 5.132134901941754e-06, 5.991689921991394e-10, 9.107053305923429e-11, 5.105777606262407e-11, 3.3974476565390432e-09, 3.904122058884241e-05, 0.65162193775177, 0.00035754009149968624, 6.446759653044865e-05, 8.575011065659055e-07], [5.410449865905775e-10, 1.9016622998524468e-10, 1.651180719930423e-10, 9.184660809680167e-10, 4.749936000081334e-09, 6.8993631430203095e-06, 9.186856830822876e-10, 1.2120262259107673e-11, 1.0679299241797557e-12, 7.136916383397585e-13, 1.9098522763272285e-10, 9.612936082703527e-06, 0.7662882208824158, 0.00778515450656414, 3.0943773765557125e-08], [0.0058370670303702354, 0.00017831011791713536, 6.727457275701454e-06, 4.542615897662472e-06, 0.0008248149533756077, 0.04996809363365173, 0.010534689761698246, 8.931134652812034e-05, 2.4081384708551923e-07, 6.080232139993313e-08, 3.077615701840841e-06, 0.00041306819184683263, 0.062034472823143005, 0.37576472759246826, 0.1323644071817398]], [[0.278582364320755, 0.012074317783117294, 0.4035726487636566, 0.05818924307823181, 0.5308449864387512, 0.7759386301040649, 0.6032847166061401, 0.04120228812098503, 0.6623223423957825, 0.4034832715988159, 0.2541539669036865, 0.023309720680117607, 0.054716046899557114, 0.3570294678211212, 0.004749305546283722], [0.03977029398083687, 0.025161603465676308, 0.4579423666000366, 0.3708552420139313, 0.767479419708252, 0.5835962295532227, 0.5609359741210938, 0.14304085075855255, 0.8166816234588623, 0.848468542098999, 0.5771627426147461, 0.07112090289592743, 0.12416274100542068, 0.618628740310669, 0.06885465234518051], [0.004083612468093634, 0.0006101519684307277, 0.12011494487524033, 0.04229450225830078, 0.17203551530838013, 0.013333754613995552, 0.01874622330069542, 0.021773431450128555, 0.8914079666137695, 0.25239333510398865, 0.2674473226070404, 0.0986163467168808, 0.10968483239412308, 0.05420238524675369, 0.020816486328840256], [0.00974054355174303, 0.009372939355671406, 0.016473596915602684, 0.12944141030311584, 0.06805374473333359, 0.019993484020233154, 0.038472987711429596, 0.21791628003120422, 0.8550615310668945, 0.2646826505661011, 0.7350810766220093, 0.17277619242668152, 0.36265626549720764, 0.3741258382797241, 0.06228891760110855], [0.0007183643756434321, 0.0016902177594602108, 0.0015671673463657498, 0.000663107552099973, 0.015286565758287907, 0.000776923552621156, 0.007700319401919842, 0.11482121050357819, 0.7658083438873291, 0.5443719625473022, 0.22170989215373993, 0.027013972401618958, 0.025342080742120743, 0.049981117248535156, 0.0074298488907516], [0.011776593513786793, 0.00668947771191597, 0.05204532667994499, 0.026732588186860085, 0.007738037500530481, 
0.19347773492336273, 0.08661007881164551, 0.02065080776810646, 0.8265263438224792, 0.77967369556427, 0.8155033588409424, 0.7568296194076538, 0.6889008283615112, 0.7797287106513977, 0.04647013917565346], [0.03701920434832573, 0.011276619508862495, 0.026248518377542496, 0.01771446317434311, 0.046063318848609924, 0.020064320415258408, 0.23005641996860504, 0.032302577048540115, 0.6365551948547363, 0.6746889352798462, 0.6497765183448792, 0.5260909199714661, 0.6955898404121399, 0.8770567178726196, 0.04424796253442764], [0.3583561182022095, 0.034818924963474274, 0.1010005921125412, 0.08171684294939041, 0.0902533084154129, 0.0273053590208292, 0.029195906594395638, 0.10516665875911713, 0.5163984894752502, 0.7107389569282532, 0.5390304327011108, 0.6552954316139221, 0.648922324180603, 0.8148984909057617, 0.13771982491016388], [0.04790134355425835, 0.016352321952581406, 0.004838719964027405, 0.039540428668260574, 0.004614146891981363, 0.10033231228590012, 0.05411757901310921, 0.012187371961772442, 0.25466611981391907, 0.4822390675544739, 0.22996564209461212, 0.2013523131608963, 0.3018202781677246, 0.325538694858551, 0.10763657093048096], [0.18817435204982758, 0.007200991734862328, 0.0915139690041542, 0.00800582580268383, 0.007660675328224897, 0.27090781927108765, 0.08786749839782715, 0.014442713931202888, 0.017244037240743637, 0.8212726712226868, 0.22018176317214966, 0.05063365772366524, 0.16457810997962952, 0.059498634189367294, 0.11578860878944397], [0.1423795521259308, 0.008703344501554966, 0.2208349108695984, 0.02527845837175846, 0.027401143684983253, 0.09980836510658264, 0.024800043553113937, 0.009310302324593067, 0.11915526539087296, 0.048824433237314224, 0.23738479614257812, 0.04641610383987427, 0.11649724096059799, 0.03864651918411255, 0.200869619846344], [0.19247660040855408, 0.028833042830228806, 0.1872357279062271, 0.03232081979513168, 0.031028537079691887, 0.3644941747188568, 0.11239293217658997, 0.0803447812795639, 0.13423573970794678, 0.07468846440315247, 0.009079186245799065, 0.19545331597328186, 0.09625646471977234, 0.07526607811450958, 0.1802312582731247], [0.1263553649187088, 0.009648445062339306, 0.47829046845436096, 0.22347994148731232, 0.2749265432357788, 0.23197446763515472, 0.05249631777405739, 0.01617230661213398, 0.3326357305049896, 0.1497221142053604, 0.04782721772789955, 0.011572148650884628, 0.1354474574327469, 0.0791783407330513, 0.15636207163333893], [0.166306734085083, 0.04561271890997887, 0.48400574922561646, 0.31743937730789185, 0.4171416163444519, 0.1806352734565735, 0.04328177124261856, 0.022486848756670952, 0.1779668778181076, 0.03957689553499222, 0.009708160534501076, 0.01422630064189434, 0.013467496261000633, 0.06257133930921555, 0.22838094830513], [0.39438390731811523, 0.20185884833335876, 0.19486168026924133, 0.053202297538518906, 0.29429352283477783, 0.31667405366897583, 0.3313867747783661, 0.37864530086517334, 0.4971301257610321, 0.178373321890831, 0.16689708828926086, 0.16029801964759827, 0.22925321757793427, 0.22496484220027924, 0.11296840012073517]], [[0.12737327814102173, 0.10940374433994293, 0.05123003572225571, 0.7807462215423584, 0.0676276683807373, 0.02884089946746826, 0.05574861168861389, 0.5975708961486816, 0.07044392824172974, 0.5009010434150696, 0.31273892521858215, 0.07660850137472153, 0.29424503445625305, 0.028401609510183334, 0.07683643698692322], [0.03750006482005119, 0.429240882396698, 0.15060469508171082, 0.2604650557041168, 0.037177786231040955, 0.1944778561592102, 0.07849539071321487, 0.6716934442520142, 0.06105323135852814, 
0.07711976766586304, 0.20997941493988037, 0.028168758377432823, 0.12550987303256989, 0.030995607376098633, 0.0958443135023117], [0.15516091883182526, 0.07278051972389221, 0.11765316128730774, 0.7884857058525085, 0.11075033247470856, 0.051856692880392075, 0.18673725426197052, 0.2268398553133011, 0.013722711242735386, 0.6478350162506104, 0.5306386947631836, 0.3090885877609253, 0.22243055701255798, 0.16200464963912964, 0.13070979714393616], [0.21811531484127045, 0.7140333652496338, 0.018219277262687683, 0.764274001121521, 0.15804116427898407, 0.03280843421816826, 0.11008237302303314, 0.09874711185693741, 0.0423860140144825, 0.5652360320091248, 0.14938808977603912, 0.2869919240474701, 0.39966318011283875, 0.1259765923023224, 0.0577625073492527], [0.11744663864374161, 0.1893559694290161, 0.05823011323809624, 0.03701714053750038, 0.15626470744609833, 0.08588159829378128, 0.26269999146461487, 0.41053518652915955, 0.007210245821624994, 0.3749772906303406, 0.4537068009376526, 0.6417111158370972, 0.1666039228439331, 0.13084180653095245, 0.14052902162075043], [0.3613002598285675, 0.240200012922287, 0.044567547738552094, 0.04614294692873955, 0.0021214759908616543, 0.17616558074951172, 0.11286458373069763, 0.11203286051750183, 0.009014172479510307, 0.10163455456495285, 0.0949772298336029, 0.06209810823202133, 0.11910365521907806, 0.04125094786286354, 0.1871420443058014], [0.2914785146713257, 0.381010502576828, 0.08399549126625061, 0.4511452913284302, 0.048780620098114014, 0.008560722693800926, 0.1541443020105362, 0.12101723253726959, 0.02183164842426777, 0.18665823340415955, 0.13169258832931519, 0.13539372384548187, 0.14286382496356964, 0.031125182285904884, 0.2064482420682907], [0.3084108829498291, 0.4568510055541992, 0.068343386054039, 0.40243175625801086, 0.04035715013742447, 0.028490515425801277, 0.006473515648394823, 0.6036491990089417, 0.14769236743450165, 0.09462843090295792, 0.04651549458503723, 0.08334364742040634, 0.08459941297769547, 0.022403797134757042, 0.13448290526866913], [0.4981050491333008, 0.13424238562583923, 0.16773013770580292, 0.5160816311836243, 0.029790958389639854, 0.22989192605018616, 0.568993866443634, 0.056374672800302505, 0.08792523294687271, 0.2900378406047821, 0.12431738525629044, 0.017185388132929802, 0.05061684548854828, 0.020683959126472473, 0.13275840878486633], [0.33482691645622253, 0.4720645546913147, 0.20652346312999725, 0.6004944443702698, 0.1402488797903061, 0.13250590860843658, 0.13873517513275146, 0.5260767936706543, 0.01182119082659483, 0.1017654612660408, 0.047682080417871475, 0.04534589499235153, 0.10121697187423706, 0.0026118881069123745, 0.13006491959095], [0.27261805534362793, 0.5674196481704712, 0.08154824376106262, 0.8736060261726379, 0.4724165201187134, 0.1720387041568756, 0.13692085444927216, 0.40960294008255005, 0.06138879805803299, 0.0898643285036087, 0.15986473858356476, 0.04882661625742912, 0.09858791530132294, 0.005254920106381178, 0.09166211634874344], [0.33052578568458557, 0.40956470370292664, 0.44244009256362915, 0.8809638619422913, 0.26719745993614197, 0.38818857073783875, 0.40750059485435486, 0.4857279658317566, 0.04656125605106354, 0.08998580276966095, 0.02227160707116127, 0.42457664012908936, 0.06242617964744568, 0.019552020356059074, 0.08343644440174103], [0.20678018033504486, 0.17620769143104553, 0.3081345558166504, 0.6112105250358582, 0.534289538860321, 0.19626931846141815, 0.17160479724407196, 0.4079393148422241, 0.027630727738142014, 0.07990976423025131, 0.0661839172244072, 0.022294294089078903, 0.11108729988336563, 
0.024492109194397926, 0.12739884853363037], [0.2302674651145935, 0.4147239625453949, 0.3118293881416321, 0.3454154133796692, 0.20178626477718353, 0.3381562829017639, 0.1571493148803711, 0.4487079083919525, 0.02096635475754738, 0.11857040971517563, 0.09038619697093964, 0.01401298213750124, 0.06377796083688736, 0.029106009751558304, 0.10548537224531174], [0.0850413590669632, 0.2905830442905426, 0.047175440937280655, 0.009145522490143776, 0.014412813819944859, 0.03387918695807457, 0.04852135106921196, 0.2856408655643463, 0.03688584640622139, 0.02503933012485504, 0.030300520360469818, 0.020876996219158173, 0.004409631714224815, 0.0025441893376410007, 0.1292814165353775]]], [[[0.00039591442327946424, 4.3682277464540675e-05, 1.7448855942348018e-05, 4.859234650211874e-06, 1.1413659422032651e-06, 1.0625568393152207e-05, 1.9137923246148603e-08, 5.615326585939329e-07, 5.487099315359956e-06, 2.1910665282121045e-07, 2.532970881929941e-07, 7.501878940274764e-07, 1.657212578720646e-06, 1.0862070212169783e-06, 0.18717002868652344], [0.6005652546882629, 0.09179380536079407, 0.017407523468136787, 0.009556752629578114, 0.001977206440642476, 0.02417689561843872, 0.001285116421058774, 0.0015866898465901613, 0.0007265046588145196, 0.0008927723974920809, 0.008914382196962833, 0.0016361800953745842, 0.1313493698835373, 0.006872364319860935, 0.052507203072309494], [0.00456381356343627, 0.8302816152572632, 0.11558636277914047, 0.010320104658603668, 0.00024428890901617706, 9.749805758474395e-05, 7.678471774852369e-06, 0.0030259541235864162, 3.9539358112961054e-05, 7.781033491482958e-05, 0.0003711417084559798, 9.1652873379644e-06, 0.0006458949064835906, 0.00023330377007368952, 0.00865631178021431], [0.0011992683866992593, 0.008629350923001766, 0.6251504421234131, 0.015135818161070347, 0.001978840446099639, 0.000745285302400589, 5.708653407054953e-05, 0.00043479635496623814, 0.0005481417756527662, 0.0016355890547856688, 0.0002436988870613277, 5.164237336430233e-06, 4.976044510840438e-05, 3.400173591217026e-05, 0.00024351823958568275], [0.006698334589600563, 0.006304558366537094, 0.34660738706588745, 0.7217360138893127, 0.06864907592535019, 0.0027605369687080383, 0.0006927561480551958, 0.00010832686530193314, 0.0002978279662784189, 0.007849807851016521, 0.0023863124661147594, 8.873132173903286e-06, 2.0952818886144087e-05, 4.62439584225649e-06, 0.000559441396035254], [0.0006861803703941405, 0.036174044013023376, 0.4128260612487793, 0.09897080808877945, 0.6376775503158569, 0.19431157410144806, 0.0007082957308739424, 0.05852581560611725, 0.0003548018867149949, 0.00026609119959175587, 0.0006576658925041556, 0.0007862210040912032, 0.027955245226621628, 0.006076914723962545, 0.0010327105410397053], [1.7293352305713938e-09, 1.4693102912133327e-06, 3.0192679332685657e-05, 1.0152590220968705e-05, 0.005660888738930225, 0.5108420252799988, 0.0005426039570011199, 0.0008102089632302523, 3.168102921335958e-06, 6.12798771726375e-08, 2.5310575324510864e-07, 5.088519174023531e-06, 0.00021843344438821077, 2.5946601454052143e-06, 2.594279294498847e-06], [7.755387923680246e-05, 3.5259185096947476e-05, 0.0012139425380155444, 0.00035162578569725156, 0.00505053298547864, 0.4696201980113983, 0.5859625339508057, 0.009771172888576984, 0.0005853781476616859, 3.0261137453635456e-06, 1.2206013707327656e-05, 2.2465645088232122e-05, 0.013555033132433891, 0.0011026648571714759, 7.656160596525297e-05], [3.390625025190275e-08, 5.7732322602532804e-05, 3.19563605444273e-06, 2.0829493507790175e-07, 5.039521965954918e-06, 0.00017657184798736125, 
0.000729007413610816, 0.8331114649772644, 0.0037640428636223078, 1.5948112377373036e-06, 5.8014775277115405e-06, 4.528372699041938e-07, 0.00020723954366985708, 0.00025866259238682687, 1.95706252270611e-06], [2.7739795882553153e-07, 2.501485141692683e-05, 4.778147285833256e-06, 3.7190903867667657e-07, 9.610201523457818e-09, 1.1292572708043735e-06, 1.2355405942798825e-07, 3.984562499681488e-05, 0.6202287077903748, 0.0002610959345474839, 0.00017016819037962705, 9.242457963409834e-07, 2.799387630147976e-06, 3.2760857493485673e-07, 1.038134087139042e-06], [1.2775580216839444e-05, 0.0010497755138203502, 6.564326031366363e-05, 4.172011358605232e-06, 4.676745959386608e-07, 3.6489967669695034e-07, 8.09820832614605e-08, 5.78842673348845e-06, 0.0015375507064163685, 0.7445451617240906, 0.026254041120409966, 8.213486580643803e-05, 1.1159563655382954e-05, 3.0355058697750792e-05, 2.6809220798895694e-06], [1.3068409316474572e-05, 0.00010775982809718698, 0.00024633039720356464, 3.3576598070794716e-05, 4.556980275083333e-05, 1.0597023702985098e-07, 9.86238859468358e-08, 2.1072135041322326e-06, 0.0013669389300048351, 0.5916010141372681, 0.4436832368373871, 0.0013138806680217385, 4.73510908705066e-06, 6.116700660641072e-06, 2.961193558803643e-06], [4.950460061081685e-05, 0.0011237917933613062, 0.017257435247302055, 0.0011414129985496402, 0.025087760761380196, 0.00036485170130617917, 3.213326635886915e-05, 5.293267349770758e-06, 4.4593522034119815e-05, 0.001686945091933012, 0.00823597889393568, 0.8047888278961182, 0.014818375930190086, 0.006413417402654886, 2.281446177221369e-05], [0.000998240546323359, 0.1768636256456375, 0.0663335844874382, 0.02716292440891266, 0.03197554498910904, 0.001621886040084064, 0.00012482069723773748, 7.020989141892642e-05, 0.08078382909297943, 0.1701173484325409, 0.08303841948509216, 0.5506232380867004, 0.06293172389268875, 0.03332124650478363, 0.0033543158788233995], [0.021357281133532524, 0.0013016555458307266, 0.00422634556889534, 0.00104909623041749, 0.012563652358949184, 0.07401228696107864, 0.007866809144616127, 0.0024991247337311506, 0.0011657974682748318, 5.4276370065053925e-06, 0.0024851916823536158, 0.0298884529620409, 0.4522511959075928, 0.2182934284210205, 0.14462554454803467]], [[0.03249572962522507, 0.01680905371904373, 0.01368993055075407, 0.005182549823075533, 0.0014828554121777415, 0.0045396420173347, 0.0006250899168662727, 0.01684878207743168, 0.005824672989547253, 0.007428525947034359, 0.009805276058614254, 0.003550198394805193, 0.007900950498878956, 0.009690256789326668, 0.18011362850666046], [0.11159665137529373, 0.10346578061580658, 0.414338618516922, 0.08694489300251007, 0.2136271595954895, 0.10264819115400314, 0.023593097925186157, 0.0335584320127964, 0.0575689822435379, 0.06024341657757759, 0.1307218372821808, 0.13801440596580505, 0.1756829470396042, 0.14866231381893158, 0.1320090889930725], [0.1948547214269638, 0.038279034197330475, 0.07790879160165787, 0.04177340865135193, 0.004589961376041174, 0.0009778933599591255, 0.002051346004009247, 0.006739486940205097, 0.009280361235141754, 0.0007642557029612362, 0.0012637393083423376, 0.00433916924521327, 0.00236115837469697, 0.008354227058589458, 0.2381056696176529], [0.07799407094717026, 0.10201291739940643, 0.037178199738264084, 0.03369736298918724, 0.035083431750535965, 0.003606606973335147, 0.0009816481033340096, 0.010917055420577526, 0.019562464207410812, 0.004011118784546852, 0.0029224867466837168, 0.0011325542582198977, 0.00486336974427104, 0.007979645393788815, 0.2784355580806732], [0.11467810720205307, 
0.4025481641292572, 0.4041208028793335, 0.13489782810211182, 0.520052433013916, 0.013409112580120564, 0.0056337821297347546, 0.04408307746052742, 0.06485209614038467, 0.0023049998562783003, 0.0050890627317130566, 0.004091872368007898, 0.006159461103379726, 0.0242836382240057, 0.07189745455980301], [0.1516697108745575, 0.2241159826517105, 0.5074643492698669, 0.3874017000198364, 0.2519407868385315, 0.032381314784288406, 0.015091626904904842, 0.006451433524489403, 0.09749187529087067, 0.007731522433459759, 0.00912014115601778, 0.029297562316060066, 0.05765664204955101, 0.059585090726614, 0.023513801395893097], [0.01171550527215004, 0.10137046873569489, 0.870269238948822, 0.5154522657394409, 0.6626715660095215, 0.08923148363828659, 0.047533176839351654, 0.015608957968652248, 0.11948943883180618, 0.008091520518064499, 0.008133050054311752, 0.012773845344781876, 0.051611315459012985, 0.01502595841884613, 0.00961183663457632], [0.01722140610218048, 0.036506716161966324, 0.7147647738456726, 0.20675897598266602, 0.8291797637939453, 0.31030455231666565, 0.11803850531578064, 0.03327609598636627, 0.4245462417602539, 0.013293992727994919, 0.008976193144917488, 0.054750751703977585, 0.1754072904586792, 0.04528210312128067, 0.012820743955671787], [0.01982569508254528, 0.15988187491893768, 0.12975367903709412, 0.1326102912425995, 0.6299260258674622, 0.28946900367736816, 0.34108322858810425, 0.11804011464118958, 0.16752222180366516, 0.01777276024222374, 0.0021109972149133682, 0.0006076672580093145, 0.0030632279813289642, 0.00126487051602453, 0.1333881914615631], [0.005461913999170065, 0.03046412020921707, 0.008993657305836678, 0.005659051705151796, 0.004244270734488964, 0.02773391455411911, 0.042834386229515076, 0.13534432649612427, 0.27069228887557983, 0.04962563514709473, 0.015227400697767735, 0.0016283531440421939, 0.0014969720505177975, 0.0027089377399533987, 0.17130999267101288], [0.01672529987990856, 0.10339350253343582, 0.009749630466103554, 0.02030925825238228, 0.017326004803180695, 0.03957638517022133, 0.030999623239040375, 0.10308665037155151, 0.5008098483085632, 0.09767498821020126, 0.09780175238847733, 0.025981366634368896, 0.003117683343589306, 0.00962040200829506, 0.1932818591594696], [0.026731140911579132, 0.05838552862405777, 0.07611822336912155, 0.05796685442328453, 0.5904980301856995, 0.010755263268947601, 0.0517524816095829, 0.055663660168647766, 0.29654714465141296, 0.1307908594608307, 0.1585402488708496, 0.03976760059595108, 0.07525579631328583, 0.16488958895206451, 0.1035238653421402], [0.024593327194452286, 0.12932555377483368, 0.13568159937858582, 0.16021546721458435, 0.3227141201496124, 0.029398979619145393, 0.01611196994781494, 0.016819216310977936, 0.2378186136484146, 0.5602607131004333, 0.7615779638290405, 0.08417549729347229, 0.10783103108406067, 0.2013072967529297, 0.06744378060102463], [0.018169090151786804, 0.26050350069999695, 0.078061044216156, 0.023439347743988037, 0.05254700779914856, 0.0014709478709846735, 0.002907117595896125, 0.009980114176869392, 0.1381266713142395, 0.5626046061515808, 0.5405392646789551, 0.11909772455692291, 0.008021530695259571, 0.06359856575727463, 0.009888176806271076], [0.08646434545516968, 0.009946366772055626, 0.041608210653066635, 0.009163393639028072, 0.12723588943481445, 0.17822976410388947, 0.01437843032181263, 0.0057503837160766125, 0.008486853912472725, 0.002935740165412426, 0.019836073741316795, 0.07525425404310226, 0.02854214422404766, 0.0230310820043087, 0.1518138200044632]], [[0.7472922801971436, 0.06644202023744583, 
0.12477048486471176, 0.07691145688295364, 0.17426471412181854, 0.17453429102897644, 0.8713244795799255, 0.22852616012096405, 0.7413471937179565, 0.5253387689590454, 0.16250024735927582, 0.19445888698101044, 0.10716042667627335, 0.2310180366039276, 0.05536508187651634], [0.13811203837394714, 0.40626850724220276, 0.2430061399936676, 0.22277961671352386, 0.18414726853370667, 0.21574343740940094, 0.8225958943367004, 0.5822084546089172, 0.41659367084503174, 0.35776287317276, 0.4909748136997223, 0.39181941747665405, 0.34554892778396606, 0.6003718972206116, 0.043436333537101746], [0.03130434453487396, 0.0024298657663166523, 0.43690061569213867, 0.5043830275535583, 0.07530603557825089, 0.015139158815145493, 0.03498073294758797, 0.012510559521615505, 0.6034607291221619, 0.7801509499549866, 0.8402397036552429, 0.5008089542388916, 0.17657218873500824, 0.11879491806030273, 0.05205746740102768], [0.09661327302455902, 0.049034956842660904, 0.05331439897418022, 0.7222777009010315, 0.25703296065330505, 0.020087046548724174, 0.06235986202955246, 0.0651831179857254, 0.32113927602767944, 0.5460676550865173, 0.7442458271980286, 0.5571728348731995, 0.08091285824775696, 0.059992171823978424, 0.029936296865344048], [0.00972762517631054, 0.007879518903791904, 0.02767527848482132, 0.019306808710098267, 0.22303025424480438, 0.007516835816204548, 0.007440114859491587, 0.022099999710917473, 0.29848337173461914, 0.9075287580490112, 0.5192471742630005, 0.8959035873413086, 0.055479276925325394, 0.04288056865334511, 0.021558567881584167], [0.03836950287222862, 0.05839527025818825, 0.005887853913009167, 0.08494037389755249, 0.012977076694369316, 0.5726994872093201, 0.09935679286718369, 0.13719113171100616, 0.448569655418396, 0.5218547582626343, 0.13800226151943207, 0.1732572466135025, 0.4354798197746277, 0.4542965292930603, 0.12337890267372131], [0.17566490173339844, 0.03925755247473717, 0.01956782303750515, 0.04187121242284775, 0.02149910107254982, 0.049183186143636703, 0.5663522481918335, 0.045388396829366684, 0.45039302110671997, 0.19015204906463623, 0.22913624346256256, 0.10953018814325333, 0.21400360763072968, 0.572381854057312, 0.1667298972606659], [0.2136794924736023, 0.20810233056545258, 0.08830246329307556, 0.27903637290000916, 0.02317022904753685, 0.10591837763786316, 0.15087167918682098, 0.5299598574638367, 0.3452024757862091, 0.15965056419372559, 0.2765912711620331, 0.516273021697998, 0.2846863567829132, 0.3888777792453766, 0.0719258189201355], [0.07398565858602524, 0.04620325192809105, 0.3374384939670563, 0.19415578246116638, 0.025615269318223, 0.010194968432188034, 0.018451105803251266, 0.0005573831731453538, 0.5073301196098328, 0.25312942266464233, 0.15244188904762268, 0.143111914396286, 0.051979612559080124, 0.04884689673781395, 0.12363318353891373], [0.5805832147598267, 0.09438126534223557, 0.24455930292606354, 0.06023820489645004, 0.03943831846117973, 0.021930387243628502, 0.026398053392767906, 0.012488989159464836, 0.011794325895607471, 0.767930269241333, 0.4412824809551239, 0.07896611094474792, 0.01228941697627306, 0.018458310514688492, 0.10866446793079376], [0.1145540103316307, 0.05171298235654831, 0.7072227597236633, 0.4839639961719513, 0.11294537037611008, 0.06211492419242859, 0.021921994164586067, 0.0025394419208168983, 0.0033554628025740385, 0.07357389479875565, 0.7795555591583252, 0.05686911940574646, 0.022035235539078712, 0.034172482788562775, 0.07262071967124939], [0.08121224492788315, 0.025126218795776367, 0.4891066551208496, 0.29065003991127014, 0.20622830092906952, 0.36699986457824707, 
0.07864820212125778, 0.014422299340367317, 0.016684990376234055, 0.0649130716919899, 0.07936163991689682, 0.6605017185211182, 0.18783104419708252, 0.08294262737035751, 0.03477967903017998], [0.0700722336769104, 0.1311686784029007, 0.5332850813865662, 0.1558467000722885, 0.36321985721588135, 0.7912644743919373, 0.32202765345573425, 0.1934671401977539, 0.031114375218749046, 0.09986341744661331, 0.08630139380693436, 0.055017780512571335, 0.44781896471977234, 0.42446693778038025, 0.1060790941119194], [0.08875010907649994, 0.06247853487730026, 0.4616371989250183, 0.12711729109287262, 0.3074216842651367, 0.19363558292388916, 0.2020244151353836, 0.0779867023229599, 0.019831692799925804, 0.03570472076535225, 0.07392378151416779, 0.04282142594456673, 0.0921483263373375, 0.3143211603164673, 0.22281906008720398], [0.5682113766670227, 0.1249876543879509, 0.7342633008956909, 0.902918815612793, 0.7035764455795288, 0.3718622326850891, 0.6157594919204712, 0.15625660121440887, 0.8438207507133484, 0.9341241121292114, 0.8159937858581543, 0.6624717712402344, 0.3264457583427429, 0.5970154404640198, 0.003644895739853382]], [[0.0183254461735487, 0.00659788167104125, 0.046570390462875366, 0.04327844828367233, 0.10241857916116714, 0.5407979488372803, 0.0026681027375161648, 0.15349310636520386, 0.0016508381813764572, 0.010916458442807198, 0.036675866693258286, 0.15769276022911072, 0.4073828458786011, 0.04228133708238602, 0.15622197091579437], [0.07985992729663849, 0.06383417546749115, 0.024972105398774147, 0.18746882677078247, 0.11770728975534439, 0.13333363831043243, 0.006719768047332764, 0.04288880154490471, 0.001412510173395276, 0.058754052966833115, 0.14280158281326294, 0.13529875874519348, 0.08268098533153534, 0.02367851696908474, 0.1494951695203781], [0.01403640117496252, 0.014278309419751167, 0.1034439280629158, 0.022417087107896805, 0.10706920921802521, 0.018271848559379578, 0.046350300312042236, 0.04233889281749725, 0.037542134523391724, 0.0005760823260061443, 0.004724643658846617, 0.233056902885437, 0.2574465572834015, 0.1892177164554596, 0.21611936390399933], [0.032590243965387344, 0.14464972913265228, 0.1993260532617569, 0.12327495217323303, 0.27639931440353394, 0.011173157021403313, 0.012838426046073437, 0.0802190750837326, 0.0400678850710392, 0.013469994999468327, 0.025247203186154366, 0.30583158135414124, 0.6397863626480103, 0.258308470249176, 0.08317234367132187], [0.007401467300951481, 0.04209339618682861, 0.1104009672999382, 0.04737341031432152, 0.06253770738840103, 0.0023836863692849874, 0.05026397854089737, 0.01439946424216032, 0.006556188687682152, 0.001721409265883267, 0.01908556930720806, 0.022761031985282898, 0.01600046642124653, 0.22344018518924713, 0.2855986952781677], [0.00031611474696546793, 0.010241325944662094, 0.005327185150235891, 0.007503898814320564, 0.009216651320457458, 0.08986854553222656, 0.0022410263773053885, 0.04830501973628998, 0.013246790505945683, 0.0036830154713243246, 0.001605262397788465, 0.004246865399181843, 0.005818811245262623, 0.00778583250939846, 0.2319662719964981], [0.00028042105259373784, 0.004604758229106665, 0.008834331296384335, 0.010530425235629082, 0.04934454336762428, 0.3239482641220093, 0.02964387647807598, 0.041019540280103683, 0.028070107102394104, 0.002580034313723445, 0.0034616885241121054, 0.006594499107450247, 0.07731658220291138, 0.01784621551632881, 0.10414844751358032], [0.002352550160139799, 0.00811008270829916, 0.007519579492509365, 0.09616736322641373, 0.00784054771065712, 0.06404154002666473, 0.025837063789367676, 0.06720300018787384, 
0.008001329377293587, 0.016075177118182182, 0.0036620565224438906, 0.031110821291804314, 0.1529460847377777, 0.03003939613699913, 0.19531111419200897], [0.014062762260437012, 0.03979215770959854, 0.0070105125196278095, 0.010145032778382301, 0.023933248594403267, 0.08613994717597961, 0.027301009744405746, 0.007488427218049765, 0.04610109701752663, 0.00706111453473568, 0.005716769024729729, 0.008516461588442326, 0.04168170318007469, 0.004054774064570665, 0.3198099434375763], [0.0027477010153234005, 0.009237049147486687, 0.005884162615984678, 0.004349177703261375, 0.039300523698329926, 0.06504905968904495, 0.005921225529164076, 0.05048412084579468, 0.004538795445114374, 0.019958311691880226, 0.08035917580127716, 0.1339075267314911, 0.45191076397895813, 0.1108468547463417, 0.15996994078159332], [0.0004566281568259001, 0.0044615683145821095, 0.008062957786023617, 0.0003266451822128147, 0.032452184706926346, 0.004190187435597181, 0.0009983428753912449, 0.0015420016134157777, 0.025539150461554527, 0.0009114624699577689, 0.001308016013354063, 0.11249691247940063, 0.5262115597724915, 0.16036535799503326, 0.02284345217049122], [0.006384413689374924, 0.006966868881136179, 0.013256898149847984, 0.008146845735609531, 0.005910678766667843, 0.005924733821302652, 0.0029809526167809963, 0.004338744096457958, 0.0021091948729008436, 0.02691148780286312, 0.09123647958040237, 0.0904775932431221, 0.10420377552509308, 0.019918829202651978, 0.21981710195541382], [0.004395737312734127, 0.0342060811817646, 0.08344801515340805, 0.012639162130653858, 0.07537969946861267, 0.00383414002135396, 0.007808698806911707, 0.007516762241721153, 0.0023650380317121744, 0.055798787623643875, 0.025632014498114586, 0.040716953575611115, 0.16482838988304138, 0.13848447799682617, 0.17180821299552917], [0.0016022673808038235, 0.013307235203683376, 0.012306403368711472, 0.0029055906925350428, 0.06092625483870506, 0.01653674617409706, 0.008309547789394855, 0.00395687622949481, 0.002493055537343025, 0.0038927635177969933, 0.009680269286036491, 0.23031921684741974, 0.35693949460983276, 0.1708209365606308, 0.050492819398641586], [0.009627100080251694, 0.006502249743789434, 0.0023533182684332132, 0.0021814347710460424, 0.007286426145583391, 0.024909881874918938, 0.01453662570565939, 0.010449647903442383, 0.0028000103775411844, 0.001988302916288376, 0.001580765936523676, 0.013102496974170208, 0.001836722600273788, 0.0008430163725279272, 0.15720587968826294]], [[0.060514166951179504, 0.09119007736444473, 0.5136731863021851, 0.024349171668291092, 0.41056114435195923, 0.043175265192985535, 0.016160618513822556, 0.12711943686008453, 0.029147693887352943, 0.01592664048075676, 0.04504424333572388, 0.03736018016934395, 0.026280265301465988, 0.042564861476421356, 0.13562467694282532], [0.009338664822280407, 0.09596994519233704, 0.12376897037029266, 0.01794583536684513, 0.059337858110666275, 0.04990454390645027, 0.003890786785632372, 0.07171432673931122, 0.0057785604149103165, 0.005389686673879623, 0.009663187898695469, 0.014342015609145164, 0.020640142261981964, 0.04060304909944534, 0.16408833861351013], [0.07689530402421951, 0.027863014489412308, 0.15549975633621216, 0.2693096697330475, 0.73520827293396, 0.03749871999025345, 0.3640631139278412, 0.14002074301242828, 0.16656053066253662, 0.02643253095448017, 0.0061660525389015675, 0.054253485053777695, 0.14240022003650665, 0.14975441992282867, 0.13701564073562622], [0.21953634917736053, 0.22122228145599365, 0.04846278205513954, 0.07968296110630035, 0.3619323670864105, 0.03181222453713417, 
0.6669740080833435, 0.3975786566734314, 0.11174946278333664, 0.15518029034137726, 0.004886193200945854, 0.010736972093582153, 0.07725195586681366, 0.09191425889730453, 0.1523013859987259], [0.0740056112408638, 0.054083533585071564, 0.027193741872906685, 0.014972379431128502, 0.04523617774248123, 0.012482533231377602, 0.4212614595890045, 0.25695085525512695, 0.3699147403240204, 0.013461914844810963, 0.08041262626647949, 0.015268572606146336, 0.627507209777832, 0.13811761140823364, 0.19850368797779083], [0.029503263533115387, 0.09333665668964386, 0.016309864819049835, 0.1364656686782837, 0.03873518481850624, 0.019083604216575623, 0.758955180644989, 0.6250144243240356, 0.10551930963993073, 0.0059091635048389435, 0.001959211425855756, 0.004587537609040737, 0.0029548059683293104, 0.011073557659983635, 0.10497581213712692], [0.0038599083200097084, 0.03815716505050659, 0.004112291149795055, 0.0037336996756494045, 0.02896580658853054, 0.003606554586440325, 0.2724342346191406, 0.5795999765396118, 0.041377726942300797, 0.01812332309782505, 0.006642999593168497, 0.006629596464335918, 0.018780261278152466, 0.00801254715770483, 0.11063171178102493], [0.023342538625001907, 0.1589166522026062, 0.01254882663488388, 0.01894153468310833, 0.04743911698460579, 0.015340029262006283, 0.06989605724811554, 0.22605817019939423, 0.016811540350317955, 0.014681086875498295, 0.0061398339457809925, 0.02630683407187462, 0.032653048634529114, 0.05358496680855751, 0.18197578191757202], [0.01728241890668869, 0.12100599706172943, 0.003952578641474247, 0.038103699684143066, 0.00803869217634201, 0.017839567735791206, 0.040644098073244095, 0.014622771181166172, 0.07288665324449539, 0.4550913870334625, 0.18886235356330872, 0.2150641530752182, 0.487347275018692, 0.42817094922065735, 0.12942945957183838], [0.011775199323892593, 0.1349712610244751, 0.005470172502100468, 0.003098055487498641, 0.028361253440380096, 0.03303566575050354, 0.007174484897404909, 0.015601159073412418, 0.006606224924325943, 0.08859884738922119, 0.18040567636489868, 0.31761303544044495, 0.2462366670370102, 0.4818485677242279, 0.12394269555807114], [0.05270439758896828, 0.1637289971113205, 0.009510326199233532, 0.008013473823666573, 0.14090411365032196, 0.011389089748263359, 0.013123652897775173, 0.023534703999757767, 0.009078129194676876, 0.02855684608221054, 0.026650836691260338, 0.39132389426231384, 0.16291603446006775, 0.25967708230018616, 0.10212607681751251], [0.19571052491664886, 0.10246216505765915, 0.02142595686018467, 0.012254489585757256, 0.00365867605432868, 0.007110960781574249, 0.020346596837043762, 0.03192196041345596, 0.00833944883197546, 0.07423693686723709, 0.09786227345466614, 0.08075869083404541, 0.1330210417509079, 0.26891645789146423, 0.17930860817432404], [0.11616674810647964, 0.175978422164917, 0.00425378605723381, 0.017427049577236176, 0.011484457179903984, 0.030517226085066795, 0.08637198060750961, 0.1500588357448578, 0.0009573447750881314, 0.044167183339595795, 0.005869577638804913, 0.0011607500491663814, 0.014711305499076843, 0.027834221720695496, 0.18594378232955933], [0.11675343662500381, 0.17556257545948029, 0.016423039138317108, 0.02097608894109726, 0.06606884300708771, 0.06371303647756577, 0.09760221093893051, 0.2481643557548523, 0.0015754855703562498, 0.03009907715022564, 0.03618617355823517, 0.012020162306725979, 0.17486301064491272, 0.22630257904529572, 0.2108311653137207], [0.004961065016686916, 0.011551961302757263, 0.006318831816315651, 0.002851473866030574, 0.003461753251031041, 0.011111320927739143, 
0.004611799493432045, 0.004697122145444155, 0.0026004482060670853, 0.0010426584631204605, 0.0060967751778662205, 0.01239971723407507, 0.004622939508408308, 0.002610035240650177, 0.15716104209423065]], [[0.027552247047424316, 0.013821233063936234, 0.004237555433064699, 0.0007387229125015438, 0.0009859473211690784, 0.001997306477278471, 0.002160864183679223, 0.009250090457499027, 0.0009738927474245429, 0.0009403586154803634, 0.003406830132007599, 0.0010056114988401532, 0.008306043222546577, 0.06191018968820572, 0.18169914186000824], [0.0056476471945643425, 0.0617278628051281, 0.026225095614790916, 0.009516767226159573, 0.019543437287211418, 0.011766157113015652, 0.0015307252760976553, 0.004000868182629347, 0.006223553325980902, 0.02180931344628334, 0.02397397719323635, 0.025289250537753105, 0.01872297003865242, 0.05591608211398125, 0.17309869825839996], [0.5742589831352234, 0.02769068442285061, 0.03131784498691559, 0.008496972732245922, 0.005279624368995428, 0.0009009581408463418, 0.013010378926992416, 0.009255914948880672, 0.08095329999923706, 0.0017015798948705196, 0.0027918636333197355, 0.01474103331565857, 0.07241056859493256, 0.2960302531719208, 0.1991364061832428], [0.3870091140270233, 0.24428580701351166, 0.004871743265539408, 0.01251932606101036, 0.004600874613970518, 0.007045479491353035, 0.011942178010940552, 0.06100638955831528, 0.06223933771252632, 0.00421120086684823, 0.0017708303639665246, 0.010406754910945892, 0.016386834904551506, 0.038040366023778915, 0.25559180974960327], [0.6136646866798401, 0.2692064642906189, 0.043582458049058914, 0.00652115186676383, 0.05291604623198509, 0.006654517259448767, 0.03398957848548889, 0.03886384516954422, 0.13169772922992706, 0.002106831641867757, 0.005907678045332432, 0.01888049766421318, 0.04876947030425072, 0.2226717472076416, 0.22327177226543427], [0.685612678527832, 0.0861489400267601, 0.03236214071512222, 0.16196951270103455, 0.03394145518541336, 0.05551951378583908, 0.027528556063771248, 0.06770895421504974, 0.19389298558235168, 0.03780713677406311, 0.0038191182538866997, 0.05989958345890045, 0.13479465246200562, 0.24111053347587585, 0.15613426268100739], [0.6876600384712219, 0.0606975182890892, 0.05783677101135254, 0.05387236177921295, 0.11914167553186417, 0.004756046459078789, 0.031782086938619614, 0.011465699411928654, 0.1448838710784912, 0.09538520872592926, 0.007872258313000202, 0.033316925168037415, 0.09786565601825714, 0.08940181881189346, 0.23629719018936157], [0.5363585352897644, 0.11579979956150055, 0.10718797892332077, 0.21453110873699188, 0.030864767730236053, 0.026318436488509178, 0.03807519003748894, 0.12262200564146042, 0.08015674352645874, 0.06537020206451416, 0.004594390746206045, 0.015254726633429527, 0.06485987454652786, 0.039039257913827896, 0.16586215794086456], [0.6220377087593079, 0.17304541170597076, 0.23731492459774017, 0.32412996888160706, 0.2203587144613266, 0.09306959062814713, 0.2822628319263458, 0.008407875895500183, 0.14113475382328033, 0.022416740655899048, 0.005183607805520296, 0.0005837879725731909, 0.00799399521201849, 0.006284625735133886, 0.12005029618740082], [0.18509520590305328, 0.21334251761436462, 0.12845394015312195, 0.3693835139274597, 0.41559898853302, 0.19613976776599884, 0.7053389549255371, 0.3886314332485199, 0.06599769741296768, 0.04325481504201889, 0.029052795842289925, 0.001557054347358644, 0.0018087843200191855, 0.0036887156311422586, 0.18107539415359497], [0.612794041633606, 0.24153079092502594, 0.076973557472229, 0.17341682314872742, 0.06242084503173828, 0.2242424041032791, 
0.8304246068000793, 0.5655775666236877, 0.4262824058532715, 0.00936043355613947, 0.03881426528096199, 0.0046007027849555016, 0.005786797031760216, 0.020520325750112534, 0.226027712225914], [0.21637925505638123, 0.22487440705299377, 0.19202512502670288, 0.3957260847091675, 0.15970049798488617, 0.16693006455898285, 0.3690066933631897, 0.5193001627922058, 0.6459834575653076, 0.047006867825984955, 0.06868032366037369, 0.043628890067338943, 0.02405296452343464, 0.05333276465535164, 0.08607933670282364], [0.5923737287521362, 0.3536633849143982, 0.08390633016824722, 0.2980528473854065, 0.042989592999219894, 0.026934657245874405, 0.1647067815065384, 0.1620720773935318, 0.6647022366523743, 0.13678880035877228, 0.10115252435207367, 0.012052871286869049, 0.2444845736026764, 0.1799331158399582, 0.10357851535081863], [0.3260110914707184, 0.10825559496879578, 0.040669191628694534, 0.08903322368860245, 0.055108752101659775, 0.014200238510966301, 0.06877616047859192, 0.07561883330345154, 0.7116665244102478, 0.08518233895301819, 0.13964912295341492, 0.01787719503045082, 0.027594367042183876, 0.0709126889705658, 0.09409899264574051], [0.26070404052734375, 0.8011303544044495, 0.17980173230171204, 0.0725909024477005, 0.12434736639261246, 0.28980228304862976, 0.3281027674674988, 0.7843722701072693, 0.12677432596683502, 0.054726697504520416, 0.13370326161384583, 0.19018130004405975, 0.1707623451948166, 0.14939220249652863, 0.07447532564401627]], [[0.10194799304008484, 0.042179130017757416, 0.27587375044822693, 0.8387316465377808, 0.3051532208919525, 0.225641667842865, 0.10655678808689117, 0.4426303505897522, 0.21958006918430328, 0.4376780688762665, 0.7421585917472839, 0.6036965250968933, 0.4420715570449829, 0.6119644045829773, 0.08460802584886551], [0.052479684352874756, 0.018692737445235252, 0.13130725920200348, 0.4463008642196655, 0.4007475674152374, 0.4465942680835724, 0.13863760232925415, 0.26287177205085754, 0.5015351176261902, 0.48749616742134094, 0.19089040160179138, 0.2783986032009125, 0.20843097567558289, 0.11412637680768967, 0.11901978403329849], [0.09998084604740143, 0.05760321766138077, 0.06884635984897614, 0.1367950737476349, 0.03696327656507492, 0.02052011340856552, 0.23966658115386963, 0.6639524102210999, 0.08913422375917435, 0.1896458864212036, 0.14239966869354248, 0.18587030470371246, 0.2512775659561157, 0.1800404042005539, 0.13985422253608704], [0.17776982486248016, 0.2164098620414734, 0.03016561083495617, 0.006355184596031904, 0.04318562150001526, 0.004709928296506405, 0.02340516820549965, 0.07859960943460464, 0.3921053409576416, 0.27134451270103455, 0.2182498425245285, 0.1118401437997818, 0.13378913700580597, 0.4978374242782593, 0.18931511044502258], [0.16739480197429657, 0.20097726583480835, 0.038037389516830444, 0.05488090589642525, 0.020769814029335976, 0.044557277113199234, 0.32692524790763855, 0.5529306530952454, 0.06495681405067444, 0.061963245272636414, 0.3602059483528137, 0.040287844836711884, 0.11072657257318497, 0.3166219890117645, 0.19249440729618073], [0.07948607206344604, 0.4389178156852722, 0.019072405993938446, 0.11389600485563278, 0.015004596672952175, 0.0008035529754124582, 0.00560334138572216, 0.007579134311527014, 0.12602436542510986, 0.4041804373264313, 0.8435949087142944, 0.7255359292030334, 0.3334953784942627, 0.21919409930706024, 0.13174442946910858], [0.11827840656042099, 0.43549492955207825, 0.035650141537189484, 0.3500109016895294, 0.10479609668254852, 0.0029047641437500715, 0.016262628138065338, 0.008920608088374138, 0.1923075020313263, 0.6588289737701416, 
0.7271849513053894, 0.8207041025161743, 0.5342087149620056, 0.29674431681632996, 0.16698533296585083], [0.19771254062652588, 0.43774574995040894, 0.057631127536296844, 0.15638697147369385, 0.05497771501541138, 0.0015852008946239948, 0.004800108727067709, 0.0038221883587539196, 0.11230877041816711, 0.6780416369438171, 0.6535694003105164, 0.33372464776039124, 0.2617355287075043, 0.4378974735736847, 0.15096917748451233], [0.2510830760002136, 0.455088347196579, 0.2769528925418854, 0.28598156571388245, 0.08308438956737518, 0.495423823595047, 0.2878262400627136, 0.017540372908115387, 0.036487918347120285, 0.07030303031206131, 0.04537871107459068, 0.017587929964065552, 0.15749330818653107, 0.15622387826442719, 0.134229376912117], [0.2108728438615799, 0.12734071910381317, 0.6047671437263489, 0.5566261410713196, 0.4727993309497833, 0.6295000314712524, 0.20963285863399506, 0.3828260004520416, 0.01981351152062416, 0.02910005673766136, 0.17932364344596863, 0.029557999223470688, 0.02868420071899891, 0.05513756722211838, 0.1339428722858429], [0.2013130933046341, 0.35711804032325745, 0.18803814053535461, 0.31239861249923706, 0.6328845024108887, 0.6068195104598999, 0.09879770874977112, 0.295420378446579, 0.033300116658210754, 0.04495004564523697, 0.027333615347743034, 0.034196678549051285, 0.011724627576768398, 0.023517103865742683, 0.3543241322040558], [0.27807915210723877, 0.07025524973869324, 0.15421687066555023, 0.23079168796539307, 0.0323871448636055, 0.4182601273059845, 0.43312954902648926, 0.3330070972442627, 0.027521615847945213, 0.03977188467979431, 0.03152378648519516, 0.00340716983191669, 0.005408053286373615, 0.0057552107609808445, 0.23170912265777588], [0.15765754878520966, 0.07761365175247192, 0.1382310688495636, 0.33822664618492126, 0.15857987105846405, 0.11602839827537537, 0.3749851584434509, 0.3412497341632843, 0.06253337115049362, 0.09931040555238724, 0.010201470926404, 0.0010190334869548678, 0.0007929145358502865, 0.0016151106683537364, 0.1723894327878952], [0.39988550543785095, 0.09145350754261017, 0.3013111352920532, 0.5813722610473633, 0.4042908251285553, 0.2935561537742615, 0.4903331696987152, 0.4357178807258606, 0.04456466808915138, 0.10430204123258591, 0.10590728372335434, 0.007762597873806953, 0.0026525144930928946, 0.0052152471616864204, 0.24974997341632843], [0.03366217389702797, 0.03653215244412422, 0.027766529470682144, 0.007369572762399912, 0.014929202385246754, 0.04527684673666954, 0.00940654892474413, 0.023517949506640434, 0.010960820131003857, 0.0019369145156815648, 0.01981637440621853, 0.00444602407515049, 0.014915830455720425, 0.007271313574165106, 0.15384840965270996]], [[0.011476250365376472, 0.7629169225692749, 0.02116730809211731, 0.010803135111927986, 0.005132503807544708, 0.009303245693445206, 0.0005040443502366543, 0.022131631150841713, 0.001470191520638764, 0.0017710012616589665, 0.0004086543631274253, 0.0022351557854562998, 0.000896299781743437, 0.0005698543391190469, 0.019197434186935425], [0.0024000771809369326, 0.158247172832489, 0.01897430047392845, 0.019486481323838234, 0.0029122373089194298, 0.015832845121622086, 0.0017470666207373142, 0.00117065932136029, 0.01016113068908453, 0.007651789113879204, 0.0020597530528903008, 0.015201352536678314, 0.016943661496043205, 0.009769451804459095, 0.16634535789489746], [0.00410552928224206, 0.0015743908006697893, 0.01049637421965599, 0.006504607852548361, 0.035339318215847015, 0.9065937995910645, 0.2998698651790619, 0.12215600907802582, 0.013029203750193119, 0.000650988076813519, 0.002043183660134673, 
0.006920983083546162, 0.09688588231801987, 0.057574767619371414, 0.009054930880665779], [0.007287806831300259, 0.01375514268875122, 0.001530585577711463, 0.007056740578263998, 0.01978658139705658, 0.9208202958106995, 0.2214416116476059, 0.30606138706207275, 0.052588097751140594, 0.004079628270119429, 0.0024339878000319004, 0.0028739250265061855, 0.04695972800254822, 0.045893676578998566, 0.0110039496794343], [0.006429406348615885, 0.016907041892409325, 0.0023819799534976482, 0.0003115522558800876, 0.006808500271290541, 0.9102355241775513, 0.15379303693771362, 0.07056371122598648, 0.06324119120836258, 0.0030630400869995356, 0.007665702607482672, 0.002797773340716958, 0.13533660769462585, 0.03197972849011421, 0.006115978583693504], [0.014356410130858421, 0.0526699461042881, 0.0007501932559534907, 0.008851941674947739, 0.0005067299935035408, 0.035332534462213516, 0.09051518887281418, 0.049224019050598145, 0.014900125563144684, 0.01856788620352745, 0.0012414768571034074, 0.002389064058661461, 0.0018446464091539383, 0.000877396494615823, 0.22725383937358856], [0.0025407460052520037, 0.32041609287261963, 0.0036992463283240795, 0.02451898716390133, 0.007920290343463421, 0.015527674928307533, 0.03544912114739418, 0.29718661308288574, 0.02347515895962715, 0.026838794350624084, 0.01756858080625534, 0.010445725172758102, 0.005995406303554773, 0.0005847325082868338, 0.2055930197238922], [0.009255345910787582, 0.034783441573381424, 0.010831266641616821, 0.02782595343887806, 0.001477425335906446, 0.006871670484542847, 0.006518858019262552, 0.0072874827310442924, 0.012387615628540516, 0.05288432911038399, 0.04645476117730141, 0.02255677618086338, 0.014156763441860676, 0.00417641457170248, 0.22105874121189117], [0.0017225841293111444, 0.0049251834861934185, 0.007573804818093777, 0.014873476698994637, 0.00903867557644844, 0.0076865823939442635, 0.0017025101697072387, 0.00023153165238909423, 0.024773191660642624, 0.1742238849401474, 0.6002998948097229, 0.6145275831222534, 0.25023365020751953, 0.35489538311958313, 0.039457567036151886], [0.0034636815544217825, 0.39023807644844055, 0.0018667654367163777, 0.0006454490358009934, 0.00025732445647008717, 0.026610050350427628, 0.0026998629327863455, 0.014584111049771309, 0.00032847325201146305, 0.0012709795264527202, 0.07417861372232437, 0.43676891922950745, 0.25757044553756714, 0.32731080055236816, 0.12109360098838806], [0.0014396773185580969, 0.07700426131486893, 0.0003769460890907794, 0.0015669490676373243, 0.0010665652807801962, 0.05166712775826454, 0.003733921330422163, 0.00829349085688591, 9.729996236274019e-05, 0.0004270579374860972, 0.0022819112055003643, 0.3744491934776306, 0.2681969404220581, 0.4920969009399414, 0.028773367404937744], [0.19549021124839783, 0.5118218064308167, 0.053603943437337875, 0.004430307075381279, 0.0015711480518803, 0.024018822237849236, 0.0441354438662529, 0.04134393110871315, 0.0014472270850092173, 0.024767767637968063, 0.029112013056874275, 0.08014442026615143, 0.4702226519584656, 0.40423843264579773, 0.14477935433387756], [0.034691162407398224, 0.09692039340734482, 0.003936667460948229, 0.0164506658911705, 0.0005446859868243337, 0.0016573348548263311, 0.02795562334358692, 0.12881094217300415, 0.0004645287699531764, 0.0021237744949758053, 0.0010291342623531818, 0.001068241661414504, 0.00471450574696064, 0.019945403560996056, 0.19273433089256287], [0.04783029109239578, 0.11157537996768951, 0.02325829118490219, 0.12799327075481415, 0.0216610599309206, 0.41526544094085693, 0.129922553896904, 0.14850500226020813, 
0.0009580283658578992, 0.008097043260931969, 0.01107556838542223, 0.019478609785437584, 0.2748490571975708, 0.11550750583410263, 0.15876543521881104], [0.015012643299996853, 0.00804762914776802, 0.00366173661313951, 0.0018753333715721965, 0.0065993256866931915, 0.00479541253298521, 0.005337378475815058, 0.012457020580768585, 0.0033909485209733248, 0.0032401280477643013, 0.00048777347547002137, 0.012255984358489513, 0.0006230318685993552, 0.001543535152450204, 0.1572250872850418]]], [[[0.016101790592074394, 0.0050575402565300465, 0.008322462439537048, 0.006855499465018511, 0.003766664071008563, 0.0032708626240491867, 0.008669405244290829, 0.016983401030302048, 0.023632090538740158, 0.0007983215618878603, 0.006762287113815546, 0.019076332449913025, 0.0018054646207019687, 0.011848386377096176, 0.23875673115253448], [0.03118298575282097, 0.022700916975736618, 0.01820814236998558, 0.011041272431612015, 0.013735579326748848, 0.003388292621821165, 0.014374880120158195, 0.0029534229543060064, 0.06276529282331467, 0.0010488847037777305, 0.005698299501091242, 0.018068330362439156, 0.009247002191841602, 0.010645000264048576, 0.2274351567029953], [0.10749327391386032, 0.01361121516674757, 0.01930609717965126, 0.025707745924592018, 0.010174103081226349, 0.0019352196250110865, 0.006933925207704306, 0.026056114584207535, 0.003662128932774067, 0.006897854618728161, 0.0015213300939649343, 0.006132383830845356, 0.0028239174280315638, 0.013304864056408405, 0.22739072144031525], [0.25010421872138977, 0.005582309328019619, 0.006115755997598171, 0.08664196729660034, 0.005224197171628475, 0.005311913322657347, 0.03281412273645401, 0.024678068235516548, 0.018595430999994278, 0.0819764956831932, 0.005479714833199978, 0.008821909315884113, 0.02042486146092415, 0.03525637462735176, 0.19444485008716583], [0.1781134456396103, 0.021083489060401917, 0.038613177835941315, 0.16417931020259857, 0.0029645320028066635, 0.00899361353367567, 0.009076704271137714, 0.01357053779065609, 0.01101364754140377, 0.04086701199412346, 0.014270029030740261, 0.011464214883744717, 0.011689195409417152, 0.0706799253821373, 0.3730076551437378], [0.3090042769908905, 0.031162124127149582, 0.033009856939315796, 0.14512063562870026, 0.00411824369803071, 0.07382509857416153, 0.02702517993748188, 0.07667822390794754, 0.021658627316355705, 0.01615101285278797, 0.0066233747638762, 0.008623828180134296, 0.0008525048615410924, 0.011195158585906029, 0.2578849792480469], [0.3291372060775757, 0.0561586357653141, 0.4192807674407959, 0.4571635127067566, 0.057550910860300064, 0.04359428584575653, 0.005270917434245348, 0.03804505616426468, 0.03733760863542557, 0.20409555733203888, 0.04554562643170357, 0.024629684165120125, 0.018161950632929802, 0.04353561997413635, 0.145583838224411], [0.3828665316104889, 0.019200418144464493, 0.34599530696868896, 0.4376910328865051, 0.07537391781806946, 0.036528222262859344, 0.04610925167798996, 0.04538694769144058, 0.1663823127746582, 0.04690397158265114, 0.05553056299686432, 0.021811597049236298, 0.012554574757814407, 0.03599526360630989, 0.1534716635942459], [0.08861738443374634, 0.06363938748836517, 0.7135313749313354, 0.146565243601799, 0.3346884250640869, 0.3544132113456726, 0.12204702943563461, 0.028818881139159203, 0.04564356431365013, 0.03288809210062027, 0.06753166019916534, 0.12387087196111679, 0.029650555923581123, 0.014753012917935848, 0.04379607364535332], [0.03655187785625458, 0.006058508530259132, 0.04018249735236168, 0.08900216966867447, 0.027111714705824852, 0.006408872082829475, 0.03783104568719864, 
0.010064247064292431, 0.2550305724143982, 0.008420061320066452, 0.012097015976905823, 0.017737949267029762, 0.0012783813290297985, 0.0026436946354806423, 0.172612726688385], [0.1163061186671257, 0.04424217715859413, 0.014033653773367405, 0.03590161353349686, 0.06527962535619736, 0.00195779325440526, 0.027195196598768234, 0.1581626534461975, 0.30849722027778625, 0.1652299016714096, 0.04234298691153526, 0.05585171654820442, 0.016547594219446182, 0.04909297078847885, 0.08752257376909256], [0.1013311892747879, 0.06866802275180817, 0.06425411254167557, 0.4572087228298187, 0.04987834766507149, 0.005650981329381466, 0.053177352994680405, 0.04739876464009285, 0.2551265060901642, 0.06654207408428192, 0.20209699869155884, 0.04737241193652153, 0.042119286954402924, 0.22778292000293732, 0.10508881509304047], [0.24632138013839722, 0.045121580362319946, 0.12561434507369995, 0.43826135993003845, 0.07532560080289841, 0.002372375223785639, 0.0398109070956707, 0.026653334498405457, 0.5938559174537659, 0.12655052542686462, 0.04707850515842438, 0.018195422366261482, 0.010826833546161652, 0.023274976760149002, 0.14916135370731354], [0.12666325271129608, 0.047387395054101944, 0.04497509077191353, 0.23918962478637695, 0.016611548140645027, 0.009305250830948353, 0.02713325433433056, 0.030590379610657692, 0.4573454260826111, 0.17728003859519958, 0.08635216951370239, 0.05938294902443886, 0.008936652913689613, 0.028742672875523567, 0.15077541768550873], [0.03701020032167435, 0.037774376571178436, 0.1161394715309143, 0.09335700422525406, 0.015312368050217628, 0.026739761233329773, 0.013009096495807171, 0.005902147851884365, 0.07189750671386719, 0.00625182269141078, 0.056744903326034546, 0.06423129141330719, 0.06661844998598099, 0.02100159414112568, 0.2252311259508133]], [[0.0034671342000365257, 0.05013812705874443, 0.16192083060741425, 0.3595426082611084, 0.20735634863376617, 0.08139260113239288, 0.009979248046875, 0.05037669837474823, 0.0023427342530339956, 6.08037480560597e-05, 0.003484810469672084, 0.023961462080478668, 0.38460296392440796, 0.24992075562477112, 0.13989195227622986], [0.6699675917625427, 0.09382463991641998, 0.2939082980155945, 0.17940783500671387, 0.06414232403039932, 0.05161670595407486, 0.09315118193626404, 0.0025183490943163633, 0.0024716362822800875, 0.00784118939191103, 0.06077995523810387, 0.010742363519966602, 0.027031319215893745, 0.033606547862291336, 0.020909229293465614], [0.2646949589252472, 0.029353437945246696, 0.21451972424983978, 0.10881441831588745, 0.06597915291786194, 0.0030848400201648474, 0.011694483458995819, 0.021679535508155823, 0.002872215351089835, 0.013158812187612057, 0.002100167330354452, 6.679360376438126e-05, 0.004520595073699951, 0.019191764295101166, 0.15631338953971863], [0.040224652737379074, 0.02035309188067913, 0.3179875612258911, 0.11730892956256866, 0.5032125115394592, 0.4173433780670166, 0.2045394331216812, 0.3468436896800995, 0.0142394183203578, 0.034110911190509796, 0.0166803989559412, 0.0005183254834264517, 0.014372344128787518, 0.013749183155596256, 0.07609989494085312], [0.0153636634349823, 0.002009550342336297, 0.5970484614372253, 0.5668097734451294, 0.03708057850599289, 0.030387206003069878, 0.003990367520600557, 0.00021067907800897956, 0.0006718098884448409, 0.004241611808538437, 0.01157804112881422, 0.0002699779870454222, 0.0015558624872937799, 0.0029094237834215164, 0.04601351544260979], [0.03574535250663757, 0.009626551531255245, 0.4402237832546234, 0.2294078767299652, 0.26443710923194885, 0.01504121907055378, 0.016090886667370796, 
0.007329131942242384, 0.002309221774339676, 0.0030864060390740633, 0.0026519321836531162, 0.0004272839578334242, 0.0011082548880949616, 0.01614256016910076, 0.03275791555643082], [6.553631828865036e-05, 0.000357702374458313, 0.08750326931476593, 0.01436514500528574, 0.006815748754888773, 0.6623476147651672, 0.0034670215100049973, 0.0015547194052487612, 0.00029766204534098506, 1.8653441657079384e-05, 0.0003687080170493573, 0.00015007570618763566, 0.0009929342195391655, 0.00030579339363612235, 0.0016504023224115372], [0.0004548979632090777, 7.145033305278048e-05, 0.025678247213363647, 0.00989772193133831, 0.007979623042047024, 0.6904858946800232, 0.04177143797278404, 0.0005172804230824113, 0.00045151059748604894, 9.678980859462172e-05, 0.0003766386944334954, 0.00020437331113498658, 0.0009936039568856359, 0.0004823105991818011, 0.001104293274693191], [0.02770741656422615, 0.15481999516487122, 0.0164713803678751, 0.029219333082437515, 0.01727348566055298, 0.0033895254600793123, 0.08395758271217346, 0.08886045962572098, 0.06561290472745895, 0.23454923927783966, 0.01131775975227356, 0.00014876923523843288, 0.021633606404066086, 0.032435301691293716, 0.2441566288471222], [0.0002423129917588085, 0.0011915951035916805, 0.0022339578717947006, 0.006169029977172613, 0.0026169228367507458, 0.006970150861889124, 0.0023872333113104105, 0.020186979323625565, 0.5034035444259644, 0.061859097331762314, 0.01802009530365467, 0.08541904389858246, 0.11395227909088135, 0.12879255414009094, 0.06123032420873642], [0.0016445622313767672, 0.0006882954621687531, 0.0003155411686748266, 0.0014561355346813798, 0.0007120753289200366, 0.00010650769399944693, 0.0005508221802301705, 0.004306118004024029, 0.4519909620285034, 0.2298276424407959, 0.04858560487627983, 0.008956322446465492, 0.005770590156316757, 0.011063157580792904, 0.0306133683770895], [0.0032223593443632126, 0.0006265831179916859, 0.002176017500460148, 0.010606854222714901, 0.0010762742022052407, 6.259929068619385e-05, 0.0013370343949645758, 0.0014808439882472157, 0.030783534049987793, 0.7491747736930847, 0.34058046340942383, 0.00350938574410975, 0.02303031086921692, 0.0742756798863411, 0.006112673785537481], [0.010601752437651157, 0.009935700334608555, 0.0694134384393692, 0.14514312148094177, 0.01701076701283455, 0.0001025431411108002, 0.003628269536420703, 0.007610301487147808, 0.1447119563817978, 0.2691461443901062, 0.7685887217521667, 0.06739932298660278, 0.05600086599588394, 0.567065417766571, 0.01997430995106697], [0.0020818221382796764, 0.006225256249308586, 0.007747206371277571, 0.02054281160235405, 0.00644321832805872, 0.00019787036580964923, 0.0007576930802315474, 0.0013290452770888805, 0.1748982071876526, 0.20870953798294067, 0.6057864427566528, 0.2165842056274414, 0.10265108197927475, 0.12960675358772278, 0.026959752663969994], [0.0929064005613327, 0.3412420153617859, 0.13197122514247894, 0.20421825349330902, 0.6308890581130981, 0.08085004985332489, 0.35388287901878357, 0.3416491150856018, 0.024628864601254463, 0.013967287726700306, 0.0762757882475853, 0.26007020473480225, 0.3328040838241577, 0.09019435197114944, 0.014360385946929455]], [[0.014275058172643185, 0.006687531713396311, 0.3026585280895233, 0.06917963922023773, 0.2396276444196701, 0.6229325532913208, 0.15904799103736877, 0.13992713391780853, 0.10272591561079025, 0.6685669422149658, 0.22624024748802185, 0.09492585808038712, 0.40837499499320984, 0.2735627591609955, 0.011893448419868946], [0.021194536238908768, 0.020265106111764908, 0.1736137419939041, 0.08712188154459, 0.3174395263195038, 
0.3545694649219513, 0.3640749752521515, 0.11553992331027985, 0.3069344758987427, 0.7487083673477173, 0.45964598655700684, 0.41950592398643494, 0.6157799363136292, 0.47228363156318665, 0.04039919748902321], [0.008898869156837463, 0.002019912237301469, 0.021509699523448944, 0.0182319525629282, 0.07474909722805023, 0.02385670319199562, 0.013716273009777069, 0.008799813687801361, 0.3437807857990265, 0.008914400823414326, 0.012629772536456585, 0.10342472046613693, 0.0370708666741848, 0.023541903123259544, 0.18654775619506836], [0.01223641075193882, 0.003142833709716797, 0.006001354195177555, 0.003996475599706173, 0.0579916350543499, 0.01896491087973118, 0.01948327198624611, 0.013184066861867905, 0.30560916662216187, 0.015957718715071678, 0.016950437799096107, 0.06207568570971489, 0.044481322169303894, 0.01894378289580345, 0.19150091707706451], [0.003971019294112921, 0.0012432326329872012, 0.005908531602472067, 0.0021760377567261457, 0.002044213702902198, 0.01004379615187645, 0.01574278064072132, 0.026324355974793434, 0.4105670154094696, 0.05117517337203026, 0.02775881439447403, 0.023424910381436348, 0.009920927695930004, 0.011210974305868149, 0.16597995162010193], [0.007421860471367836, 0.006305157672613859, 0.011464249342679977, 0.020268600434064865, 0.025753991678357124, 0.031131377443671227, 0.03418951481580734, 0.0052986773662269115, 0.5788748264312744, 0.46168622374534607, 0.07252157479524612, 0.06022901460528374, 0.017210712656378746, 0.04054110497236252, 0.15131165087223053], [0.001541785546578467, 0.0008907613810151815, 0.004846525378525257, 0.001811343478038907, 0.0069520194083452225, 0.008084121160209179, 0.021458715200424194, 0.02802192233502865, 0.3832707405090332, 0.25552085041999817, 0.014592574909329414, 0.01065820176154375, 0.012523604556918144, 0.010731800459325314, 0.22416816651821136], [0.004116748925298452, 0.0016883857315406203, 0.014749680645763874, 0.00869818776845932, 0.01003838051110506, 0.007631313521414995, 0.02068890631198883, 0.027104953303933144, 0.13497500121593475, 0.6378710865974426, 0.10288828611373901, 0.0942029282450676, 0.028772620484232903, 0.05935161933302879, 0.21764545142650604], [0.06222981959581375, 0.01881357654929161, 0.00486758491024375, 0.015509632416069508, 0.0009378677350468934, 0.004574655555188656, 0.005093523766845465, 0.0076056248508393764, 0.02507362887263298, 0.02107030339539051, 0.007815904915332794, 0.010442771948873997, 0.011698074638843536, 0.006942160427570343, 0.31572407484054565], [0.01727244071662426, 0.009210732765495777, 0.005953751504421234, 0.0013454181607812643, 0.005081892944872379, 0.04435739293694496, 0.006434922106564045, 0.0007962443050928414, 0.0007702711154706776, 0.16453301906585693, 0.5625144839286804, 0.34227296710014343, 0.6355522871017456, 0.6161591410636902, 0.02771596610546112], [0.12786830961704254, 0.008172453381121159, 0.0017843057867139578, 0.004017683211714029, 0.007877650670707226, 0.0018398476531729102, 0.01566770300269127, 0.0026914728805422783, 0.0035052604507654905, 0.0037441153544932604, 0.011492998339235783, 0.10472051054239273, 0.01954079605638981, 0.025050928816199303, 0.24727097153663635], [0.1465907245874405, 0.037033673375844955, 0.013877127319574356, 0.00413108617067337, 0.00966043584048748, 0.02326187677681446, 0.04576379433274269, 0.010370912030339241, 0.05009477958083153, 0.002161832293495536, 0.012562266550958157, 0.08835282921791077, 0.018735390156507492, 0.07781965285539627, 0.21298982203006744], [0.018177246674895287, 0.009594686329364777, 0.010616189800202847, 0.003939185757189989, 
0.020018288865685463, 0.006944165099412203, 0.014553648419678211, 0.014575640670955181, 0.031773608177900314, 0.0201406329870224, 0.008282337337732315, 0.02822018228471279, 0.008926213718950748, 0.030271533876657486, 0.18345791101455688], [0.029857823625206947, 0.018949948251247406, 0.0061294399201869965, 0.002908851485699415, 0.00919707678258419, 0.00952958408743143, 0.01205661240965128, 0.00758303003385663, 0.05086279660463333, 0.007759919855743647, 0.006360263098031282, 0.02717713639140129, 0.006157578434795141, 0.027468249201774597, 0.21562480926513672], [0.035946138203144073, 0.021175134927034378, 0.025809520855545998, 0.0228139478713274, 0.02454732172191143, 0.008901212364435196, 0.01817207969725132, 0.024075007066130638, 0.042662542313337326, 0.10151555389165878, 0.03429628908634186, 0.025050567463040352, 0.015684176236391068, 0.028640326112508774, 0.23519039154052734]], [[0.29903000593185425, 0.5539957880973816, 0.06723504513502121, 0.06922264397144318, 0.12363186478614807, 0.04431891441345215, 0.10694187879562378, 0.08094406872987747, 0.15170463919639587, 0.05897890776395798, 0.026665056124329567, 0.04277891665697098, 0.011532573029398918, 0.016366619616746902, 0.08233406394720078], [0.030788322910666466, 0.06814564764499664, 0.1441766321659088, 0.42568475008010864, 0.23481200635433197, 0.09723259508609772, 0.20801249146461487, 0.2833361029624939, 0.12989479303359985, 0.09075285494327545, 0.02217184565961361, 0.10632100701332092, 0.07123817503452301, 0.18399499356746674, 0.11842577904462814], [0.21215111017227173, 0.2570435404777527, 0.03298918902873993, 0.11753708124160767, 0.2531988024711609, 0.2834656238555908, 0.13087181746959686, 0.14389817416667938, 0.06408312171697617, 0.023736948147416115, 0.043677639216184616, 0.007582403719425201, 0.08098249137401581, 0.042930904775857925, 0.09848955273628235], [0.24232596158981323, 0.4370230436325073, 0.27921250462532043, 0.32216426730155945, 0.14763100445270538, 0.1446210741996765, 0.041608523577451706, 0.05782362446188927, 0.03667302429676056, 0.015881532803177834, 0.09886573255062103, 0.0007486737449653447, 0.022804880514740944, 0.01436265092343092, 0.04328664019703865], [0.0417991504073143, 0.06808368116617203, 0.22980956733226776, 0.06044253334403038, 0.09120408445596695, 0.3664403557777405, 0.01738058589398861, 0.026107804849743843, 0.16878005862236023, 0.007388730999082327, 0.6907519698143005, 0.00283504044637084, 0.004864559043198824, 0.017621232196688652, 0.04920867085456848], [0.07025078684091568, 0.08007846027612686, 0.18737106025218964, 0.08649075031280518, 0.14398247003555298, 0.03926409035921097, 0.10999412834644318, 0.10028164088726044, 0.2733333110809326, 0.07497494667768478, 0.6277027726173401, 0.03760387748479843, 0.07242996245622635, 0.04469411447644234, 0.0635850802063942], [0.18292218446731567, 0.29889917373657227, 0.16216641664505005, 0.041324593126773834, 0.08738134056329727, 0.03374062106013298, 0.10780933499336243, 0.1685270518064499, 0.3661736249923706, 0.13795819878578186, 0.7607439160346985, 0.022037923336029053, 0.11896573007106781, 0.017960727214813232, 0.09792909026145935], [0.29104405641555786, 0.7119240164756775, 0.16990531980991364, 0.02345188707113266, 0.15646961331367493, 0.008449066430330276, 0.06418811529874802, 0.018176060169935226, 0.3091927766799927, 0.08911041170358658, 0.3005200922489166, 0.04236089810729027, 0.2996547222137451, 0.08733220398426056, 0.07523740082979202], [0.046947941184043884, 0.14375551044940948, 0.004344047512859106, 0.0067795743234455585, 0.02948000282049179, 
0.08397668600082397, 0.06400846689939499, 0.18865461647510529, 0.023663662374019623, 0.08527978509664536, 0.02815503440797329, 0.04117048531770706, 0.5833349823951721, 0.0677085593342781, 0.23153413832187653], [0.08349642902612686, 0.4532567262649536, 0.004409583285450935, 0.009004302322864532, 0.007938031107187271, 0.13749390840530396, 0.1858609914779663, 0.31525370478630066, 0.018453413620591164, 0.12712040543556213, 0.04680929332971573, 0.12408707290887833, 0.13737666606903076, 0.12311573326587677, 0.142713725566864], [0.05042501911520958, 0.07026762515306473, 0.0020696106366813183, 0.010109566152095795, 0.07710029184818268, 0.05610239878296852, 0.05948542803525925, 0.19247274100780487, 0.001940111513249576, 0.05155838653445244, 0.04620450362563133, 0.20989066362380981, 0.485702246427536, 0.4166657328605652, 0.18102103471755981], [0.09080760926008224, 0.09187275916337967, 0.012195594608783722, 0.021634280681610107, 0.019499676302075386, 0.09054076671600342, 0.11008334904909134, 0.23214302957057953, 0.0423310361802578, 0.034868963062763214, 0.06751228123903275, 0.049237679690122604, 0.03915484994649887, 0.08995199203491211, 0.1941523253917694], [0.0706457570195198, 0.10473088920116425, 0.039385173469781876, 0.02697153575718403, 0.04372800514101982, 0.06655491143465042, 0.23491710424423218, 0.19935868680477142, 0.036273516714572906, 0.06345809996128082, 0.020782677456736565, 0.12393849343061447, 0.05726756155490875, 0.041495081037282944, 0.15982753038406372], [0.039186086505651474, 0.11076691001653671, 0.03891725465655327, 0.009549588896334171, 0.01825849525630474, 0.051163915544748306, 0.1146436408162117, 0.1649821698665619, 0.03586947172880173, 0.06679365783929825, 0.09092967957258224, 0.14827685058116913, 0.10948126018047333, 0.10746686905622482, 0.1515202671289444], [0.14541134238243103, 0.05313154682517052, 0.01991144008934498, 0.08764121681451797, 0.014597749337553978, 0.03937898576259613, 0.04872390255331993, 0.04689335823059082, 0.04558950290083885, 0.051970891654491425, 0.02520112879574299, 0.022838978096842766, 0.00921469647437334, 0.00801294855773449, 0.21471147239208221]], [[0.009874092414975166, 0.0475393682718277, 0.0700187012553215, 0.05995699018239975, 0.023110831156373024, 0.04304451867938042, 0.02397323027253151, 0.09104450792074203, 0.13320927321910858, 0.0718994140625, 0.16378211975097656, 0.06306017935276031, 0.03516274318099022, 0.06407153606414795, 0.1927335411310196], [0.007679122034460306, 0.008519956842064857, 0.023641018196940422, 0.036320336163043976, 0.005810021422803402, 0.002834178740158677, 0.01027101743966341, 0.005131446290761232, 0.05288401618599892, 0.022729018703103065, 0.02885960415005684, 0.007142365910112858, 0.005423326510936022, 0.00592823838815093, 0.23125353455543518], [0.17363575100898743, 0.08529574424028397, 0.018747013062238693, 0.09323837608098984, 0.07366655766963959, 0.2784116566181183, 0.6226999759674072, 0.6422466039657593, 0.18433590233325958, 0.44911590218544006, 0.07703087478876114, 0.23628254234790802, 0.37835898995399475, 0.3362680971622467, 0.10061702132225037], [0.039354946464300156, 0.028671007603406906, 0.0009692042949609458, 0.010166235268115997, 0.003592043649405241, 0.024686597287654877, 0.0576656274497509, 0.10543617606163025, 0.069565050303936, 0.23999209702014923, 0.0370241142809391, 0.07099387794733047, 0.08031197637319565, 0.0629396140575409, 0.19831009209156036], [0.07821620255708694, 0.07413192838430405, 0.008470119908452034, 0.005837618373334408, 0.016890503466129303, 0.34118980169296265, 0.6424257159233093, 
0.5736639499664307, 0.18751046061515808, 0.08286380022764206, 0.013973995111882687, 0.16452431678771973, 0.6265572905540466, 0.24633896350860596, 0.03771306574344635], [0.08601168543100357, 0.11519530415534973, 0.00501672737300396, 0.0384475477039814, 0.0009856059914454818, 0.020220156759023666, 0.4602939486503601, 0.41334664821624756, 0.011432202532887459, 0.039776530116796494, 0.004202698357403278, 0.012451107613742352, 0.012797003611922264, 0.0109980758279562, 0.22371669113636017], [0.05821564793586731, 0.2493630200624466, 0.017187682911753654, 0.007334073074162006, 0.002277297666296363, 0.012770043686032295, 0.014771709218621254, 0.06810285151004791, 0.008148171938955784, 0.093966543674469, 0.03078475221991539, 0.016961626708507538, 0.009818210266530514, 0.005369590129703283, 0.2805846929550171], [0.0315314382314682, 0.006441309116780758, 0.005187691655009985, 0.0023020647931843996, 0.001103160553611815, 0.0010285694152116776, 0.0036586276255548, 0.0034369472414255142, 0.02540425956249237, 0.018933216109871864, 0.011261656880378723, 0.014689027331769466, 0.0047272746451199055, 0.003173592034727335, 0.27608010172843933], [0.052501752972602844, 0.03902341425418854, 0.022159013897180557, 0.15980832278728485, 0.04565480723977089, 0.04961955174803734, 0.10487794876098633, 0.03556728735566139, 0.011893571354448795, 0.350600004196167, 0.8153157234191895, 0.696418821811676, 0.19642634689807892, 0.7945331335067749, 0.025074943900108337], [0.008775658905506134, 0.0231929961591959, 0.001974506536498666, 0.02221933752298355, 0.002016729209572077, 0.03464629501104355, 0.020560195669531822, 0.015741808339953423, 0.024821357801556587, 0.03194829449057579, 0.062133170664310455, 0.009445058181881905, 0.008440939709544182, 0.031038939952850342, 0.24359388649463654], [0.15448324382305145, 0.15535393357276917, 0.0009195139864459634, 0.02347545325756073, 0.010745828039944172, 0.05933469906449318, 0.0886014774441719, 0.09891750663518906, 0.008176282048225403, 0.17814745008945465, 0.04613054543733597, 0.10348650068044662, 0.06132601201534271, 0.10257216542959213, 0.2144334316253662], [0.1637454628944397, 0.3587695062160492, 0.013175190426409245, 0.027070751413702965, 0.009701711125671864, 0.027045298367738724, 0.06057014688849449, 0.08674251288175583, 0.018084047362208366, 0.012978773564100266, 0.04984384402632713, 0.0746963769197464, 0.21545591950416565, 0.18275731801986694, 0.18403297662734985], [0.04016833007335663, 0.03071952983736992, 0.0073937661945819855, 0.044594794511795044, 0.005693770945072174, 0.007929249666631222, 0.19023852050304413, 0.12198647856712341, 0.00967123731970787, 0.05747445672750473, 0.006795276887714863, 0.006636326666921377, 0.014849998988211155, 0.02297961339354515, 0.1823122203350067], [0.08359953761100769, 0.14515268802642822, 0.009139984846115112, 0.10055579245090485, 0.007817201316356659, 0.06191832944750786, 0.24591712653636932, 0.26670339703559875, 0.008127851411700249, 0.05132465437054634, 0.011226493865251541, 0.020721180364489555, 0.025672290474176407, 0.06137499585747719, 0.19538666307926178], [0.004038439132273197, 0.01158715970814228, 0.012492671608924866, 0.008604439906775951, 0.0044732466340065, 0.001471644383855164, 0.003622728632763028, 0.005392232909798622, 0.024040954187512398, 0.002572751836851239, 0.011896335519850254, 0.00655994052067399, 0.004419950768351555, 0.0023605322930961847, 0.2578853368759155]], [[0.020951254293322563, 0.19576001167297363, 0.05422525107860565, 0.000516751199029386, 0.0576050765812397, 0.039616964757442474, 0.0011584623716771603, 
0.06260760873556137, 0.05524995177984238, 5.760174462920986e-05, 0.0005486492882482708, 0.01856253668665886, 0.008022493682801723, 0.0032547120936214924, 0.1980074942111969], [0.15878187119960785, 0.5755441188812256, 0.073322594165802, 0.006848999299108982, 0.04221894592046738, 0.057610929012298584, 0.01498481910675764, 0.15564584732055664, 0.02557745948433876, 0.010493909008800983, 0.04444737732410431, 0.10564734041690826, 0.04703369736671448, 0.007807346060872078, 0.10371111333370209], [0.0667557343840599, 0.5756934881210327, 0.02783285267651081, 0.001271323417313397, 0.13096383213996887, 0.007863562554121017, 0.0004880728665739298, 0.00786207988858223, 0.030193913727998734, 0.0004458925104700029, 0.0008183285826817155, 0.003005507169291377, 0.008833326399326324, 0.014566708356142044, 0.09050195664167404], [0.006902126595377922, 0.22582471370697021, 0.027240794152021408, 0.000252248632023111, 0.08146748691797256, 0.008376134559512138, 0.0017193618696182966, 0.010283069685101509, 0.09191752970218658, 1.873078872449696e-05, 0.0001427968527423218, 0.0006295929779298604, 0.016630304977297783, 0.005029548890888691, 0.17517179250717163], [0.46813952922821045, 0.7474208474159241, 0.04419572278857231, 0.039987821131944656, 0.07900705188512802, 0.010286353528499603, 0.008277984336018562, 0.21022778749465942, 0.018339863047003746, 0.003122991183772683, 0.0047759185545146465, 0.0031952662393450737, 0.0037801233120262623, 0.005526377819478512, 0.11187370121479034], [0.08057912439107895, 0.09254536032676697, 0.26037144660949707, 0.04459136351943016, 0.19053104519844055, 0.18187369406223297, 0.04494835063815117, 0.08866222947835922, 0.05515718460083008, 0.011219717562198639, 0.041749756783246994, 0.13417255878448486, 0.43527963757514954, 0.4240920841693878, 0.05903848633170128], [0.005677447654306889, 0.1104632169008255, 0.17886187136173248, 0.06816153228282928, 0.31320425868034363, 0.08580746501684189, 0.044242095202207565, 0.4031389355659485, 0.13310441374778748, 8.991359209176153e-05, 0.00051962147699669, 0.017516016960144043, 0.02517649158835411, 0.02827705629169941, 0.13873830437660217], [0.009441166184842587, 0.04568161070346832, 0.08503290265798569, 0.055850934237241745, 0.15800173580646515, 0.09921947866678238, 0.2719998359680176, 0.7131122350692749, 0.12690743803977966, 0.0015569856623187661, 0.019959524273872375, 0.06398878246545792, 0.1124982088804245, 0.07506788522005081, 0.06075114384293556], [0.1778930425643921, 0.41812169551849365, 0.05459700897336006, 0.015388981439173222, 0.296997606754303, 0.041353121399879456, 0.1696915328502655, 0.1226804181933403, 0.3453136682510376, 0.006036087870597839, 0.008416525088250637, 0.004891113843768835, 0.003974124789237976, 0.0023401544895023108, 0.04184575751423836], [0.0018550200620666146, 0.2628808617591858, 0.0018376001389697194, 9.925621998263523e-05, 0.008250601589679718, 0.11965687572956085, 0.011913565918803215, 0.3649533987045288, 0.12527383863925934, 0.0011617891723290086, 0.002173396060243249, 0.011088940314948559, 0.02579125389456749, 0.004398738034069538, 0.18079015612602234], [0.0033212341368198395, 0.4786561131477356, 0.00019389556837268174, 4.100392834516242e-05, 0.03255903348326683, 0.004482456482946873, 0.0018638258334249258, 0.04032744839787483, 0.151435986161232, 0.0011174781247973442, 0.0008650964009575546, 0.049343932420015335, 0.013284855522215366, 0.009702197276055813, 0.17111515998840332], [0.015286837704479694, 0.17760051786899567, 0.012107143178582191, 0.004069492220878601, 0.40114596486091614, 0.005856915842741728, 
0.025313973426818848, 0.23595470190048218, 0.5599475502967834, 0.019674712792038918, 0.01789786107838154, 0.0449712835252285, 0.024323459714651108, 0.008310162462294102, 0.10516723990440369], [0.013816175982356071, 0.10832668840885162, 0.014126134105026722, 0.0044770012609660625, 0.18972823023796082, 0.04144473373889923, 0.013167506083846092, 0.0398833267390728, 0.08117146790027618, 0.03379456326365471, 0.04336484149098396, 0.6766878366470337, 0.6025072932243347, 0.24042664468288422, 0.05677386373281479], [0.010657100938260555, 0.1729527860879898, 0.006031150463968515, 0.006062258500605822, 0.10042858123779297, 0.007653414737433195, 0.0031583579257130623, 0.014785557985305786, 0.13275322318077087, 0.05689838156104088, 0.04302775487303734, 0.36964303255081177, 0.3870774507522583, 0.31299954652786255, 0.07590257376432419], [0.014769526198506355, 0.05199434980750084, 0.11582475155591965, 0.14804258942604065, 0.05702318996191025, 0.3275434374809265, 0.3759170472621918, 0.3329218327999115, 0.027774346992373466, 0.12548163533210754, 0.13219930231571198, 0.029332099482417107, 0.2028164267539978, 0.518939197063446, 4.3280975660309196e-05]], [[0.5917359590530396, 0.12410512566566467, 0.24872945249080658, 0.20040015876293182, 0.21720361709594727, 0.11561702191829681, 0.58521568775177, 0.41413450241088867, 0.22558750212192535, 0.117314413189888, 0.3378458619117737, 0.10710897296667099, 0.0625920221209526, 0.24034489691257477, 0.0060951621271669865], [0.03933318331837654, 0.17479471862316132, 0.1999012678861618, 0.1507989913225174, 0.2344110906124115, 0.41628938913345337, 0.19733835756778717, 0.42009472846984863, 0.32125937938690186, 0.09302358329296112, 0.29758843779563904, 0.2500022351741791, 0.15192696452140808, 0.19621950387954712, 0.06078135594725609], [0.03998054191470146, 0.02165106125175953, 0.5779209733009338, 0.4094802737236023, 0.3219829499721527, 0.23359909653663635, 0.15223096311092377, 0.0776560828089714, 0.11850404739379883, 0.1752316802740097, 0.7765606641769409, 0.15624035894870758, 0.19448350369930267, 0.3389243483543396, 0.015656093135476112], [0.2606712579727173, 0.23122362792491913, 0.33188652992248535, 0.327752023935318, 0.0930425301194191, 0.13157396018505096, 0.5079332590103149, 0.15524731576442719, 0.2039693295955658, 0.336448073387146, 0.7406277656555176, 0.11173539608716965, 0.03980698063969612, 0.2757716476917267, 0.009055807255208492], [0.03992704302072525, 0.03562299162149429, 0.05761631205677986, 0.04593607783317566, 0.747100830078125, 0.13848423957824707, 0.25807130336761475, 0.11098858714103699, 0.025020861998200417, 0.027831630781292915, 0.07712040096521378, 0.5344594120979309, 0.28488224744796753, 0.37143638730049133, 0.060307834297418594], [0.146702840924263, 0.5779150128364563, 0.04704871401190758, 0.12512727081775665, 0.05839477851986885, 0.5817644596099854, 0.2541782557964325, 0.167904794216156, 0.020014837384223938, 0.0557471327483654, 0.1778557300567627, 0.29983726143836975, 0.34978994727134705, 0.3759990334510803, 0.07532685250043869], [0.14372284710407257, 0.20398879051208496, 0.060162752866744995, 0.022449441254138947, 0.15882903337478638, 0.12907396256923676, 0.7781419157981873, 0.20689332485198975, 0.023098474368453026, 0.02567201852798462, 0.04225016012787819, 0.05647281929850578, 0.5644452571868896, 0.8062969446182251, 0.0037398021668195724], [0.09274263679981232, 0.19406189024448395, 0.18035270273685455, 0.18292436003684998, 0.2674761116504669, 0.1057504341006279, 0.5214765071868896, 0.1765710562467575, 0.15375129878520966, 0.08563723415136337, 
0.35003283619880676, 0.12250327318906784, 0.4574505388736725, 0.6043637990951538, 0.046846963465213776], [0.3136129081249237, 0.10648278146982193, 0.02492944709956646, 0.07937752455472946, 0.16382691264152527, 0.40212482213974, 0.2148500233888626, 0.5046796798706055, 0.25625455379486084, 0.10382789373397827, 0.027611082419753075, 0.07138189673423767, 0.1265101283788681, 0.05298655480146408, 0.01642199046909809], [0.7252353429794312, 0.23862500488758087, 0.17466871440410614, 0.2584758698940277, 0.15821219980716705, 0.41019105911254883, 0.4795793294906616, 0.2558479905128479, 0.061036378145217896, 0.5831483006477356, 0.23237691819667816, 0.36767491698265076, 0.07294586300849915, 0.0734395682811737, 0.006080146878957748], [0.18402060866355896, 0.2199273407459259, 0.10670217871665955, 0.36498934030532837, 0.37264159321784973, 0.5975290536880493, 0.641157865524292, 0.4798426032066345, 0.07047704607248306, 0.30389490723609924, 0.6835307478904724, 0.29959914088249207, 0.32009243965148926, 0.2076108753681183, 0.015385132282972336], [0.18547095358371735, 0.1046445369720459, 0.17664410173892975, 0.031107882037758827, 0.4872691333293915, 0.6876094937324524, 0.29805243015289307, 0.2697339355945587, 0.03289056569337845, 0.04577193781733513, 0.2390383929014206, 0.650258481502533, 0.6253164410591125, 0.2719551920890808, 0.042574722319841385], [0.06026101112365723, 0.4596063494682312, 0.11362233757972717, 0.050736263394355774, 0.47900232672691345, 0.8146356344223022, 0.23428170382976532, 0.5258204936981201, 0.07407079637050629, 0.24087238311767578, 0.04631686583161354, 0.04097185283899307, 0.24002470076084137, 0.051092784851789474, 0.10185284167528152], [0.05915316566824913, 0.3385859429836273, 0.23845957219600677, 0.13520635664463043, 0.49372056126594543, 0.8321547508239746, 0.47351959347724915, 0.4942004382610321, 0.11661165207624435, 0.273796945810318, 0.09639480710029602, 0.07113680988550186, 0.3545372784137726, 0.3069557547569275, 0.026768943294882774], [0.6326229572296143, 0.28129494190216064, 0.2424720972776413, 0.23961131274700165, 0.1532977670431137, 0.03248026221990585, 0.07237446308135986, 0.03991716355085373, 0.058106135576963425, 0.6791825294494629, 0.4868316352367401, 0.4841252863407135, 0.1838759332895279, 0.16229771077632904, 0.03779346123337746]], [[0.04456469416618347, 0.016716457903385162, 0.08688971400260925, 0.23432573676109314, 0.12769784033298492, 0.0498066172003746, 0.10501405596733093, 0.14398211240768433, 0.3055479824542999, 0.0823235884308815, 0.23467087745666504, 0.6305257678031921, 0.08790664374828339, 0.14063040912151337, 0.13028757274150848], [0.04107241332530975, 0.03620494529604912, 0.07322828471660614, 0.1027759537100792, 0.08743055909872055, 0.016458408907055855, 0.09779228270053864, 0.014780157245695591, 0.09821301698684692, 0.025402111932635307, 0.0808086097240448, 0.08257035166025162, 0.07231960445642471, 0.0895148441195488, 0.19708459079265594], [0.1263897716999054, 0.01533158216625452, 0.08717449009418488, 0.22571881115436554, 0.06928549706935883, 0.16778334975242615, 0.06136450543999672, 0.07180161774158478, 0.2525678873062134, 0.32249853014945984, 0.08566119521856308, 0.48726531863212585, 0.2929263114929199, 0.21127133071422577, 0.12448348850011826], [0.1481804996728897, 0.04817945510149002, 0.03058626689016819, 0.13171793520450592, 0.10783855617046356, 0.24912205338478088, 0.1342363804578781, 0.28650397062301636, 0.25943103432655334, 0.2756144404411316, 0.08422903716564178, 0.7444766163825989, 0.7611673474311829, 0.5739472508430481, 0.11213001608848572], 
[0.1744699776172638, 0.050404343754053116, 0.018338145688176155, 0.11463086307048798, 0.02370826154947281, 0.09417468309402466, 0.04503462836146355, 0.0389062762260437, 0.1780962496995926, 0.7825090885162354, 0.15977078676223755, 0.2598268687725067, 0.05674973130226135, 0.2742767333984375, 0.15589554607868195], [0.26428407430648804, 0.0871720165014267, 0.015494171530008316, 0.31054598093032837, 0.31179672479629517, 0.05687993764877319, 0.05327969416975975, 0.14049863815307617, 0.03721972927451134, 0.33735793828964233, 0.06669215857982635, 0.44665512442588806, 0.1105320155620575, 0.07633788883686066, 0.13637836277484894], [0.27871736884117126, 0.07987862080335617, 0.06999076902866364, 0.3873903453350067, 0.3669894337654114, 0.0245819091796875, 0.02483827993273735, 0.08571609854698181, 0.04856930300593376, 0.2826782464981079, 0.10519464313983917, 0.8515737056732178, 0.24991582334041595, 0.08752243965864182, 0.1076057106256485], [0.18780259788036346, 0.02093103528022766, 0.1730981320142746, 0.27918383479118347, 0.32355740666389465, 0.05090703070163727, 0.030107326805591583, 0.015694553032517433, 0.08293543756008148, 0.11989035457372665, 0.1594303995370865, 0.6402391195297241, 0.08334839344024658, 0.13423335552215576, 0.16886292397975922], [0.23048973083496094, 0.05534357205033302, 0.15910016000270844, 0.5473513603210449, 0.11114095151424408, 0.060548413544893265, 0.23547381162643433, 0.0231330469250679, 0.22654443979263306, 0.16574865579605103, 0.03383632004261017, 0.05167527496814728, 0.026772163808345795, 0.028301218524575233, 0.08144620060920715], [0.126570925116539, 0.0055835917592048645, 0.7687394022941589, 0.6136845350265503, 0.7887718677520752, 0.24027548730373383, 0.25543272495269775, 0.017155619338154793, 0.01121050026267767, 0.02180907502770424, 0.06387564539909363, 0.04227403923869133, 0.004662328865379095, 0.0204116590321064, 0.16526305675506592], [0.3619309663772583, 0.022692076861858368, 0.8739812970161438, 0.5600091814994812, 0.4330839216709137, 0.27864721417427063, 0.1654776781797409, 0.02327956072986126, 0.003977042157202959, 0.0664801374077797, 0.12084753066301346, 0.16815124452114105, 0.07773539423942566, 0.17824198305606842, 0.05263833701610565], [0.29354482889175415, 0.16078433394432068, 0.705570638179779, 0.44417092204093933, 0.02176845259964466, 0.15997210144996643, 0.4057019054889679, 0.11617531627416611, 0.010741903446614742, 0.06882698833942413, 0.07046788930892944, 0.041601523756980896, 0.011864392086863518, 0.06714706867933273, 0.14988133311271667], [0.5400083065032959, 0.2319646179676056, 0.6198285818099976, 0.2858767509460449, 0.1694929450750351, 0.06001640111207962, 0.26940232515335083, 0.06411167979240417, 0.02847147174179554, 0.18856319785118103, 0.05879069119691849, 0.03795049339532852, 0.009596540592610836, 0.023393897339701653, 0.14663995802402496], [0.6488012075424194, 0.15997910499572754, 0.6486002802848816, 0.4859846830368042, 0.34752336144447327, 0.028076842427253723, 0.12281371653079987, 0.019826101139187813, 0.023531395941972733, 0.15743687748908997, 0.059922393411397934, 0.08707788586616516, 0.005486410576850176, 0.025385212153196335, 0.15706156194210052], [0.037294961512088776, 0.2018004208803177, 0.33537882566452026, 0.19571122527122498, 0.0998593419790268, 0.48263466358184814, 0.11429780721664429, 0.20324908196926117, 0.7053001523017883, 0.01905757561326027, 0.1765546351671219, 0.10779165476560593, 0.18456625938415527, 0.16855330765247345, 0.014784654602408409]]]], \"bot_text\": [\"The_\", \"animal_\", \"didn_\", \"'_\", \"t_\", \"cross_\", 
\"the_\", \"street_\", \"because_\", \"it_\", \"was_\", \"too_\", \"tire\", \"d_\"]}}" + ], + "text/plain": [ + "\u003cIPython.core.display.Javascript object\u003e" + ] + }, + "metadata": { + "tags": [] + }, + "output_type": "display_data" + }, + { + "data": { + "application/javascript": [ + "/**\n", + " * @fileoverview Transformer Visualization D3 javascript code.\n", + " */\n", + "\n", + "requirejs(['jquery', 'd3'],\n", + "function($, d3) {\n", + "\n", + "var attention = window.attention;\n", + "\n", + "const TEXT_SIZE = 15;\n", + "const BOXWIDTH = TEXT_SIZE * 8;\n", + "const BOXHEIGHT = TEXT_SIZE * 1.5;\n", + "const WIDTH = 2000;\n", + "const HEIGHT = attention.all.bot_text.length * BOXHEIGHT * 2 + 100;\n", + "const MATRIX_WIDTH = 150;\n", + "const head_colours = d3.scale.category10();\n", + "const CHECKBOX_SIZE = 20;\n", + "\n", + "function lighten(colour) {\n", + " var c = d3.hsl(colour);\n", + " var increment = (1 - c.l) * 0.6;\n", + " c.l += increment;\n", + " c.s -= increment;\n", + " return c;\n", + "}\n", + "\n", + "function transpose(mat) {\n", + " return mat[0].map(function(col, i) {\n", + " return mat.map(function(row) {\n", + " return row[i];\n", + " });\n", + " });\n", + "}\n", + "\n", + "function zip(a, b) {\n", + " return a.map(function (e, i) {\n", + " return [e, b[i]];\n", + " });\n", + "}\n", + "\n", + "\n", + "function renderVis(id, top_text, bot_text, attention_heads, config) {\n", + " $(id).empty();\n", + " var svg = d3.select(id)\n", + " .append('svg')\n", + " .attr(\"width\", WIDTH)\n", + " .attr(\"height\", HEIGHT);\n", + "\n", + " var att_data = [];\n", + " for (var i=0; i \u003c attention_heads.length; i++) {\n", + " var att_trans = transpose(attention_heads[i]);\n", + " att_data.push(zip(attention_heads[i], att_trans));\n", + " }\n", + "\n", + " renderText(svg, top_text, true, att_data, 0);\n", + " renderText(svg, bot_text, false, att_data, MATRIX_WIDTH + BOXWIDTH);\n", + "\n", + " renderAttentionHighlights(svg, att_data);\n", + "\n", + " svg.append(\"g\").classed(\"attention_heads\", true);\n", + "\n", + " renderAttention(svg, attention_heads);\n", + "\n", + " draw_checkboxes(config, 0, svg, attention_heads);\n", + "}\n", + "\n", + "\n", + "function renderText(svg, text, is_top, att_data, left_pos) {\n", + " var id = is_top ? 
\"top\" : \"bottom\";\n", + " var textContainer = svg.append(\"svg:g\")\n", + " .attr(\"id\", id);\n", + "\n", + " textContainer.append(\"g\").classed(\"attention_boxes\", true)\n", + " .selectAll(\"g\")\n", + " .data(att_data)\n", + " .enter()\n", + " .append(\"g\")\n", + " .selectAll(\"rect\")\n", + " .data(function(d) {return d;})\n", + " .enter()\n", + " .append(\"rect\")\n", + " .attr(\"x\", function(d, i, j) {\n", + " return left_pos + box_offset(j);\n", + " })\n", + " .attr(\"y\", function(d, i) {\n", + " return (+1) * BOXHEIGHT;\n", + " })\n", + " .attr(\"width\", BOXWIDTH/active_heads())\n", + " .attr(\"height\", function() { return BOXHEIGHT; })\n", + " .attr(\"fill\", function(d, i, j) {\n", + " return head_colours(j);\n", + " })\n", + " .style(\"opacity\", 0.0);\n", + "\n", + "\n", + " var tokenContainer = textContainer.append(\"g\").selectAll(\"g\")\n", + " .data(text)\n", + " .enter()\n", + " .append(\"g\");\n", + "\n", + " tokenContainer.append(\"rect\")\n", + " .classed(\"background\", true)\n", + " .style(\"opacity\", 0.0)\n", + " .attr(\"fill\", \"lightgray\")\n", + " .attr(\"x\", left_pos)\n", + " .attr(\"y\", function(d, i) {\n", + " return (i+1) * BOXHEIGHT;\n", + " })\n", + " .attr(\"width\", BOXWIDTH)\n", + " .attr(\"height\", BOXHEIGHT);\n", + "\n", + " var theText = tokenContainer.append(\"text\")\n", + " .text(function(d) { return d; })\n", + " .attr(\"font-size\", TEXT_SIZE + \"px\")\n", + " .style(\"cursor\", \"default\")\n", + " .style(\"-webkit-user-select\", \"none\")\n", + " .attr(\"x\", left_pos)\n", + " .attr(\"y\", function(d, i) {\n", + " return (i+1) * BOXHEIGHT;\n", + " });\n", + "\n", + " if (is_top) {\n", + " theText.style(\"text-anchor\", \"end\")\n", + " .attr(\"dx\", BOXWIDTH - TEXT_SIZE)\n", + " .attr(\"dy\", TEXT_SIZE);\n", + " } else {\n", + " theText.style(\"text-anchor\", \"start\")\n", + " .attr(\"dx\", + TEXT_SIZE)\n", + " .attr(\"dy\", TEXT_SIZE);\n", + " }\n", + "\n", + " tokenContainer.on(\"mouseover\", function(d, index) {\n", + " textContainer.selectAll(\".background\")\n", + " .style(\"opacity\", function(d, i) {\n", + " return i == index ? 1.0 : 0.0;\n", + " });\n", + "\n", + " svg.selectAll(\".attention_heads\").style(\"display\", \"none\");\n", + "\n", + " svg.selectAll(\".line_heads\") // To get the nesting to work.\n", + " .selectAll(\".att_lines\")\n", + " .attr(\"stroke-opacity\", function(d) {\n", + " return 1.0;\n", + " })\n", + " .attr(\"y1\", function(d, i) {\n", + " if (is_top) {\n", + " return (index+1) * BOXHEIGHT + (BOXHEIGHT/2);\n", + " } else {\n", + " return (i+1) * BOXHEIGHT + (BOXHEIGHT/2);\n", + " }\n", + " })\n", + " .attr(\"x1\", BOXWIDTH)\n", + " .attr(\"y2\", function(d, i) {\n", + " if (is_top) {\n", + " return (i+1) * BOXHEIGHT + (BOXHEIGHT/2);\n", + " } else {\n", + " return (index+1) * BOXHEIGHT + (BOXHEIGHT/2);\n", + " }\n", + " })\n", + " .attr(\"x2\", BOXWIDTH + MATRIX_WIDTH)\n", + " .attr(\"stroke-width\", 2)\n", + " .attr(\"stroke\", function(d, i, j) {\n", + " return head_colours(j);\n", + " })\n", + " .attr(\"stroke-opacity\", function(d, i, j) {\n", + " if (is_top) {d = d[0];} else {d = d[1];}\n", + " if (config.head_vis[j]) {\n", + " if (d) {\n", + " return d[index];\n", + " } else {\n", + " return 0.0;\n", + " }\n", + " } else {\n", + " return 0.0;\n", + " }\n", + " });\n", + "\n", + "\n", + " function updateAttentionBoxes() {\n", + " var id = is_top ? \"bottom\" : \"top\";\n", + " var the_left_pos = is_top ? 
MATRIX_WIDTH + BOXWIDTH : 0;\n", + " svg.select(\"#\" + id)\n", + " .selectAll(\".attention_boxes\")\n", + " .selectAll(\"g\")\n", + " .selectAll(\"rect\")\n", + " .attr(\"x\", function(d, i, j) { return the_left_pos + box_offset(j); })\n", + " .attr(\"y\", function(d, i) { return (i+1) * BOXHEIGHT; })\n", + " .attr(\"width\", BOXWIDTH/active_heads())\n", + " .attr(\"height\", function() { return BOXHEIGHT; })\n", + " .style(\"opacity\", function(d, i, j) {\n", + " if (is_top) {d = d[0];} else {d = d[1];}\n", + " if (config.head_vis[j])\n", + " if (d) {\n", + " return d[index];\n", + " } else {\n", + " return 0.0;\n", + " }\n", + " else\n", + " return 0.0;\n", + "\n", + " });\n", + " }\n", + "\n", + " updateAttentionBoxes();\n", + " });\n", + "\n", + " textContainer.on(\"mouseleave\", function() {\n", + " d3.select(this).selectAll(\".background\")\n", + " .style(\"opacity\", 0.0);\n", + "\n", + " svg.selectAll(\".att_lines\").attr(\"stroke-opacity\", 0.0);\n", + " svg.selectAll(\".attention_heads\").style(\"display\", \"inline\");\n", + " svg.selectAll(\".attention_boxes\")\n", + " .selectAll(\"g\")\n", + " .selectAll(\"rect\")\n", + " .style(\"opacity\", 0.0);\n", + " });\n", + "}\n", + "\n", + "function renderAttentionHighlights(svg, attention) {\n", + " var line_container = svg.append(\"g\");\n", + " line_container.selectAll(\"g\")\n", + " .data(attention)\n", + " .enter()\n", + " .append(\"g\")\n", + " .classed(\"line_heads\", true)\n", + " .selectAll(\"line\")\n", + " .data(function(d){return d;})\n", + " .enter()\n", + " .append(\"line\").classed(\"att_lines\", true);\n", + "}\n", + "\n", + "function renderAttention(svg, attention_heads) {\n", + " var line_container = svg.selectAll(\".attention_heads\");\n", + " line_container.html(null);\n", + " for(var h=0; h\u003cattention_heads.length; h++) {\n", + " for(var a=0; a\u003cattention_heads[h].length; a++) {\n", + " for(var s=0; s\u003cattention_heads[h][a].length; s++) {\n", + " line_container.append(\"line\")\n", + " .attr(\"y1\", (s+1) * BOXHEIGHT + (BOXHEIGHT/2))\n", + " .attr(\"x1\", BOXWIDTH)\n", + " .attr(\"y2\", (a+1) * BOXHEIGHT + (BOXHEIGHT/2))\n", + " .attr(\"x2\", BOXWIDTH + MATRIX_WIDTH)\n", + " .attr(\"stroke-width\", 2)\n", + " .attr(\"stroke\", head_colours(h))\n", + " .attr(\"stroke-opacity\", function() {\n", + " if (config.head_vis[h]) {\n", + " return attention_heads[h][a][s]/active_heads();\n", + " } else {\n", + " return 0.0;\n", + " }\n", + " }());\n", + " }\n", + " }\n", + " }\n", + "}\n", + "\n", + "// Checkboxes\n", + "function box_offset(i) {\n", + " var num_head_above = config.head_vis.reduce(\n", + " function(acc, val, cur) {return val \u0026\u0026 cur \u003c i ? acc + 1: acc;}, 0);\n", + " return num_head_above*(BOXWIDTH / active_heads());\n", + "}\n", + "\n", + "function active_heads() {\n", + " return config.head_vis.reduce(function(acc, val) {\n", + " return val ? 
acc + 1: acc;\n", + " }, 0);\n", + "}\n", + "\n", + "function draw_checkboxes(config, top, svg, attention_heads) {\n", + " var checkboxContainer = svg.append(\"g\");\n", + " var checkbox = checkboxContainer.selectAll(\"rect\")\n", + " .data(config.head_vis)\n", + " .enter()\n", + " .append(\"rect\")\n", + " .attr(\"fill\", function(d, i) {\n", + " return head_colours(i);\n", + " })\n", + " .attr(\"x\", function(d, i) {\n", + " return (i+1) * CHECKBOX_SIZE;\n", + " })\n", + " .attr(\"y\", top)\n", + " .attr(\"width\", CHECKBOX_SIZE)\n", + " .attr(\"height\", CHECKBOX_SIZE);\n", + "\n", + " function update_checkboxes() {\n", + " checkboxContainer.selectAll(\"rect\")\n", + " .data(config.head_vis)\n", + " .attr(\"fill\", function(d, i) {\n", + " var head_colour = head_colours(i);\n", + " var colour = d ? head_colour : lighten(head_colour);\n", + " return colour;\n", + " });\n", + " }\n", + "\n", + " update_checkboxes();\n", + "\n", + " checkbox.on(\"click\", function(d, i) {\n", + " if (config.head_vis[i] \u0026\u0026 active_heads() == 1) return;\n", + " config.head_vis[i] = !config.head_vis[i];\n", + " update_checkboxes();\n", + " renderAttention(svg, attention_heads);\n", + " });\n", + "\n", + " checkbox.on(\"dblclick\", function(d, i) {\n", + " // If we double click on the only active head then reset\n", + " if (config.head_vis[i] \u0026\u0026 active_heads() == 1) {\n", + " config.head_vis = new Array(config.num_heads).fill(true);\n", + " } else {\n", + " config.head_vis = new Array(config.num_heads).fill(false);\n", + " config.head_vis[i] = true;\n", + " }\n", + " update_checkboxes();\n", + " renderAttention(svg, attention_heads);\n", + " });\n", + "}\n", + "\n", + "var config = {\n", + " layer: 0,\n", + " att_type: 'all',\n", + "};\n", + "\n", + "function visualize() {\n", + " var num_heads = attention['all']['att'][0].length;\n", + " config.head_vis = new Array(num_heads).fill(true);\n", + " config.num_heads = num_heads;\n", + " config.attention = attention;\n", + "\n", + " render();\n", + "}\n", + "\n", + "function render() {\n", + " var conf = config.attention[config.att_type];\n", + "\n", + " var top_text = conf.top_text;\n", + " var bot_text = conf.bot_text;\n", + " var attention = conf.att[config.layer];\n", + "\n", + " $(\"#vis svg\").empty();\n", + " renderVis(\"#vis\", top_text, bot_text, attention, config);\n", + "}\n", + "\n", + "$(\"#layer\").empty();\n", + "for(var i=0; i\u003c6; i++) {\n", + " $(\"#layer\").append($(\"\u003coption /\u003e\").val(i).text(i));\n", + "}\n", + "\n", + "$(\"#layer\").on('change', function(e) {\n", + " config.layer = +e.currentTarget.value;\n", + " render();\n", + "});\n", + "\n", + "$(\"#att_type\").on('change', function(e) {\n", + " config.att_type = e.currentTarget.value;\n", + " render();\n", + "});\n", + "\n", + "$(\"button\").on('click', visualize);\n", + "\n", + "visualize();\n", + "\n", + "});\n" + ], + "text/plain": [ + "\u003cIPython.core.display.Javascript object\u003e" + ] + }, + "metadata": { + "tags": [] + }, + "output_type": "display_data" + } + ], + "source": [ + "# Convert inputs and outputs to subwords\n", + "inp_text = to_tokens(encoders[\"inputs\"].encode(inputs))\n", + "out_text = to_tokens(encoders[\"inputs\"].encode(outputs))\n", + "\n", + "# Run eval to collect attention weights\n", + "example = encode_eval(inputs, outputs)\n", + "with tfe.restore_variables_on_create(tf.train.latest_checkpoint(checkpoint_dir)):\n", + " translate_model.set_mode(Modes.EVAL)\n", + " translate_model(example)\n", + "# Get normalized attention 
weights for each layer\n", + "enc_atts, dec_atts, encdec_atts = get_att_mats()\n", + "\n", + "call_html()\n", + "attention.show(inp_text, out_text, enc_atts, dec_atts, encdec_atts)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "i7BZuO7T5BB4" + }, + "source": [ + "# Train a custom model on MNIST" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "-H25oG91YQj3" + }, + "outputs": [], + "source": [ + "# Create your own model\n", + "\n", + "class MySimpleModel(t2t_model.T2TModel):\n", + "\n", + " def body(self, features):\n", + " inputs = features[\"inputs\"]\n", + " filters = self.hparams.hidden_size\n", + " h1 = tf.layers.conv2d(inputs, filters,\n", + " kernel_size=(5, 5), strides=(2, 2))\n", + " h2 = tf.layers.conv2d(tf.nn.relu(h1), filters,\n", + " kernel_size=(5, 5), strides=(2, 2))\n", + " return tf.layers.conv2d(tf.nn.relu(h2), filters,\n", + " kernel_size=(3, 3))\n", + "\n", + "hparams = trainer_lib.create_hparams(\"basic_1\", data_dir=data_dir, problem_name=\"image_mnist\")\n", + "hparams.hidden_size = 64\n", + "model = MySimpleModel(hparams, Modes.TRAIN)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 625, + "status": "ok", + "timestamp": 1512369563515, + "user": { + "displayName": "Niki Parmar", + "photoUrl": "//lh3.googleusercontent.com/-ReuwZvCmGE8/AAAAAAAAAAI/AAAAAAAAAIc/fcvytJVpitE/s50-c-k-no/photo.jpg", + "userId": "115864460963462186442" + }, + "user_tz": 480 + }, + "id": "7GEmpYQ2ZMnB", + "outputId": "a574a1a3-ce56-4715-9ad3-8289c61ade3b" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Reading data files from /content/t2t/data/image_mnist-train*\n" + ] + } + ], + "source": [ + "# Prepare for the training loop\n", + "\n", + "# In Eager mode, opt.minimize must be passed a loss function wrapped with\n", + "# implicit_value_and_gradients\n", + "@tfe.implicit_value_and_gradients\n", + "def loss_fn(features):\n", + " _, losses = model(features)\n", + " return losses[\"training\"]\n", + "\n", + "# Setup the training data\n", + "BATCH_SIZE = 128\n", + "mnist_train_dataset = mnist_problem.dataset(Modes.TRAIN, data_dir)\n", + "mnist_train_dataset = mnist_train_dataset.repeat(None).batch(BATCH_SIZE)\n", + "\n", + "optimizer = tf.train.AdamOptimizer()" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 204 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 103766, + "status": "ok", + "timestamp": 1512369756046, + "user": { + "displayName": "Niki Parmar", + "photoUrl": "//lh3.googleusercontent.com/-ReuwZvCmGE8/AAAAAAAAAAI/AAAAAAAAAIc/fcvytJVpitE/s50-c-k-no/photo.jpg", + "userId": "115864460963462186442" + }, + "user_tz": 480 + }, + "id": "AWVd2I7PYz6H", + "outputId": "504a7876-8bbb-4e5f-f303-f951c2e071b2" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Step: 0, Loss: 0.513\n", + "Step: 50, Loss: 0.342\n", + "Step: 100, Loss: 0.315\n", + "Step: 150, Loss: 0.372\n", + "Step: 200, Loss: 0.324\n", + "Step: 250, Loss: 0.271\n", + "Step: 300, Loss: 0.281\n", + "Step: 350, Loss: 0.285\n", + "Step: 400, Loss: 0.250\n", + "Step: 450, Loss: 0.247\n", + "Step: 500, Loss: 0.338\n" + ] + } + ], + "source": [ + "# 
Train\n", + "NUM_STEPS = 500\n", + "\n", + "for count, example in enumerate(tfe.Iterator(mnist_train_dataset)):\n", + " example[\"targets\"] = tf.reshape(example[\"targets\"], [BATCH_SIZE, 1, 1, 1]) # Make it 4D.\n", + " loss, gv = loss_fn(example)\n", + " optimizer.apply_gradients(gv)\n", + "\n", + " if count % 50 == 0:\n", + " print(\"Step: %d, Loss: %.3f\" % (count, loss.numpy()))\n", + " if count \u003e= NUM_STEPS:\n", + " break" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 68 + }, + "colab_type": "code", + "executionInfo": { + "elapsed": 3833, + "status": "ok", + "timestamp": 1512369759917, + "user": { + "displayName": "Niki Parmar", + "photoUrl": "//lh3.googleusercontent.com/-ReuwZvCmGE8/AAAAAAAAAAI/AAAAAAAAAIc/fcvytJVpitE/s50-c-k-no/photo.jpg", + "userId": "115864460963462186442" + }, + "user_tz": 480 + }, + "id": "CIFlkiVOd8jO", + "outputId": "ef33057a-1a22-4ab8-ab7b-3c90d9f6a850" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Reading data files from /content/t2t/data/image_mnist-dev*\n", + "accuracy_top5: 1.00\n", + "accuracy: 0.99\n" + ] + } + ], + "source": [ + "model.set_mode(Modes.EVAL)\n", + "mnist_eval_dataset = mnist_problem.dataset(Modes.EVAL, data_dir)\n", + "\n", + "# Create eval metric accumulators for accuracy (ACC) and accuracy in\n", + "# top 5 (ACC_TOP5)\n", + "metrics_accum, metrics_result = metrics.create_eager_metrics(\n", + " [metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5])\n", + "\n", + "for count, example in enumerate(tfe.Iterator(mnist_eval_dataset)):\n", + " if count \u003e= 200:\n", + " break\n", + "\n", + " # Make the inputs and targets 4D\n", + " example[\"inputs\"] = tf.reshape(example[\"inputs\"], [1, 28, 28, 1])\n", + " example[\"targets\"] = tf.reshape(example[\"targets\"], [1, 1, 1, 1])\n", + "\n", + " # Call the model\n", + " predictions, _ = model(example)\n", + "\n", + " # Compute and accumulate metrics\n", + " metrics_accum(predictions, example[\"targets\"])\n", + "\n", + "# Print out the averaged metric values on the eval data\n", + "for name, val in metrics_result().items():\n", + " print(\"%s: %.2f\" % (name, val))" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "Tensor2Tensor Intro", + "provenance": [ + { + "file_id": "1-VScmaLkMqWiSbqgUCFWefzisSREd8l1", + "timestamp": 1512175750497 + } + ] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tensor2tensor/notebooks/t2t_problem.ipynb b/tensor2tensor/notebooks/t2t_problem.ipynb new file mode 100644 index 000000000..98e07fcbb --- /dev/null +++ b/tensor2tensor/notebooks/t2t_problem.ipynb @@ -0,0 +1,519 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Wd48fv-zDMe6" + }, + "source": [ + "# Welcome to the [Tensor2Tensor](https://github.com/tensorflow/tensor2tensor) Dataset Colab!\n", + "\n", + "Tensor2Tensor, or T2T for short, is a library of deep learning models and datasets designed to make deep learning more accessible and [accelerate ML research](https://research.googleblog.com/2017/06/accelerating-deep-learning-research.html).\n", + "\n", + "**This colab shows you how to add your own dataset to T2T so that you can train one of the several preexisting models on your newly added dataset!**\n", + "\n", + "For a tutorial that covers all the broader aspects of T2T using 
existing datasets and models, please see this [IPython notebook](https://colab.research.google.com/github/tensorflow/tensor2tensor/blob/master/tensor2tensor/notebooks/hello_t2t.ipynb)." + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "form", + "colab": {}, + "colab_type": "code", + "id": "FesA0dakI2kh" + }, + "outputs": [], + "source": [ + "#@title\n", + "# Copyright 2018 Google LLC.\n", + "\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "toc", + "id": "av8U13aqyEdf" + }, + "source": [ + "\u003e[Welcome to the Tensor2Tensor Dataset Colab!](#scrollTo=Wd48fv-zDMe6)\n", + "\n", + "\u003e\u003e[Installation \u0026 Setup](#scrollTo=Urn4QmNfI3hw)\n", + "\n", + "\u003e\u003e[Define the Problem](#scrollTo=LUoP57gOjlk9)\n", + "\n", + "\u003e\u003e\u003e[Run t2t_datagen](#scrollTo=Q1xBmlrFLSPX)\n", + "\n", + "\u003e\u003e[Viewing the generated data.](#scrollTo=MCqJhdnYgiG-)\n", + "\n", + "\u003e\u003e\u003e[tf.python_io.tf_record_iterator](#scrollTo=uNpohcPXKsLN)\n", + "\n", + "\u003e\u003e\u003e[Using tf.data.Dataset](#scrollTo=6o_1BHGQC5w5)\n", + "\n", + "\u003e\u003e[Terminology](#scrollTo=xRtfC0sHBlSo)\n", + "\n", + "\u003e\u003e\u003e[Problem](#scrollTo=xRtfC0sHBlSo)\n", + "\n", + "\u003e\u003e\u003e[Modalities](#scrollTo=xRtfC0sHBlSo)\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Urn4QmNfI3hw" + }, + "source": [ + "## Installation \u0026 Setup\n", + "\n", + "\n", + "We'll install T2T and TensorFlow.\n", + "\n", + "We also need to setup the directories where T2T will:\n", + "\n", + "* Generate the dataset and write the TFRecords file representing the training and the eval set, vocabulary files etc `DATA_DIR`\n", + "* Run the training, keep the graph and the checkpoint files `OUTPUT_DIR` and\n", + "* Use as a scratch directory to download your dataset from a URL, unzip it, etc. `TMP_DIR`" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "IBWBeE39JYaR" + }, + "outputs": [], + "source": [ + "#@title Run for installation.\n", + "\n", + "! pip install -q -U tensor2tensor\n", + "! 
pip install -q tensorflow" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "both", + "colab": {}, + "colab_type": "code", + "id": "sbTULiroLs2w" + }, + "outputs": [], + "source": [ + "#@title Run this only once - Sets up TF Eager execution.\n", + "\n", + "import sys\n", + "if 'google.colab' in sys.modules: # Colab-only TensorFlow version selector\n", + " %tensorflow_version 1.x\n", + "import tensorflow as tf\n", + "\n", + "# Enable Eager execution - useful for seeing the generated data.\n", + "tf.compat.v1.enable_eager_execution()" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "form", + "colab": {}, + "colab_type": "code", + "id": "A8JljOzDYF-Z" + }, + "outputs": [], + "source": [ + "#@title Setting a random seed.\n", + "\n", + "from tensor2tensor.utils import trainer_lib\n", + "\n", + "# Set a seed so that we have deterministic outputs.\n", + "RANDOM_SEED = 301\n", + "trainer_lib.set_random_seed(RANDOM_SEED)" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "form", + "colab": {}, + "colab_type": "code", + "id": "ioW-V1qpqSCE" + }, + "outputs": [], + "source": [ + "#@title Run for setting up directories.\n", + "\n", + "import os\n", + "\n", + "# Setup and create directories.\n", + "DATA_DIR = os.path.expanduser(\"/tmp/t2t/data\")\n", + "OUTPUT_DIR = os.path.expanduser(\"/tmp/t2t/output\")\n", + "TMP_DIR = os.path.expanduser(\"/tmp/t2t/tmp\")\n", + "\n", + "# Create them.\n", + "tf.io.gfile.makedirs(DATA_DIR)\n", + "tf.io.gfile.makedirs(OUTPUT_DIR)\n", + "tf.io.gfile.makedirs(TMP_DIR)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "LUoP57gOjlk9" + }, + "source": [ + "## Define the `Problem`\n", + "\n", + "To simplify our setting our input text sampled randomly from [a, z] - each sentence has between [3, 20] words with each word being [1, 8] characters in length.\n", + "\n", + "Example input: \"olrkpi z cldv xqcxisg cutzllf doteq\" -- this will be generated by `sample_sentence()`\n", + "\n", + "Our output will be the input words sorted according to length.\n", + "\n", + "Example output: \"z cldv doteq olrkpi xqcxisg cutzllf\" -- this will be processed by `target_sentence()`\n", + "\n", + "Let's dive right into our first problem -- we'll explain as we go on.\n", + "\n", + "Take some time to read each line along with its comments -- or skip them and come back later to clarify your understanding." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "form", + "colab": {}, + "colab_type": "code", + "id": "pDDiPxqg9UF-" + }, + "outputs": [], + "source": [ + "#@title Define `sample_sentence()` and `target_sentence(input_sentence)`\n", + "import random\n", + "import string\n", + "\n", + "def sample_sentence():\n", + " # Our sentence has between 3 and 20 words\n", + " num_words = random.randint(3, 20)\n", + " words = []\n", + " for i in range(num_words):\n", + " # Our words have between 1 and 8 characters.\n", + " num_chars = random.randint(1, 8)\n", + " chars = []\n", + " for j in range(num_chars):\n", + " chars.append(random.choice(string.ascii_lowercase))\n", + " words.append(\"\".join(chars))\n", + " return \" \".join(words)\n", + "\n", + "def target_sentence(input_sentence):\n", + " words = input_sentence.split(\" \")\n", + " return \" \".join(sorted(words, key=lambda x: len(x)))" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "KcT_x4ma-Uaq" + }, + "outputs": [], + "source": [ + "# `Problem` is the base class for any dataset that we want to add to T2T -- it\n", + "# unifies the specification of the problem for generating training data,\n", + "# training, evaluation and inference.\n", + "#\n", + "# All its methods (except `generate_data`) have reasonable default\n", + "# implementations.\n", + "#\n", + "# A sub-class must implement `generate_data(data_dir, tmp_dir)` -- this method\n", + "# is called by t2t-trainer or t2t-datagen to actually generate TFRecord dataset\n", + "# files on disk.\n", + "from tensor2tensor.data_generators import problem\n", + "\n", + "# Certain categories of problems are very common, like where either the input or\n", + "# output is text, for such problems we define an (abstract) sub-class of\n", + "# `Problem` called `Text2TextProblem` -- this implements `generate_data` in\n", + "# terms of another function `generate_samples`. Sub-classes must override\n", + "# `generate_samples` and `is_generate_per_split`.\n", + "from tensor2tensor.data_generators import text_problems\n", + "\n", + "# Every non-abstract problem sub-class (as well as models and hyperparameter\n", + "# sets) must be registered with T2T so that T2T knows about it and can look it\n", + "# up when you specify your problem on the commandline to t2t-trainer or\n", + "# t2t-datagen.\n", + "#\n", + "# One uses:\n", + "# `register_problem` for a new Problem sub-class.\n", + "# `register_model` for a new T2TModel sub-class.\n", + "# `register_hparams` for a new hyperparameter set. 
All hyperparameter sets\n", + "# typically extend `common_hparams.basic_params1` (directly or indirectly).\n", + "from tensor2tensor.utils import registry\n", + "\n", + "\n", + "# By default, when you register a problem (or model or hyperparameter set) the\n", + "# name with which it gets registered is the 'snake case' version -- so here\n", + "# the Problem class `SortWordsAccordingToLengthRandom` will be registered with\n", + "# the name `sort_words_according_to_length_random`.\n", + "#\n", + "# One can override this default by actually assigning a name as follows:\n", + "# `@registry.register_problem(\"my_awesome_problem\")`\n", + "#\n", + "# The registered name is specified to the t2t-trainer or t2t-datagen using the\n", + "# commandline flag `--problem`.\n", + "@registry.register_problem\n", + "\n", + "# We inherit from `Text2TextProblem` which takes care of a lot of details\n", + "# regarding reading and writing the data to disk, what vocabulary type one\n", + "# should use, its size etc -- so that we need not worry about them, one can,\n", + "# of course, override those.\n", + "class SortWordsAccordingToLengthRandom(text_problems.Text2TextProblem):\n", + " \"\"\"Sort words on length in randomly generated text.\"\"\"\n", + "\n", + " # START: Methods we should override.\n", + "\n", + " # The methods that need to be overriden from `Text2TextProblem` are:\n", + " # `is_generate_per_split` and\n", + " # `generate_samples`.\n", + "\n", + " @property\n", + " def is_generate_per_split(self):\n", + " # If we have pre-existing data splits for (train, eval, test) then we set\n", + " # this to True, which will have generate_samples be called for each of the\n", + " # dataset_splits.\n", + " #\n", + " # If we do not have pre-existing data splits, we set this to False, which\n", + " # will have generate_samples be called just once and the Problem will\n", + " # automatically partition the data into dataset_splits.\n", + " return False\n", + "\n", + " def generate_samples(self, data_dir, tmp_dir, dataset_split):\n", + " # Here we are generating the data in-situ using the `sample_sentence`\n", + " # function, otherwise we would have downloaded the data and put it in\n", + " # `tmp_dir` -- and read it from that location.\n", + " del tmp_dir\n", + "\n", + " # Unused here, is used in `Text2TextProblem.generate_data`.\n", + " del data_dir\n", + "\n", + " # This would have been useful if `self.is_generate_per_split()` was True.\n", + " # In that case we would have checked if we were generating a training,\n", + " # evaluation or test sample. 
This is of type `problem.DatasetSplit`.\n", + " del dataset_split\n", + "\n", + " # Just an arbitrary limit to our number of examples, this can be set higher.\n", + " MAX_EXAMPLES = 10\n", + "\n", + " for i in range(MAX_EXAMPLES):\n", + " sentence_input = sample_sentence()\n", + " sentence_target = target_sentence(sentence_input)\n", + " yield {\n", + " \"inputs\" : sentence_input,\n", + " \"targets\" : sentence_target,\n", + " }\n", + "\n", + " # END: Methods we should override.\n", + "\n", + " # START: Overridable methods.\n", + "\n", + " @property\n", + " def vocab_type(self):\n", + " # We can use different types of vocabularies, `VocabType.CHARACTER`,\n", + " # `VocabType.SUBWORD` and `VocabType.TOKEN`.\n", + " #\n", + " # SUBWORD and CHARACTER are fully invertible -- but SUBWORD provides a good\n", + " # tradeoff between CHARACTER and TOKEN.\n", + " return text_problems.VocabType.SUBWORD\n", + "\n", + " @property\n", + " def approx_vocab_size(self):\n", + " # Approximate vocab size to generate. Only for VocabType.SUBWORD.\n", + " return 2**13 # ~8k\n", + "\n", + " @property\n", + " def dataset_splits(self):\n", + " # Since we are responsible for generating the dataset splits, we override\n", + " # `Text2TextProblem.dataset_splits` to specify that we intend to keep\n", + " # 80% data for training and 10% for evaluation and testing each.\n", + " return [{\n", + " \"split\": problem.DatasetSplit.TRAIN,\n", + " \"shards\": 8,\n", + " }, {\n", + " \"split\": problem.DatasetSplit.EVAL,\n", + " \"shards\": 1,\n", + " }, {\n", + " \"split\": problem.DatasetSplit.TEST,\n", + " \"shards\": 1,\n", + " }]\n", + "\n", + " # END: Overridable methods." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "HwxQpOKhrolK" + }, + "source": [ + "That's it!\n", + "\n", + "To use this with `t2t-trainer` or `t2t-datagen`, save it to a directory, add an `__init__.py` that imports it, and then specify that directory with `--t2t_usr_dir`.\n", + "\n", + "i.e. as follows:\n", + "\n", + "```\n", + "$ t2t-datagen \\\n", + " --problem=sort_words_according_to_length_random \\\n", + " --data_dir=/tmp/t2t/data \\\n", + " --tmp_dir=/tmp/t2t/tmp \\\n", + " --t2t_usr_dir=/tmp/t2t/usr\n", + "\n", + "```\n", + "\n", + "However, we'll generate the data from the colab itself as well -- this is what `t2t-datagen` essentially does." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "Q1xBmlrFLSPX" + }, + "source": [ + "## Generate the data.\n", + "\n", + "We will now generate the data by calling `Problem.generate_data()` and inspect it." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "atYWRpM1FgaJ" + }, + "outputs": [], + "source": [ + "sort_len_problem = SortWordsAccordingToLengthRandom()\n", + "\n", + "sort_len_problem.generate_data(DATA_DIR, TMP_DIR)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "MCqJhdnYgiG-" + }, + "source": [ + "## Viewing the generated data.\n", + "\n", + "`tf.data.Dataset` is the recommended API for inputting data into a TensorFlow graph and the `Problem.dataset()` method returns a `tf.data.Dataset` object.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "PZczDWnOQDp2" + }, + "outputs": [], + "source": [ + "Modes = tf.estimator.ModeKeys\n", + "\n", + "# We can iterate over our examples by making an iterator and calling next on it.\n", + "sort_len_problem_dataset = sort_len_problem.dataset(Modes.EVAL, DATA_DIR)\n", + "eager_iterator = sort_len_problem_dataset.make_one_shot_iterator()\n", + "example = next(eager_iterator)\n", + "\n", + "input_tensor = example[\"inputs\"]\n", + "target_tensor = example[\"targets\"]\n", + "\n", + "# The tensors are actually encoded using the generated vocabulary file -- you\n", + "# can inspect the actual vocab file in DATA_DIR.\n", + "print(\"Tensor Input: \" + str(input_tensor))\n", + "print(\"Tensor Target: \" + str(target_tensor))" + ] + }, + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "colab": {}, + "colab_type": "code", + "id": "1DtfzgqivAxl" + }, + "outputs": [], + "source": [ + "\n", + "# We use the encoders to decode the tensors to the actual input text.\n", + "input_encoder = sort_len_problem.get_feature_encoders(\n", + " data_dir=DATA_DIR)[\"inputs\"]\n", + "target_encoder = sort_len_problem.get_feature_encoders(\n", + " data_dir=DATA_DIR)[\"targets\"]\n", + "\n", + "input_decoded = input_encoder.decode(input_tensor.numpy())\n", + "target_decoded = target_encoder.decode(target_tensor.numpy())\n", + "\n", + "print(\"Decoded Input: \" + input_decoded)\n", + "print(\"Decoded Target: \" + target_decoded)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "xRtfC0sHBlSo" + }, + "source": [ + "## To be continued ...\n", + "\n", + "Stay tuned for additions to this notebook for adding problems with non-text modalities like Images, Audio and Video!" + ] + } + ], + "metadata": { + "colab": { + "collapsed_sections": [], + "name": "t2t_problem.ipynb", + "provenance": [ + { + "file_id": "1FwspR4PzEZAiQCGziob5oov-8DyEXSnw", + "timestamp": 1533664607636 + } + ] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tensor2tensor/problems.py b/tensor2tensor/problems.py new file mode 100644 index 000000000..a16a1a717 --- /dev/null +++ b/tensor2tensor/problems.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Access T2T Problems.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators import all_problems +from tensor2tensor.utils import registry + + +def problem(name): + return registry.problem(name) + + +def available(): + return registry.list_base_problems() + + +all_problems.import_modules(all_problems.ALL_MODULES) diff --git a/tensor2tensor/problems_colab.py b/tensor2tensor/problems_colab.py new file mode 100644 index 000000000..20a7fa8b4 --- /dev/null +++ b/tensor2tensor/problems_colab.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Access T2T Problems.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators import all_problems +from tensor2tensor.utils import registry + + +def problem(name): + return registry.problem(name) + + +def available(): + return sorted(registry.list_problems()) + + +# Import problem modules +_modules = list(all_problems.MODULES) + +all_problems.import_modules(_modules) diff --git a/tensor2tensor/problems_test.py b/tensor2tensor/problems_test.py new file mode 100644 index 000000000..5753cb8fe --- /dev/null +++ b/tensor2tensor/problems_test.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""tensor2tensor.problems test.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor import problems + +import tensorflow.compat.v1 as tf + + +class ProblemsTest(tf.test.TestCase): + + def testImport(self): + self.assertIsNotNone(problems) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/rl/README.md b/tensor2tensor/rl/README.md new file mode 100644 index 000000000..8ca6fb3e2 --- /dev/null +++ b/tensor2tensor/rl/README.md @@ -0,0 +1,222 @@ +# Tensor2Tensor Model-Based Reinforcement Learning. + +The `rl` package allows to run reinforcement learning algorithms, +both model-free (e.g., [Proximal Policy Optimization](https://arxiv.org/abs/1707.06347), train with `trainer_model_free.py`) and model-based ones ([SimPLe](https://arxiv.org/abs/1903.00374), train with `trainer_model_based.py`). 
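+
+For orientation, the two entry points are invoked as follows; prerequisites and
+the full variants of these commands (including evaluation) are given in the
+sections below:
+
+```
+# Model-free training (PPO):
+python -m tensor2tensor.rl.trainer_model_free \
+  --hparams_set=rlmf_base \
+  --hparams=game=pong \
+  --output_dir=~/t2t_train/mf_pong
+
+# Model-based training (SimPLe, deterministic world model):
+python -m tensor2tensor.rl.trainer_model_based \
+  --loop_hparams_set=rlmb_base \
+  --loop_hparams=game=pong \
+  --output_dir ~/t2t_train/mb_det_pong
+```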
+ +You should be able to reproduce the [Model-Based Reinforcement Learning for Atari](https://arxiv.org/abs/1903.00374) results. [These videos](https://sites.google.com/corp/view/modelbasedrlatari/home) show what to expect from the final models. + +To use this package, we recommend Tensorflow 1.13.1 and T2T version 1.13.1. +You also need to install the Atari dependencies for OpenAI Gym: + +``` +pip install gym[atari] +``` + +[This iPython notebook](https://colab.research.google.com/github/tensorflow/tensor2tensor/blob/master/tensor2tensor/notebooks/hello_t2t-rl.ipynb) provides a quick start if you want to check out the videos. + + +## Play using a pre-trained policy + +We provide a set of pretrained policies and models you can use. To evaluate and +generate videos for a pretrained policy on Pong: + +``` +OUTPUT_DIR=~/t2t_train/pong_pretrained +python -m tensor2tensor.rl.evaluator \ + --loop_hparams_set=rlmb_long_stochastic_discrete \ + --loop_hparams=game=pong \ + --policy_dir=gs://tensor2tensor-checkpoints/modelrl_experiments/train_sd/142/policy \ + --eval_metrics_dir=$OUTPUT_DIR \ + --debug_video_path=$OUTPUT_DIR \ + --num_debug_videos=4 +``` + +By default, it will run a grid of different evaluation settings (sampling +temperatures and whether to do initial rollouts). You can override those +settings: + +``` + --loop_hparams=game=pong,eval_max_num_noops=0,eval_sampling_temps=[0.0] +``` + +TensorBoard metrics are exported to the `eval_metrics_dir`. To view them, run: + +``` +tensorboard --logdir=~/t2t_train/pong_pretrained +``` + +Description of player controls and flags can be found in `tensor2tensor/rl/player.py`. + + +## Train your policy (model-free training) + +Training model-free on Pong: + +``` +python -m tensor2tensor.rl.trainer_model_free \ + --hparams_set=rlmf_base \ + --hparams=game=pong \ + --output_dir=~/t2t_train/mf_pong +``` + +Hyperparameter sets are defined in `tensor2tensor/models/research/rl.py`. You +can override them using the `hparams` flag, e.g. + +``` + --hparams=game=kung_fu_master,frame_stack_size=5 +``` + +As in model-based training, the periodic evaluation runs with timestep limit +of 1000. To do full evaluation after training, run: + +``` +OUTPUT_DIR=~/t2t_train/mf_pong +python -m tensor2tensor.rl.evaluator \ + --loop_hparams_set=rlmf_base \ + --hparams=game=pong \ + --policy_dir=$OUTPUT_DIR \ + --eval_metrics_dir=$OUTPUT_DIR/full_eval_metrics +``` + +## World Model training (with random trajectories) + +The simplest way to train your own world model is to use random trajectories. +Then you can train a policy on it as described next. + +To train a deterministic model: + +``` +python -m tensor2tensor.rl.trainer_model_based \ + --loop_hparams_set=rlmb_base \ + --loop_hparams=game=pong,epochs=1,ppo_epochs_num=0 \ + --output_dir=~/t2t_train/mb_det_pong_random +``` + +To train a stochastic discrete model (it will require more time and memory): + +``` +python -m tensor2tensor.rl.trainer_model_based \ + --loop_hparams_set=rlmb_base_stochastic_discrete \ + --loop_hparams=game=pong,epochs=1,ppo_epochs_num=0 \ + --output_dir=~/t2t_train/mb_sd_pong_random +``` + +## Playing in the world model + +To assess world model quality you can play in it, as in an Atari emulator +(you need a machine with GPU for this). 
First install `pygame`: + +``` +pip install pygame +``` + +Then you can run the player, specifying a path to world model checkpoints: + +``` +OUTPUT_DIR=~/t2t_train/mb_sd_pong_pretrained +mkdir -p $OUTPUT_DIR +gsutil -m cp -r \ + gs://tensor2tensor-checkpoints/modelrl_experiments/train_sd/142/world_model \ + $OUTPUT_DIR/ +python -m tensor2tensor.rl.player \ + --wm_dir=$OUTPUT_DIR/world_model \ + --loop_hparams_set=rlmb_base_stochastic_discrete \ + --loop_hparams=game=pong \ + --game_from_filenames=False \ + --zoom=3 \ + --fps=5 +``` + +The screen is split into 3 columns: frame from the world model, corresponding +frame from the real environment and the difference between the two. Use WSAD +and space to control the agent. The model will likely diverge quickly, press X +to reset it using the current state of the real environment. Note that frames +fed to the model were likely never seen by it during training, so the model's +performance will be worse than during the policy training. + +For more details on controls and flags see `tensor2tensor/rl/player.py`. + + +## Model-based training with pre-trained world models + +To train a policy with a pretrained world model (requires Google Cloud SDK): + +``` +OUTPUT_DIR=~/t2t_train/mb_sd_pong_pretrained +mkdir -p $OUTPUT_DIR +gsutil -m cp -r \ + gs://tensor2tensor-checkpoints/modelrl_experiments/train_sd/142/world_model \ + $OUTPUT_DIR/ +python -m tensor2tensor.rl.trainer_model_based \ + --loop_hparams_set=rlmb_base_stochastic_discrete \ + --loop_hparams=game=pong,epochs=1,model_train_steps=0 \ + --eval_world_model=False \ + --output_dir=$OUTPUT_DIR +``` + +Note that this command will collect some frames from the real environment for +random starts. + +The same command can be used to resume interrupted training - checkpoints are +saved in `output_dir`. + +We use `NoFrameskip-v4` game mode with our own frame skip (4 by default). + +The training script runs periodic evaluation, but with timestep limit 1000 to +make it faster. To do full evaluation after training, run: + +``` +python -m tensor2tensor.rl.evaluator \ + --loop_hparams_set=rlmb_base_stochastic_discrete \ + --hparams=game=pong \ + --policy_dir=$OUTPUT_DIR \ + --eval_metrics_dir=$OUTPUT_DIR/full_eval_metrics +``` + + +## Full model-based training + +Our full training pipeline involves alternating between collecting data using +policy, training the world model and training the policy inside the model. It +requires significantly more time (several days to a week, depending on your +hardware and the model you use). + +To train a deterministic model: + +``` +python -m tensor2tensor.rl.trainer_model_based \ + --loop_hparams_set=rlmb_base \ + --loop_hparams=game=pong \ + --output_dir ~/t2t_train/mb_det_pong +``` + +To train a stochastic discrete model: + +``` +python -m tensor2tensor.rl.trainer_model_based \ + --loop_hparams_set=rlmb_base_stochastic_discrete \ + --loop_hparams=game=pong \ + --output_dir ~/t2t_train/mb_sd_pong +``` + +Hyperparameter sets are defined in +`tensor2tensor/rl/trainer_model_based_params.py`. Hyperparameter sets for the +world model and agent are nested within `loop_hparams` by name. You can change +them with: + +``` + --loop_hparams=game=freeway,generative_model=next_frame_basic_deterministic,base_algo_params=ppo_original_params +``` + +Game names should be provided in `snake_case`. + + +## Using checkpoints for other games + +We provide pretrained policies and stochastic discrete models for most of the +Atari games in OpenAI Gym. 
They are available in Google Cloud Storage at +`gs://tensor2tensor-checkpoints/modelrl_experiments/train_sd/N`, where `N` is +a run number in range 1 - 180. Games with checkpoints are defined in +`tensor2tensor.data_generators.gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE` and +are numbered according to this order, with 5 runs per game. For example, runs +for Amidar have numbers 6 - 10. diff --git a/tensor2tensor/rl/__init__.py b/tensor2tensor/rl/__init__.py new file mode 100644 index 000000000..ff174dd63 --- /dev/null +++ b/tensor2tensor/rl/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tensor2tensor/rl/batch_dqn_agent_test.py b/tensor2tensor/rl/batch_dqn_agent_test.py new file mode 100644 index 000000000..fc826f0d0 --- /dev/null +++ b/tensor2tensor/rl/batch_dqn_agent_test.py @@ -0,0 +1,158 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for BatchDQNAgent.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import shutil + +from absl import flags +from dopamine.agents.dqn import dqn_agent +from dopamine.discrete_domains import atari_lib +import numpy as np + +from tensor2tensor.rl import dopamine_connector + +import tensorflow.compat.v1 as tf + +FLAGS = flags.FLAGS + + +class BatchDQNAgentTest(tf.test.TestCase): + # TODO(kozak): add testStepTrain (and possibly other tests) from dopamine + # dqn_agent_test.py + + def setUp(self): + super(BatchDQNAgentTest, self).setUp() + self._test_subdir = os.path.join('/tmp/dopamine_tests', 'ckpts') + shutil.rmtree(self._test_subdir, ignore_errors=True) + os.makedirs(self._test_subdir) + self.num_actions = 4 + self.min_replay_history = 6 + self.update_period = 2 + self.target_update_period = 4 + self.epsilon_decay_period = 90 + self.epsilon_train = 0.05 + self.observation_shape = dqn_agent.NATURE_DQN_OBSERVATION_SHAPE + self.stack_size = dqn_agent.NATURE_DQN_STACK_SIZE + self.env_batch_size = 4 + + self.zero_state = np.zeros( + [self.env_batch_size, self.observation_shape[0], + self.observation_shape[1], self.stack_size]) + + def _create_test_agent(self, sess): + stack_size = self.stack_size + + class MockDQNNetwork(tf.keras.Model): + """The Keras network used in tests.""" + + def __init__(self, num_actions, **kwargs): + # This weights_initializer gives action 0 a higher weight, ensuring + # that it gets picked by the argmax. + super(MockDQNNetwork, self).__init__(**kwargs) + weights_initializer = np.tile( + np.arange(num_actions, 0, -1), (stack_size, 1)) + self.layer = tf.keras.layers.Dense( + num_actions, + kernel_initializer=tf.constant_initializer(weights_initializer), + bias_initializer=tf.ones_initializer()) + + def call(self, state): + inputs = tf.constant( + np.zeros((state.shape[0], stack_size)), dtype=tf.float32) + return atari_lib.DQNNetworkType(self.layer((inputs))) + + agent = dopamine_connector.BatchDQNAgent( + network=MockDQNNetwork, + replay_capacity=100, + buffer_batch_size=8, + generates_trainable_dones=True, + sess=sess, + env_batch_size=self.env_batch_size, + num_actions=self.num_actions, + min_replay_history=self.min_replay_history, + epsilon_fn=lambda w, x, y, z: 0.0, # No exploration. + update_period=self.update_period, + target_update_period=self.target_update_period, + epsilon_eval=0.0) # No exploration during evaluation. + # This ensures non-random action choices (since epsilon_eval = 0.0) and + # skips the train_step. + agent.eval_mode = True + sess.run(tf.global_variables_initializer()) + return agent + + def testCreateAgentWithDefaults(self): + # Verifies that we can create and train an agent with the default values. + with tf.Session() as sess: + agent = self._create_test_agent(sess) + sess.run(tf.global_variables_initializer()) + observation = np.ones([84, 84, 1]) + agent.begin_episode([observation]) + agent.step(reward=[1], observation=[observation]) + agent.end_episode(reward=[1]) + + def testBeginEpisode(self): + """Test the functionality of agent.begin_episode. + + Specifically, the action returned and its effect on state. + """ + with tf.Session() as sess: + agent = self._create_test_agent(sess) + # We fill up the state with 9s. On calling agent.begin_episode the state + # should be reset to all 0s. 
+ agent.state_batch.fill(9) + first_observation = np.ones( + [self.env_batch_size, self.observation_shape[0], + self.observation_shape[1], 1]) + self.assertTrue((agent.begin_episode(first_observation) == 0).all()) + # When the all-1s observation is received, it will be placed at the end of + # the state. + expected_state = self.zero_state + expected_state[:, :, :, -1] = np.ones( + [self.env_batch_size, self.observation_shape[0], + self.observation_shape[1]]) + self.assertAllEqual(agent.state_batch, expected_state) + self.assertAllEqual(agent._observation_batch, first_observation[..., 0]) + # No training happens in eval mode. + self.assertEqual(agent.training_steps, 0) + + # This will now cause training to happen. + agent.eval_mode = False + # Having a low replay memory add_count will prevent any of the + # train/prefetch/sync ops from being called. + agent._replay.memory.add_count = 0 + second_observation = np.ones( + [self.env_batch_size, self.observation_shape[0], + self.observation_shape[1], 1]) * 2 + agent.begin_episode(second_observation) + # The agent's state will be reset, so we will only be left with the all-2s + # observation. + expected_state[:, :, :, -1] = np.full( + (self.env_batch_size, self.observation_shape[0], + self.observation_shape[1]), 2 + ) + self.assertAllEqual(agent.state_batch, expected_state) + self.assertAllEqual(agent._observation_batch, + second_observation[:, :, :, 0]) + # training_steps is incremented since we set eval_mode to False. + self.assertEqual(agent.training_steps, 1) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/rl/batch_runner_test.py b/tensor2tensor/rl/batch_runner_test.py new file mode 100644 index 000000000..648e7ff6e --- /dev/null +++ b/tensor2tensor/rl/batch_runner_test.py @@ -0,0 +1,284 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for BatchRunner.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import shutil + +from absl import flags +from dopamine.discrete_domains import logger +import mock +import numpy as np + +from tensor2tensor.rl import dopamine_connector + +import tensorflow.compat.v1 as tf + + +FLAGS = flags.FLAGS + + +def _create_mock_checkpointer(): + mock_checkpointer = mock.Mock() + test_dictionary = {"current_iteration": 1729, + "logs": "logs"} + mock_checkpointer.load_checkpoint.return_value = test_dictionary + return mock_checkpointer + + +class MockEnvironment(object): + """Mock environment for testing.""" + + def __init__(self, max_steps=10, reward_multiplier=1): + self._observation = 0 + self.max_steps = max_steps + self.reward_multiplier = reward_multiplier + self.game_over = False + + def reset(self): + self._observation = 0 + return self._observation + + def step(self, action): + self._observation += 1 + action_reward_multiplier = -1 if action > 0 else 1 + reward_multiplier = self.reward_multiplier * action_reward_multiplier + reward = self._observation * reward_multiplier + is_terminal = self._observation >= self.max_steps + self.game_over = is_terminal + + unused = 0 + return (self._observation, reward, is_terminal, unused) + + def render(self, mode): + pass + + +class BatchEnv(object): + """Batch env. + + Batch of environments. Assumes that all throws "done" on the same step. + + Observations and rewards are returned as arrays, done as single value. + """ + + # TODO(kozak): this can be used for mbrl pipeline (for both simulated and + # real env), move it to dopamine_connector.py (rename it?) + def __init__(self, envs): + self.env_batch = envs + self.batch_size = len(self.env_batch) + self.max_steps = self.env_batch[0].max_steps + assert np.all(self.max_steps == env.max_steps for env in self.env_batch) + + def step(self, actions): + ret = [env.step(action) for env, action in zip(self.env_batch, actions)] + obs, rewards, dones, infos = [np.array(r) for r in zip(*ret)] + done = dones[0] + assert np.all(done == dones) + self.game_over = done + return obs, rewards, done, infos + + def reset(self): + return np.array([env.reset() for env in self.env_batch]) + + def render(self, mode): + pass + + +class MockLogger(object): + """Class to mock the experiment logger.""" + + def __init__(self, test_cls=None, run_asserts=True, data=None): + self._test_cls = test_cls + self._run_asserts = run_asserts + self._iter = 0 + self._calls_to_set = 0 + self._calls_to_log = 0 + self.data = data + + def __setitem__(self, key, val): + if self._run_asserts: + self._test_cls.assertEqual("iteration_{:d}".format(self._iter), key) + self._test_cls.assertEqual("statistics", val) + self._iter += 1 + self._calls_to_set += 1 + + def log_to_file(self, filename_prefix, iteration_number): + if self._run_asserts: + self._test_cls.assertEqual( + "prefix_{}".format(self._iter - 1), + "{}_{}".format(filename_prefix, iteration_number)) + self._calls_to_log += 1 + + +class BatchedRunnerTest(tf.test.TestCase): + """Modified tests from dopamine run_experiment_test.py.""" + + # TODO(kozak): decide if we want to use and modify more tests from + # dopamine/tests/atari/run_experiment_test.py (e.g. 
testRunExperiment.py) + + def _agent_step(self, rewards, observations): + # We verify that rewards are clipped (and set by MockEnvironment as a + # function of observation) + # observation = observations[0] + # expected_rewards = [1 if observation % 2 else -1] + # self.assertEqual(expected_reward, reward) + actions = [ob % 2 for ob in observations] + return actions + + def prepare_mock_agent(self, batch_size): + assert batch_size % 2 == 0, "Some of tests assume that batch_size % 2 == 0" + self.batch_size = batch_size + self._agent = mock.Mock() + self._agent.begin_episode.side_effect = \ + lambda x: np.repeat(0, self.batch_size) + self._agent.step.side_effect = self._agent_step + self._create_agent_fn = lambda x, y, summary_writer: self._agent + + def setUp(self): + super(BatchedRunnerTest, self).setUp() + self._test_subdir = "/tmp/dopamine_tests" + shutil.rmtree(self._test_subdir, ignore_errors=True) + os.makedirs(self._test_subdir) + self.prepare_mock_agent(batch_size=4) + + def testRunEpisodeBatch(self): + max_steps_per_episode = 11 + batch_size = self.batch_size + reward_multipliers = [-1, 1] * int(batch_size / 2) + envs = [MockEnvironment(reward_multiplier=rm) for rm in reward_multipliers] + environment = BatchEnv(envs) + runner = dopamine_connector.BatchRunner( + self._test_subdir, self._create_agent_fn, + create_environment_fn=lambda: environment, + max_steps_per_episode=max_steps_per_episode) + step_number, total_rewards = runner._run_one_episode() + + self.assertEqual(self._agent.step.call_count, environment.max_steps - 1) + self.assertEqual(self._agent.end_episode.call_count, 1) + self.assertEqual(environment.max_steps, step_number / batch_size) + # Expected reward will be \sum_{i=0}^{9} (-1)**i * i = -5 when reward + # multiplier=1 + self.assertAllEqual(np.array(reward_multipliers) * -5, total_rewards) + + def testRunOneEpisodeWithLowMaxSteps(self): + max_steps_per_episode = 2 + batch_size = self.batch_size + reward_multipliers = [-1, 1] * int(batch_size / 2) + envs = [MockEnvironment(reward_multiplier=rm) for rm in reward_multipliers] + environment = BatchEnv(envs) + runner = dopamine_connector.BatchRunner( + self._test_subdir, self._create_agent_fn, + create_environment_fn=lambda: environment, + max_steps_per_episode=max_steps_per_episode) + step_number, total_rewards = runner._run_one_episode() + + self.assertEqual(self._agent.step.call_count, max_steps_per_episode - 1) + self.assertEqual(self._agent.end_episode.call_count, 1) + self.assertEqual(max_steps_per_episode, step_number / batch_size) + self.assertAllEqual(np.array(reward_multipliers) * -1, total_rewards) + + def testRunOnePhase(self): + batch_size = self.batch_size + environment_steps = 2 + max_steps = environment_steps * batch_size * 10 + + envs = [MockEnvironment(max_steps=environment_steps) + for _ in range(batch_size)] + + environment = BatchEnv(envs) + runner = dopamine_connector.BatchRunner( + self._test_subdir, self._create_agent_fn, + create_environment_fn=lambda: environment) + + statistics = [] + + step_number, sum_returns, num_episodes = runner._run_one_phase( + max_steps, statistics, "test") + calls_to_run_episode = int(max_steps / (environment_steps * batch_size)) + self.assertEqual(self._agent.step.call_count, calls_to_run_episode) + self.assertEqual(self._agent.end_episode.call_count, calls_to_run_episode) + self.assertEqual(max_steps, step_number) + self.assertEqual(-1 * calls_to_run_episode * batch_size, sum_returns) + self.assertEqual(calls_to_run_episode, num_episodes / batch_size) + 
expected_statistics = [] + for _ in range(calls_to_run_episode * batch_size): + expected_statistics.append({ + "test_episode_lengths": 2, + "test_episode_returns": -1 + }) + self.assertEqual(len(expected_statistics), len(statistics)) + for expected_stats, stats in zip(expected_statistics, statistics): + self.assertDictEqual(expected_stats, stats) + + def testRunOneIteration(self): + environment_steps = 2 + batch_size = self.batch_size + envs = [MockEnvironment(max_steps=environment_steps) + for _ in range(batch_size)] + + environment = BatchEnv(envs) + + training_steps = 20 * batch_size + evaluation_steps = 10 * batch_size + + runner = dopamine_connector.BatchRunner( + self._test_subdir, self._create_agent_fn, + create_environment_fn=lambda: environment, + training_steps=training_steps, evaluation_steps=evaluation_steps + ) + + dictionary = runner._run_one_iteration(1) + train_rollouts = int(training_steps / environment_steps) + eval_rollouts = int(evaluation_steps / environment_steps) + expected_dictionary = { + "train_episode_lengths": [2 for _ in range(train_rollouts)], + "train_episode_returns": [-1 for _ in range(train_rollouts)], + "train_average_return": [-1], + "eval_episode_lengths": [2 for _ in range(eval_rollouts)], + "eval_episode_returns": [-1 for _ in range(eval_rollouts)], + "eval_average_return": [-1] + } + self.assertDictEqual(expected_dictionary, dictionary) + + @mock.patch.object(logger, "Logger") + def testLogExperiment(self, mock_logger_constructor): + # TODO(kozak): We probably do not need this test, dopamine test + # for Runner is enough here. Remove this? + log_every_n = 2 + logging_file_prefix = "prefix" + statistics = "statistics" + experiment_logger = MockLogger(test_cls=self) + mock_logger_constructor.return_value = experiment_logger + runner = dopamine_connector.BatchRunner( + self._test_subdir, self._create_agent_fn, + create_environment_fn=mock.Mock, + logging_file_prefix=logging_file_prefix, + log_every_n=log_every_n) + num_iterations = 10 + for i in range(num_iterations): + runner._log_experiment(i, statistics) + self.assertEqual(num_iterations, experiment_logger._calls_to_set) + self.assertEqual((num_iterations / log_every_n), + experiment_logger._calls_to_log) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/rl/datagen_with_agent.py b/tensor2tensor/rl/datagen_with_agent.py new file mode 100644 index 000000000..66a68e780 --- /dev/null +++ b/tensor2tensor/rl/datagen_with_agent.py @@ -0,0 +1,69 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Generate trajectories to disk with random or ckpt agent. 
+ +TODO: Usage +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators import gym_env +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("data_dir", "", "Data directory.") +flags.DEFINE_string("tmp_dir", "/tmp/t2t_datagen", + "Temporary storage directory.") +flags.DEFINE_string("game", None, "Atari game to generate data for.") +flags.DEFINE_integer("num_env_steps", 5000, "Number of steps to roll out.") +flags.DEFINE_boolean("eval", False, "Whether to run in eval mode.") + + +def main(_): + + tf.gfile.MakeDirs(FLAGS.data_dir) + tf.gfile.MakeDirs(FLAGS.tmp_dir) + + # Create problem if not already defined + problem_name = "gym_discrete_problem_with_agent_on_%s" % FLAGS.game + if problem_name not in registry.Registries.problems: + gym_env.register_game(FLAGS.game) + + # Generate + tf.logging.info("Running %s environment for %d steps for trajectories.", + FLAGS.game, FLAGS.num_env_steps) + problem = registry.problem(problem_name) + problem.settable_num_steps = FLAGS.num_env_steps + problem.settable_eval_phase = FLAGS.eval + problem.generate_data(FLAGS.data_dir, FLAGS.tmp_dir) + + # Log stats + if problem.statistics.number_of_dones: + mean_reward = (problem.statistics.sum_of_rewards / + problem.statistics.number_of_dones) + tf.logging.info("Mean reward: %.2f, Num dones: %d", + mean_reward, + problem.statistics.number_of_dones) + + +if __name__ == "__main__": + tf.app.run(main) diff --git a/tensor2tensor/rl/dopamine_connector.py b/tensor2tensor/rl/dopamine_connector.py new file mode 100644 index 000000000..e51a0c6c9 --- /dev/null +++ b/tensor2tensor/rl/dopamine_connector.py @@ -0,0 +1,838 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Connects dopamine to as the another rl traning framework.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import random +import sys + +from dopamine.agents.dqn import dqn_agent +from dopamine.agents.rainbow import rainbow_agent +from dopamine.replay_memory import circular_replay_buffer +from dopamine.replay_memory.circular_replay_buffer import OutOfGraphReplayBuffer +from dopamine.replay_memory.circular_replay_buffer import ReplayElement +from dopamine.replay_memory.prioritized_replay_buffer import OutOfGraphPrioritizedReplayBuffer +from dopamine.replay_memory.prioritized_replay_buffer import WrappedPrioritizedReplayBuffer +import numpy as np + +from tensor2tensor.rl.policy_learner import PolicyLearner +import tensorflow.compat.v1 as tf + +# pylint: disable=g-import-not-at-top +# pylint: disable=ungrouped-imports +try: + import cv2 +except ImportError: + cv2 = None + +try: + from dopamine.discrete_domains import run_experiment +except ImportError: + run_experiment = None + +# pylint: enable=g-import-not-at-top +# pylint: enable=ungrouped-imports + +# TODO(rlmb): Vanilla DQN and Rainbow have a lot of common code. We will want +# to remove Vanilla DQN and only have Rainbow. To do so one needs to remove +# following: +# * _DQNAgent +# * BatchDQNAgent +# * _OutOfGraphReplayBuffer +# * "if" clause in create_agent() +# * parameter "agent_type" from dqn_atari_base() hparams and possibly other +# rlmb dqn hparams sets +# If we want to keep both Vanilla DQN and Rainbow, larger refactor is required. + + +class _DQNAgent(dqn_agent.DQNAgent): + """Modify dopamine DQNAgent to match our needs. + + Allow passing batch_size and replay_capacity to ReplayBuffer, allow not using + (some of) terminal episode transitions in training. + """ + + def __init__(self, replay_capacity, buffer_batch_size, + generates_trainable_dones, **kwargs): + self._replay_capacity = replay_capacity + self._buffer_batch_size = buffer_batch_size + self._generates_trainable_dones = generates_trainable_dones + super(_DQNAgent, self).__init__(**kwargs) + + def _build_replay_buffer(self, use_staging): + """Build WrappedReplayBuffer with custom OutOfGraphReplayBuffer.""" + replay_buffer_kwargs = dict( + observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE, + stack_size=dqn_agent.NATURE_DQN_STACK_SIZE, + replay_capacity=self._replay_capacity, + batch_size=self._buffer_batch_size, + update_horizon=self.update_horizon, + gamma=self.gamma, + extra_storage_types=None, + observation_dtype=np.uint8, + ) + replay_memory = _OutOfGraphReplayBuffer( + artificial_done=not self._generates_trainable_dones, + **replay_buffer_kwargs) + + return circular_replay_buffer.WrappedReplayBuffer( + wrapped_memory=replay_memory, + use_staging=use_staging, + **replay_buffer_kwargs) + + +class BatchDQNAgent(_DQNAgent): + """Batch agent for DQN. + + Episodes are stored on done. + + Assumes that all rollouts in batch would end at the same moment. 
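+
+  Each environment in the batch keeps its own frame stack: `state_batch` has
+  shape [env_batch_size, 84, 84, stack_size]; on every step the oldest frame
+  is rolled out and the newest observation is written into the last slot.
+  Transitions are accumulated per environment and flushed to the replay buffer
+  only when the episode ends.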
+ """ + + def __init__(self, env_batch_size, *args, **kwargs): + super(BatchDQNAgent, self).__init__(*args, **kwargs) + self.env_batch_size = env_batch_size + obs_size = dqn_agent.NATURE_DQN_OBSERVATION_SHAPE + state_shape = [self.env_batch_size, obs_size[0], obs_size[1], + dqn_agent.NATURE_DQN_STACK_SIZE] + self.state_batch = np.zeros(state_shape) + self.state = None # assure it will be not used + self._observation = None # assure it will be not used + self.reset_current_rollouts() + + def reset_current_rollouts(self): + self._current_rollouts = [[] for _ in range(self.env_batch_size)] + + def _record_observation(self, observation_batch): + # Set current observation. Represents an (batch_size x 84 x 84 x 1) image + # frame. + observation_batch = np.array(observation_batch) + self._observation_batch = observation_batch[:, :, :, 0] + # Swap out the oldest frames with the current frames. + self.state_batch = np.roll(self.state_batch, -1, axis=3) + self.state_batch[:, :, :, -1] = self._observation_batch + + def _reset_state(self): + self.state_batch.fill(0) + + def begin_episode(self, observation): + self._reset_state() + self._record_observation(observation) + + if not self.eval_mode: + self._train_step() + + self.action = self._select_action() + return self.action + + def _update_current_rollouts(self, last_observation, action, reward, + are_terminal): + transitions = zip(last_observation, action, reward, are_terminal) + for transition, rollout in zip(transitions, self._current_rollouts): + rollout.append(transition) + + def _store_current_rollouts(self): + for rollout in self._current_rollouts: + for transition in rollout: + self._store_transition(*transition) + self.reset_current_rollouts() + + def step(self, reward, observation): + self._last_observation = self._observation_batch + self._record_observation(observation) + + if not self.eval_mode: + self._update_current_rollouts(self._last_observation, self.action, reward, + [False] * self.env_batch_size) + # We want to have the same train_step:env_step ratio not depending on + # batch size. + for _ in range(self.env_batch_size): + self._train_step() + + self.action = self._select_action() + return self.action + + def end_episode(self, reward): + if not self.eval_mode: + self._update_current_rollouts( + self._observation_batch, self.action, reward, + [True] * self.env_batch_size) + self._store_current_rollouts() + + def _select_action(self): + epsilon = self.epsilon_eval + if not self.eval_mode: + epsilon = self.epsilon_fn( + self.epsilon_decay_period, + self.training_steps, + self.min_replay_history, + self.epsilon_train) + + def choose_action(ix): + if random.random() <= epsilon: + # Choose a random action with probability epsilon. + return random.randint(0, self.num_actions - 1) + else: + # Choose the action with highest Q-value at the current state. + return self._sess.run(self._q_argmax, + {self.state_ph: self.state_batch[ix:ix+1]}) + + return np.array([choose_action(ix) for ix in range(self.env_batch_size)]) + + +class _OutOfGraphReplayBuffer(OutOfGraphReplayBuffer): + """Replay not sampling artificial_terminal transition. + + Adds to stored tuples "artificial_done" field (as last ReplayElement). + When sampling, ignores tuples for which artificial_done is True. + + When adding new attributes check if there are loaded from disk, when using + load() method. + + Attributes: + are_terminal_valid: A boolean indicating if newly added terminal + transitions should be marked as artificially done. 
Replay data loaded + from disk will not be overridden. + """ + + def __init__(self, artificial_done, **kwargs): + extra_storage_types = kwargs.pop("extra_storage_types", None) or [] + extra_storage_types.append(ReplayElement("artificial_done", (), np.uint8)) + super(_OutOfGraphReplayBuffer, self).__init__( + extra_storage_types=extra_storage_types, **kwargs) + self._artificial_done = artificial_done + + def is_valid_transition(self, index): + valid = super(_OutOfGraphReplayBuffer, self).is_valid_transition(index) + valid &= not self.get_artificial_done_stack(index).any() + return valid + + def get_artificial_done_stack(self, index): + return self.get_range(self._store["artificial_done"], + index - self._stack_size + 1, index + 1) + + def add(self, observation, action, reward, terminal, *args): + """Append artificial_done to *args and run parent method.""" + # If this will be a problem for maintenance, we could probably override + # DQNAgent.add() method instead. + artificial_done = self._artificial_done and terminal + args = list(args) + args.append(artificial_done) + return super(_OutOfGraphReplayBuffer, self).add(observation, action, reward, + terminal, *args) + + def load(self, *args, **kwargs): + # Check that appropriate attributes are not overridden + are_terminal_valid = self._artificial_done + super(_OutOfGraphReplayBuffer, self).load(*args, **kwargs) + assert self._artificial_done == are_terminal_valid + + +class _WrappedPrioritizedReplayBuffer(WrappedPrioritizedReplayBuffer): + """Allows to pass out-of-graph-replay-buffer via wrapped_memory.""" + + def __init__(self, wrapped_memory, batch_size, use_staging): + self.batch_size = batch_size + self.memory = wrapped_memory + self.create_sampling_ops(use_staging) + + +class _RainbowAgent(rainbow_agent.RainbowAgent): + """Modify dopamine DQNAgent to match our needs. + + Allow passing batch_size and replay_capacity to ReplayBuffer, allow not using + (some of) terminal episode transitions in training. + """ + + def __init__(self, replay_capacity, buffer_batch_size, + generates_trainable_dones, **kwargs): + self._replay_capacity = replay_capacity + self._buffer_batch_size = buffer_batch_size + self._generates_trainable_dones = generates_trainable_dones + super(_RainbowAgent, self).__init__(**kwargs) + + def _build_replay_buffer(self, use_staging): + """Build WrappedReplayBuffer with custom OutOfGraphReplayBuffer.""" + replay_buffer_kwargs = dict( + observation_shape=dqn_agent.NATURE_DQN_OBSERVATION_SHAPE, + stack_size=dqn_agent.NATURE_DQN_STACK_SIZE, + replay_capacity=self._replay_capacity, + batch_size=self._buffer_batch_size, + update_horizon=self.update_horizon, + gamma=self.gamma, + extra_storage_types=None, + observation_dtype=np.uint8, + ) + + replay_memory = _OutOfGraphPrioritizedReplayBuffer( + artificial_done=not self._generates_trainable_dones, + **replay_buffer_kwargs) + + return _WrappedPrioritizedReplayBuffer( + wrapped_memory=replay_memory, + use_staging=use_staging, batch_size=self._buffer_batch_size) + # **replay_buffer_kwargs) + + +class BatchRainbowAgent(_RainbowAgent): + """Batch agent for DQN. + + Episodes are stored on done. + + Assumes that all rollouts in batch would end at the same moment. 
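The artificial_done bookkeeping in _OutOfGraphReplayBuffer above (and in the prioritized variant later in this file) boils down to flagging terminal transitions that come from an artificial episode cut and skipping them at sampling time. A simplified, Dopamine-free sketch of that idea (ToyReplayBuffer is an illustrative stand-in, not the real class):

import random

class ToyReplayBuffer(object):
  """Stores transitions plus an artificial_done flag, skipped when sampling."""

  def __init__(self, artificial_done):
    # True when terminals come from e.g. a time limit rather than a genuine
    # end of the game.
    self._artificial_done = artificial_done
    self._storage = []

  def add(self, observation, action, reward, terminal):
    artificial_done = self._artificial_done and terminal
    self._storage.append(
        (observation, action, reward, terminal, artificial_done))

  def sample(self):
    valid = [t for t in self._storage if not t[-1]]  # drop artificial dones
    return random.choice(valid)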
+ """ + + def __init__(self, env_batch_size, *args, **kwargs): + super(BatchRainbowAgent, self).__init__(*args, **kwargs) + self.env_batch_size = env_batch_size + obs_size = dqn_agent.NATURE_DQN_OBSERVATION_SHAPE + state_shape = [self.env_batch_size, obs_size[0], obs_size[1], + dqn_agent.NATURE_DQN_STACK_SIZE] + self.state_batch = np.zeros(state_shape) + self.state = None # assure it will be not used + self._observation = None # assure it will be not used + self.reset_current_rollouts() + + def reset_current_rollouts(self): + self._current_rollouts = [[] for _ in range(self.env_batch_size)] + + def _record_observation(self, observation_batch): + # Set current observation. Represents an (batch_size x 84 x 84 x 1) image + # frame. + observation_batch = np.array(observation_batch) + self._observation_batch = observation_batch[:, :, :, 0] + # Swap out the oldest frames with the current frames. + self.state_batch = np.roll(self.state_batch, -1, axis=3) + self.state_batch[:, :, :, -1] = self._observation_batch + + def _reset_state(self): + self.state_batch.fill(0) + + def begin_episode(self, observation): + self._reset_state() + self._record_observation(observation) + + if not self.eval_mode: + self._train_step() + + self.action = self._select_action() + return self.action + + def _update_current_rollouts(self, last_observation, action, reward, + are_terminal): + transitions = zip(last_observation, action, reward, are_terminal) + for transition, rollout in zip(transitions, self._current_rollouts): + rollout.append(transition) + + def _store_current_rollouts(self): + for rollout in self._current_rollouts: + for transition in rollout: + self._store_transition(*transition) + self.reset_current_rollouts() + + def step(self, reward, observation): + self._last_observation = self._observation_batch + self._record_observation(observation) + + if not self.eval_mode: + self._update_current_rollouts(self._last_observation, self.action, reward, + [False] * self.env_batch_size) + # We want to have the same train_step:env_step ratio not depending on + # batch size. + for _ in range(self.env_batch_size): + self._train_step() + + self.action = self._select_action() + return self.action + + def end_episode(self, reward): + if not self.eval_mode: + self._update_current_rollouts( + self._observation_batch, self.action, reward, + [True] * self.env_batch_size) + self._store_current_rollouts() + + def _select_action(self): + epsilon = self.epsilon_eval + if not self.eval_mode: + epsilon = self.epsilon_fn( + self.epsilon_decay_period, + self.training_steps, + self.min_replay_history, + self.epsilon_train) + + def choose_action(ix): + if random.random() <= epsilon: + # Choose a random action with probability epsilon. + return random.randint(0, self.num_actions - 1) + else: + # Choose the action with highest Q-value at the current state. + return self._sess.run(self._q_argmax, + {self.state_ph: self.state_batch[ix:ix+1]}) + + return np.array([choose_action(ix) for ix in range(self.env_batch_size)]) + + +class BatchRunner(run_experiment.Runner): + """Run a batch of environments. + + Assumes that all environments would end at the same moment. + """ + + def __init__(self, base_dir, create_agent_fn, **kwargs): + super(BatchRunner, self).__init__(base_dir, create_agent_fn, **kwargs) + self.batch_size = self._environment.batch_size + + def _run_one_episode(self): + # This assumes that everything inside _run_one_episode works on batches, + # which is risky for future. 
+ steps_number, total_rewards = super(BatchRunner, self)._run_one_episode() + return steps_number * self.batch_size, total_rewards + + def _run_one_phase(self, min_steps, statistics, run_mode_str): + # Mostly copy of parent method. + step_count = 0 + num_episodes = 0 + sum_returns = 0. + + while step_count < min_steps: + num_steps, episode_returns = self._run_one_episode() + for episode_return in episode_returns: + statistics.append({ + "{}_episode_lengths".format(run_mode_str): + num_steps / self.batch_size, + "{}_episode_returns".format(run_mode_str): episode_return + }) + step_count += num_steps + sum_returns += sum(episode_returns) + num_episodes += self.batch_size + # We use sys.stdout.write instead of tf.logging so as to flush frequently + # without generating a line break. + sys.stdout.write("Steps executed: {} ".format(step_count) + + "Batch episodes steps: {} ".format(num_steps) + + "Returns: {}\r".format(episode_returns)) + sys.stdout.flush() + return step_count, sum_returns, num_episodes + + def close(self): + self._environment.close() + + +class _OutOfGraphPrioritizedReplayBuffer(OutOfGraphPrioritizedReplayBuffer): + """Replay not sampling artificial_terminal transition. + + Adds to stored tuples "artificial_done" field (as last ReplayElement). + When sampling, ignores tuples for which artificial_done is True. + + When adding new attributes check if there are loaded from disk, when using + load() method. + + Attributes: + are_terminal_valid: A boolean indicating if newly added terminal + transitions should be marked as artificially done. Replay data loaded + from disk will not be overridden. + """ + + def __init__(self, artificial_done, **kwargs): + extra_storage_types = kwargs.pop("extra_storage_types", None) or [] + msg = "Other extra_storage_types aren't currently supported for this class." + assert not extra_storage_types, msg + extra_storage_types.append(ReplayElement("artificial_done", (), np.uint8)) + super(_OutOfGraphPrioritizedReplayBuffer, self).__init__( + extra_storage_types=extra_storage_types, **kwargs) + self._artificial_done = artificial_done + + def is_valid_transition(self, index): + valid = super(_OutOfGraphPrioritizedReplayBuffer, + self).is_valid_transition(index) + if valid: + valid = not self.get_artificial_done_stack(index).any() + return valid + + def get_artificial_done_stack(self, index): + return self.get_range(self._store["artificial_done"], + index - self._stack_size + 1, index + 1) + + def add(self, observation, action, reward, terminal, priority): + """Infer artificial_done and call parent method.""" + # If this will be a problem for maintenance, we could probably override + # DQNAgent.add() method instead. + if not isinstance(priority, (float, np.floating)): + raise ValueError("priority should be float, got type {}" + .format(type(priority))) + artificial_done = self._artificial_done and terminal + return super(_OutOfGraphPrioritizedReplayBuffer, self).add( + observation, action, reward, terminal, artificial_done, priority + ) + + def load(self, *args, **kwargs): + # Check that appropriate attributes are not overridden + are_terminal_valid = self._artificial_done + super(_OutOfGraphPrioritizedReplayBuffer, self).load(*args, **kwargs) + assert self._artificial_done == are_terminal_valid + + +def get_create_agent(agent_kwargs): + """Factory for dopamine agent initialization. + + Args: + agent_kwargs: dict of BatchDQNAgent parameters + + Returns: + Function(sess, environment, summary_writer) -> BatchDQNAgent instance. 
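In BatchRunner above, one "episode" spans the whole batch: _run_one_episode multiplies the per-environment step count by batch_size, and _run_one_phase divides it back when logging per-episode lengths. A small worked example of that bookkeeping:

batch_size = 8
steps_per_env = 200                      # steps reported by the parent Runner
num_steps = steps_per_env * batch_size   # what _run_one_episode returns: 1600
assert num_steps / batch_size == steps_per_env  # value logged per episode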
+ """ + agent_kwargs = copy.deepcopy(agent_kwargs) + agent_type = agent_kwargs.pop("type") + + def create_agent(sess, environment, summary_writer=None): + """Creates a DQN agent. + + Simplified version of `dopamine.discrete_domains.train.create_agent` + + Args: + sess: a session + environment: an environment + summary_writer: a summary writer. + + Returns: + a DQN agent. + """ + if agent_type == "Rainbow": + return BatchRainbowAgent( + env_batch_size=environment.batch_size, + sess=sess, + num_actions=environment.action_space.n, + summary_writer=summary_writer, + tf_device="/gpu:*", + **agent_kwargs) + elif agent_type == "VanillaDQN": + return BatchDQNAgent( + env_batch_size=environment.batch_size, + sess=sess, + num_actions=environment.action_space.n, + summary_writer=summary_writer, + tf_device="/gpu:*", + **agent_kwargs) + else: + raise ValueError("Unknown agent_type {}".format(agent_type)) + + return create_agent + + +class ResizeBatchObservation(object): + """Wrapper resizing observations for batched environment. + + Dopamine also uses cv2.resize(..., interpolation=cv2.INTER_AREA). + + Attributes: + batch_env: batched environment + batch_size: batch size + action_space: the action space + size: size of width and height for returned observations + """ + + def __init__(self, batch_env, size=84): + self.size = size + self.batch_env = batch_env + + def observation(self, frames): + if not cv2: + return frames + return np.array([cv2.resize( + frame, (self.size, self.size), interpolation=cv2.INTER_AREA) + for frame in frames]) + + def step(self, actions): + obs, rewards, dones = self.batch_env.step(actions) + obs = self.observation(obs) + return obs, rewards, dones + + def reset(self, *args, **kwargs): + return self.observation(self.batch_env.reset(*args, **kwargs)) + + @property + def action_space(self): + return self.batch_env.action_space + + @property + def batch_size(self): + return self.batch_env.batch_size + + def close(self): + self.batch_env.close() + + +class DopamineBatchEnv(object): + """Batch of environments. + + Assumes that all given environments finishes at the same time. + + Observations and rewards are returned as batches (arrays). Done is returned + as single boolean. + """ + + def __init__(self, batch_env, max_episode_steps): + self.batch_env = batch_env + self._max_episode_steps = max_episode_steps + self.game_over = None + self._elapsed_steps = 0 + + def reset(self): + self.game_over = False + self._elapsed_steps = 0 + return np.array(self.batch_env.reset()) + + def step(self, actions): + """Step.""" + self._elapsed_steps += 1 + obs, rewards, dones = \ + [np.array(r) for r in self.batch_env.step(actions)] + if self._elapsed_steps > self._max_episode_steps: + done = True + if self._elapsed_steps > self._max_episode_steps + 1: + rewards.fill(0) + else: + done = dones[0] + assert np.all(done == dones), ("Current modifications of Dopamine " + "require same number of steps for each " + "environment in batch") + del dones + + self.game_over = done + return obs, rewards, done, {} + + def render(self, mode): + pass + + def close(self): + self.batch_env.close() + + @property + def action_space(self): + return self.batch_env.action_space + + @property + def batch_size(self): + return self.batch_env.batch_size + + +class PaddedTrajectoriesEnv(DopamineBatchEnv): + """Pad finished episodes with zeros. + + Allow episodes in batch to end on different timesteps, return zero + observations and rewards for finished ones. Return done=True when all + episodes are finished. 
+ + Note that output of this class might be misleading - the agent/evaluator + which uses this environment gets false information about when episodes have + ended. This class is used for informal check of Batched dopamine + implementation in model-free pipeline. + """ + + def reset(self): + self.done_envs = [False] * self.batch_size + self.game_over = False + self._elapsed_steps = 0 + return np.array(self.batch_env.reset()) + + def step(self, actions): + if any(self.done_envs): + print("Warning, some environments already ended, using mocked data.") + + self._elapsed_steps += 1 + obs, rewards, dones = \ + [np.array(r) for r in self.batch_env.step(actions)] + for i, ignore in enumerate(self.done_envs): + if ignore: + obs[i] = np.zeros(obs[i].shape, dtype=obs.dtype) + rewards[i] = 0 + if dones[i]: + self.batch_env.reset([i]) + self.done_envs[i] = True + + all_done = all(self.done_envs) + + if self._elapsed_steps > self._max_episode_steps: + all_done = True + if self._elapsed_steps > self._max_episode_steps + 1: + rewards.fill(0) + + self.game_over = all_done + return obs, rewards, all_done, {} + + +def get_create_batch_env_fun(batch_env_fn, time_limit): + """Factory for dopamine environment initialization function. + + Args: + batch_env_fn: function(in_graph: bool) -> batch environment. + time_limit: time steps limit for environment. + + Returns: + function (with optional, unused parameters) initializing environment. + """ + + def create_env_fun(game_name=None, sticky_actions=None): + del game_name, sticky_actions + batch_env = batch_env_fn(in_graph=False) + batch_env = ResizeBatchObservation(batch_env) # pylint: disable=redefined-variable-type + batch_env = DopamineBatchEnv(batch_env, max_episode_steps=time_limit) + return batch_env + + return create_env_fun + + +def _parse_hparams(hparams): + """Split hparams, based on key prefixes. + + Args: + hparams: hyperparameters + + Returns: + Tuple of hparams for respectably: agent, optimizer, runner, replay_buffer. 
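_parse_hparams below groups a flat hparams object by key prefix into per-component dicts. The same grouping over a plain dict, as a standalone sketch (using startswith for the prefix test):

def split_by_prefix(flat_params, prefixes):
  return [{key[len(prefix):]: value
           for key, value in flat_params.items()
           if key.startswith(prefix)}
          for prefix in prefixes]

agent, optimizer, runner, replay_buffer = split_by_prefix(
    {"agent_type": "Rainbow", "optimizer_class": "RMSProp",
     "runner_num_iterations": 10, "replay_buffer_replay_capacity": 1000},
    ["agent_", "optimizer_", "runner_", "replay_buffer_"])
assert agent == {"type": "Rainbow"} and optimizer == {"class": "RMSProp"}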
+ """ + prefixes = ["agent_", "optimizer_", "runner_", "replay_buffer_"] + ret = [] + + for prefix in prefixes: + ret_dict = {} + for key in hparams.values(): + if prefix in key: + par_name = key[len(prefix):] + ret_dict[par_name] = hparams.get(key) + ret.append(ret_dict) + + return ret + + +def _get_optimizer(params): + assert params["class"] == "RMSProp", "RMSProp is the only one supported" + params.pop("class") + return tf.train.RMSPropOptimizer(**params) + + +class DQNLearner(PolicyLearner): + """Interface for learning dqn implemented in dopamine.""" + + def __init__(self, frame_stack_size, base_event_dir, agent_model_dir, + total_num_epochs, **kwargs): + super(DQNLearner, self).__init__( + frame_stack_size, base_event_dir, agent_model_dir, total_num_epochs) + self.completed_iterations = 0 + + def _target_iteractions_and_steps(self, num_env_steps, save_continuously, + save_every_steps): + + if save_continuously: + training_steps_per_iteration = min(num_env_steps, save_every_steps) + num_iterations_to_do = num_env_steps // training_steps_per_iteration + else: + num_iterations_to_do = 1 + training_steps_per_iteration = num_env_steps + target_iterations = self.completed_iterations + num_iterations_to_do + return target_iterations, training_steps_per_iteration + + def create_runner(self, env_fn, hparams, target_iterations, + training_steps_per_iteration): + # pylint: disable=unbalanced-tuple-unpacking + agent_params, optimizer_params, \ + runner_params, replay_buffer_params = _parse_hparams(hparams) + # pylint: enable=unbalanced-tuple-unpacking + optimizer = _get_optimizer(optimizer_params) + agent_params["optimizer"] = optimizer + agent_params.update(replay_buffer_params) + create_agent_fn = get_create_agent(agent_params) + runner = BatchRunner( + base_dir=self.agent_model_dir, + create_agent_fn=create_agent_fn, + create_environment_fn=get_create_batch_env_fun( + env_fn, time_limit=hparams.time_limit), + evaluation_steps=0, + num_iterations=target_iterations, + training_steps=training_steps_per_iteration, + **runner_params) + return runner + + def train(self, + env_fn, + hparams, + simulated, + save_continuously, + epoch, + sampling_temp=1.0, + num_env_steps=None, + env_step_multiplier=1, + eval_env_fn=None, + report_fn=None, + model_save_fn=None): + # TODO(konradczechowski): evaluation during training (with eval_env_fun) + del epoch, eval_env_fn, simulated, report_fn, model_save_fn + if num_env_steps is None: + num_env_steps = hparams.num_frames + + hparams = copy.copy(hparams) + hparams.set_hparam( + "agent_epsilon_eval", min(hparams.agent_epsilon_eval * sampling_temp, 1) + ) + + target_iterations, training_steps_per_iteration = \ + self._target_iteractions_and_steps( + num_env_steps=num_env_steps * env_step_multiplier, + save_continuously=save_continuously, + save_every_steps=hparams.save_every_steps) + + with tf.Graph().as_default(): + runner = self.create_runner(env_fn, hparams, target_iterations, + training_steps_per_iteration) + runner.run_experiment() + runner.close() + self.completed_iterations = target_iterations + + def evaluate(self, env_fn, hparams, sampling_temp): + target_iterations = 0 + training_steps_per_iteration = 0 + + hparams = copy.copy(hparams) + hparams.set_hparam( + "agent_epsilon_eval", min(hparams.agent_epsilon_eval * sampling_temp, 1) + ) + + create_environment_fn = get_create_batch_env_fun( + env_fn, time_limit=hparams.time_limit) + env = create_environment_fn( + game_name="unused_arg", sticky_actions="unused_arg") + + with tf.Graph().as_default(): + runner = 
self.create_runner(env_fn, hparams, target_iterations, + training_steps_per_iteration) + assert runner.batch_size == 1 + agent = runner._agent # pylint: disable=protected-access + runner.close() + del runner + agent.eval_mode = True + + for _ in range(hparams.eval_episodes_num): + # Run single episode + ob = env.reset() + action = agent.begin_episode(ob) + done = False + while not done: + ob, reward, done, _ = env.step(action) + action = agent.step(reward, ob) diff --git a/tensor2tensor/rl/envs/__init__.py b/tensor2tensor/rl/envs/__init__.py new file mode 100644 index 000000000..ff174dd63 --- /dev/null +++ b/tensor2tensor/rl/envs/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tensor2tensor/rl/envs/in_graph_batch_env.py b/tensor2tensor/rl/envs/in_graph_batch_env.py new file mode 100644 index 000000000..91f44afa4 --- /dev/null +++ b/tensor2tensor/rl/envs/in_graph_batch_env.py @@ -0,0 +1,107 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Batch of environments inside the TensorFlow graph.""" + +# The code was based on Danijar Hafner's code from tf.agents: +# https://github.com/tensorflow/agents/blob/master/agents/tools/in_graph_batch_env.py + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import gym + +import tensorflow.compat.v1 as tf + + +class InGraphBatchEnv(object): + """Abstract class for batch of environments inside the TensorFlow graph. + """ + + def __init__(self, observ_space, action_space): + self.observ_space = observ_space + self.action_space = action_space + + def __str__(self): + return "InGraphEnv(%s)" % str(self._batch_env) + + def __len__(self): + """Number of combined environments.""" + return len(self._batch_env) + + def __getitem__(self, index): + """Access an underlying environment by index.""" + return self._batch_env[index] + + def simulate(self, action): + """Step the batch of environments. + + The results of the step can be accessed from the variables defined below. + + Args: + action: Tensor holding the batch of actions to apply. + + Returns: + Operation. + """ + raise NotImplementedError + + def reset(self, indices=None): + """Reset the batch of environments. + + Args: + indices: The batch indices of the environments to reset. + + Returns: + Batch tensor of the new observations. 
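The tf.cond guard in the reset() implementation below only calls _reset_non_empty when indices is non-empty; because indices are non-negative, reduce_sum(indices + 1) is zero exactly when the tensor is empty. The same check in numpy terms:

import numpy as np

def is_non_empty(indices):
  # For non-negative indices, sum(indices + 1) == 0 only for an empty array.
  return bool(np.sum(np.asarray(indices) + 1))

assert is_non_empty([0, 3])
assert not is_non_empty([])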
+ """ + return tf.cond( + tf.cast(tf.reduce_sum(indices + 1), tf.bool), + lambda: self._reset_non_empty(indices), + lambda: tf.cast(0, self.observ_dtype)) + + @staticmethod + def _get_tf_dtype(space): + if isinstance(space, gym.spaces.Discrete): + return tf.int32 + if isinstance(space, gym.spaces.Box): + return tf.as_dtype(space.dtype) + raise NotImplementedError() + + @property + def observ_dtype(self): + return self._get_tf_dtype(self.observ_space) + + @property + def observ_shape(self): + return self.observ_space.shape + + @property + def action_dtype(self): + return self._get_tf_dtype(self.action_space) + + @property + def action_shape(self): + return self.action_space.shape + + @property + def observ(self): + """Access the variable holding the current observation.""" + return self._observ.read_value() + + def close(self): + """Send close messages to the external process and join them.""" + self._batch_env.close() diff --git a/tensor2tensor/rl/envs/py_func_batch_env.py b/tensor2tensor/rl/envs/py_func_batch_env.py new file mode 100644 index 000000000..e009eeb3a --- /dev/null +++ b/tensor2tensor/rl/envs/py_func_batch_env.py @@ -0,0 +1,135 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Batch of environments inside the TensorFlow graph.""" + +# The code was based on Danijar Hafner's code from tf.agents: +# https://github.com/tensorflow/agents/blob/master/agents/tools/in_graph_batch_env.py + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np + +from tensor2tensor.rl.envs.in_graph_batch_env import InGraphBatchEnv +import tensorflow.compat.v1 as tf + + +class PyFuncBatchEnv(InGraphBatchEnv): + """Batch of environments inside the TensorFlow graph. + + The batch of environments will be stepped and reset inside of the graph using + a tf.py_func(). The current batch of observations, actions, rewards, and done + flags are held in according variables. + """ + + def __init__(self, batch_env): + """Batch of environments inside the TensorFlow graph. + + Args: + batch_env: Batch environment. + """ + super(PyFuncBatchEnv, self).__init__(batch_env.observation_space, + batch_env.action_space) + self._batch_env = batch_env + with tf.variable_scope("env_temporary"): + self._observ = tf.Variable( + tf.zeros((self._batch_env.batch_size,) + self.observ_shape, + self.observ_dtype), + name="observ", trainable=False) + + def __str__(self): + return "PyFuncEnv(%s)" % str(self._batch_env) + + def __getattr__(self, name): + """Forward unimplemented attributes to one of the original environments. + + Args: + name: Attribute that was accessed. + + Returns: + Value behind the attribute name in one of the original environments. 
+ """ + return getattr(self._batch_env, name) + + def initialize(self, sess): + pass + + def __len__(self): + """Number of combined environments.""" + return self._batch_env.batch_size + + def __getitem__(self, index): + """Access an underlying environment by index.""" + return self._batch_env[index] + + def simulate(self, action): + """Step the batch of environments. + + The results of the step can be accessed from the variables defined below. + + Args: + action: Tensor holding the batch of actions to apply. + + Returns: + Operation. + """ + with tf.name_scope("environment/simulate"): + if action.dtype in (tf.float16, tf.float32, tf.float64): + action = tf.check_numerics(action, "action") + def step(action): + step_response = self._batch_env.step(action) + # Current env doesn't return `info`, but EnvProblem does. + # TODO(afrozm): The proper way to do this is to make T2TGymEnv return + # an empty info return value. + if len(step_response) == 3: + (observ, reward, done) = step_response + else: + (observ, reward, done, _) = step_response + return (observ, reward.astype(np.float32), done) + observ, reward, done = tf.py_func( + step, [action], + [self.observ_dtype, tf.float32, tf.bool], name="step") + reward = tf.check_numerics(reward, "reward") + reward.set_shape((len(self),)) + done.set_shape((len(self),)) + with tf.control_dependencies([self._observ.assign(observ)]): + return tf.identity(reward), tf.identity(done) + + def _reset_non_empty(self, indices): + """Reset the batch of environments. + + Args: + indices: The batch indices of the environments to reset; defaults to all. + + Returns: + Batch tensor of the new observations. + """ + observ = tf.py_func( + self._batch_env.reset, [indices], self.observ_dtype, name="reset") + observ.set_shape(indices.get_shape().concatenate(self.observ_shape)) + with tf.control_dependencies([ + tf.scatter_update(self._observ, indices, observ)]): + return tf.identity(observ) + + @property + def observ(self): + """Access the variable holding the current observation.""" + return self._observ.read_value() + + def close(self): + """Send close messages to the external process and join them.""" + self._batch_env.close() diff --git a/tensor2tensor/rl/envs/simulated_batch_env.py b/tensor2tensor/rl/envs/simulated_batch_env.py new file mode 100644 index 000000000..b0048f198 --- /dev/null +++ b/tensor2tensor/rl/envs/simulated_batch_env.py @@ -0,0 +1,299 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Batch of environments inside the TensorFlow graph.""" + +# The code was based on Danijar Hafner's code from tf.agents: +# https://github.com/tensorflow/agents/blob/master/agents/tools/in_graph_batch_env.py + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import os + +import numpy as np + +from tensor2tensor.data_generators.gym_env import DummyWorldModelProblem +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.rl.envs import in_graph_batch_env +from tensor2tensor.utils import registry +from tensor2tensor.utils import trainer_lib + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +# Lazy load PIL.Image +def PIL_Image(): # pylint: disable=invalid-name + from PIL import Image # pylint: disable=g-import-not-at-top + return Image + + +# Lazy load PIL.Image +def PIL_ImageDraw(): # pylint: disable=invalid-name + from PIL import ImageDraw # pylint: disable=g-import-not-at-top + return ImageDraw + + +class HistoryBuffer(object): + """History Buffer.""" + + def __init__(self, initial_frame_chooser, observ_shape, observ_dtype, + num_initial_frames, batch_size): + self.batch_size = batch_size + self._observ_dtype = observ_dtype + initial_shape = (batch_size, num_initial_frames) + observ_shape + self._initial_frames = tf.py_func( + initial_frame_chooser, [tf.constant(batch_size)], observ_dtype + ) + self._initial_frames.set_shape(initial_shape) + self._history_buff = tf.Variable(tf.zeros(initial_shape, observ_dtype), + trainable=False) + + def get_all_elements(self): + return self._history_buff.read_value() + + def move_by_one_element(self, element): + last_removed = self.get_all_elements()[:, 1:, ...] + element = tf.expand_dims(element, dim=1) + moved = tf.concat([last_removed, element], axis=1) + with tf.control_dependencies([moved]): + with tf.control_dependencies([self._history_buff.assign(moved)]): + return self._history_buff.read_value() + + def reset(self, indices): + initial_frames = tf.gather(self._initial_frames, indices) + scatter_op = tf.scatter_update(self._history_buff, indices, initial_frames) + with tf.control_dependencies([scatter_op]): + return self._history_buff.read_value() + + +def compute_uncertainty_reward(logits, predictions): + """Uncertainty reward based on logits.""" + # TODO(rsepassi): Add support for L1/L2 loss models. Current code only + # works for softmax models. + vocab_size = logits.shape[-1] + assert vocab_size > 1 + log_probs = common_layers.log_prob_from_logits(logits) + max_log_probs = common_layers.index_last_dim_with_indices(log_probs, + predictions) + # Threshold + neg_log_prob = tf.nn.relu(-max_log_probs - 0.02) + # Sum across all but the batch dimension + reduce_dims = list(range(len(neg_log_prob.shape)))[1:] + summed = tf.reduce_sum(neg_log_prob, axis=reduce_dims) + return summed / 10 + + +class SimulatedBatchEnv(in_graph_batch_env.InGraphBatchEnv): + """Batch of environments inside the TensorFlow graph. + + The batch of environments will be stepped and reset inside of the graph using + a tf.py_func(). The current batch of observations, actions, rewards, and done + flags are held in according variables. 
+ """ + + def __init__( + self, reward_range, observation_space, action_space, frame_stack_size, + frame_height, frame_width, initial_frame_chooser, batch_size, model_name, + model_hparams, model_dir, intrinsic_reward_scale=0.0, sim_video_dir=None + ): + """Batch of environments inside the TensorFlow graph.""" + super(SimulatedBatchEnv, self).__init__(observation_space, action_space) + + self._ffmpeg_works = common_video.ffmpeg_works() + self.batch_size = batch_size + self._min_reward = reward_range[0] + self._num_frames = frame_stack_size + self._intrinsic_reward_scale = intrinsic_reward_scale + self._episode_counter = tf.get_variable( + "episode_counter", initializer=tf.zeros((), dtype=tf.int32), + trainable=False, dtype=tf.int32) + if sim_video_dir: + self._video_every_epochs = 100 + self._video_dir = sim_video_dir + self._video_writer = None + self._video_counter = 0 + tf.gfile.MakeDirs(self._video_dir) + self._video_condition = tf.equal( + self._episode_counter.read_value() % self._video_every_epochs, 0) + else: + self._video_condition = tf.constant(False, dtype=tf.bool, shape=()) + + model_hparams = copy.copy(model_hparams) + problem = DummyWorldModelProblem(action_space, reward_range, + frame_height, frame_width) + trainer_lib.add_problem_hparams(model_hparams, problem) + model_hparams.force_full_predict = True + self._model = registry.model(model_name)( + model_hparams, tf_estimator.ModeKeys.PREDICT + ) + + self.history_buffer = HistoryBuffer( + initial_frame_chooser, self.observ_shape, self.observ_dtype, + self._num_frames, self.batch_size + ) + + self._observ = tf.Variable( + tf.zeros((batch_size,) + self.observ_shape, self.observ_dtype), + trainable=False + ) + + self._reset_model = tf.get_variable( + "reset_model", [], trainable=False, initializer=tf.zeros_initializer()) + + self._model_dir = model_dir + + def initialize(self, sess): + model_loader = tf.train.Saver( + var_list=tf.global_variables(scope="next_frame*") # pylint:disable=unexpected-keyword-arg + ) + if tf.gfile.IsDirectory(self._model_dir): + trainer_lib.restore_checkpoint( + self._model_dir, saver=model_loader, sess=sess, must_restore=True + ) + else: + model_loader.restore(sess=sess, save_path=self._model_dir) + + def __str__(self): + return "SimulatedEnv" + + def __len__(self): + """Number of combined environments.""" + return self.batch_size + + def simulate(self, action): + with tf.name_scope("environment/simulate"): + actions = tf.concat([tf.expand_dims(action, axis=1)] * self._num_frames, + axis=1) + history = self.history_buffer.get_all_elements() + with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): + # We only need 1 target frame here, set it. + hparams_target_frames = self._model.hparams.video_num_target_frames + self._model.hparams.video_num_target_frames = 1 + model_output = self._model.infer({ + "inputs": history, + "input_action": actions, + "reset_internal_states": self._reset_model.read_value() + }) + self._model.hparams.video_num_target_frames = hparams_target_frames + + observ = tf.cast(tf.squeeze(model_output["targets"], axis=1), + self.observ_dtype) + + reward = tf.to_float(model_output["target_reward"]) + reward = tf.reshape(reward, shape=(self.batch_size,)) + self._min_reward + + if self._intrinsic_reward_scale: + # Use the model's uncertainty about its prediction as an intrinsic + # reward. The uncertainty is measured by the log probability of the + # predicted pixel value. 
+ if "targets_logits" not in model_output: + raise ValueError("The use of intrinsic rewards requires access to " + "the logits. Ensure that model.infer returns " + "'targets_logits'") + uncertainty_reward = compute_uncertainty_reward( + model_output["targets_logits"], model_output["targets"]) + uncertainty_reward = tf.minimum( + 1., self._intrinsic_reward_scale * uncertainty_reward) + uncertainty_reward = tf.Print(uncertainty_reward, [uncertainty_reward], + message="uncertainty_reward", first_n=1, + summarize=8) + reward += uncertainty_reward + + done = tf.constant(False, tf.bool, shape=(self.batch_size,)) + + with tf.control_dependencies([observ]): + dump_frame_op = tf.cond(self._video_condition, + lambda: tf.py_func(self._video_dump_frame, # pylint: disable=g-long-lambda + [observ, reward], []), + tf.no_op) + with tf.control_dependencies( + [self._observ.assign(observ), + self.history_buffer.move_by_one_element(observ), dump_frame_op]): + clear_reset_model_op = tf.assign(self._reset_model, tf.constant(0.0)) + with tf.control_dependencies([clear_reset_model_op]): + return tf.identity(reward), tf.identity(done) + + def _reset_non_empty(self, indices): + """Reset the batch of environments. + + Args: + indices: The batch indices of the environments to reset; defaults to all. + + Returns: + Batch tensor of the new observations. + """ + reset_video_op = tf.cond( + self._video_condition, + lambda: tf.py_func(self._video_reset_writer, [], []), + tf.no_op) + with tf.control_dependencies([reset_video_op]): + inc_op = tf.assign_add(self._episode_counter, 1) + with tf.control_dependencies([self.history_buffer.reset(indices), + inc_op]): + initial_frame_dump_op = tf.cond( + self._video_condition, + lambda: tf.py_func(self._video_dump_frames, # pylint: disable=g-long-lambda + [self.history_buffer.get_all_elements()], []), + tf.no_op) + observ_assign_op = self._observ.assign( + self.history_buffer.get_all_elements()[:, -1, ...]) + with tf.control_dependencies([observ_assign_op, initial_frame_dump_op]): + reset_model_op = tf.assign(self._reset_model, tf.constant(1.0)) + with tf.control_dependencies([reset_model_op]): + return tf.gather(self._observ.read_value(), indices) + + @property + def observ(self): + """Access the variable holding the current observation.""" + return self._observ.read_value() + + @property + def history_observations(self): + return self.history_buffer.get_all_elements() + + def _video_dump_frame(self, obs, rews): + if not self._ffmpeg_works: + return + if self._video_writer is None: + self._video_counter += 1 + self._video_writer = common_video.WholeVideoWriter( + fps=10, + output_path=os.path.join(self._video_dir, + "{}.avi".format(self._video_counter)), + file_format="avi") + img = PIL_Image().new("RGB", (obs.shape[-2], 11),) + draw = PIL_ImageDraw().Draw(img) + draw.text((0, 0), "r:{:3}".format(int(rews[-1])), fill=(255, 0, 0)) + self._video_writer.write(np.concatenate([np.asarray(img), obs[-1]], axis=0)) + + def _video_dump_frames(self, obs): + if not self._ffmpeg_works: + return + zeros = np.zeros(obs.shape[0]) + for i in range(obs.shape[1]): + self._video_dump_frame(obs[:, i, :], zeros) + + def _video_reset_writer(self): + if self._video_writer: + self._video_writer.finish_to_disk() + self._video_writer = None + + def close(self): + self._video_reset_writer() diff --git a/tensor2tensor/rl/envs/simulated_batch_gym_env.py b/tensor2tensor/rl/envs/simulated_batch_gym_env.py new file mode 100644 index 000000000..74b568339 --- /dev/null +++ 
b/tensor2tensor/rl/envs/simulated_batch_gym_env.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""SimulatedBatchEnv in a Gym-like interface.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from gym import Env + +import numpy as np + +from tensor2tensor.rl.envs.simulated_batch_env import SimulatedBatchEnv + +import tensorflow.compat.v1 as tf + + +class FlatBatchEnv(Env): + """Gym environment interface for Batched Environments (with batch size 1).""" + + def __init__(self, batch_env): + if batch_env.batch_size != 1: + raise ValueError("Number of environments in batch must be equal to one") + self.batch_env = batch_env + self.action_space = self.batch_env.action_space + self.observation_space = self.batch_env.observation_space + + def step(self, action): + obs, rewards, dones = self.batch_env.step([action]) + return obs[0], rewards[0], dones[0], {} + + def reset(self): + return self.batch_env.reset()[0] + + +# TODO(koz4k): Unify interfaces of batch envs. +class SimulatedBatchGymEnv(Env): + """SimulatedBatchEnv in a Gym-like interface, environments are batched.""" + + def __init__(self, *args, **kwargs): + with tf.Graph().as_default(): + self._batch_env = SimulatedBatchEnv(*args, **kwargs) + + self._actions_t = tf.placeholder(shape=(self.batch_size,), dtype=tf.int32) + self._rewards_t, self._dones_t = self._batch_env.simulate(self._actions_t) + with tf.control_dependencies([self._rewards_t]): + self._obs_t = self._batch_env.observ + self._indices_t = tf.placeholder(shape=(self.batch_size,), dtype=tf.int32) + self._reset_op = self._batch_env.reset( + tf.range(self.batch_size, dtype=tf.int32) + ) + + self._sess = tf.Session() + self._sess.run(tf.global_variables_initializer()) + self._batch_env.initialize(self._sess) + + @property + def batch_size(self): + return self._batch_env.batch_size + + @property + def observation_space(self): + return self._batch_env.observ_space + + @property + def action_space(self): + return self._batch_env.action_space + + def render(self, mode="human"): + raise NotImplementedError() + + def reset(self, indices=None): + if indices is None: + indices = np.array(range(self.batch_size)) + obs = self._sess.run(self._reset_op, feed_dict={self._indices_t: indices}) + return obs + + def step(self, actions): + obs, rewards, dones = self._sess.run( + [self._obs_t, self._rewards_t, self._dones_t], + feed_dict={self._actions_t: actions}) + return obs, rewards, dones + + def close(self): + self._sess.close() + self._batch_env.close() diff --git a/tensor2tensor/rl/envs/tf_atari_wrappers.py b/tensor2tensor/rl/envs/tf_atari_wrappers.py new file mode 100644 index 000000000..36838a62a --- /dev/null +++ b/tensor2tensor/rl/envs/tf_atari_wrappers.py @@ -0,0 +1,149 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Batch of environments inside the TensorFlow graph.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from six.moves import range # pylint: disable=redefined-builtin + +from tensor2tensor.rl.envs.in_graph_batch_env import InGraphBatchEnv + +import tensorflow.compat.v1 as tf + + +class WrapperBase(InGraphBatchEnv): + """Base wrapper class.""" + + def __init__(self, batch_env): + super(WrapperBase, self).__init__( + batch_env.observ_space, batch_env.action_space) + self._length = len(batch_env) + self._batch_env = batch_env + + def initialize(self, sess): + """Initializations to be run once the tf.Session is available.""" + pass + + @property + def observ(self): + """Access the variable holding the current observation.""" + return self._observ.read_value() + + @property + def observ_shape(self): + return self._batch_env.observ_shape + + def __len__(self): + """Number of combined environments.""" + return self._length + + def _reset_non_empty(self, indices): + # pylint: disable=protected-access + new_values = self._batch_env._reset_non_empty(indices) + # pylint: enable=protected-access + assign_op = tf.scatter_update(self._observ, indices, new_values) + with tf.control_dependencies([assign_op]): + return tf.identity(new_values) + + def _transform_history_observations(self, frames): + """Applies a wrapper-specific transformation to the history observations. + + Overridden in wrappers that alter observations. + + Args: + frames: A tensor of history frames to transform. + + Returns: + a tensor of transformed frames. + """ + return frames + + @property + def history_observations(self): + """Returns observations from the root simulated env's history_buffer. + + Transforms them with a wrapper-specific function if necessary. + + Raises: + AttributeError: if root env doesn't have a history_buffer (i.e. is not + simulated). + """ + return self._transform_history_observations( + self._batch_env.history_observations + ) + + +class StackWrapper(WrapperBase): + """A wrapper which stacks previously seen frames.""" + + def __init__(self, batch_env, history=4): + super(StackWrapper, self).__init__(batch_env) + self.history = history + self.old_shape = batch_env.observ_shape + # TODO(afrozm): Make into tf.get_variable and use_resource=True + self._observ = tf.Variable( + tf.zeros((len(self),) + self.observ_shape, self.observ_dtype), + trainable=False) + + def __str__(self): + return "StackWrapper(%s)" % str(self._batch_env) + + @property + def observ_shape(self): + return (self.history,) + self.old_shape + + def simulate(self, action): + reward, done = self._batch_env.simulate(action) + with tf.control_dependencies([reward, done]): + new_observ = tf.expand_dims(self._batch_env.observ, axis=1) + + # If we shouldn't stack, i.e. self.history == 1, then just assign + # new_observ to self._observ and return from here. 
+ if self.history == 1: + with tf.control_dependencies([self._observ.assign(new_observ)]): + return tf.identity(reward), tf.identity(done) + + # If we should stack, then do the required work. + old_observ = tf.gather( + self._observ.read_value(), + list(range(1, self.history)), + axis=1) + with tf.control_dependencies([new_observ, old_observ]): + with tf.control_dependencies([self._observ.assign( + tf.concat([old_observ, new_observ], axis=1))]): + return tf.identity(reward), tf.identity(done) + + def _reset_non_empty(self, indices): + # pylint: disable=protected-access + new_values = self._batch_env._reset_non_empty(indices) + # pylint: enable=protected-access + initial_frames = getattr(self._batch_env, "history_observations", None) + + num_dimensions_in_env_observation = len(self.old_shape) + + if initial_frames is None: + inx = [1, self.history] + ([1] * num_dimensions_in_env_observation) + initial_frames = tf.tile(tf.expand_dims(new_values, axis=1), inx) + with tf.control_dependencies([new_values]): + assign_op = tf.scatter_update(self._observ, indices, initial_frames) + with tf.control_dependencies([assign_op]): + return tf.gather(self.observ, indices) + + def _transform_history_observations(self, frames): + # Should be implemented if ever two StackWrappers are to be used together. + raise NotImplementedError diff --git a/tensor2tensor/rl/evaluator.py b/tensor2tensor/rl/evaluator.py new file mode 100644 index 000000000..f2f3b980a --- /dev/null +++ b/tensor2tensor/rl/evaluator.py @@ -0,0 +1,547 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Evaluation script for RL agents. 
+ +Example invocation: + +python -m tensor2tensor.rl.evaluator \ + --policy_dir=$HOME/t2t/rl_v1/policy \ + --eval_metrics_dir=$HOME/t2t/rl_v1/full_eval_metrics \ + --hparams_set=rlmb_base \ + --hparams='batch_size=64' +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import datetime +import os + +from tensor2tensor.data_generators import gym_env +from tensor2tensor.layers import common_video +from tensor2tensor.models.research import rl # pylint: disable=unused-import +from tensor2tensor.rl import rl_utils +from tensor2tensor.rl import trainer_model_based_params # pylint: disable=unused-import +from tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import +from tensor2tensor.utils import hparam +from tensor2tensor.utils import registry +from tensor2tensor.utils import trainer_lib + +import tensorflow.compat.v1 as tf + + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("output_dir", "", "Main directory for multi-runs.") +flags.DEFINE_integer("total_num_workers", 1, "How many workers in total.") +flags.DEFINE_string("worker_to_game_map", "", "How to map workers to games.") +flags.DEFINE_string("policy_dir", "", "Directory with policy checkpoints.") +flags.DEFINE_string("model_dir", "", "Directory with model checkpoints.") +flags.DEFINE_string( + "eval_metrics_dir", "", "Directory to output the eval metrics at." +) +flags.DEFINE_integer("eval_batch_size", 64, "Number of games to evaluate.") +flags.DEFINE_integer("eval_step_limit", 50000, + "Maximum number of time steps, ignored if -1.") +flags.DEFINE_enum( + "agent", "policy", ["random", "policy", "planner"], "Agent type to use." +) +# Evaluator doesn't report metrics for agent on the simulated env because we +# don't collect rollouts there. It's just for generating videos. +# TODO(koz4k): Enable reporting metrics from simulated env by refactoring +# T2TEnv to a wrapper storing rollouts and providing Problem interface for any +# batch env. +flags.DEFINE_enum( + "mode", "agent_real", ["agent_real", "agent_simulated", "model"], + "Evaluation mode; report agent's score on real or simulated env, or model's" + " reward accuracy." +) +# TODO(koz4k): Switch to out-of-graph evaluation everywhere and remove this +# flag. +flags.DEFINE_bool( + "eval_with_learner", False, + "Whether to use the PolicyLearner.evaluate function instead of an " + "out-of-graph one. Works only with --agent=policy." +) +flags.DEFINE_string( + "planner_hparams_set", "planner_small", "Planner hparam set." +) +flags.DEFINE_string("planner_hparams", "", "Planner hparam overrides.") +flags.DEFINE_integer( + "log_every_steps", 5, "Log every how many environment steps." +) +flags.DEFINE_string( + "debug_video_path", "", "Path to save the debug video at." +) +flags.DEFINE_integer( + "num_debug_videos", 1, "Number of debug videos to generate." +) +flags.DEFINE_integer( + "random_starts_step_limit", 10000, + "Number of frames to choose from for random starts of the simulated env." +) +flags.DEFINE_bool( + "all_epochs", False, + "Whether to run the evaluator on policy checkpoints from all epochs." +) + +# Unused flags needed to pass for multi-run infrastructure. 
+flags.DEFINE_bool("autotune", False, "Unused here.") +flags.DEFINE_string("objective", "", "Unused here.") +flags.DEFINE_string("client_handle", "client_0", "Unused.") +flags.DEFINE_bool("maximize_tuner_objective", True, "Unused.") +flags.DEFINE_integer("vizier_search_algorithm", 0, "Unused.") + + +@registry.register_hparams +def planner_tiny(): + return hparam.HParams( + num_rollouts=1, + planning_horizon=2, + rollout_agent_type="random", + batch_size=1, + env_type="simulated", + uct_const=0.0, + uniform_first_action=True, + ) + + +@registry.register_hparams +def planner_small(): + return hparam.HParams( + num_rollouts=64, + planning_horizon=16, + rollout_agent_type="policy", + batch_size=64, + env_type="simulated", + uct_const=0.0, + uniform_first_action=True, + ) + + +@registry.register_hparams +def planner_base(): + return hparam.HParams( + num_rollouts=96, + batch_size=96, + planning_horizon=8, + rollout_agent_type="policy", + env_type="simulated", + uct_const=0., + uniform_first_action=True, + ) + + +# Tuning of uniform_first_action and uct_const. Default params repeated for +# clarity. + + +@registry.register_hparams +def planner_guess1(): + hparams = planner_base() + hparams.uniform_first_action = False + hparams.uct_const = 0. + return hparams + + +@registry.register_hparams +def planner_guess2(): + hparams = planner_base() + hparams.uniform_first_action = True + hparams.uct_const = 3. + return hparams + + +@registry.register_hparams +def planner_guess3(): + hparams = planner_base() + hparams.uniform_first_action = False + hparams.uct_const = 2. + return hparams + + +# Tuning of uct_const, num_collouts and normalizer_window_size. + + +@registry.register_hparams +def planner_guess4(): + hparams = planner_base() + hparams.uct_const = 2 + hparams.num_rollouts = 96 + hparams.normalizer_window_size = 30 + return hparams + + +@registry.register_hparams +def planner_guess5(): + hparams = planner_base() + hparams.uct_const = 2 + hparams.num_rollouts = 3 * 96 + hparams.normalizer_window_size = 30 + return hparams + + +@registry.register_hparams +def planner_guess6(): + hparams = planner_base() + hparams.uct_const = 4 + hparams.num_rollouts = 96 + hparams.normalizer_window_size = 30 + return hparams + + +@registry.register_hparams +def planner_guess7(): + hparams = planner_base() + hparams.uct_const = 4 + hparams.num_rollouts = 3 * 96 + hparams.normalizer_window_size = 30 + return hparams + + +@registry.register_hparams +def planner_guess8(): + hparams = planner_base() + hparams.uct_const = 2 + hparams.num_rollouts = 3 * 96 + hparams.normalizer_window_size = 300 + return hparams + + +@registry.register_hparams +def planner_guess9(): + hparams = planner_base() + hparams.uct_const = 4 + hparams.num_rollouts = 3 * 96 + hparams.normalizer_window_size = 300 + return hparams + + +@registry.register_hparams +def planner_guess0(): + hparams = planner_base() + hparams.uct_const = 6 + hparams.num_rollouts = 4 * 96 + hparams.normalizer_window_size = 30 + return hparams + + +def make_env(env_type, real_env, sim_env_kwargs): + """Factory function for envs.""" + return { + "real": lambda: real_env.new_like( # pylint: disable=g-long-lambda + batch_size=sim_env_kwargs["batch_size"], + store_rollouts=False, + ), + "simulated": lambda: rl_utils.SimulatedBatchGymEnvWithFixedInitialFrames( # pylint: disable=g-long-lambda + **sim_env_kwargs + ), + }[env_type]() + + +def make_agent( + agent_type, env, policy_hparams, policy_dir, sampling_temp, + sim_env_kwargs_fn=None, frame_stack_size=None, 
rollout_agent_type=None, + batch_size=None, inner_batch_size=None, env_type=None, **planner_kwargs +): + """Factory function for Agents.""" + if batch_size is None: + batch_size = env.batch_size + return { + "random": lambda: rl_utils.RandomAgent( # pylint: disable=g-long-lambda + batch_size, env.observation_space, env.action_space + ), + "policy": lambda: rl_utils.PolicyAgent( # pylint: disable=g-long-lambda + batch_size, env.observation_space, env.action_space, + policy_hparams, policy_dir, sampling_temp + ), + "planner": lambda: rl_utils.PlannerAgent( # pylint: disable=g-long-lambda + batch_size, make_agent( + rollout_agent_type, env, policy_hparams, policy_dir, + sampling_temp, batch_size=inner_batch_size + ), make_env(env_type, env.env, sim_env_kwargs_fn()), + lambda env: rl_utils.BatchStackWrapper(env, frame_stack_size), + discount_factor=policy_hparams.gae_gamma, **planner_kwargs + ), + }[agent_type]() + + +def collect_frames_for_random_starts( + storage_env, stacked_env, agent, frame_stack_size, random_starts_step_limit, + log_every_steps=None +): + """Collects frames from real env for random starts of simulated env.""" + del frame_stack_size + storage_env.start_new_epoch(0) + tf.logging.info( + "Collecting %d frames for random starts.", random_starts_step_limit + ) + rl_utils.run_rollouts( + stacked_env, agent, stacked_env.reset(), + step_limit=random_starts_step_limit, + many_rollouts_from_each_env=True, + log_every_steps=log_every_steps, + ) + # Save unfinished rollouts to history. + stacked_env.reset() + + +def make_agent_from_hparams( + agent_type, base_env, stacked_env, loop_hparams, policy_hparams, + planner_hparams, model_dir, policy_dir, sampling_temp, video_writers=() +): + """Creates an Agent from hparams.""" + def sim_env_kwargs_fn(): + return rl.make_simulated_env_kwargs( + base_env, loop_hparams, batch_size=planner_hparams.batch_size, + model_dir=model_dir + ) + planner_kwargs = planner_hparams.values() + planner_kwargs.pop("batch_size") + planner_kwargs.pop("rollout_agent_type") + planner_kwargs.pop("env_type") + return make_agent( + agent_type, stacked_env, policy_hparams, policy_dir, sampling_temp, + sim_env_kwargs_fn, loop_hparams.frame_stack_size, + planner_hparams.rollout_agent_type, + inner_batch_size=planner_hparams.batch_size, + env_type=planner_hparams.env_type, + video_writers=video_writers, **planner_kwargs + ) + + +def make_eval_fn_with_agent( + agent_type, eval_mode, planner_hparams, model_dir, log_every_steps=None, + video_writers=(), random_starts_step_limit=None +): + """Returns an out-of-graph eval_fn using the Agent API.""" + def eval_fn(env, loop_hparams, policy_hparams, policy_dir, sampling_temp): + """Eval function.""" + base_env = env + env = rl_utils.BatchStackWrapper(env, loop_hparams.frame_stack_size) + agent = make_agent_from_hparams( + agent_type, base_env, env, loop_hparams, policy_hparams, + planner_hparams, model_dir, policy_dir, sampling_temp, video_writers + ) + + if eval_mode == "agent_simulated": + real_env = base_env.new_like(batch_size=1) + stacked_env = rl_utils.BatchStackWrapper( + real_env, loop_hparams.frame_stack_size + ) + collect_frames_for_random_starts( + real_env, stacked_env, agent, loop_hparams.frame_stack_size, + random_starts_step_limit, log_every_steps + ) + initial_frame_chooser = rl_utils.make_initial_frame_chooser( + real_env, loop_hparams.frame_stack_size, + simulation_random_starts=True, + simulation_flip_first_random_for_beginning=False, + split=None, + ) + env_fn = rl.make_simulated_env_fn_from_hparams( + 
real_env, loop_hparams, batch_size=loop_hparams.eval_batch_size, + initial_frame_chooser=initial_frame_chooser, model_dir=model_dir + ) + sim_env = env_fn(in_graph=False) + env = rl_utils.BatchStackWrapper(sim_env, loop_hparams.frame_stack_size) + + kwargs = {} + if not agent.records_own_videos: + kwargs["video_writers"] = video_writers + step_limit = base_env.rl_env_max_episode_steps + if step_limit == -1: + step_limit = None + rl_utils.run_rollouts( + env, agent, env.reset(), log_every_steps=log_every_steps, + step_limit=step_limit, **kwargs + ) + if eval_mode == "agent_real": + assert len(base_env.current_epoch_rollouts()) == env.batch_size + return eval_fn + + +def evaluate_world_model( + agent_type, loop_hparams, planner_hparams, model_dir, policy_dir, + random_starts_step_limit, debug_video_path, log_every_steps +): + """Evaluates the world model.""" + if debug_video_path: + debug_video_path = os.path.join(debug_video_path, "0.avi") + + storage_env = rl_utils.setup_env(loop_hparams, batch_size=1, max_num_noops=0) + stacked_env = rl_utils.BatchStackWrapper( + storage_env, loop_hparams.frame_stack_size + ) + policy_hparams = trainer_lib.create_hparams(loop_hparams.base_algo_params) + agent = make_agent_from_hparams( + agent_type, storage_env, stacked_env, loop_hparams, policy_hparams, + planner_hparams, model_dir, policy_dir, + # TODO(koz4k): Loop over eval_sampling_temps? + sampling_temp=loop_hparams.eval_sampling_temps[0], + ) + collect_frames_for_random_starts( + storage_env, stacked_env, agent, loop_hparams.frame_stack_size, + random_starts_step_limit, log_every_steps + ) + return rl_utils.evaluate_world_model( + storage_env, loop_hparams, model_dir, debug_video_path, split=None + ) + + +def evaluate( + loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir, + agent_type, eval_mode, eval_with_learner, log_every_steps, debug_video_path, + num_debug_videos=1, random_starts_step_limit=None, + report_fn=None, report_metric=None +): + """Evaluate.""" + if eval_with_learner: + assert agent_type == "policy" + + if report_fn: + assert report_metric is not None + + eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir) + video_writers = () + kwargs = {} + if eval_mode in ["agent_real", "agent_simulated"]: + if not eval_with_learner: + if debug_video_path: + tf.gfile.MakeDirs(debug_video_path) + video_writers = [ + common_video.WholeVideoWriter( # pylint: disable=g-complex-comprehension + fps=10, + output_path=os.path.join(debug_video_path, "{}.avi".format(i)), + file_format="avi", + ) + for i in range(num_debug_videos) + ] + kwargs["eval_fn"] = make_eval_fn_with_agent( + agent_type, eval_mode, planner_hparams, model_dir, + log_every_steps=log_every_steps, + video_writers=video_writers, + random_starts_step_limit=random_starts_step_limit + ) + eval_metrics = rl_utils.evaluate_all_configs( + loop_hparams, policy_dir, **kwargs + ) + else: + eval_metrics = evaluate_world_model( + agent_type, loop_hparams, planner_hparams, model_dir, policy_dir, + random_starts_step_limit, debug_video_path, log_every_steps + ) + rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, 0) + + for video_writer in video_writers: + video_writer.finish_to_disk() + + # Report metrics + if report_fn: + if report_metric == "mean_reward": + metric_name = rl_utils.get_metric_name( + sampling_temp=loop_hparams.eval_sampling_temps[0], + max_num_noops=loop_hparams.eval_max_num_noops, + clipped=False + ) + report_fn(eval_metrics[metric_name], 0) + else: + report_fn(eval_metrics[report_metric], 0) + 
return eval_metrics + + +def get_game_for_worker(map_name, directory_id): + """Get game for the given worker (directory) id.""" + if map_name == "v100unfriendly": + games = ["chopper_command", "boxing", "asterix", "seaquest"] + worker_per_game = 5 + elif map_name == "human_nice": + games = gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE + worker_per_game = 5 + else: + raise ValueError("Unknown worker to game map name: %s" % map_name) + games.sort() + game_id = (directory_id - 1) // worker_per_game + tf.logging.info("Getting game %d from %s." % (game_id, games)) + return games[game_id] + + +def evaluate_all_epochs( + loop_hparams, planner_hparams, policy_dir, model_dir, eval_metrics_dir, + *args, **kwargs +): + epoch_policy_dirs = tf.gfile.Glob(os.path.join(policy_dir, "epoch_*")) + for epoch_policy_dir in epoch_policy_dirs: + epoch_metrics_dir = os.path.join(eval_metrics_dir, "epoch_{}".format( + epoch_policy_dir.split("_")[-1] + )) + evaluate( + loop_hparams, planner_hparams, epoch_policy_dir, model_dir, + epoch_metrics_dir, *args, **kwargs + ) + + +def main(_): + now = datetime.datetime.now() + now_tag = now.strftime("%Y_%m_%d_%H_%M") + loop_hparams = trainer_lib.create_hparams( + FLAGS.loop_hparams_set, FLAGS.loop_hparams + ) + if FLAGS.worker_to_game_map and FLAGS.total_num_workers > 1: + loop_hparams.game = get_game_for_worker( + FLAGS.worker_to_game_map, FLAGS.worker_id + 1) + tf.logging.info("Set game to %s." % loop_hparams.game) + loop_hparams.eval_rl_env_max_episode_steps = FLAGS.eval_step_limit + loop_hparams.eval_batch_size = FLAGS.eval_batch_size + planner_hparams = trainer_lib.create_hparams( + FLAGS.planner_hparams_set, FLAGS.planner_hparams + ) + policy_dir = FLAGS.policy_dir + model_dir = FLAGS.model_dir + eval_metrics_dir = FLAGS.eval_metrics_dir + debug_video_path = FLAGS.debug_video_path + evaluate_fn = evaluate + if FLAGS.output_dir: + cur_dir = FLAGS.output_dir + if FLAGS.total_num_workers > 1: + cur_dir = os.path.join(cur_dir, "%d" % (FLAGS.worker_id + 1)) + policy_dir = os.path.join(cur_dir, "policy") + model_dir = os.path.join(cur_dir, "world_model") + eval_dir_basename = "evaluator_" + if FLAGS.agent == "planner": + eval_dir_basename = FLAGS.planner_hparams_set + "_" + eval_metrics_dir = os.path.join(cur_dir, eval_dir_basename + now_tag) + debug_video_path = eval_metrics_dir + tf.logging.info("Writing metrics to %s." % eval_metrics_dir) + if not tf.gfile.Exists(eval_metrics_dir): + tf.gfile.MkDir(eval_metrics_dir) + if FLAGS.all_epochs: + evaluate_fn = evaluate_all_epochs + evaluate_fn( + loop_hparams, planner_hparams, policy_dir, model_dir, + eval_metrics_dir, FLAGS.agent, FLAGS.mode, FLAGS.eval_with_learner, + FLAGS.log_every_steps if FLAGS.log_every_steps > 0 else None, + debug_video_path=debug_video_path, + num_debug_videos=FLAGS.num_debug_videos, + random_starts_step_limit=FLAGS.random_starts_step_limit, + ) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/rl/evaluator_test.py b/tensor2tensor/rl/evaluator_test.py new file mode 100644 index 000000000..76ebfb912 --- /dev/null +++ b/tensor2tensor/rl/evaluator_test.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests the evaluator.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.rl import evaluator +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +class EvalTest(tf.test.TestCase): + + def test_evaluate_pong_random_agent(self): + loop_hparams = registry.hparams("rlmb_tiny") + planner_hparams = registry.hparams("planner_tiny") + temp_dir = tf.test.get_temp_dir() + evaluator.evaluate( + loop_hparams, planner_hparams, temp_dir, temp_dir, temp_dir, + agent_type="random", eval_mode="agent_real", eval_with_learner=False, + log_every_steps=None, debug_video_path="" + ) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/rl/gym_utils.py b/tensor2tensor/rl/gym_utils.py new file mode 100644 index 000000000..ef1f511ed --- /dev/null +++ b/tensor2tensor/rl/gym_utils.py @@ -0,0 +1,360 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for interacting with Gym classes.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math + +from absl import logging +import gym +import gym.wrappers +import numpy as np +from PIL import Image + + +class StickyActionEnv(gym.Wrapper): + """Based on openai/atari-reset implementation.""" + + def __init__(self, env, p=0.25): + gym.Wrapper.__init__(self, env) + self.p = p + self.last_action = 0 + + def step(self, action): + if np.random.uniform() < self.p: + action = self.last_action + self.last_action = action + obs, reward, done, info = self.env.step(action) + return obs, reward, done, info + + def reset(self, **kwargs): + return self.env.reset(**kwargs) + + +class MaxAndSkipEnv(gym.Wrapper): + """Same wrapper as in OpenAI baselines for comparability of results.""" + + def __init__(self, env, skip=4): + """Return only every `skip`-th frame.""" + gym.Wrapper.__init__(self, env) + observation_space = env.observation_space + # Most recent raw observations (for max pooling across time steps). 
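+    # The buffer holds the last two raw frames of each skip window so that
+    # step() can return their pixel-wise max, which removes the flicker of
+    # sprites that some Atari games draw only on alternate frames.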
+ self._obs_buffer = np.zeros( + (2,) + observation_space.shape, dtype=observation_space.dtype) + self._skip = skip + + def __str__(self): + return "MaxAndSkip<%s>" % str(self.env) + + def step(self, action): + """Repeat action, sum reward, and max over last observations.""" + total_reward = 0.0 + done = None + for i in range(self._skip): + obs, reward, done, info = self.env.step(action) + if i == self._skip - 2: + self._obs_buffer[0] = obs + if i == self._skip - 1: + self._obs_buffer[1] = obs + total_reward += reward + if done: + break + # Note that the observation on the done=True frame doesn't matter. + max_frame = self._obs_buffer.max(axis=0) + return max_frame, total_reward, done, info + + def reset(self, **kwargs): + return self.env.reset(**kwargs) + + +class ActionDiscretizeWrapper(gym.ActionWrapper): + """Wraps an environment with continuous actions and discretizes them. + + This is a simplified adaptation of ActionDiscretizeWrapper + from tf_agents. + """ + + def __init__(self, env, num_actions): + """Constructs a wrapper for discretizing the action space. + + Args: + env: environment to wrap. + num_actions: A np.array of the same shape as the environment's + action_spec. Elements in the array specify the number of actions to + discretize to for each dimension. + + Raises: + ValueError: IF the action_spec shape and the limits shape are not equal. + """ + + if not isinstance(env.action_space, gym.spaces.box.Box): + raise ValueError( + "The action space is {}, but gym.spaces.box.Box is expected".format( + env.action_space)) + + gym.Wrapper.__init__(self, env) + + # We convert a scalar num_actions to array [num_actions, num_actions, ...] + self._num_actions = np.broadcast_to(num_actions, env.action_space.shape) + + if env.action_space.shape != self._num_actions.shape: + raise ValueError("Spec {} and limit shape do not match. Got {}".format( + env.action_space.shape, self._num_actions.shape)) + self.action_space = gym.spaces.MultiDiscrete(nvec=self._num_actions) + self._action_map = self._discretize_env(env) + + def _discretize_env(self, env): + """Generates a discrete bounded spec and a linspace for the given limits. + + Args: + env: An array to discretize. + + Returns: + Tuple with the discrete_spec along with a list of lists mapping actions. + Raises: + ValueError: If not all limits value are >=2 or maximum or minimum of boxes + is equal to +- infinity. + """ + if not np.all(self._num_actions >= 2): + raise ValueError("num_actions should all be at least size 2.") + + if (math.isinf(np.min(env.action_space.low)) or + math.isinf(np.max(env.action_space.high))): + raise ValueError( + """Minimum of boxes is {} and maximum of boxes is {}, + but we expect that finite values are provided.""". + format(np.min(env.action_space.low), + np.max(env.action_space.high))) + + limits = np.broadcast_to(self._num_actions, + env.action_space.shape) + minimum = np.broadcast_to(np.min(env.action_space.low), + env.action_space.shape) + maximum = np.broadcast_to(np.max(env.action_space.high), + env.action_space.shape) + + action_map = [ + np.linspace(env_min, env_max, num=n_actions) + for env_min, env_max, n_actions in zip( + np.nditer(minimum), np.nditer(maximum), np.nditer(limits)) + ] + + return action_map + + def _map_actions(self, action): + """Maps the given discrete action to the corresponding continuous action. + + Args: + action: Discrete action to map. + + Returns: + Numpy array with the mapped continuous actions. 
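+      For example, assuming a 1-D Box(-1, 1) action space discretized with
+      num_actions=4, the discrete actions [0], [1], [2] and [3] map
+      approximately to the continuous actions [-1.], [-0.33], [0.33] and [1.].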
+ Raises: + ValueError: If the given action's shpe does not match the action_spec + shape. + """ + action = np.asarray(action) + if action.shape != self.action_space.shape: + raise ValueError( + "Received action with incorrect shape. Got {}, expected {}".format( + action.shape, self.action_space.shape)) + + mapped_action = [self._action_map[i][a] + for i, a in enumerate(action.flatten())] + return np.reshape(mapped_action, newshape=action.shape) + + def action(self, action): + """Steps the environment while remapping the actions. + + Args: + action: Action to take. + + Returns: + The next time_step from the environment. + """ + return self._map_actions(action) + + def reverse_action(self, action): + raise NotImplementedError + + +class RenderedEnv(gym.Wrapper): + """Simple Env wrapper to override observations with rendered rgb values.""" + + def __init__(self, + env, + mode="rgb_array", + low=0, + high=255, + resize_to=None, + output_dtype=None): + gym.Wrapper.__init__(self, env) + # Get a sample frame to correctly set observation space + self.mode = mode + sample_frame = self.render(mode=self.mode) + assert sample_frame is not None + self.should_resize = False + self.output_dtype = output_dtype + if resize_to is None: + self.observation_space = gym.spaces.Box( + low=low, + high=high, + shape=sample_frame.shape, + dtype=sample_frame.dtype) + else: + assert len(resize_to) == 2 + self.should_resize = True + num_channels = sample_frame.shape[-1] + self.observation_space = gym.spaces.Box( + low=low, + high=high, + shape=list(resize_to) + [num_channels], + dtype=sample_frame.dtype) + + def _maybe_resize(self, obs): + if not self.should_resize: + return obs + height, width = self.observation_space.shape[:2] + img = Image.fromarray(obs) + img = img.resize([width, height], resample=Image.ANTIALIAS) + if self.output_dtype is None: + return np.array(img) + return np.array(img).astype(self.output_dtype) + + def step(self, action): + _, reward, done, info = self.env.step(action) + obs = self._maybe_resize(self.env.render(mode=self.mode)) + return obs, reward, done, info + + def reset(self, **kwargs): + self.env.reset(**kwargs) + obs = self._maybe_resize(self.env.render(mode=self.mode)) + return obs + + +def remove_time_limit_wrapper(env): + """Removes top level TimeLimit Wrapper. + + Removes TimeLimit Wrapper from top level if exists, throws error if any other + TimeLimit Wrapper is present in stack. + + Args: + env: environment + + Returns: + the env with removed time limit wrapper. + """ + if isinstance(env, gym.wrappers.TimeLimit): + env = env.env + env_ = env + while isinstance(env_, gym.Wrapper): + if isinstance(env_, gym.wrappers.TimeLimit): + raise ValueError("Can remove only top-level TimeLimit gym.Wrapper.") + env_ = env_.env + return env + + +def gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env, rendered_env, + rendered_env_resize_to, sticky_actions, output_dtype, + num_actions): + """Wraps a gym environment. see make_gym_env for details.""" + # rl_env_max_episode_steps is None or int. 
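+  # Semantics of rl_env_max_episode_steps: a negative value leaves the env's
+  # own TimeLimit (if any) untouched, None strips the TimeLimit entirely, and
+  # a non-negative int strips it and re-adds a TimeLimit with that many steps.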
+ assert ((not rl_env_max_episode_steps) or + isinstance(rl_env_max_episode_steps, int)) + + wrap_with_time_limit = ((not rl_env_max_episode_steps) or + rl_env_max_episode_steps >= 0) + + if wrap_with_time_limit: + env = remove_time_limit_wrapper(env) + + if num_actions is not None: + logging.log_first_n( + logging.INFO, "Number of discretized actions: %d", 1, num_actions) + env = ActionDiscretizeWrapper(env, num_actions=num_actions) + + if sticky_actions: + env = StickyActionEnv(env) + + if maxskip_env: + env = MaxAndSkipEnv(env) # pylint: disable=redefined-variable-type + + if rendered_env: + env = RenderedEnv( + env, resize_to=rendered_env_resize_to, output_dtype=output_dtype) + + if wrap_with_time_limit and rl_env_max_episode_steps is not None: + env = gym.wrappers.TimeLimit( + env, max_episode_steps=rl_env_max_episode_steps) + return env + + +def make_gym_env(name, + rl_env_max_episode_steps=-1, + maxskip_env=False, + rendered_env=False, + rendered_env_resize_to=None, + sticky_actions=False, + output_dtype=None, + num_actions=None): + """Create a gym env optionally with a time limit and maxskip wrapper. + + NOTE: The returned env may already be wrapped with TimeLimit! + + Args: + name: `str` - base name of the gym env to make. + rl_env_max_episode_steps: `int` or None - Using any value < 0 returns the + env as-in, otherwise we impose the requested timelimit. Setting this to + None returns a wrapped env that doesn't have a step limit. + maxskip_env: whether to also use MaxAndSkip wrapper before time limit. + rendered_env: whether to force render for observations. Use this for + environments that are not natively rendering the scene for observations. + rendered_env_resize_to: a list of [height, width] to change the original + resolution of the native environment render. + sticky_actions: whether to use sticky_actions before MaxAndSkip wrapper. + output_dtype: numpy datatype that we want the observation to be in, if None + this defaults to the env's observation dtype. Useful for TPUs since they + don't support uint8 which is a default observation type for a lot of envs. + num_actions: None if we do not need discretization and the number of + discrete actions per continuous action. + + Returns: + An instance of `gym.Env` or `gym.Wrapper`. + """ + env = gym.make(name) + return gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env, + rendered_env, rendered_env_resize_to, sticky_actions, + output_dtype, num_actions) + + +def register_gym_env(class_entry_point, version="v0", kwargs=None): + """Registers the class in Gym and returns the registered name and the env.""" + + split_on_colon = class_entry_point.split(":") + assert len(split_on_colon) == 2 + + class_name = split_on_colon[1] + # We have to add the version to conform to gym's API. + env_name = "T2TEnv-{}-{}".format(class_name, version) + gym.envs.register(id=env_name, entry_point=class_entry_point, kwargs=kwargs) + + logging.info( + "Entry Point [%s] registered with id [%s]", class_entry_point, env_name) + + return env_name, gym.make(env_name) diff --git a/tensor2tensor/rl/gym_utils_test.py b/tensor2tensor/rl/gym_utils_test.py new file mode 100644 index 000000000..dbc85a024 --- /dev/null +++ b/tensor2tensor/rl/gym_utils_test.py @@ -0,0 +1,289 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for tensor2tensor.rl.gym_utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import unittest + +import gym +from gym import spaces +import numpy as np +from tensor2tensor.rl import gym_utils +import tensorflow.compat.v1 as tf + + +class SimpleEnv(gym.Env): + """A simple environment with a 3x3 observation space, is done on action=1.""" + + def __init__(self): + self.reward_range = (-1.0, 1.0) + self.action_space = spaces.Discrete(2) + self.observation_space = spaces.Box(low=0, high=255, shape=(3, 3)) + + def reset(self): + return self.observation_space.low + + def step(self, action): + if action == 0: + return self.reset(), -1.0, False, {} + else: + return self.observation_space.high, +1.0, True, {} + + def render(self, mode="human"): + del mode # Unused + return np.zeros([640, 480, 3], np.uint8) + + +class SimpleContinuousActionsEnv(gym.Env): + """A simple environment with a 3x3 observation space, is done on action=1.""" + + def __init__(self, dimensions): + self.reward_range = (-1.0, 1.0) + self.action_space = spaces.Box(low=-1, high=1, shape=(dimensions,)) + self.observation_space = spaces.Box(low=0, high=255, shape=(3, 3)) + + def reset(self): + return self.observation_space.low + + def step(self, action): + if action == 0: + return self.reset(), -1.0, False, {} + else: + return self.observation_space.high, +1.0, True, {} + + def render(self, mode="human"): + del mode # Unused + return np.zeros([640, 480, 3], np.uint8) + + +class EnvWithOptions(SimpleEnv): + """A simple env that takes arguments on init.""" + + def __init__(self, done_action=0): + super(EnvWithOptions, self).__init__() + self.action_space = spaces.Discrete(3) + self._done_action = done_action + + def step(self, action): + if action == self._done_action: + return self.observation_space.high, +1.0, True, {} + return self.reset(), -1.0, False, {} + + +class GymUtilsTest(tf.test.TestCase): + + # Just make an environment and expect to get one. + def test_making_simple_env(self): + env = gym_utils.make_gym_env("CartPole-v0") + self.assertIsInstance(env, gym.Env) + + # Make a time-wrapped environment and expect to get one. 
+ def test_making_timewrapped_env(self): + env = gym_utils.make_gym_env("CartPole-v0", rl_env_max_episode_steps=1000) + self.assertIsInstance(env, gym.Env) + self.assertIsInstance(env, gym.wrappers.TimeLimit) + self.assertEqual(1000, env._max_episode_steps) + + # Make an instance of the environment without a TimeLimit + def test_unlimited_env(self): + env = gym_utils.make_gym_env("CartPole-v0", rl_env_max_episode_steps=None) + self.assertIsInstance(env, gym.Env) + self.assertNotIsInstance(env, gym.wrappers.TimeLimit) + + def test_rendered_env(self): + env = gym_utils.RenderedEnv(SimpleEnv(), resize_to=(64, 12)) + obs, _, _, _ = env.step(1) + self.assertTrue(np.allclose(np.zeros([64, 12, 3], np.uint8), obs)) + + env = gym_utils.RenderedEnv(SimpleEnv(), resize_to=(64, 12), + output_dtype=np.float32) + obs, _, _, _ = env.step(1) + self.assertTrue(np.allclose(np.zeros([64, 12, 3], np.float32), obs)) + + def test_rendered_env_continuous_1d(self): + env = gym_utils.RenderedEnv( + SimpleContinuousActionsEnv(dimensions=1), + resize_to=(64, 12)) + obs, _, _, _ = env.step(0.5) + self.assertTrue(np.allclose(np.zeros([64, 12, 3], np.uint8), obs)) + + env = gym_utils.RenderedEnv( + SimpleContinuousActionsEnv(dimensions=1), + resize_to=(64, 12), + output_dtype=np.float32) + obs, _, _, _ = env.step(1) + self.assertTrue(np.allclose(np.zeros([64, 12, 3], np.float32), obs)) + + def test_rendered_env_continuous_2d(self): + env = gym_utils.RenderedEnv( + SimpleContinuousActionsEnv(dimensions=2), + resize_to=(64, 12)) + obs, _, _, _ = env.step(0.5) + self.assertTrue(np.allclose(np.zeros([64, 12, 3], np.uint8), obs)) + + env = gym_utils.RenderedEnv( + SimpleContinuousActionsEnv(dimensions=2), + resize_to=(64, 12), + output_dtype=np.float32) + obs, _, _, _ = env.step(1) + self.assertTrue(np.allclose(np.zeros([64, 12, 3], np.float32), obs)) + + def test_correct_number_of_discrete_actions_1d(self): + """The env should become discrete whenever we pass num_action.""" + env_discrete = gym_utils.ActionDiscretizeWrapper( + gym_utils.RenderedEnv(SimpleContinuousActionsEnv(dimensions=1)), + num_actions=4) + + expected_action_space = gym.spaces.MultiDiscrete([4,]) + self.assertEqual(env_discrete.action_space, expected_action_space) + + def test_correct_number_of_discrete_actions_2d(self): + env_discrete = gym_utils.ActionDiscretizeWrapper( + gym_utils.RenderedEnv(SimpleContinuousActionsEnv(dimensions=2)), + num_actions=4) + + expected_action_space = gym.spaces.MultiDiscrete([4, 4]) + self.assertEqual(env_discrete.action_space, expected_action_space) + + def test_action_mapping_1d(self): + """Testing discretization with a mock environment. + + In the mock call we get access to the argument of the + SimpleContinuousActionsEnv.step method which we check against + precomputed values of continuous actions. 
+ """ + num_actions = 4 + + with unittest.mock.patch.object( + gym_utils.RenderedEnv, "step", autospec=True) as mock_step_method: + env = gym_utils.RenderedEnv(SimpleContinuousActionsEnv(dimensions=1)) + expected_continuous_actions = np.linspace( + np.min(env.action_space.low), + np.min(env.action_space.high), + num=num_actions).flatten() + + env_discrete = gym_utils.ActionDiscretizeWrapper(env, num_actions) + for discrete_action in range(num_actions): + env_discrete.step([discrete_action]) + mock_step_method.assert_called_with( + unittest.mock.ANY, + expected_continuous_actions[discrete_action]) + + def test_action_mapping_2d(self): + num_actions = 8 + + def expected_continuous_actions(discrete_action): + if discrete_action == [0, 0]: + return np.array([-1, -1]) + elif discrete_action == [0, 3]: + return np.array([-1, -0.14285714]) + elif discrete_action == [4, 4]: + return np.array([0.14285714, 0.14285714]) + elif discrete_action == [7, 7]: + return np.array([1, 1]) + + discrete_actions = [[0, 0], [0, 3], [4, 4], [7, 7]] + + with unittest.mock.patch.object( + gym_utils.RenderedEnv, "step", autospec=True) as mock_step_method: + env = gym_utils.RenderedEnv(SimpleContinuousActionsEnv(dimensions=2)) + + env_discrete = gym_utils.ActionDiscretizeWrapper(env, num_actions) + for discrete_action in discrete_actions: + env_discrete.step(discrete_action) + mock_args, _ = mock_step_method.call_args + np.testing.assert_array_almost_equal( + mock_args[1], expected_continuous_actions(discrete_action)) + + def test_gym_registration(self): + reg_id, env = gym_utils.register_gym_env( + "tensor2tensor.rl.gym_utils_test:SimpleEnv") + + self.assertEqual("T2TEnv-SimpleEnv-v0", reg_id) + + # Most basic check. + self.assertIsInstance(env, gym.Env) + + # Just make sure we got the same environment. + self.assertTrue( + np.allclose(env.reset(), np.zeros(shape=(3, 3), dtype=np.uint8))) + + _, _, done, _ = env.step(1) + self.assertTrue(done) + + def test_gym_registration_continuous(self): + reg_id, env = gym_utils.register_gym_env( + "tensor2tensor.rl.gym_utils_test:SimpleContinuousActionsEnv", + kwargs={"dimensions": 2}) + + self.assertEqual("T2TEnv-SimpleContinuousActionsEnv-v0", reg_id) + + # Most basic check. + self.assertIsInstance(env, gym.Env) + + # Just make sure we got the same environment. + self.assertTrue( + np.allclose(env.reset(), np.zeros(shape=(3, 3), dtype=np.uint8))) + + _, _, done, _ = env.step(1) + self.assertTrue(done) + + def test_gym_registration_with_kwargs(self): + reg_id, env = gym_utils.register_gym_env( + "tensor2tensor.rl.gym_utils_test:EnvWithOptions", + kwargs={"done_action": 2}) + + self.assertEqual("T2TEnv-EnvWithOptions-v0", reg_id) + + # Obligatory reset. + env.reset() + + # Make sure that on action = 0, 1 we are not done, but on 2 we are. + _, _, done, _ = env.step(0) + self.assertFalse(done) + + _, _, done, _ = env.step(1) + self.assertFalse(done) + + _, _, done, _ = env.step(2) + self.assertTrue(done) + + # Now lets try to change the env -- note we have to change the version. + reg_id, env = gym_utils.register_gym_env( + "tensor2tensor.rl.gym_utils_test:EnvWithOptions", + version="v1", + kwargs={"done_action": 1}) + + self.assertEqual("T2TEnv-EnvWithOptions-v1", reg_id) + + # Obligatory reset. + env.reset() + + # Make sure that on action = 0, 2 we are not done, but on 1 we are. 
+ _, _, done, _ = env.step(0) + self.assertFalse(done) + + _, _, done, _ = env.step(2) + self.assertFalse(done) + + _, _, done, _ = env.step(1) + self.assertTrue(done) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/rl/player.py b/tensor2tensor/rl/player.py new file mode 100644 index 000000000..8d81ec474 --- /dev/null +++ b/tensor2tensor/rl/player.py @@ -0,0 +1,541 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Play with a world model. + +Controls: + WSAD and SPACE to control the agent. + R key to reset env. + C key to toggle WAIT mode. + N to perform NOOP action under WAIT mode. + X to reset simulated env only, when running sim-real comparison. + +Run this script with the same parameters as trainer_model_based.py. Note that +values of most of them have no effect on player, so running just + +python -m tensor2tensor/rl/player.py \ + --output_dir=path/to/your/experiment \ + --loop_hparams_set=rlmb_base + +might work for you. + +More advanced example: + +python -m tensor2tensor/rl/record_ppo.py \ + --output_dir=path/to/your/experiment \ + --loop_hparams_set=rlmb_base \ + --sim_and_real=False \ + --simulated_env=False \ + --loop_hparams=generative_model="next_frame" \ + --video_dir=my/video/dir \ + --zoom=6 \ + --fps=50 \ + --env=real \ + --epoch=-1 + +Check flags definitions under imports for more details. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import gym +from gym.utils import play +import numpy as np + +from tensor2tensor.bin import t2t_trainer # pylint: disable=unused-import +from tensor2tensor.rl import player_utils +from tensor2tensor.rl.envs.simulated_batch_env import PIL_Image +from tensor2tensor.rl.envs.simulated_batch_env import PIL_ImageDraw +from tensor2tensor.rl.envs.simulated_batch_gym_env import FlatBatchEnv +from tensor2tensor.rl.rl_utils import absolute_hinge_difference +from tensor2tensor.rl.rl_utils import full_game_name +# Import flags from t2t_trainer and trainer_model_based +import tensor2tensor.rl.trainer_model_based_params # pylint: disable=unused-import +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("video_dir", "/tmp/gym-results", + "Where to save played trajectories.") +flags.DEFINE_float("zoom", 4., + "Resize factor of displayed game.") +flags.DEFINE_float("fps", 20., + "Frames per second.") +flags.DEFINE_string("epoch", "last", + "Data from which epoch to use.") +flags.DEFINE_boolean("sim_and_real", True, + "Compare simulated and real environment.") +flags.DEFINE_boolean("simulated_env", True, + "Either to use 'simulated' or 'real' env.") +flags.DEFINE_boolean("dry_run", False, + "Dry run - without pygame interaction and display, just " + "some random actions on environment") +flags.DEFINE_string("model_ckpt", "", + "World model checkpoint path.") +flags.DEFINE_string("wm_dir", "", + "Directory with world model checkpoints. Inferred from " + "output_dir if empty.") +flags.DEFINE_string("policy_dir", "", + "Directory with policy. Inferred from output_dir if empty.") +flags.DEFINE_string("episodes_data_dir", "", + "Path to data for simulated environment initialization. " + "Inferred from output_dir if empty.") +flags.DEFINE_boolean("game_from_filenames", True, + "If infer game name from data_dir filenames or from " + "hparams.") + + +class PlayerEnv(gym.Env): + """Base (abstract) environment for interactive human play with gym.utils.play. + + Additionally to normal actions passed to underlying environment(s) it + allows to pass special actions by `step` method. + + Special actions: + RETURN_DONE_ACTION: Returns done from `step` to force gym.utils.play to + call reset. + TOGGLE_WAIT_ACTION: Change between real-time-play and wait-for-pressed-key + modes. + WAIT_MODE_NOOP_ACTION: perform noop action (when wait-for-pressed-key mode + is on) + + For keyboard keys related to actions above see `get_keys_to_action` method. + + Naming conventions: + envs_step_tuples: Dictionary of tuples similar to these returned by + gym.Env.step(). + { + "env_name": (observation, reward, done, info), + ... + } + Keys depend on subclass. + """ + + # Integers (as taken by step() method) related to special actions. + RETURN_DONE_ACTION = 101 + TOGGLE_WAIT_ACTION = 102 + WAIT_MODE_NOOP_ACTION = 103 + + HEADER_HEIGHT = 27 + + def __init__(self, action_meanings): + """Constructor for PlayerEnv. + + Args: + action_meanings: list of strings indicating action names. Can be obtain by + >>> env = gym.make("PongNoFrameskip-v4") # insert your game name + >>> env.unwrapped.get_action_meanings() + See gym AtariEnv get_action_meanings() for more details. + """ + self.action_meanings = action_meanings + self._wait = True + # If action_space will be needed, one could use e.g. gym.spaces.Dict. 
+ self.action_space = None + self._last_step_tuples = None + self.action_meanings = action_meanings + self.name_to_action_num = {name: num for num, name in + enumerate(self.action_meanings)} + + def get_keys_to_action(self): + """Get mapping from keyboard keys to actions. + + Required by gym.utils.play in environment or top level wrapper. + + Returns: + { + Unicode code point for keyboard key: action (formatted for step()), + ... + } + """ + # Based on gym AtariEnv.get_keys_to_action() + keyword_to_key = { + "UP": ord("w"), + "DOWN": ord("s"), + "LEFT": ord("a"), + "RIGHT": ord("d"), + "FIRE": ord(" "), + } + + keys_to_action = {} + + for action_id, action_meaning in enumerate(self.action_meanings): + keys_tuple = tuple(sorted([ + key for keyword, key in keyword_to_key.items() + if keyword in action_meaning])) + assert keys_tuple not in keys_to_action + keys_to_action[keys_tuple] = action_id + + # Special actions: + keys_to_action[(ord("r"),)] = self.RETURN_DONE_ACTION + keys_to_action[(ord("c"),)] = self.TOGGLE_WAIT_ACTION + keys_to_action[(ord("n"),)] = self.WAIT_MODE_NOOP_ACTION + + return keys_to_action + + def _player_actions(self): + return { + self.RETURN_DONE_ACTION: self._player_return_done_action, + self.TOGGLE_WAIT_ACTION: self._player_toggle_wait_action, + } + + def _player_toggle_wait_action(self): + self._wait = not self._wait + return self._last_step_tuples + + def step(self, action): + """Pass action to underlying environment(s) or perform special action.""" + # Special codes + if action in self._player_actions(): + envs_step_tuples = self._player_actions()[action]() + elif self._wait and action == self.name_to_action_num["NOOP"]: + # Ignore no-op, do not pass to environment. + envs_step_tuples = self._last_step_tuples + else: + # Run action on environment(s). + if action == self.WAIT_MODE_NOOP_ACTION: + action = self.name_to_action_num["NOOP"] + # Perform action on underlying environment(s). + envs_step_tuples = self._step_envs(action) + self._update_statistics(envs_step_tuples) + + self._last_step_tuples = envs_step_tuples + ob, reward, done, info = self._player_step_tuple(envs_step_tuples) + return ob, reward, done, info + + def _augment_observation(self, ob, reward, cumulative_reward): + """"Expand observation array with additional information header (top rows). + + Args: + ob: observation + reward: reward to be included in header. + cumulative_reward: total cumulated reward to be included in header. + + Returns: + Expanded observation array. + """ + img = PIL_Image().new("RGB", + (ob.shape[1], self.HEADER_HEIGHT,)) + draw = PIL_ImageDraw().Draw(img) + draw.text( + (1, 0), "c:{:3}, r:{:3}".format(int(cumulative_reward), int(reward)), + fill=(255, 0, 0) + ) + draw.text( + (1, 15), "fc:{:3}".format(int(self._frame_counter)), + fill=(255, 0, 0) + ) + header = np.asarray(img) + del img + header.setflags(write=1) + # Top row color indicates if WAIT MODE is on. + if self._wait: + pixel_fill = (0, 255, 0) + else: + pixel_fill = (255, 0, 0) + header[0, :, :] = pixel_fill + return np.concatenate([header, ob], axis=0) + + def reset(self): + raise NotImplementedError + + def _step_envs(self, action): + """Perform action on underlying environment(s).""" + raise NotImplementedError + + def _update_statistics(self, envs_step_tuples): + """Update underlying environment(s) total cumulative rewards.""" + raise NotImplementedError + + def _player_return_done_action(self): + """Function. + + Returns: + envs_step_tuples: such that `player_step_tuple(envs_step_tuples)` + will return done. 
+ """ + raise NotImplementedError + + def _player_step_tuple(self, envs_step_tuples): + """Infer return tuple for step() given underlying environment tuple(s).""" + raise NotImplementedError + + +class SimAndRealEnvPlayer(PlayerEnv): + """Run simulated and real env side-by-side for comparison. + + Displays three windows - one for real environment, second for simulated + and third for their differences. + + Normal actions are passed to both environments. + + Special Actions: + RESTART_SIMULATED_ENV_ACTION: restart simulated environment only, using + current frames from real environment. + See `PlayerEnv` for rest of special actions. + + Naming conventions: + envs_step_tuples: dictionary with two keys. + { + "real_env": (observation, reward, done, info), + "sim_env": (observation, reward, done, info) + } + """ + + RESTART_SIMULATED_ENV_ACTION = 110 + + def __init__(self, real_env, sim_env, action_meanings): + """Init. + + Args: + real_env: real environment such as `FlatBatchEnv`. + sim_env: simulation of `real_env` to be compared with. E.g. + `SimulatedGymEnv` must allow to update initial frames for next reset + with `add_to_initial_stack` method. + action_meanings: list of strings indicating action names. Can be obtain by + >>> env = gym.make("PongNoFrameskip-v4") # insert your game name + >>> env.unwrapped.get_action_meanings() + See gym AtariEnv get_action_meanings() for more details. + """ + super(SimAndRealEnvPlayer, self).__init__(action_meanings) + assert real_env.observation_space.shape == sim_env.observation_space.shape + self.real_env = real_env + self.sim_env = sim_env + orig = self.real_env.observation_space + # Observation consists three side-to-side images - simulated environment + # observation, real environment observation and difference between these + # two. + shape = (orig.shape[0] + self.HEADER_HEIGHT, orig.shape[1] * 3, + orig.shape[2]) + + self.observation_space = gym.spaces.Box(low=orig.low.min(), + high=orig.high.max(), + shape=shape, dtype=orig.dtype) + + def _player_actions(self): + actions = super(SimAndRealEnvPlayer, self)._player_actions() + actions.update({ + self.RESTART_SIMULATED_ENV_ACTION: + self.player_restart_simulated_env_action, + }) + return actions + + def get_keys_to_action(self): + keys_to_action = super(SimAndRealEnvPlayer, self).get_keys_to_action() + keys_to_action[(ord("x"),)] = self.RESTART_SIMULATED_ENV_ACTION + return keys_to_action + + def _player_step_tuple(self, envs_step_tuples): + """Construct observation, return usual step tuple. + + Args: + envs_step_tuples: tuples. + + Returns: + Step tuple: ob, reward, done, info + ob: concatenated images [simulated observation, real observation, + difference], with additional informations in header. + reward: real environment reward + done: True iff. 
envs_step_tuples['real_env'][2] is True + info: real environment info + """ + ob_real, reward_real, _, _ = envs_step_tuples["real_env"] + ob_sim, reward_sim, _, _ = envs_step_tuples["sim_env"] + ob_err = absolute_hinge_difference(ob_sim, ob_real) + + ob_real_aug = self._augment_observation(ob_real, reward_real, + self.cumulative_real_reward) + ob_sim_aug = self._augment_observation(ob_sim, reward_sim, + self.cumulative_sim_reward) + ob_err_aug = self._augment_observation( + ob_err, reward_sim - reward_real, + self.cumulative_sim_reward - self.cumulative_real_reward + ) + ob = np.concatenate([ob_sim_aug, ob_real_aug, ob_err_aug], axis=1) + _, reward, done, info = envs_step_tuples["real_env"] + return ob, reward, done, info + + def reset(self): + """Reset simulated and real environments.""" + self._frame_counter = 0 + ob_real = self.real_env.reset() + # Initialize simulated environment with frames from real one. + self.sim_env.add_to_initial_stack(ob_real) + for _ in range(3): + ob_real, _, _, _ = self.real_env.step(self.name_to_action_num["NOOP"]) + self.sim_env.add_to_initial_stack(ob_real) + ob_sim = self.sim_env.reset() + assert np.all(ob_real == ob_sim) + self._last_step_tuples = self._pack_step_tuples((ob_real, 0, False, {}), + (ob_sim, 0, False, {})) + self.set_zero_cumulative_rewards() + ob, _, _, _ = self._player_step_tuple(self._last_step_tuples) + return ob + + def _pack_step_tuples(self, real_env_step_tuple, sim_env_step_tuple): + return dict(real_env=real_env_step_tuple, + sim_env=sim_env_step_tuple) + + def set_zero_cumulative_rewards(self): + self.cumulative_real_reward = 0 + self.cumulative_sim_reward = 0 + + def _step_envs(self, action): + """Perform step(action) on environments and update initial_frame_stack.""" + self._frame_counter += 1 + real_env_step_tuple = self.real_env.step(action) + sim_env_step_tuple = self.sim_env.step(action) + self.sim_env.add_to_initial_stack(real_env_step_tuple[0]) + return self._pack_step_tuples(real_env_step_tuple, sim_env_step_tuple) + + def _update_statistics(self, envs_step_tuples): + self.cumulative_real_reward += envs_step_tuples["real_env"][1] + self.cumulative_sim_reward += envs_step_tuples["sim_env"][1] + + def _player_return_done_action(self): + ob = np.zeros(self.real_env.observation_space.shape, dtype=np.uint8) + return self._pack_step_tuples((ob, 0, True, {}), + (ob, 0, True, {})) + + def player_restart_simulated_env_action(self): + self._frame_counter = 0 + ob = self.sim_env.reset() + assert np.all(self._last_step_tuples["real_env"][0] == ob) + self.set_zero_cumulative_rewards() + return self._pack_step_tuples( + self._last_step_tuples["real_env"], (ob, 0, False, {})) + + +class SingleEnvPlayer(PlayerEnv): + """"Play on single (simulated or real) environment. + + See `PlayerEnv` for more details. + + Naming conventions: + envs_step_tuples: dictionary with single key. + { + "env": (observation, reward, done, info), + } + Plural form used for consistency with `PlayerEnv`. 
+ """ + + def __init__(self, env, action_meanings): + super(SingleEnvPlayer, self).__init__(action_meanings) + self.env = env + # Set observation space + orig = self.env.observation_space + shape = tuple([orig.shape[0] + self.HEADER_HEIGHT] + list(orig.shape[1:])) + self.observation_space = gym.spaces.Box(low=orig.low.min(), + high=orig.high.max(), + shape=shape, dtype=orig.dtype) + + def _player_step_tuple(self, envs_step_tuples): + """Augment observation, return usual step tuple.""" + ob, reward, done, info = envs_step_tuples["env"] + ob = self._augment_observation(ob, reward, self.cumulative_reward) + return ob, reward, done, info + + def _pack_step_tuples(self, env_step_tuple): + return dict(env=env_step_tuple) + + def reset(self): + self._frame_counter = 0 + ob = self.env.reset() + self._last_step_tuples = self._pack_step_tuples((ob, 0, False, {})) + self.cumulative_reward = 0 + return self._augment_observation(ob, 0, self.cumulative_reward) + + def _step_envs(self, action): + self._frame_counter += 1 + return self._pack_step_tuples(self.env.step(action)) + + def _update_statistics(self, envs_step_tuples): + _, reward, _, _ = envs_step_tuples["env"] + self.cumulative_reward += reward + + def _player_return_done_action(self): + ob = np.zeros(self.env.observation_space.shape, dtype=np.uint8) + return self._pack_step_tuples((ob, 0, True, {})) + + +def main(_): + # gym.logger.set_level(gym.logger.DEBUG) + hparams = registry.hparams(FLAGS.loop_hparams_set) + hparams.parse(FLAGS.loop_hparams) + # Not important for experiments past 2018 + if "wm_policy_param_sharing" not in hparams.values().keys(): + hparams.add_hparam("wm_policy_param_sharing", False) + directories = player_utils.infer_paths( + output_dir=FLAGS.output_dir, + world_model=FLAGS.wm_dir, + policy=FLAGS.policy_dir, + data=FLAGS.episodes_data_dir) + if FLAGS.game_from_filenames: + hparams.set_hparam( + "game", player_utils.infer_game_name_from_filenames(directories["data"]) + ) + action_meanings = gym.make(full_game_name(hparams.game)).\ + unwrapped.get_action_meanings() + epoch = FLAGS.epoch if FLAGS.epoch == "last" else int(FLAGS.epoch) + + def make_real_env(): + env = player_utils.setup_and_load_epoch( + hparams, data_dir=directories["data"], + which_epoch_data=None) + env = FlatBatchEnv(env) # pylint: disable=redefined-variable-type + return env + + def make_simulated_env(setable_initial_frames, which_epoch_data): + env = player_utils.load_data_and_make_simulated_env( + directories["data"], directories["world_model"], + hparams, which_epoch_data=which_epoch_data, + setable_initial_frames=setable_initial_frames) + return env + + if FLAGS.sim_and_real: + sim_env = make_simulated_env( + which_epoch_data=None, setable_initial_frames=True) + real_env = make_real_env() + env = SimAndRealEnvPlayer(real_env, sim_env, action_meanings) + else: + if FLAGS.simulated_env: + env = make_simulated_env( # pylint: disable=redefined-variable-type + which_epoch_data=epoch, setable_initial_frames=False) + else: + env = make_real_env() + env = SingleEnvPlayer(env, action_meanings) # pylint: disable=redefined-variable-type + + env = player_utils.wrap_with_monitor(env, FLAGS.video_dir) + + if FLAGS.dry_run: + env.unwrapped.get_keys_to_action() + for _ in range(5): + env.reset() + for i in range(50): + env.step(i % 3) + env.step(PlayerEnv.RETURN_DONE_ACTION) # reset + return + + play.play(env, zoom=FLAGS.zoom, fps=FLAGS.fps) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git 
a/tensor2tensor/rl/player_utils.py b/tensor2tensor/rl/player_utils.py new file mode 100644 index 000000000..6265533e3 --- /dev/null +++ b/tensor2tensor/rl/player_utils.py @@ -0,0 +1,396 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for player.py.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import os +import re + +import gym +import numpy as np +import six + +from tensor2tensor.models.research.rl import get_policy +from tensor2tensor.models.research.rl import make_simulated_env_fn_from_hparams +from tensor2tensor.rl import rl_utils +from tensor2tensor.rl.envs.simulated_batch_gym_env import FlatBatchEnv +from tensor2tensor.utils import hparam +from tensor2tensor.utils import trainer_lib +from tensor2tensor.utils.misc_utils import camelcase_to_snakecase + +import tensorflow.compat.v1 as tf + + +flags = tf.flags +FLAGS = flags.FLAGS + + +class SimulatedGymEnv(gym.Env): + """Gym environment, running with world model. + + Allows passing custom initial frames. + + Examples: + Setup simulated env from some point of real rollout. + >>> sim_env = SimulatedGymEnv(setable_initial_frames=True, **kwargs) + >>> real_env = FlatBatchEnv(T2TGymEnv(...)) + >>> while ...: + >>> ob, _, _, _ = real_env.step(action) + >>> sim_env.add_to_initial_stack(ob) + >>> sim_env.reset() + >>> # Continue sim_env rollout. + """ + + def __init__(self, real_env, world_model_dir, hparams, random_starts, + setable_initial_frames=False): + """Init. + + Args: + real_env: gym environment. + world_model_dir: path to world model checkpoint directory. + hparams: hparams for rlmb pipeline. + random_starts: if restart world model from random frames, or only + from initial ones (from beginning of episodes). Valid only when + `setable_initial_fames` set to False. + setable_initial_frames: if True, initial_frames for world model should be + set by `add_to_initial_stack`. 
+ """ + + self._setable_initial_frames = setable_initial_frames + + if self._setable_initial_frames: + real_obs_shape = real_env.observation_space.shape + shape = (1, hparams.frame_stack_size) + real_obs_shape + self._initial_frames = np.zeros(shape=shape, dtype=np.uint8) + def initial_frame_chooser(batch_size): + assert batch_size == 1 + return self._initial_frames + + else: + initial_frame_chooser = rl_utils.make_initial_frame_chooser( + real_env, hparams.frame_stack_size, + simulation_random_starts=random_starts, + simulation_flip_first_random_for_beginning=False + ) + env_fn = make_simulated_env_fn_from_hparams( + real_env, hparams, + batch_size=1, + initial_frame_chooser=initial_frame_chooser, + model_dir=world_model_dir, + ) + + env = env_fn(in_graph=False) + self.env = FlatBatchEnv(env) + + self.observation_space = self.env.observation_space + self.action_space = self.env.action_space + + def reset(self): + return self.env.reset() + + def step(self, action): + return self.env.step(action) + + def add_to_initial_stack(self, frame): + """Adds new frame to (initial) frame stack, removes last one.""" + if not self._setable_initial_frames: + raise ValueError( + "This instance does not allow to manually set initial frame stack.") + assert_msg = "{}, {}".format(frame.shape, self._initial_frames.shape[:1]) + assert frame.shape == self._initial_frames.shape[2:], assert_msg + initial_frames = np.roll(self._initial_frames, shift=-1, axis=1) + initial_frames[0, -1, ...] = frame + self._initial_frames = initial_frames + + +def infer_last_epoch_num(data_dir): + """Infer highest epoch number from file names in data_dir.""" + names = os.listdir(data_dir) + epochs_str = [re.findall(pattern=r".*\.(-?\d+)$", string=name) + for name in names] + epochs_str = sum(epochs_str, []) + return max([int(epoch_str) for epoch_str in epochs_str]) + + +def setup_and_load_epoch(hparams, data_dir, which_epoch_data=None): + """Load T2TGymEnv with data from one epoch. + + Args: + hparams: hparams. + data_dir: data directory. + which_epoch_data: data from which epoch to load. + + Returns: + env. + """ + t2t_env = rl_utils.setup_env( + hparams, batch_size=hparams.real_batch_size, + max_num_noops=hparams.max_num_noops + ) + # Load data. 
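+  # "last" resolves to the highest epoch number that can be inferred from the
+  # filenames in data_dir; passing None skips loading and just starts a fresh
+  # epoch with a dummy id.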
+ if which_epoch_data is not None: + if which_epoch_data == "last": + which_epoch_data = infer_last_epoch_num(data_dir) + assert isinstance(which_epoch_data, int), \ + "{}".format(type(which_epoch_data)) + t2t_env.start_new_epoch(which_epoch_data, data_dir) + else: + t2t_env.start_new_epoch(-999) + return t2t_env + + +def infer_game_name_from_filenames(data_dir, snake_case=True): + """Infer name from filenames.""" + names = os.listdir(data_dir) + game_names = [re.findall(pattern=r"^Gym(.*)NoFrameskip", string=name) + for name in names] + assert game_names, "No data files found in {}".format(data_dir) + game_names = sum(game_names, []) + game_name = game_names[0] + assert all(game_name == other for other in game_names), \ + "There are multiple different game names in {}".format(data_dir) + if snake_case: + game_name = camelcase_to_snakecase(game_name) + return game_name + + +def load_data_and_make_simulated_env( + data_dir, wm_dir, hparams, which_epoch_data="last", random_starts=True, + setable_initial_frames=False +): + hparams = copy.deepcopy(hparams) + t2t_env = setup_and_load_epoch( + hparams, data_dir=data_dir, + which_epoch_data=which_epoch_data) + return SimulatedGymEnv( + t2t_env, world_model_dir=wm_dir, + hparams=hparams, random_starts=random_starts, + setable_initial_frames=setable_initial_frames) + + +class ExtendToEvenDimentions(gym.ObservationWrapper): + """Force even dimentions of both height and width by adding zeros.""" + HW_AXES = (0, 1) + + def __init__(self, env): + gym.ObservationWrapper.__init__(self, env) + + orig_shape = env.observation_space.shape + extended_shape = list(orig_shape) + for axis in self.HW_AXES: + if self.if_odd(orig_shape[axis]): + extended_shape[axis] += 1 + + assert env.observation_space.dtype == np.uint8 + self.observation_space = gym.spaces.Box( + low=0, + high=255, + shape=extended_shape, + dtype=np.uint8) + + def observation(self, frame): + """Add single zero row/column to observation if needed.""" + if frame.shape == self.observation_space.shape: + return frame + else: + extended_frame = np.zeros(self.observation_space.shape, + self.observation_space.dtype) + assert self.HW_AXES == (0, 1) + extended_frame[:frame.shape[0], :frame.shape[1]] = frame + return extended_frame + + def if_odd(self, n): + return n % 2 + + +class RenderObservations(gym.Wrapper): + """Add observations rendering in 'rgb_array' mode.""" + + def __init__(self, env): + super(RenderObservations, self).__init__(env) + if "rgb_array" not in self.metadata["render.modes"]: + self.metadata["render.modes"].append("rgb_array") + + def step(self, action): + ret = self.env.step(action) + self.last_observation = ret[0] + return ret + + def reset(self, **kwargs): + self.last_observation = self.env.reset(**kwargs) + return self.last_observation + + def render(self, mode="human", **kwargs): + assert mode == "rgb_array" + return self.last_observation + + +def wrap_with_monitor(env, video_dir): + """Wrap environment with gym.Monitor. + + Video recording provided by Monitor requires + 1) both height and width of observation to be even numbers. + 2) rendering of environment + + Args: + env: environment. + video_dir: video directory. + + Returns: + wrapped environment. 
+ """ + env = ExtendToEvenDimentions(env) + env = RenderObservations(env) # pylint: disable=redefined-variable-type + env = gym.wrappers.Monitor(env, video_dir, force=True, + video_callable=lambda idx: True, + write_upon_reset=True) + return env + + +def create_simulated_env( + output_dir, grayscale, resize_width_factor, resize_height_factor, + frame_stack_size, generative_model, generative_model_params, + random_starts=True, which_epoch_data="last", **other_hparams +): + """"Create SimulatedEnv with minimal subset of hparams.""" + # We need these, to initialize T2TGymEnv, but these values (hopefully) have + # no effect on player. + a_bit_risky_defaults = { + "game": "pong", # assumes that T2TGymEnv has always reward_range (-1,1) + "real_batch_size": 1, + "rl_env_max_episode_steps": -1, + "max_num_noops": 0 + } + + for key in a_bit_risky_defaults: + if key not in other_hparams: + other_hparams[key] = a_bit_risky_defaults[key] + + hparams = hparam.HParams( + grayscale=grayscale, + resize_width_factor=resize_width_factor, + resize_height_factor=resize_height_factor, + frame_stack_size=frame_stack_size, + generative_model=generative_model, + generative_model_params=generative_model_params, + **other_hparams + ) + return load_data_and_make_simulated_env( + output_dir, wm_dir=None, hparams=hparams, + which_epoch_data=which_epoch_data, + random_starts=random_starts) + + +class PPOPolicyInferencer(object): + """Non-tensorflow API for infering policy (and value function). + + Example: + >>> ppo = PPOPolicyInferencer(...) + >>> ppo.reset_frame_stack() + >>> ob = env.reset() + >>> while not done: + >>> logits, value = ppo.infer(ob) + >>> ob, _, done, _ = env.step(action) + """ + + def __init__(self, hparams, action_space, observation_space, policy_dir): + assert hparams.base_algo == "ppo" + ppo_hparams = trainer_lib.create_hparams(hparams.base_algo_params) + + frame_stack_shape = (1, hparams.frame_stack_size) + observation_space.shape + self._frame_stack = np.zeros(frame_stack_shape, dtype=np.uint8) + + with tf.Graph().as_default(): + self.obs_t = tf.placeholder(shape=self.frame_stack_shape, dtype=np.uint8) + self.logits_t, self.value_function_t = get_policy( + self.obs_t, ppo_hparams, action_space + ) + model_saver = tf.train.Saver( + tf.global_variables(scope=ppo_hparams.policy_network + "/.*") # pylint: disable=unexpected-keyword-arg + ) + self.sess = tf.Session() + self.sess.run(tf.global_variables_initializer()) + trainer_lib.restore_checkpoint(policy_dir, model_saver, + self.sess) + + @property + def frame_stack_shape(self): + return self._frame_stack.shape + + def reset_frame_stack(self, frame_stack=None): + if frame_stack is None: + self._frame_stack.fill(0) + else: + assert frame_stack.shape == self.frame_stack_shape, \ + "{}, {}".format(frame_stack.shape, self.frame_stack_shape) + self._frame_stack = frame_stack.copy() + + def _add_to_stack(self, ob): + stack = np.roll(self._frame_stack, shift=-1, axis=1) + stack[0, -1, ...] = ob + self._frame_stack = stack + + def infer(self, ob): + """Add new observation to frame stack and infer policy. + + Args: + ob: array of shape (height, width, channels) + + Returns: + logits and vf. + """ + self._add_to_stack(ob) + logits, vf = self.infer_from_frame_stack(self._frame_stack) + return logits, vf + + def infer_from_frame_stack(self, ob_stack): + """Infer policy from stack of observations. + + Args: + ob_stack: array of shape (1, frame_stack_size, height, width, channels) + + Returns: + logits and vf. 
+ """ + logits, vf = self.sess.run([self.logits_t, self.value_function_t], + feed_dict={self.obs_t: ob_stack}) + return logits, vf + + +def infer_paths(output_dir, **subdirs): + """Infers standard paths to policy and model directories. + + Example: + >>> infer_paths("/some/output/dir/", policy="", model="custom/path") + {"policy": "/some/output/dir/policy", "model": "custom/path", + "output_dir":"/some/output/dir/"} + + Args: + output_dir: output directory. + **subdirs: sub-directories. + + Returns: + a dictionary with the directories. + """ + directories = {} + for name, path in six.iteritems(subdirs): + directories[name] = path if path else os.path.join(output_dir, name) + directories["output_dir"] = output_dir + return directories diff --git a/tensor2tensor/rl/policy_learner.py b/tensor2tensor/rl/policy_learner.py new file mode 100644 index 000000000..67d037cf9 --- /dev/null +++ b/tensor2tensor/rl/policy_learner.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Unified interface for different RL algorithms.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + + +class PolicyLearner(object): + """API for policy learners.""" + + def __init__( + self, frame_stack_size, base_event_dir, agent_model_dir, total_num_epochs + ): + self.frame_stack_size = frame_stack_size + self.base_event_dir = base_event_dir + self.agent_model_dir = agent_model_dir + self.total_num_epochs = total_num_epochs + + def train( + self, + env_fn, + hparams, + simulated, + save_continuously, + epoch, + sampling_temp=1.0, + num_env_steps=None, + env_step_multiplier=1, + eval_env_fn=None, + report_fn=None + ): + """Train.""" + raise NotImplementedError() + + def evaluate(self, env_fn, hparams, sampling_temp): + raise NotImplementedError() diff --git a/tensor2tensor/rl/ppo.py b/tensor2tensor/rl/ppo.py new file mode 100644 index 000000000..cf21a5b7d --- /dev/null +++ b/tensor2tensor/rl/ppo.py @@ -0,0 +1,257 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""PPO algorithm implementation. 
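+
+The loss optimized here is the sum of a clipped surrogate policy loss, a value
+loss and an entropy regularizer. In standard PPO notation the (maximized)
+policy objective is
+
+  L_clip(theta) = E_t[ min(r_t(theta) * A_t,
+                           clip(r_t(theta), 1 - eps, 1 + eps) * A_t) ],
+
+with probability ratio r_t(theta) = pi_theta(a_t | s_t) / pi_theta_old(a_t | s_t)
+and normalized advantage estimate A_t; the code minimizes its negative.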
+ +Based on: https://arxiv.org/abs/1707.06347 +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.layers import common_layers +from tensor2tensor.models.research.rl import get_policy +from tensor2tensor.utils import learning_rate +from tensor2tensor.utils import optimize + +import tensorflow.compat.v1 as tf +import tensorflow_probability as tfp + + +def define_ppo_step(data_points, hparams, action_space, lr, epoch=-1, + distributional_size=1, distributional_subscale=0.04): + """Define ppo step.""" + del distributional_subscale + (observation, action, discounted_reward, discounted_reward_probs, + norm_advantage, old_pdf) = data_points + + obs_shape = common_layers.shape_list(observation) + observation = tf.reshape( + observation, [obs_shape[0] * obs_shape[1]] + obs_shape[2:] + ) + (logits, new_value) = get_policy(observation, hparams, action_space, + epoch=epoch, + distributional_size=distributional_size) + logits = tf.reshape(logits, obs_shape[:2] + [action_space.n]) + new_policy_dist = tfp.distributions.Categorical(logits=logits) + + new_pdf = new_policy_dist.prob(action) + + ratio = new_pdf / old_pdf + clipped_ratio = tf.clip_by_value(ratio, 1 - hparams.clipping_coef, + 1 + hparams.clipping_coef) + + surrogate_objective = tf.minimum(clipped_ratio * norm_advantage, + ratio * norm_advantage) + policy_loss = -tf.reduce_mean(surrogate_objective) + + if distributional_size > 1: + new_value = tf.reshape(new_value, obs_shape[:2] + [distributional_size]) + new_value = tf.nn.log_softmax(new_value, axis=-1) + value_shape = common_layers.shape_list(new_value) + # The above is the new value distribution. We are also given as discounted + # reward the value distribution and the corresponding probabilities. + # The given discounted reward is already rounded to integers but in range + # increased by 2x for greater fidelity. Increase range of new_values here. + new_value_shifted = tf.concat([new_value[1:], new_value[-1:]], axis=0) + new_value_mean = (new_value + new_value_shifted) / 2 + new_value = tf.concat([tf.expand_dims(new_value, axis=-1), + tf.expand_dims(new_value_mean, axis=-1)], -1) + new_value = tf.reshape(new_value, value_shape[:-1] + [2 * value_shape[-1]]) + # Cast discounted reward to integers and gather the new log-probs for them. + discounted_reward = tf.cast(discounted_reward, tf.int32) + value_loss = tf.batch_gather(new_value, discounted_reward) + # Weight the gathered (new) log-probs by the old probabilities. + discounted_reward_probs = tf.expand_dims(discounted_reward_probs, axis=1) + value_loss = - tf.reduce_sum(value_loss * discounted_reward_probs, axis=-1) + # Take the mean over batch and time as final loss, multiply by coefficient. 
+ value_loss = hparams.value_loss_coef * tf.reduce_mean(value_loss) + else: + new_value = tf.reshape(new_value, obs_shape[:2]) + value_error = new_value - discounted_reward + value_loss = hparams.value_loss_coef * tf.reduce_mean(value_error ** 2) + + entropy = new_policy_dist.entropy() + entropy_loss = -hparams.entropy_loss_coef * tf.reduce_mean(entropy) + + losses = [policy_loss, value_loss, entropy_loss] + loss = sum(losses) + variables = tf.global_variables(hparams.policy_network + "/.*") + train_op = optimize.optimize(loss, lr, hparams, variables=variables) + + with tf.control_dependencies([train_op]): + return [tf.identity(x) for x in losses] + + +def _distributional_to_value(value_d, size, subscale, threshold): + """Get a scalar value out of a value distribution in distributional RL.""" + half = size // 2 + value_range = (tf.to_float(tf.range(-half, half)) + 0.5) * subscale + probs = tf.nn.softmax(value_d) + + if threshold == 0.0: + return tf.reduce_sum(probs * value_range, axis=-1) + + # accumulated_probs[..., i] is the sum of probabilities in buckets upto i + # so it is the probability that value <= i'th bucket value + accumulated_probs = tf.cumsum(probs, axis=-1) + # New probs are 0 on all lower buckets, until the threshold + probs = tf.where(accumulated_probs < threshold, tf.zeros_like(probs), probs) + probs /= tf.reduce_sum(probs, axis=-1, keepdims=True) # Re-normalize. + return tf.reduce_sum(probs * value_range, axis=-1) + + +def define_ppo_epoch(memory, hparams, action_space, batch_size, + distributional_size=1, distributional_subscale=0.04, + distributional_threshold=0.0, epoch=-1): + """PPO epoch.""" + observation, reward, done, action, old_pdf, value_sm = memory + + # This is to avoid propagating gradients through simulated environment. + observation = tf.stop_gradient(observation) + action = tf.stop_gradient(action) + reward = tf.stop_gradient(reward) + if hasattr(hparams, "rewards_preprocessing_fun"): + reward = hparams.rewards_preprocessing_fun(reward) + done = tf.stop_gradient(done) + value_sm = tf.stop_gradient(value_sm) + old_pdf = tf.stop_gradient(old_pdf) + + value = value_sm + if distributional_size > 1: + value = _distributional_to_value( + value_sm, distributional_size, distributional_subscale, + distributional_threshold) + + advantage = calculate_generalized_advantage_estimator( + reward, value, done, hparams.gae_gamma, hparams.gae_lambda) + + if distributional_size > 1: + # Create discounted reward values range. + half = distributional_size // 2 + value_range = tf.to_float(tf.range(-half, half)) + 0.5 # Mid-bucket value. + value_range *= distributional_subscale + # Acquire new discounted rewards by using the above range as end-values. + end_values = tf.expand_dims(value_range, 0) + discounted_reward = discounted_rewards( + reward, done, hparams.gae_gamma, end_values) + # Re-normalize the discounted rewards to integers, in [0, dist_size] range. + discounted_reward /= distributional_subscale + discounted_reward += half + discounted_reward = tf.maximum(discounted_reward, 0.0) + discounted_reward = tf.minimum(discounted_reward, distributional_size) + # Multiply the rewards by 2 for greater fidelity and round to integers. + discounted_reward = tf.stop_gradient(tf.round(2 * discounted_reward)) + # The probabilities corresponding to the end values from old predictions. 
+ discounted_reward_prob = tf.stop_gradient(value_sm[-1]) + discounted_reward_prob = tf.nn.softmax(discounted_reward_prob, axis=-1) + else: + discounted_reward = tf.stop_gradient(advantage + value[:-1]) + discounted_reward_prob = discounted_reward # Unused in this case. + + advantage_mean, advantage_variance = tf.nn.moments(advantage, axes=[0, 1], + keep_dims=True) + advantage_normalized = tf.stop_gradient( + (advantage - advantage_mean)/(tf.sqrt(advantage_variance) + 1e-8)) + + add_lists_elementwise = lambda l1, l2: [x + y for x, y in zip(l1, l2)] + + number_of_batches = ((hparams.epoch_length-1) * hparams.optimization_epochs + // hparams.optimization_batch_size) + epoch_length = hparams.epoch_length + if hparams.effective_num_agents is not None: + number_of_batches *= batch_size + number_of_batches //= hparams.effective_num_agents + epoch_length //= hparams.effective_num_agents + + assert number_of_batches > 0, "Set the paremeters so that number_of_batches>0" + lr = learning_rate.learning_rate_schedule(hparams) + + shuffled_indices = [tf.random.shuffle(tf.range(epoch_length - 1)) + for _ in range(hparams.optimization_epochs)] + shuffled_indices = tf.concat(shuffled_indices, axis=0) + shuffled_indices = shuffled_indices[:number_of_batches * + hparams.optimization_batch_size] + indices_of_batches = tf.reshape(shuffled_indices, + shape=(-1, hparams.optimization_batch_size)) + input_tensors = [observation, action, discounted_reward, + discounted_reward_prob, advantage_normalized, old_pdf] + + ppo_step_rets = tf.scan( + lambda a, i: add_lists_elementwise( # pylint: disable=g-long-lambda + a, define_ppo_step( + [tf.gather(t, indices_of_batches[i, :]) for t in input_tensors], + hparams, action_space, lr, + epoch=epoch, + distributional_size=distributional_size, + distributional_subscale=distributional_subscale + )), + tf.range(number_of_batches), + [0., 0., 0.], + parallel_iterations=1) + + ppo_summaries = [tf.reduce_mean(ret) / number_of_batches + for ret in ppo_step_rets] + ppo_summaries.append(lr) + summaries_names = [ + "policy_loss", "value_loss", "entropy_loss", "learning_rate" + ] + + summaries = [tf.summary.scalar(summary_name, summary) + for summary_name, summary in zip(summaries_names, ppo_summaries)] + losses_summary = tf.summary.merge(summaries) + + for summary_name, summary in zip(summaries_names, ppo_summaries): + losses_summary = tf.Print(losses_summary, [summary], summary_name + ": ") + + return losses_summary + + +def calculate_generalized_advantage_estimator( + reward, value, done, gae_gamma, gae_lambda): + # pylint: disable=g-doc-args + """Generalized advantage estimator. + + Returns: + GAE estimator. It will be one element shorter than the input; this is + because to compute GAE for [0, ..., N-1] one needs V for [1, ..., N]. 
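+
+  In equation form (standard GAE), with
+  delta_t = r_t + gamma * V(s_{t+1}) - V(s_t):
+
+    A_t = delta_t + gamma * lambda * A_{t+1},
+
+  computed backwards in time, with the recursion cut (via `done`) at episode
+  boundaries so that neither the bootstrap value nor future advantages leak
+  across episodes.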
+ """ + # pylint: enable=g-doc-args + + next_value = value[1:, :] + next_not_done = 1 - tf.cast(done[1:, :], tf.float32) + delta = (reward[:-1, :] + gae_gamma * next_value * next_not_done + - value[:-1, :]) + + return_ = tf.reverse(tf.scan( + lambda agg, cur: cur[0] + cur[1] * gae_gamma * gae_lambda * agg, + [tf.reverse(delta, [0]), tf.reverse(next_not_done, [0])], + tf.zeros_like(delta[0, :]), + parallel_iterations=1), [0]) + return tf.check_numerics(return_, "return") + + +def discounted_rewards(reward, done, gae_gamma, end_values): + """Discounted rewards.""" + not_done = tf.expand_dims(1 - tf.cast(done, tf.float32), axis=2) + end_values = end_values * not_done[-1, :, :] + return_ = tf.scan( + lambda agg, cur: cur + gae_gamma * agg, + tf.expand_dims(reward, axis=2) * not_done, + initializer=end_values, + reverse=True, + back_prop=False, + parallel_iterations=2) + return tf.check_numerics(return_, "return") diff --git a/tensor2tensor/rl/ppo_learner.py b/tensor2tensor/rl/ppo_learner.py new file mode 100644 index 000000000..fb963232c --- /dev/null +++ b/tensor2tensor/rl/ppo_learner.py @@ -0,0 +1,547 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""PPO learner.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import math +import os + +from tensor2tensor.layers import common_layers +from tensor2tensor.models.research.rl import get_policy +from tensor2tensor.rl import ppo +from tensor2tensor.rl.envs.tf_atari_wrappers import StackWrapper +from tensor2tensor.rl.envs.tf_atari_wrappers import WrapperBase +from tensor2tensor.rl.policy_learner import PolicyLearner +from tensor2tensor.rl.restarter import Restarter +from tensor2tensor.utils import trainer_lib + +import tensorflow.compat.v1 as tf +import tensorflow_probability as tfp + + +class PPOLearner(PolicyLearner): + """PPO for policy learning.""" + + def __init__(self, frame_stack_size, base_event_dir, agent_model_dir, + total_num_epochs, **kwargs): + super(PPOLearner, self).__init__( + frame_stack_size, base_event_dir, agent_model_dir, total_num_epochs) + self._num_completed_iterations = 0 + self._lr_decay_start = None + self._distributional_size = kwargs.get("distributional_size", 1) + self._distributional_subscale = kwargs.get("distributional_subscale", 0.04) + self._distributional_threshold = kwargs.get("distributional_threshold", 0.0) + + def train(self, + env_fn, + hparams, + simulated, + save_continuously, + epoch, + sampling_temp=1.0, + num_env_steps=None, + env_step_multiplier=1, + eval_env_fn=None, + report_fn=None, + model_save_fn=None): + assert sampling_temp == 1.0 or hparams.learning_rate == 0.0, \ + "Sampling with non-1 temperature does not make sense during training." + + if not save_continuously: + # We do not save model, as that resets frames that we need at restarts. + # But we need to save at the last step, so we set it very high. 
+ hparams.save_models_every_epochs = 1000000 + + if simulated: + simulated_str = "sim" + else: + simulated_str = "real" + name_scope = "ppo_{}{}".format(simulated_str, epoch + 1) + event_dir = os.path.join(self.base_event_dir, "ppo_summaries", + str(epoch) + simulated_str) + + with tf.Graph().as_default(): + with tf.name_scope(name_scope): + with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): + env = env_fn(in_graph=True) + (train_summary_op, eval_summary_op, initializers) = ( + _define_train( + env, + hparams, + eval_env_fn, + sampling_temp, + distributional_size=self._distributional_size, + distributional_subscale=self._distributional_subscale, + distributional_threshold=self._distributional_threshold, + epoch=epoch if simulated else -1, + frame_stack_size=self.frame_stack_size, + force_beginning_resets=simulated)) + + if num_env_steps is None: + iteration_increment = hparams.epochs_num + else: + iteration_increment = int( + math.ceil( + num_env_steps / (env.batch_size * hparams.epoch_length))) + iteration_increment *= env_step_multiplier + + self._num_completed_iterations += iteration_increment + + restarter = Restarter( + "policy", self.agent_model_dir, self._num_completed_iterations + ) + if restarter.should_skip: + return + + if hparams.lr_decay_in_final_epoch: + if epoch != self.total_num_epochs - 1: + # Extend the warmup period to the end of this epoch. + hparams.learning_rate_warmup_steps = restarter.target_global_step + else: + if self._lr_decay_start is None: + # Stop the warmup at the beginning of this epoch. + self._lr_decay_start = \ + restarter.target_global_step - iteration_increment + hparams.learning_rate_warmup_steps = self._lr_decay_start + + _run_train( + hparams, + event_dir, + self.agent_model_dir, + restarter, + train_summary_op, + eval_summary_op, + initializers, + epoch, + report_fn=report_fn, + model_save_fn=model_save_fn) + + def evaluate(self, env_fn, hparams, sampling_temp): + with tf.Graph().as_default(): + with tf.name_scope("rl_eval"): + eval_env = env_fn(in_graph=True) + (collect_memory, _, collect_init) = _define_collect( + eval_env, + hparams, + "ppo_eval", + eval_phase=True, + frame_stack_size=self.frame_stack_size, + force_beginning_resets=False, + sampling_temp=sampling_temp, + distributional_size=self._distributional_size, + ) + model_saver = tf.train.Saver( + tf.global_variables(hparams.policy_network + "/.*") + # tf.global_variables("clean_scope.*") # Needed for sharing params. 
+ ) + + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + collect_init(sess) + trainer_lib.restore_checkpoint(self.agent_model_dir, model_saver, + sess) + sess.run(collect_memory) + + +def _define_train( + train_env, + ppo_hparams, + eval_env_fn=None, + sampling_temp=1.0, + distributional_size=1, + distributional_subscale=0.04, + distributional_threshold=0.0, + epoch=-1, + **collect_kwargs +): + """Define the training setup.""" + memory, collect_summary, train_initialization = ( + _define_collect( + train_env, + ppo_hparams, + "ppo_train", + eval_phase=False, + sampling_temp=sampling_temp, + distributional_size=distributional_size, + **collect_kwargs)) + ppo_summary = ppo.define_ppo_epoch( + memory, ppo_hparams, train_env.action_space, train_env.batch_size, + distributional_size=distributional_size, + distributional_subscale=distributional_subscale, + distributional_threshold=distributional_threshold, + epoch=epoch) + train_summary = tf.summary.merge([collect_summary, ppo_summary]) + + if ppo_hparams.eval_every_epochs: + # TODO(koz4k): Do we need this at all? + assert eval_env_fn is not None + eval_env = eval_env_fn(in_graph=True) + (_, eval_collect_summary, eval_initialization) = ( + _define_collect( + eval_env, + ppo_hparams, + "ppo_eval", + eval_phase=True, + sampling_temp=0.0, + distributional_size=distributional_size, + **collect_kwargs)) + return (train_summary, eval_collect_summary, (train_initialization, + eval_initialization)) + else: + return (train_summary, None, (train_initialization,)) + + +def _run_train(ppo_hparams, + event_dir, + model_dir, + restarter, + train_summary_op, + eval_summary_op, + initializers, + epoch, + report_fn=None, + model_save_fn=None): + """Train.""" + summary_writer = tf.summary.FileWriter( + event_dir, graph=tf.get_default_graph(), flush_secs=60) + + model_saver = tf.train.Saver( + tf.global_variables(ppo_hparams.policy_network + "/.*") + + tf.global_variables("training/" + ppo_hparams.policy_network + "/.*") + + # tf.global_variables("clean_scope.*") + # Needed for sharing params. 
+ tf.global_variables("global_step") + + tf.global_variables("losses_avg.*") + + tf.global_variables("train_stats.*") + ) + + global_step = tf.train.get_or_create_global_step() + with tf.control_dependencies([tf.assign_add(global_step, 1)]): + train_summary_op = tf.identity(train_summary_op) + + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + for initializer in initializers: + initializer(sess) + trainer_lib.restore_checkpoint(model_dir, model_saver, sess) + + num_target_iterations = restarter.target_local_step + num_completed_iterations = num_target_iterations - restarter.steps_to_go + with restarter.training_loop(): + for epoch_index in range(num_completed_iterations, num_target_iterations): + summary = sess.run(train_summary_op) + if summary_writer: + summary_writer.add_summary(summary, epoch_index) + + if (ppo_hparams.eval_every_epochs and + epoch_index % ppo_hparams.eval_every_epochs == 0): + eval_summary = sess.run(eval_summary_op) + if summary_writer: + summary_writer.add_summary(eval_summary, epoch_index) + if report_fn: + summary_proto = tf.Summary() + summary_proto.ParseFromString(eval_summary) + for elem in summary_proto.value: + if "mean_score" in elem.tag: + report_fn(elem.simple_value, epoch_index) + break + + if (model_saver and ppo_hparams.save_models_every_epochs and + (epoch_index % ppo_hparams.save_models_every_epochs == 0 or + (epoch_index + 1) == num_target_iterations)): + ckpt_name = "model.ckpt-{}".format( + tf.train.global_step(sess, global_step) + ) + # Keep the last checkpoint from each epoch in a separate directory. + epoch_dir = os.path.join(model_dir, "epoch_{}".format(epoch)) + tf.gfile.MakeDirs(epoch_dir) + for ckpt_dir in (model_dir, epoch_dir): + model_saver.save(sess, os.path.join(ckpt_dir, ckpt_name)) + if model_save_fn: + model_save_fn(model_dir) + + +def _rollout_metadata(batch_env, distributional_size=1): + """Metadata for rollouts.""" + batch_env_shape = batch_env.observ.get_shape().as_list() + batch_size = [batch_env_shape[0]] + value_size = batch_size + if distributional_size > 1: + value_size = batch_size + [distributional_size] + shapes_types_names = [ + # TODO(piotrmilos): possibly retrieve the observation type for batch_env + (batch_size + batch_env_shape[1:], batch_env.observ_dtype, "observation"), + (batch_size, tf.float32, "reward"), + (batch_size, tf.bool, "done"), + (batch_size + list(batch_env.action_shape), batch_env.action_dtype, + "action"), + (batch_size, tf.float32, "pdf"), + (value_size, tf.float32, "value_function"), + ] + return shapes_types_names + + +class _MemoryWrapper(WrapperBase): + """Memory wrapper.""" + + def __init__(self, batch_env): + super(_MemoryWrapper, self).__init__(batch_env) + infinity = 10000000 + meta_data = list(zip(*_rollout_metadata(batch_env))) + # In memory wrapper we do not collect pdfs neither value_function + # thus we only need the first 4 entries of meta_data + shapes = meta_data[0][:4] + dtypes = meta_data[1][:4] + self.speculum = tf.FIFOQueue(infinity, shapes=shapes, dtypes=dtypes) + observs_shape = batch_env.observ.shape + # TODO(piotrmilos): possibly retrieve the observation type for batch_env + self._observ = tf.Variable( + tf.zeros(observs_shape, self.observ_dtype), trainable=False) + + def __str__(self): + return "MemoryWrapper(%s)" % str(self._batch_env) + + def simulate(self, action): + + # There is subtlety here. 
We need to collect data + # obs, action = policy(obs), done, reward = env(abs, action) + # Thus we need to enqueue data before assigning new observation + + reward, done = self._batch_env.simulate(action) + + with tf.control_dependencies([reward, done]): + enqueue_op = self.speculum.enqueue( + [self._observ.read_value(), reward, done, action]) + + with tf.control_dependencies([enqueue_op]): + assign = self._observ.assign(self._batch_env.observ) + + with tf.control_dependencies([assign]): + return tf.identity(reward), tf.identity(done) + + +def _define_collect(batch_env, ppo_hparams, scope, frame_stack_size, eval_phase, + sampling_temp, force_beginning_resets, + distributional_size=1): + """Collect trajectories. + + Args: + batch_env: Batch environment. + ppo_hparams: PPO hparams, defined in tensor2tensor.models.research.rl. + scope: var scope. + frame_stack_size: Number of last observations to feed into the policy. + eval_phase: TODO(koz4k): Write docstring. + sampling_temp: Sampling temperature for the policy. + force_beginning_resets: Whether to reset at the beginning of each episode. + distributional_size: optional, number of buckets in distributional RL. + + Returns: + Returns memory (observations, rewards, dones, actions, + pdfs, values_functions) + containing a rollout of environment from nested wrapped structure. + """ + epoch_length = ppo_hparams.epoch_length + + to_initialize = [] + with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): + num_agents = batch_env.batch_size + + to_initialize.append(batch_env) + wrappers = [(StackWrapper, { + "history": frame_stack_size + }), (_MemoryWrapper, {})] + rollout_metadata = None + speculum = None + for w in wrappers: + tf.logging.info("Applying wrapper %s(%s) to env %s." % (str( + w[0]), str(w[1]), str(batch_env))) + batch_env = w[0](batch_env, **w[1]) + to_initialize.append(batch_env) + + rollout_metadata = _rollout_metadata(batch_env, distributional_size) + speculum = batch_env.speculum + + def initialization_lambda(sess): + for batch_env in to_initialize: + batch_env.initialize(sess) + + memory = [ + tf.get_variable( # pylint: disable=g-complex-comprehension + "collect_memory_%d_%s" % (epoch_length, name), + shape=[epoch_length] + shape, + dtype=dtype, + initializer=tf.zeros_initializer(), + trainable=False) for (shape, dtype, name) in rollout_metadata + ] + + cumulative_rewards = tf.get_variable( + "cumulative_rewards", len(batch_env), trainable=False) + + eval_phase_t = tf.convert_to_tensor(eval_phase) + should_reset_var = tf.Variable(True, trainable=False) + zeros_tensor = tf.zeros(len(batch_env)) + + force_beginning_resets = tf.convert_to_tensor(force_beginning_resets) + + def reset_ops_group(): + return tf.group( + batch_env.reset(tf.range(len(batch_env))), + tf.assign(cumulative_rewards, zeros_tensor)) + + reset_op = tf.cond( + tf.logical_or(should_reset_var.read_value(), force_beginning_resets), + reset_ops_group, tf.no_op) + + with tf.control_dependencies([reset_op]): + reset_once_op = tf.assign(should_reset_var, False) + + with tf.control_dependencies([reset_once_op]): + + def step(index, scores_sum, scores_num): + """Single step.""" + index %= epoch_length # Only needed in eval runs. + # Note - the only way to ensure making a copy of tensor is to run simple + # operation. 
We are waiting for tf.copy: + # https://github.com/tensorflow/tensorflow/issues/11186 + obs_copy = batch_env.observ + 0 + value_fun_shape = (num_agents,) + if distributional_size > 1: + value_fun_shape = (num_agents, distributional_size) + + def env_step(arg1, arg2, arg3): # pylint: disable=unused-argument + """Step of the environment.""" + + (logits, value_function) = get_policy( + obs_copy, ppo_hparams, batch_env.action_space, distributional_size + ) + action = common_layers.sample_with_temperature(logits, sampling_temp) + action = tf.cast(action, tf.int32) + action = tf.reshape(action, shape=(num_agents,)) + + reward, done = batch_env.simulate(action) + + pdf = tfp.distributions.Categorical(logits=logits).prob(action) + pdf = tf.reshape(pdf, shape=(num_agents,)) + value_function = tf.reshape(value_function, shape=value_fun_shape) + done = tf.reshape(done, shape=(num_agents,)) + + with tf.control_dependencies([reward, done]): + return tf.identity(pdf), tf.identity(value_function), \ + tf.identity(done) + + # TODO(piotrmilos): while_body is executed at most once, + # thus should be replaced with tf.cond + pdf, value_function, top_level_done = tf.while_loop( + lambda _1, _2, _3: tf.equal(speculum.size(), 0), + env_step, + [ + tf.constant(0.0, shape=(num_agents,)), + tf.constant(0.0, shape=value_fun_shape), + tf.constant(False, shape=(num_agents,)) + ], + parallel_iterations=1, + back_prop=False, + ) + + with tf.control_dependencies([pdf, value_function]): + obs, reward, done, action = speculum.dequeue() + to_save = [obs, reward, done, action, pdf, value_function] + save_ops = [ + tf.scatter_update(memory_slot, index, value) + for memory_slot, value in zip(memory, to_save) + ] + cumulate_rewards_op = cumulative_rewards.assign_add(reward) + + agent_indices_to_reset = tf.where(top_level_done)[:, 0] + with tf.control_dependencies([cumulate_rewards_op]): + # TODO(piotrmilos): possibly we need cumulative_rewards.read_value() + scores_sum_delta = tf.reduce_sum( + tf.gather(cumulative_rewards.read_value(), agent_indices_to_reset)) + scores_num_delta = tf.count_nonzero(done, dtype=tf.int32) + with tf.control_dependencies(save_ops + + [scores_sum_delta, scores_num_delta]): + reset_env_op = batch_env.reset(agent_indices_to_reset) + reset_cumulative_rewards_op = tf.scatter_update( + cumulative_rewards, agent_indices_to_reset, + tf.gather(zeros_tensor, agent_indices_to_reset)) + with tf.control_dependencies([reset_env_op, reset_cumulative_rewards_op]): + return [ + index + 1, scores_sum + scores_sum_delta, + scores_num + scores_num_delta + ] + + def stop_condition(i, _, resets): + return tf.cond(eval_phase_t, lambda: resets < num_agents, + lambda: i < epoch_length) + + init = [tf.constant(0), tf.constant(0.0), tf.constant(0)] + index, scores_sum, scores_num = tf.while_loop( + stop_condition, step, init, parallel_iterations=1, back_prop=False) + + # We handle force_beginning_resets differently. We assume that all envs are + # reseted at the end of episod (though it happens at the beginning of the + # next one + scores_num = tf.cond(force_beginning_resets, + lambda: scores_num + len(batch_env), lambda: scores_num) + + with tf.control_dependencies([scores_sum]): + scores_sum = tf.cond( + force_beginning_resets, + lambda: scores_sum + tf.reduce_sum(cumulative_rewards.read_value()), + lambda: scores_sum) + + mean_score = tf.cond( + tf.greater(scores_num, 0), + lambda: scores_sum / tf.cast(scores_num, tf.float32), lambda: 0.) 
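+    # Note: tf.Print is used rather than Python-side logging because the
+    # score tensors only receive values when the collect graph is executed;
+    # the message is emitted as a side effect of evaluating the returned ops.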
+ printing = tf.Print(0, [mean_score, scores_sum, scores_num], "mean_score: ") + with tf.control_dependencies([index, printing]): + memory = [mem.read_value() for mem in memory] + # When generating real data together with PPO training we must use single + # agent. For PPO to work we reshape the history, as if it was generated + # by real_ppo_effective_num_agents. + if ppo_hparams.effective_num_agents is not None and not eval_phase: + new_memory = [] + effective_num_agents = ppo_hparams.effective_num_agents + assert epoch_length % ppo_hparams.effective_num_agents == 0, ( + "The rollout of ppo_hparams.epoch_length will be distributed amongst" + "effective_num_agents of agents") + new_epoch_length = int(epoch_length / effective_num_agents) + for mem, info in zip(memory, rollout_metadata): + shape, _, name = info + new_shape = [effective_num_agents, new_epoch_length] + shape[1:] + perm = list(range(len(shape) + 1)) + perm[0] = 1 + perm[1] = 0 + mem = tf.transpose(mem, perm=perm) + mem = tf.reshape(mem, shape=new_shape) + mem = tf.transpose( + mem, + perm=perm, + name="collect_memory_%d_%s" % (new_epoch_length, name)) + new_memory.append(mem) + memory = new_memory + + with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): + mean_score_summary = tf.cond( + tf.greater(scores_num, 0), + lambda: tf.summary.scalar("mean_score_this_iter", mean_score), str) + summaries = tf.summary.merge([ + mean_score_summary, + tf.summary.scalar("episodes_finished_this_iter", scores_num) + ]) + return memory, summaries, initialization_lambda diff --git a/tensor2tensor/rl/restarter.py b/tensor2tensor/rl/restarter.py new file mode 100644 index 000000000..88b2e6312 --- /dev/null +++ b/tensor2tensor/rl/restarter.py @@ -0,0 +1,122 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Training restarter.""" + +import contextlib +import os + +import tensorflow.compat.v1 as tf + + +class Restarter(object): + """Handles training restarts. + + Particularly useful when sharing parameters (and checkpoints) between models. + + Args: + model_mode (str): Model "mode". Different modes have different local step + counters, but the same global step counter. Also used in log messages. + checkpoint_dir (str): Model checkpoint directory. Global step is inferred + from the name of the last checkpoint. + target_local_step (int): Local step to train the model up to. + + Attributes: + model_mode (str): See args. + checkpoint_dir (str): See args. + target_local_step (int): See args. + target_global_step (int): Calculated global step to train the model up to. + should_skip (bool): Whether training should be skipped because the number of + local steps already done is higher than the target. This happens during + restarts. + steps_to_go: how many steps to go. + restarting (bool): Whether the current epoch of training has been + interrupted and is being restarted. 
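+
+  Example (illustrative; `run_training` stands for the caller's own loop):
+
+    restarter = Restarter("policy", checkpoint_dir, target_local_step=100)
+    if not restarter.should_skip:
+      with restarter.training_loop():
+        run_training(num_steps=restarter.steps_to_go)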
+ """ + + def __init__(self, model_mode, checkpoint_dir, target_local_step): + self.model_mode = model_mode + self.checkpoint_dir = checkpoint_dir + self.target_local_step = target_local_step + self.target_global_step = None + self.should_skip = False + self.restarting = False + + self._counter_path = os.path.join( + checkpoint_dir, "{}_step_counter".format(model_mode) + ) + + self._global_step = self._get_global_step() + tf.logging.info( + "Will load %s checkpoint %d", self.model_mode, self._global_step + ) + + (self._local_step_at_start, global_step_at_start) = self._read_counters() + + self.steps_to_go = target_local_step - self._local_step_at_start + if self.steps_to_go <= 0: + tf.logging.info( + "Skipping training %s, requested %d steps, already done %d", + self.model_mode, target_local_step, self._local_step_at_start + ) + self.should_skip = True + return + + if global_step_at_start != -1: + # Restart. + steps_done_this_epoch = self._global_step - global_step_at_start + self.steps_to_go -= steps_done_this_epoch + tf.logging.info( + "Restarting training %s, %d steps already done this epoch", + self.model_mode, steps_done_this_epoch + ) + self.restarting = True + + self.target_global_step = self._global_step + self.steps_to_go + + @contextlib.contextmanager + def training_loop(self): + """Context manager wrapping the training loop, updates step counters.""" + if not self.restarting: + self._write_counters(self._local_step_at_start, self._global_step) + + tf.logging.info( + "Training %s up to %d, %d to go", self.model_mode, + self.target_local_step, self.steps_to_go + ) + + yield + + self._write_counters(self.target_local_step, -1) + + def _get_global_step(self): + checkpoint = tf.train.latest_checkpoint(self.checkpoint_dir) + if checkpoint: + return int(checkpoint.split("-")[-1]) + else: + return 0 + + def _read_counters(self): + try: + with tf.gfile.Open(self._counter_path, "r") as f: + return tuple( + int(counter) for counter in f.read().split(" ") + ) + except tf.errors.NotFoundError: + return (0, -1) + + def _write_counters(self, local_step, global_step): + with tf.gfile.Open(self._counter_path, "w") as f: + f.write("{} {}".format(local_step, global_step)) diff --git a/tensor2tensor/rl/restarter_test.py b/tensor2tensor/rl/restarter_test.py new file mode 100644 index 000000000..5e0826c23 --- /dev/null +++ b/tensor2tensor/rl/restarter_test.py @@ -0,0 +1,124 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for rl_utils.""" + +import os + +from tensor2tensor.rl.restarter import Restarter + +import tensorflow.compat.v1 as tf + + +TEST_MODE_1 = "mode1" +TEST_MODE_2 = "mode2" +TEST_NUM_STEPS = 2 + + +class RestarterTest(tf.test.TestCase): + + def setUp(self): + self.out_dir = tf.test.get_temp_dir() + tf.gfile.DeleteRecursively(self.out_dir) + tf.gfile.MkDir(self.out_dir) + + def create_checkpoint(self, global_step): + checkpoint_name = "model.ckpt-{}".format(global_step) + for suffix in ("index", "meta", "data-00000-of-00001"): + filename = "{}.{}".format(checkpoint_name, suffix) + # Just create the file. + with tf.gfile.Open(os.path.join(self.out_dir, filename), "w") as f: + f.write("") + tf.train.update_checkpoint_state(self.out_dir, checkpoint_name) + + def run_single_mode(self, mode, target_local_step, target_global_step): + restarter = Restarter(mode, self.out_dir, target_local_step) + with restarter.training_loop(): + self.create_checkpoint(target_global_step) + + def assert_first_run(self, restarter, steps_to_go, target_global_step): + self.assertFalse(restarter.should_skip) + self.assertFalse(restarter.restarting) + self.assertEqual(restarter.steps_to_go, steps_to_go) + self.assertEqual(restarter.target_global_step, target_global_step) + + def test_runs_in_single_mode(self): + restarter = Restarter( + TEST_MODE_1, self.out_dir, target_local_step=TEST_NUM_STEPS + ) + self.assert_first_run( + restarter, steps_to_go=TEST_NUM_STEPS, target_global_step=TEST_NUM_STEPS + ) + + def test_runs_in_two_modes(self): + global_step = TEST_NUM_STEPS + local_steps = { + TEST_MODE_1: TEST_NUM_STEPS, + TEST_MODE_2: 0 + } + self.run_single_mode(TEST_MODE_1, local_steps[TEST_MODE_1], global_step) + + for mode in [TEST_MODE_2, TEST_MODE_1]: + global_step += TEST_NUM_STEPS + local_steps[mode] += TEST_NUM_STEPS + restarter = Restarter( + mode, self.out_dir, target_local_step=local_steps[mode] + ) + self.assert_first_run( + restarter, steps_to_go=TEST_NUM_STEPS, target_global_step=global_step + ) + with restarter.training_loop(): + self.create_checkpoint(global_step) + + def test_skips_already_done(self): + self.run_single_mode( + TEST_MODE_1, target_local_step=TEST_NUM_STEPS, + target_global_step=TEST_NUM_STEPS + ) + + restarter = Restarter( + TEST_MODE_1, self.out_dir, target_local_step=TEST_NUM_STEPS + ) + # We should skip the training as those steps are already completed. + self.assertTrue(restarter.should_skip) + + def test_restarts_after_interruption(self): + # Run some initial training first. + self.run_single_mode( + TEST_MODE_1, target_local_step=TEST_NUM_STEPS, + target_global_step=TEST_NUM_STEPS + ) + global_step = TEST_NUM_STEPS + + restarter = Restarter( + TEST_MODE_2, self.out_dir, target_local_step=2 + ) + with self.assertRaises(RuntimeError): + global_step += 1 + with restarter.training_loop(): + self.create_checkpoint(global_step) + # Simulate training interruption after the first step. + raise RuntimeError + restarter = Restarter( + TEST_MODE_2, self.out_dir, target_local_step=2 + ) + + self.assertFalse(restarter.should_skip) + self.assertTrue(restarter.restarting) + # Training should resume after the first step. + self.assertEqual(restarter.steps_to_go, 1) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/rl/rl_utils.py b/tensor2tensor/rl/rl_utils.py new file mode 100644 index 000000000..082b806bd --- /dev/null +++ b/tensor2tensor/rl/rl_utils.py @@ -0,0 +1,862 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for RL training.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy +import math +import random + +from gym.spaces import Box +import numpy as np +import six + +from tensor2tensor.data_generators.gym_env import T2TGymEnv +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import common_video +from tensor2tensor.models.research import rl +from tensor2tensor.rl.dopamine_connector import DQNLearner +from tensor2tensor.rl.envs.simulated_batch_env import PIL_Image +from tensor2tensor.rl.envs.simulated_batch_env import PIL_ImageDraw +from tensor2tensor.rl.envs.simulated_batch_gym_env import SimulatedBatchGymEnv +from tensor2tensor.rl.ppo_learner import PPOLearner +from tensor2tensor.utils import misc_utils +from tensor2tensor.utils import trainer_lib + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +def compute_mean_reward(rollouts, clipped): + """Calculate mean rewards from given epoch.""" + reward_name = "reward" if clipped else "unclipped_reward" + rewards = [] + for rollout in rollouts: + if rollout[-1].done: + rollout_reward = sum(getattr(frame, reward_name) for frame in rollout) + rewards.append(rollout_reward) + if rewards: + mean_rewards = np.mean(rewards) + else: + mean_rewards = 0 + return mean_rewards + + +def get_metric_name(sampling_temp, max_num_noops, clipped): + return "mean_reward/eval/sampling_temp_{}_max_noops_{}_{}".format( + sampling_temp, max_num_noops, "clipped" if clipped else "unclipped" + ) + + +def _eval_fn_with_learner( + env, hparams, policy_hparams, policy_dir, sampling_temp +): + env_fn = rl.make_real_env_fn(env) + learner = LEARNERS[hparams.base_algo]( + hparams.frame_stack_size, base_event_dir=None, + agent_model_dir=policy_dir, total_num_epochs=1 + ) + learner.evaluate(env_fn, policy_hparams, sampling_temp) + + +def evaluate_single_config( + hparams, sampling_temp, max_num_noops, agent_model_dir, + eval_fn=_eval_fn_with_learner +): + """Evaluate the PPO agent in the real environment.""" + tf.logging.info("Evaluating metric %s", get_metric_name( + sampling_temp, max_num_noops, clipped=False + )) + eval_hparams = trainer_lib.create_hparams(hparams.base_algo_params) + env = setup_env( + hparams, batch_size=hparams.eval_batch_size, max_num_noops=max_num_noops, + rl_env_max_episode_steps=hparams.eval_rl_env_max_episode_steps, + env_name=hparams.rl_env_name) + env.start_new_epoch(0) + eval_fn(env, hparams, eval_hparams, agent_model_dir, sampling_temp) + rollouts = env.current_epoch_rollouts() + env.close() + + return tuple( + compute_mean_reward(rollouts, clipped) for clipped in (True, False) + ) + + +def evaluate_all_configs( + hparams, agent_model_dir, eval_fn=_eval_fn_with_learner +): + """Evaluate the agent with multiple eval configurations.""" + metrics = {} + # Iterate over all combinations of sampling temperatures and whether to do + # initial 
no-ops. + for sampling_temp in hparams.eval_sampling_temps: + # Iterate over a set so if eval_max_num_noops == 0 then it's 1 iteration. + for max_num_noops in set([hparams.eval_max_num_noops, 0]): + scores = evaluate_single_config( + hparams, sampling_temp, max_num_noops, agent_model_dir, eval_fn + ) + for (score, clipped) in zip(scores, (True, False)): + metric_name = get_metric_name(sampling_temp, max_num_noops, clipped) + metrics[metric_name] = score + + return metrics + + +def evaluate_world_model( + real_env, hparams, world_model_dir, debug_video_path, + split=tf_estimator.ModeKeys.EVAL, +): + """Evaluate the world model (reward accuracy).""" + frame_stack_size = hparams.frame_stack_size + rollout_subsequences = [] + def initial_frame_chooser(batch_size): + assert batch_size == len(rollout_subsequences) + return np.stack([ + [frame.observation.decode() for frame in subsequence[:frame_stack_size]] # pylint: disable=g-complex-comprehension + for subsequence in rollout_subsequences + ]) + + env_fn = rl.make_simulated_env_fn_from_hparams( + real_env, hparams, batch_size=hparams.wm_eval_batch_size, + initial_frame_chooser=initial_frame_chooser, model_dir=world_model_dir + ) + sim_env = env_fn(in_graph=False) + subsequence_length = int( + max(hparams.wm_eval_rollout_ratios) * hparams.simulated_rollout_length + ) + rollouts = real_env.current_epoch_rollouts( + split=split, + minimal_rollout_frames=(subsequence_length + frame_stack_size) + ) + + video_writer = common_video.WholeVideoWriter( + fps=10, output_path=debug_video_path, file_format="avi" + ) + + reward_accuracies_by_length = { + int(ratio * hparams.simulated_rollout_length): [] + for ratio in hparams.wm_eval_rollout_ratios + } + for _ in range(hparams.wm_eval_num_batches): + rollout_subsequences[:] = random_rollout_subsequences( + rollouts, hparams.wm_eval_batch_size, + subsequence_length + frame_stack_size + ) + + eval_subsequences = [ + subsequence[(frame_stack_size - 1):] + for subsequence in rollout_subsequences + ] + + # Check that the initial observation is the same in the real and simulated + # rollout. 
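+    # (A mismatch would mean the simulated env was not initialized from the
+    # frames picked by initial_frame_chooser above.)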
+ sim_init_obs = sim_env.reset() + def decode_real_obs(index): + return np.stack([ + subsequence[index].observation.decode() + for subsequence in eval_subsequences # pylint: disable=cell-var-from-loop + ]) + real_init_obs = decode_real_obs(0) + assert np.all(sim_init_obs == real_init_obs) + + debug_frame_batches = [] + def append_debug_frame_batch(sim_obs, real_obs, sim_cum_rews, + real_cum_rews, sim_rews, real_rews): + """Add a debug frame.""" + rews = [[sim_cum_rews, sim_rews], [real_cum_rews, real_rews]] + headers = [] + for j in range(len(sim_obs)): + local_nps = [] + for i in range(2): + img = PIL_Image().new("RGB", (sim_obs.shape[-2], 11),) + draw = PIL_ImageDraw().Draw(img) + draw.text((0, 0), "c:{:3}, r:{:3}".format(int(rews[i][0][j]), + int(rews[i][1][j])), + fill=(255, 0, 0)) + local_nps.append(np.asarray(img)) + local_nps.append(np.zeros_like(local_nps[0])) + headers.append(np.concatenate(local_nps, axis=1)) + errs = absolute_hinge_difference(sim_obs, real_obs) + headers = np.stack(headers) + debug_frame_batches.append( # pylint: disable=cell-var-from-loop + np.concatenate([headers, + np.concatenate([sim_obs, real_obs, errs], axis=2)], + axis=1) + ) + append_debug_frame_batch(sim_init_obs, real_init_obs, + np.zeros(hparams.wm_eval_batch_size), + np.zeros(hparams.wm_eval_batch_size), + np.zeros(hparams.wm_eval_batch_size), + np.zeros(hparams.wm_eval_batch_size)) + + (sim_cum_rewards, real_cum_rewards) = ( + np.zeros(hparams.wm_eval_batch_size) for _ in range(2) + ) + for i in range(subsequence_length): + actions = [subsequence[i].action for subsequence in eval_subsequences] + (sim_obs, sim_rewards, _) = sim_env.step(actions) + sim_cum_rewards += sim_rewards + + real_rewards = np.array([ + subsequence[i + 1].reward for subsequence in eval_subsequences + ]) + real_cum_rewards += real_rewards + for (length, reward_accuracies) in six.iteritems( + reward_accuracies_by_length + ): + if i + 1 == length: + reward_accuracies.append( + np.sum(sim_cum_rewards == real_cum_rewards) / + len(real_cum_rewards) + ) + + real_obs = decode_real_obs(i + 1) + append_debug_frame_batch(sim_obs, real_obs, sim_cum_rewards, + real_cum_rewards, sim_rewards, real_rewards) + + for debug_frames in np.stack(debug_frame_batches, axis=1): + debug_frame = None + for debug_frame in debug_frames: + video_writer.write(debug_frame) + + if debug_frame is not None: + # Append two black frames for aesthetics. + for _ in range(2): + video_writer.write(np.zeros_like(debug_frame)) + + video_writer.finish_to_disk() + + return { + "reward_accuracy/at_{}".format(length): np.mean(reward_accuracies) + for (length, reward_accuracies) in six.iteritems( + reward_accuracies_by_length + ) + } + + +def summarize_metrics(eval_metrics_writer, metrics, epoch): + """Write metrics to summary.""" + for (name, value) in six.iteritems(metrics): + summary = tf.Summary() + summary.value.add(tag=name, simple_value=value) + eval_metrics_writer.add_summary(summary, epoch) + eval_metrics_writer.flush() + + +LEARNERS = { + "ppo": PPOLearner, + "dqn": DQNLearner, +} + + +ATARI_GAME_MODE = "NoFrameskip-v4" + + +def full_game_name(short_name): + """CamelCase game name with mode suffix. + + Args: + short_name: snake_case name without mode e.g "crazy_climber" + + Returns: + full game name e.g. 
"CrazyClimberNoFrameskip-v4" + """ + camel_game_name = misc_utils.snakecase_to_camelcase(short_name) + full_name = camel_game_name + ATARI_GAME_MODE + return full_name + + +def should_apply_max_and_skip_env(hparams): + """MaxAndSkipEnv doesn't make sense for some games, so omit it if needed.""" + return hparams.game != "tictactoe" + + +def setup_env(hparams, + batch_size, + max_num_noops, + rl_env_max_episode_steps=-1, + env_name=None): + """Setup.""" + if not env_name: + env_name = full_game_name(hparams.game) + + maxskip_envs = should_apply_max_and_skip_env(hparams) + + env = T2TGymEnv( + base_env_name=env_name, + batch_size=batch_size, + grayscale=hparams.grayscale, + should_derive_observation_space=hparams + .rl_should_derive_observation_space, + resize_width_factor=hparams.resize_width_factor, + resize_height_factor=hparams.resize_height_factor, + rl_env_max_episode_steps=rl_env_max_episode_steps, + max_num_noops=max_num_noops, + maxskip_envs=maxskip_envs, + sticky_actions=hparams.sticky_actions + ) + return env + + +def update_hparams_from_hparams(target_hparams, source_hparams, prefix): + """Copy a subset of hparams to target_hparams.""" + for (param_name, param_value) in six.iteritems(source_hparams.values()): + if param_name.startswith(prefix): + target_hparams.set_hparam(param_name[len(prefix):], param_value) + + +def random_rollout_subsequences(rollouts, num_subsequences, subsequence_length): + """Chooses a random frame sequence of given length from a set of rollouts.""" + def choose_subsequence(): + # TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over + # frames and not rollouts. + rollout = random.choice(rollouts) + try: + from_index = random.randrange(len(rollout) - subsequence_length + 1) + except ValueError: + # Rollout too short; repeat. + return choose_subsequence() + return rollout[from_index:(from_index + subsequence_length)] + + return [choose_subsequence() for _ in range(num_subsequences)] + + +def make_initial_frame_chooser( + real_env, frame_stack_size, simulation_random_starts, + simulation_flip_first_random_for_beginning, + split=tf_estimator.ModeKeys.TRAIN, +): + """Make frame chooser. + + Args: + real_env: T2TEnv to take initial frames from. + frame_stack_size (int): Number of consecutive frames to extract. + simulation_random_starts (bool): Whether to choose frames at random. + simulation_flip_first_random_for_beginning (bool): Whether to flip the first + frame stack in every batch for the frames at the beginning. + split (tf.estimator.ModeKeys or None): Data split to take the frames from, + None means use all frames. + + Returns: + Function batch_size -> initial_frames. + """ + initial_frame_rollouts = real_env.current_epoch_rollouts( + split=split, minimal_rollout_frames=frame_stack_size, + ) + def initial_frame_chooser(batch_size): + """Frame chooser.""" + + deterministic_initial_frames =\ + initial_frame_rollouts[0][:frame_stack_size] + if not simulation_random_starts: + # Deterministic starts: repeat first frames from the first rollout. + initial_frames = [deterministic_initial_frames] * batch_size + else: + # Random starts: choose random initial frames from random rollouts. + initial_frames = random_rollout_subsequences( + initial_frame_rollouts, batch_size, frame_stack_size + ) + if simulation_flip_first_random_for_beginning: + # Flip first entry in the batch for deterministic initial frames. 
+ initial_frames[0] = deterministic_initial_frames + + return np.stack([ + [frame.observation.decode() for frame in initial_frame_stack] # pylint: disable=g-complex-comprehension + for initial_frame_stack in initial_frames + ]) + return initial_frame_chooser + + +def absolute_hinge_difference(arr1, arr2, min_diff=10, dtype=np.uint8): + """Point-wise, hinge loss-like, difference between arrays. + + Args: + arr1: integer array to compare. + arr2: integer array to compare. + min_diff: minimal difference taken into consideration. + dtype: dtype of returned array. + + Returns: + array + """ + diff = np.abs(arr1.astype(int) - arr2, dtype=int) + return np.maximum(diff - min_diff, 0).astype(dtype) + + +# TODO(koz4k): Use this function in player and all debug videos. +def augment_observation( + observation, reward, cum_reward, frame_index, bar_color=None, + header_height=27 +): + """Augments an observation with debug info.""" + img = PIL_Image().new( + "RGB", (observation.shape[1], header_height,) + ) + draw = PIL_ImageDraw().Draw(img) + draw.text( + (1, 0), "c:{:3}, r:{:3}".format(int(cum_reward), int(reward)), + fill=(255, 0, 0) + ) + draw.text( + (1, 15), "f:{:3}".format(int(frame_index)), + fill=(255, 0, 0) + ) + header = np.copy(np.asarray(img)) + del img + if bar_color is not None: + header[0, :, :] = bar_color + return np.concatenate([header, observation], axis=0) + + +def run_rollouts( + env, agent, initial_observations, step_limit=None, discount_factor=1.0, + log_every_steps=None, video_writers=(), color_bar=False, + many_rollouts_from_each_env=False +): + """Runs a batch of rollouts from given initial observations.""" + assert step_limit is not None or not many_rollouts_from_each_env, ( + "When collecting many rollouts from each environment, time limit must " + "be set." + ) + + num_dones = 0 + first_dones = np.array([False] * env.batch_size) + observations = initial_observations + step_index = 0 + cum_rewards = np.zeros(env.batch_size) + + for (video_writer, obs_stack) in zip(video_writers, initial_observations): + for (i, ob) in enumerate(obs_stack): + debug_frame = augment_observation( + ob, reward=0, cum_reward=0, frame_index=(-len(obs_stack) + i + 1), + bar_color=((0, 255, 0) if color_bar else None) + ) + video_writer.write(debug_frame) + + def proceed(): + if step_index < step_limit: + return num_dones < env.batch_size or many_rollouts_from_each_env + else: + return False + + while proceed(): + act_kwargs = {} + if agent.needs_env_state: + act_kwargs["env_state"] = env.state + actions = agent.act(observations, **act_kwargs) + (observations, rewards, dones) = env.step(actions) + observations = list(observations) + now_done_indices = [] + for (i, done) in enumerate(dones): + if done and (not first_dones[i] or many_rollouts_from_each_env): + now_done_indices.append(i) + first_dones[i] = True + num_dones += 1 + if now_done_indices: + # Unless many_rollouts_from_each_env, reset only envs done the first time + # in this timestep to ensure that we collect exactly 1 rollout from each + # env. 
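+      # (Resetting with explicit indices only restarts the selected
+      # environments; the other entries of the batch keep running.)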
+ reset_observations = env.reset(now_done_indices) + for (i, observation) in zip(now_done_indices, reset_observations): + observations[i] = observation + observations = np.array(observations) + cum_rewards[~first_dones] = ( + cum_rewards[~first_dones] * discount_factor + rewards[~first_dones] + ) + step_index += 1 + + for (video_writer, obs_stack, reward, cum_reward, done) in zip( + video_writers, observations, rewards, cum_rewards, first_dones + ): + if done: + continue + ob = obs_stack[-1] + debug_frame = augment_observation( + ob, reward=reward, cum_reward=cum_reward, + frame_index=step_index, bar_color=((255, 0, 0) if color_bar else None) + ) + video_writer.write(debug_frame) + + # TODO(afrozm): Clean this up with tf.logging.log_every_n + if log_every_steps is not None and step_index % log_every_steps == 0: + tf.logging.info("Step %d, mean_score: %f", step_index, cum_rewards.mean()) + + return (observations, cum_rewards) + + +class BatchAgent(object): + """Python API for agents. + + Runs a batch of parallel agents. Operates on Numpy arrays. + """ + + needs_env_state = False + records_own_videos = False + + def __init__(self, batch_size, observation_space, action_space): + self.batch_size = batch_size + self.observation_space = observation_space + self.action_space = action_space + + def act(self, observations, env_state=None): + """Picks actions based on observations. + + Args: + observations: A batch of observations. + env_state: State. + + Returns: + A batch of actions. + """ + raise NotImplementedError + + def estimate_value(self, observations): + """Estimates values of states based on observations. + + Used for temporal-difference planning. + + Args: + observations: A batch of observations. + + Returns: + A batch of values. + """ + raise NotImplementedError + + def action_distribution(self, observations): + """Calculates action distribution based on observations. + + Used for temporal-difference planning. + + Args: + observations: A batch of observations. + + Returns: + A batch of action probabilities. 
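+      Each row is a probability distribution over the `action_space.n`
+      discrete actions (non-negative entries summing to 1).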
+ """ + raise NotImplementedError + + +class RandomAgent(BatchAgent): + """Random agent, sampling actions from the uniform distribution.""" + + def act(self, observations, env_state=None): + del env_state + return np.array([ + self.action_space.sample() for _ in range(observations.shape[0]) + ]) + + def estimate_value(self, observations): + return np.zeros(observations.shape[0]) + + def action_distribution(self, observations): + return np.full( + (observations.shape[0], self.action_space.n), 1.0 / self.action_space.n + ) + + +class PolicyAgent(BatchAgent): + """Agent based on a policy network.""" + + def __init__( + self, batch_size, observation_space, action_space, policy_hparams, + policy_dir, sampling_temp + ): + super(PolicyAgent, self).__init__( + batch_size, observation_space, action_space + ) + self._sampling_temp = sampling_temp + with tf.Graph().as_default(): + self._observations_t = tf.placeholder( + shape=((batch_size,) + self.observation_space.shape), + dtype=self.observation_space.dtype + ) + (logits, self._values_t) = rl.get_policy( + self._observations_t, policy_hparams, self.action_space + ) + actions = common_layers.sample_with_temperature(logits, sampling_temp) + self._probs_t = tf.nn.softmax(logits / sampling_temp) + self._actions_t = tf.cast(actions, tf.int32) + model_saver = tf.train.Saver( + tf.global_variables(policy_hparams.policy_network + "/.*") # pylint: disable=unexpected-keyword-arg + ) + self._sess = tf.Session() + self._sess.run(tf.global_variables_initializer()) + trainer_lib.restore_checkpoint(policy_dir, model_saver, self._sess) + + def _run(self, observations): + return self._sess.run( + [self._actions_t, self._values_t, self._probs_t], + feed_dict={self._observations_t: observations} + ) + + def act(self, observations, env_state=None): + del env_state + (actions, _, _) = self._run(observations) + return actions + + def estimate_value(self, observations): + (_, values, _) = self._run(observations) + return values + + def action_distribution(self, observations): + (_, _, probs) = self._run(observations) + return probs + + +class PlannerAgent(BatchAgent): + """Agent based on temporal difference planning.""" + + needs_env_state = True + records_own_videos = True + + def __init__( + self, + batch_size, + rollout_agent, + sim_env, + wrapper_fn, + num_rollouts, + planning_horizon, + discount_factor=1.0, + uct_const=0, + uniform_first_action=True, + normalizer_window_size=30, + normalizer_epsilon=0.001, + video_writers=(), + ): + super(PlannerAgent, self).__init__( + batch_size, rollout_agent.observation_space, rollout_agent.action_space + ) + self._rollout_agent = rollout_agent + self._sim_env = sim_env + self._wrapped_env = wrapper_fn(sim_env) + self._num_rollouts = num_rollouts + self._num_batches = num_rollouts // rollout_agent.batch_size + self._discount_factor = discount_factor + self._planning_horizon = planning_horizon + self._uct_const = uct_const + self._uniform_first_action = uniform_first_action + self._normalizer_window_size = normalizer_window_size + self._normalizer_epsilon = normalizer_epsilon + self._video_writers = video_writers + self._best_mc_values = [[] for _ in range(self.batch_size)] + + def act(self, observations, env_state=None): + def run_batch_from(observation, planner_index, batch_index): + """Run a batch of actions.""" + repeated_observation = np.array( + [observation] * self._wrapped_env.batch_size + ) + actions = self._get_first_actions(repeated_observation) + self._wrapped_env.set_initial_state( + initial_state=[ + 
copy.deepcopy(env_state[planner_index]) + for _ in range(self._sim_env.batch_size) + ], + initial_frames=repeated_observation + ) + self._wrapped_env.reset() + (initial_observations, initial_rewards, _) = self._wrapped_env.step( + actions + ) + video_writers = () + if planner_index < len(self._video_writers) and batch_index == 0: + video_writers = (self._video_writers[planner_index],) + (final_observations, cum_rewards) = run_rollouts( + self._wrapped_env, self._rollout_agent, initial_observations, + discount_factor=self._discount_factor, + step_limit=self._planning_horizon, + video_writers=video_writers, color_bar=True) + values = self._rollout_agent.estimate_value(final_observations) + total_values = ( + initial_rewards + self._discount_factor * cum_rewards + + self._discount_factor ** (self._planning_horizon + 1) * values + ) + return list(zip(actions, total_values)) + + def run_batches_from(observation, planner_index): + sums = {a: 0 for a in range(self.action_space.n)} + counts = copy.copy(sums) + for i in range(self._num_batches): + for (action, total_value) in run_batch_from( + observation, planner_index, i + ): + sums[action] += total_value + counts[action] += 1 + return {a: (sums[a], counts[a]) for a in sums} + + def choose_best_action(observation, planner_index): + """Choose the best action, update best Monte Carlo values.""" + best_mc_values = self._best_mc_values[planner_index] + action_probs = self._rollout_agent.action_distribution( + np.array([observation] * self._rollout_agent.batch_size) + )[0, :] + sums_and_counts = run_batches_from(observation, planner_index) + + def monte_carlo_value(action): + (value_sum, count) = sums_and_counts[action] + if count > 0: + mean_value = value_sum / count + else: + mean_value = -np.inf + return mean_value + + mc_values = np.array( + [monte_carlo_value(action) for action in range(self.action_space.n)] + ) + best_mc_values.append(mc_values.max()) + + normalizer = max( + np.std(best_mc_values[-self._normalizer_window_size:]), + self._normalizer_epsilon + ) + normalized_mc_values = mc_values / normalizer + + uct_bonuses = np.array( + [self._uct_bonus(sums_and_counts[action][1], action_probs[action]) + for action in range(self.action_space.n)] + ) + values = normalized_mc_values + uct_bonuses + return np.argmax(values) + + return np.array([ + choose_best_action(observation, i) + for (i, observation) in enumerate(observations) + ]) + + def _uct_bonus(self, count, prob): + return self._uct_const * prob * math.sqrt( + math.log(self._num_rollouts) / (1 + count) + ) + + def _get_first_actions(self, observations): + if self._uniform_first_action: + return np.array([ + int(x) for x in np.linspace( + 0, self.action_space.n, self._rollout_agent.batch_size + 1 + ) + ])[:self._rollout_agent.batch_size] + else: + return list(sorted(self._rollout_agent.act(observations))) + + +# TODO(koz4k): Unify interfaces of batch envs. +class BatchWrapper(object): + """Base class for batch env wrappers.""" + + def __init__(self, env): + self.env = env + self.batch_size = env.batch_size + self.observation_space = env.observation_space + self.action_space = env.action_space + self.reward_range = env.reward_range + + def reset(self, indices=None): + return self.env.reset(indices) + + def step(self, actions): + return self.env.step(actions) + + def close(self): + self.env.close() + + +class BatchStackWrapper(BatchWrapper): + """Out-of-graph batch stack wrapper. + + Its behavior is consistent with tf_atari_wrappers.StackWrapper. 
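+
+  Observations returned from reset() and step() are stacks of the last
+  `stack_size` frames. A minimal usage sketch (assuming `env` is any batch
+  env following this interface):
+
+    stacked_env = BatchStackWrapper(env, stack_size=4)
+    obs = stacked_env.reset()  # shape: (batch_size, 4) + single-frame shape
+    (obs, rewards, dones) = stacked_env.step(actions)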
+ """ + + def __init__(self, env, stack_size): + super(BatchStackWrapper, self).__init__(env) + self.stack_size = stack_size + inner_space = env.observation_space + self.observation_space = Box( + low=np.array([inner_space.low] * self.stack_size), + high=np.array([inner_space.high] * self.stack_size), + dtype=inner_space.dtype, + ) + self._history_buffer = np.zeros( + (self.batch_size,) + self.observation_space.shape, + dtype=inner_space.dtype + ) + self._initial_frames = None + + @property + def state(self): + """Gets the current state.""" + return self.env.state + + def set_initial_state(self, initial_state, initial_frames): + """Sets the state that will be used on next reset.""" + self.env.set_initial_state(initial_state, initial_frames) + self._initial_frames = initial_frames + + def reset(self, indices=None): + if indices is None: + indices = range(self.batch_size) + + observations = self.env.reset(indices) + try: + # If we wrap the simulated env, take the initial frames from there. + assert self.env.initial_frames.shape[1] == self.stack_size + self._history_buffer[...] = self.env.initial_frames + except AttributeError: + # Otherwise, check if set_initial_state was called and we can take the + # frames from there. + if self._initial_frames is not None: + for (index, observation) in zip(indices, observations): + assert (self._initial_frames[index, -1, ...] == observation).all() + self._history_buffer[index, ...] = self._initial_frames[index, ...] + else: + # Otherwise, repeat the first observation stack_size times. + for (index, observation) in zip(indices, observations): + self._history_buffer[index, ...] = [observation] * self.stack_size + return self._history_buffer + + def step(self, actions): + (observations, rewards, dones) = self.env.step(actions) + self._history_buffer = np.roll(self._history_buffer, shift=-1, axis=1) + self._history_buffer[:, -1, ...] = observations + return (self._history_buffer, rewards, dones) + + +class SimulatedBatchGymEnvWithFixedInitialFrames(BatchWrapper): + """Wrapper for SimulatedBatchGymEnv that allows to fix initial frames.""" + + def __init__(self, *args, **kwargs): + self.initial_frames = None + def initial_frame_chooser(batch_size): + assert batch_size == self.initial_frames.shape[0] + return self.initial_frames + env = SimulatedBatchGymEnv( + *args, initial_frame_chooser=initial_frame_chooser, **kwargs + ) + super(SimulatedBatchGymEnvWithFixedInitialFrames, self).__init__(env) + + @property + def state(self): + """Gets the current state.""" + return [None] * self.batch_size + + def set_initial_state(self, initial_state, initial_frames): + """Sets the state that will be used on next reset.""" + del initial_state + self.initial_frames = initial_frames diff --git a/tensor2tensor/rl/trainer_model_based.py b/tensor2tensor/rl/trainer_model_based.py new file mode 100644 index 000000000..2d5e48dd7 --- /dev/null +++ b/tensor2tensor/rl/trainer_model_based.py @@ -0,0 +1,388 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Training of model-based RL agents. + +Example invocation: + +python -m tensor2tensor.rl.trainer_model_based \ + --output_dir=$HOME/t2t/rl_v1 \ + --loop_hparams_set=rlmb_base \ + --loop_hparams='num_real_env_frames=10000,epochs=3' +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import datetime +import math +import os +import pprint +import random +import time + +import six + +from tensor2tensor.bin import t2t_trainer # pylint: disable=unused-import +from tensor2tensor.models.research import rl +from tensor2tensor.rl import rl_utils +from tensor2tensor.rl import trainer_model_based_params +from tensor2tensor.rl.dopamine_connector import DQNLearner # pylint: disable=unused-import +from tensor2tensor.rl.restarter import Restarter +from tensor2tensor.utils import trainer_lib + +import tensorflow.compat.v1 as tf + + +flags = tf.flags +FLAGS = flags.FLAGS + + +def real_env_step_increment(hparams): + """Real env step increment.""" + return int(math.ceil( + hparams.num_real_env_frames / hparams.epochs + )) + + +def world_model_step_increment(hparams, epoch): + if epoch in [0, 1, 4, 9, 14]: + multiplier = hparams.initial_epoch_train_steps_multiplier + else: + multiplier = 1 + return multiplier * hparams.model_train_steps + + +def setup_directories(base_dir, subdirs): + """Setup directories.""" + base_dir = os.path.expanduser(base_dir) + tf.gfile.MakeDirs(base_dir) + + all_dirs = {} + for subdir in subdirs: + if isinstance(subdir, six.string_types): + subdir_tuple = (subdir,) + else: + subdir_tuple = subdir + dir_name = os.path.join(base_dir, *subdir_tuple) + tf.gfile.MakeDirs(dir_name) + all_dirs[subdir] = dir_name + return all_dirs + + +def make_relative_timing_fn(): + """Make a function that logs the duration since it was made.""" + start_time = time.time() + + def format_relative_time(): + time_delta = time.time() - start_time + return str(datetime.timedelta(seconds=time_delta)) + + def log_relative_time(): + tf.logging.info("Timing: %s", format_relative_time()) + + return log_relative_time + + +def make_log_fn(epoch, log_relative_time_fn): + + def log(msg, *args): + msg %= args + tf.logging.info("%s Epoch %d: %s", ">>>>>>>", epoch, msg) + log_relative_time_fn() + + return log + + +def random_rollout_subsequences(rollouts, num_subsequences, subsequence_length): + """Chooses a random frame sequence of given length from a set of rollouts.""" + def choose_subsequence(): + # TODO(koz4k): Weigh rollouts by their lengths so sampling is uniform over + # frames and not rollouts. + rollout = random.choice(rollouts) + try: + from_index = random.randrange(len(rollout) - subsequence_length + 1) + except ValueError: + # Rollout too short; repeat. 
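+      # (The retry only terminates if at least one rollout has at least
+      # subsequence_length frames.)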
+ return choose_subsequence() + return rollout[from_index:(from_index + subsequence_length)] + + return [choose_subsequence() for _ in range(num_subsequences)] + + +def train_supervised(problem, model_name, hparams, data_dir, output_dir, + train_steps, eval_steps, local_eval_frequency=None, + schedule="continuous_train_and_eval"): + """Train supervised.""" + if local_eval_frequency is None: + local_eval_frequency = FLAGS.local_eval_frequency + + exp_fn = trainer_lib.create_experiment_fn( + model_name, problem, data_dir, train_steps, eval_steps, + min_eval_frequency=local_eval_frequency + ) + run_config = trainer_lib.create_run_config(model_name, model_dir=output_dir) + exp = exp_fn(run_config, hparams) + getattr(exp, schedule)() + + +def train_agent(real_env, learner, world_model_dir, hparams, epoch): + """Train the PPO agent in the simulated environment.""" + initial_frame_chooser = rl_utils.make_initial_frame_chooser( + real_env, hparams.frame_stack_size, hparams.simulation_random_starts, + hparams.simulation_flip_first_random_for_beginning + ) + env_fn = rl.make_simulated_env_fn_from_hparams( + real_env, hparams, batch_size=hparams.simulated_batch_size, + initial_frame_chooser=initial_frame_chooser, model_dir=world_model_dir, + sim_video_dir=os.path.join( + learner.agent_model_dir, "sim_videos_{}".format(epoch) + ) + ) + base_algo_str = hparams.base_algo + train_hparams = trainer_lib.create_hparams(hparams.base_algo_params) + if hparams.wm_policy_param_sharing: + train_hparams.optimizer_zero_grads = True + + rl_utils.update_hparams_from_hparams( + train_hparams, hparams, base_algo_str + "_" + ) + + final_epoch = hparams.epochs - 1 + is_special_epoch = (epoch + 3) == final_epoch or (epoch + 7) == final_epoch + is_special_epoch = is_special_epoch or (epoch == 1) # Make 1 special too. + is_final_epoch = epoch == final_epoch + env_step_multiplier = 3 if is_final_epoch else 2 if is_special_epoch else 1 + learner.train( + env_fn, train_hparams, simulated=True, save_continuously=True, + epoch=epoch, env_step_multiplier=env_step_multiplier + ) + + +def train_agent_real_env(env, learner, hparams, epoch): + """Train the PPO agent in the real environment.""" + base_algo_str = hparams.base_algo + + train_hparams = trainer_lib.create_hparams(hparams.base_algo_params) + rl_utils.update_hparams_from_hparams( + train_hparams, hparams, "real_" + base_algo_str + "_" + ) + if hparams.wm_policy_param_sharing: + train_hparams.optimizer_zero_grads = True + + env_fn = rl.make_real_env_fn(env) + num_env_steps = real_env_step_increment(hparams) + learner.train( + env_fn, + train_hparams, + simulated=False, + save_continuously=False, + epoch=epoch, + sampling_temp=hparams.real_sampling_temp, + num_env_steps=num_env_steps, + ) + # Save unfinished rollouts to history. 
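+  # (Resetting flushes the in-progress rollouts into the environment's history
+  # so that generate_data() can pick them up later.)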
+ env.reset() + + +def train_world_model( + env, data_dir, output_dir, hparams, world_model_steps_num, epoch +): + """Train the world model on problem_name.""" + world_model_steps_num += world_model_step_increment(hparams, epoch) + model_hparams = trainer_lib.create_hparams(hparams.generative_model_params) + model_hparams.learning_rate = model_hparams.learning_rate_constant + if epoch > 0: + model_hparams.learning_rate *= hparams.learning_rate_bump + if hparams.wm_policy_param_sharing: + model_hparams.optimizer_zero_grads = True + + restarter = Restarter("world_model", output_dir, world_model_steps_num) + if restarter.should_skip: + return world_model_steps_num + with restarter.training_loop(): + train_supervised( + problem=env, + model_name=hparams.generative_model, + hparams=model_hparams, + data_dir=data_dir, + output_dir=output_dir, + train_steps=restarter.target_global_step, + eval_steps=100, + local_eval_frequency=2000 + ) + + return world_model_steps_num + + +def load_metrics(event_dir, epoch): + """Loads metrics for this epoch if they have already been written. + + This reads the entire event file but it's small with just per-epoch metrics. + + Args: + event_dir: TODO(koz4k): Document this. + epoch: TODO(koz4k): Document this. + + Returns: + metrics. + """ + metrics = {} + for filename in tf.gfile.ListDirectory(event_dir): + path = os.path.join(event_dir, filename) + for event in tf.train.summary_iterator(path): + if event.step == epoch and event.HasField("summary"): + value = event.summary.value[0] + metrics[value.tag] = value.simple_value + return metrics + + +def training_loop(hparams, output_dir, report_fn=None, report_metric=None): + """Run the main training loop.""" + if report_fn: + assert report_metric is not None + + # Directories + subdirectories = [ + "data", "tmp", "world_model", ("world_model", "debug_videos"), + "policy", "eval_metrics" + ] + directories = setup_directories(output_dir, subdirectories) + + epoch = -1 + data_dir = directories["data"] + env = rl_utils.setup_env( + hparams, batch_size=hparams.real_batch_size, + max_num_noops=hparams.max_num_noops, + rl_env_max_episode_steps=hparams.rl_env_max_episode_steps + ) + env.start_new_epoch(epoch, data_dir) + + if hparams.wm_policy_param_sharing: + policy_model_dir = directories["world_model"] + else: + policy_model_dir = directories["policy"] + learner = rl_utils.LEARNERS[hparams.base_algo]( + hparams.frame_stack_size, policy_model_dir, + policy_model_dir, hparams.epochs + ) + + # Timing log function + log_relative_time = make_relative_timing_fn() + + # Per-epoch state + epoch_metrics = [] + metrics = {} + + # Collect data from the real environment. 
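+  # This is the data-gathering epoch (epoch = -1): train the policy briefly on
+  # the real environment and record the rollouts that will become the initial
+  # training data for the world model.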
+ policy_model_dir = directories["policy"] + tf.logging.info("Initial training of the policy in real environment.") + train_agent_real_env(env, learner, hparams, epoch) + metrics["mean_reward/train/clipped"] = rl_utils.compute_mean_reward( + env.current_epoch_rollouts(), clipped=True + ) + tf.logging.info("Mean training reward (initial): {}".format( + metrics["mean_reward/train/clipped"] + )) + env.generate_data(data_dir) + + eval_metrics_writer = tf.summary.FileWriter( + directories["eval_metrics"] + ) + + world_model_steps_num = 0 + + for epoch in range(hparams.epochs): + log = make_log_fn(epoch, log_relative_time) + + # Train world model + log("Training world model") + world_model_steps_num = train_world_model( + env, data_dir, directories["world_model"], hparams, + world_model_steps_num, epoch + ) + + # Train agent + log("Training policy in simulated environment.") + train_agent(env, learner, directories["world_model"], hparams, epoch) + + env.start_new_epoch(epoch, data_dir) + + # Train agent on real env (short) + log("Training policy in real environment.") + train_agent_real_env(env, learner, hparams, epoch) + + if hparams.stop_loop_early: + return 0.0 + + env.generate_data(data_dir) + + metrics = load_metrics(directories["eval_metrics"], epoch) + if metrics: + # Skip eval if metrics have already been written for this epoch. Otherwise + # we'd overwrite them with wrong data. + log("Metrics found for this epoch, skipping evaluation.") + else: + metrics["mean_reward/train/clipped"] = rl_utils.compute_mean_reward( + env.current_epoch_rollouts(), clipped=True + ) + log("Mean training reward: {}".format( + metrics["mean_reward/train/clipped"] + )) + + eval_metrics = rl_utils.evaluate_all_configs(hparams, policy_model_dir) + log("Agent eval metrics:\n{}".format(pprint.pformat(eval_metrics))) + metrics.update(eval_metrics) + + if hparams.eval_world_model: + debug_video_path = os.path.join( + directories["world_model", "debug_videos"], + "{}.avi".format(env.current_epoch) + ) + wm_metrics = rl_utils.evaluate_world_model( + env, hparams, directories["world_model"], debug_video_path + ) + log("World model eval metrics:\n{}".format(pprint.pformat(wm_metrics))) + metrics.update(wm_metrics) + + rl_utils.summarize_metrics(eval_metrics_writer, metrics, epoch) + + # Report metrics + if report_fn: + if report_metric == "mean_reward": + metric_name = rl_utils.get_metric_name( + sampling_temp=hparams.eval_sampling_temps[0], + max_num_noops=hparams.eval_max_num_noops, + clipped=False + ) + report_fn(eval_metrics[metric_name], epoch) + else: + report_fn(eval_metrics[report_metric], epoch) + + epoch_metrics.append(metrics) + + # Return the evaluation metrics from the final epoch + return epoch_metrics[-1] + + +def main(_): + hp = trainer_model_based_params.create_loop_hparams() + assert not FLAGS.job_dir_to_evaluate + training_loop(hp, FLAGS.output_dir) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/rl/trainer_model_based_agent_only.py b/tensor2tensor/rl/trainer_model_based_agent_only.py new file mode 100644 index 000000000..9475f3227 --- /dev/null +++ b/tensor2tensor/rl/trainer_model_based_agent_only.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Training of model-based RL agent assuming a fully trained world model. + +Example invocation: + +python -m tensor2tensor.rl.trainer_model_based_agent_only \ + --loop_hparams_set=rl_modelrl_base \ + --world_model_dir=$HOME/world_model/ \ + --data_dir=$HOME/data/ \ + --output_dir=$HOME/ppo_agent_only/ \ +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.bin import t2t_trainer # pylint: disable=unused-import +from tensor2tensor.data_generators import gym_env +from tensor2tensor.rl import trainer_model_based +from tensor2tensor.rl import trainer_model_based_params + + +import tensorflow.compat.v1 as tf + + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("world_model_dir", "", + "Directory containing checkpoints of the world model.") + + +def get_simulated_problem_name(game): + game_with_mode = game + if game in gym_env.ATARI_GAMES: + game_with_mode += "_deterministic-v4" + return "gym_simulated_discrete_problem_with_agent_on_%s" % game_with_mode + + +def main(_): + hparams = trainer_model_based_params.create_loop_hparams() + problem_name = get_simulated_problem_name(hparams.game) + world_model_dir = FLAGS.world_model_dir + agent_model_dir = FLAGS.output_dir + event_dir = FLAGS.output_dir + epoch_data_dir = FLAGS.data_dir # only required for initial frames + + trainer_model_based.train_agent( + problem_name, + agent_model_dir, + event_dir, + world_model_dir, + epoch_data_dir, + hparams, + 0, + epoch=0, + is_final_epoch=True) + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/rl/trainer_model_based_params.py b/tensor2tensor/rl/trainer_model_based_params.py new file mode 100644 index 000000000..0c5ee7120 --- /dev/null +++ b/tensor2tensor/rl/trainer_model_based_params.py @@ -0,0 +1,1023 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+r"""Parameter sets for training of model-based RL agents."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import six
+
+
+from tensor2tensor.data_generators import gym_env
+from tensor2tensor.utils import hparam
+from tensor2tensor.utils import registry
+
+import tensorflow.compat.v1 as tf
+
+
+flags = tf.flags
+FLAGS = flags.FLAGS
+
+
+flags.DEFINE_string("loop_hparams_set", "rlmb_base",
+                    "Which RL hparams set to use.")
+flags.DEFINE_string("loop_hparams", "", "Overrides for overall loop HParams.")
+flags.DEFINE_string("job_dir_to_evaluate", "",
+                    "Directory of a job to be evaluated.")
+flags.DEFINE_string("eval_results_dir", "/tmp",
+                    "Directory to store results of evaluation.")
+
+
+HP_SCOPES = ["loop", "model", "ppo"]
+
+
+def _rlmb_base():
+  return hparam.HParams(
+      epochs=15,
+      # Total frames used for training. This will be distributed evenly across
+      # hparams.epochs.
+      # This number should be divisible by real_ppo_epoch_length*epochs
+      # for our frame accounting to be precise.
+      num_real_env_frames=96000,
+      generative_model="next_frame_basic_deterministic",
+      generative_model_params="next_frame_pixel_noise",
+      autoencoder_train_steps=0,
+      autoencoder_train_steps_initial_multiplier=10,
+      autoencoder_hparams_set="autoencoder_discrete_pong",
+      model_train_steps=15000,
+      initial_epoch_train_steps_multiplier=3,
+      # Use random starts when learning agent on simulated env.
+      simulation_random_starts=True,
+      # Flip the first random frame in PPO batch for the true beginning.
+      simulation_flip_first_random_for_beginning=False,
+      intrinsic_reward_scale=0.,
+      # Resizing.
+      resize_height_factor=2,
+      resize_width_factor=2,
+      grayscale=False,
+      # Maximum number of noops to make on environment reset.
+      max_num_noops=8,
+      # Bump learning rate after first epoch by 3x.
+      # We picked 3x because our default learning rate schedule decreases with
+      # 1/square root of step; 1/sqrt(10k) = 0.01 and 1/sqrt(100k) ~ 0.0032
+      # so by bumping it up 3x we roughly "go back" from 100k steps to 10k,
+      # which is approximately as much as "going back 1 epoch" would be.
+      # In your experiments, you may want to tune this rate to your schedule.
+      learning_rate_bump=3.0,
+
+      # Policy sampling temperature to use when gathering data from the real
+      # environment.
+      real_sampling_temp=1.0,
+
+      # Sampling temperatures to try during eval.
+      eval_sampling_temps=[0.5, 0.0, 1.0],
+      eval_max_num_noops=8,
+      # To speed up the pipeline. Some games want to run forever.
+      eval_rl_env_max_episode_steps=1000,
+
+      game="pong",
+      sticky_actions=False,
+      # If set, use this as the gym env name, instead of changing game mode etc.
+      rl_env_name="",
+      # Controls whether we should derive observation space, do some
+      # pre-processing etc. See T2TGymEnv._derive_observation_space.
+      rl_should_derive_observation_space=True,
+      # Whether to evaluate the world model in each iteration of the loop to get
+      # the model_reward_accuracy metric.
+      eval_world_model=True,
+      # Number of concurrent rollouts in world model evaluation.
+      wm_eval_batch_size=16,
+      # Number of batches to run for world model evaluation.
+      wm_eval_num_batches=8,
+      # Ratios of ppo_epoch_length to report reward_accuracy on.
+      wm_eval_rollout_ratios=[0.25, 1],
+      stop_loop_early=False,  # To speed up tests.
+      rl_env_max_episode_steps=-1,  # Use default from gym.make()
+      # Number of last observations to feed to the agent and world model.
+ frame_stack_size=4, + # This is only used for world-model evaluation currently, PolicyLearner + # uses algorithm specific hparams to set this during training. + simulated_rollout_length=50, + wm_policy_param_sharing=False, + + # To be overridden. + base_algo="", + base_algo_params="", + # Number of real environments to train on simultaneously. + real_batch_size=-1, + # Number of simulated environments to train on simultaneously. + simulated_batch_size=-1, + # Batch size during evaluation. Metrics are averaged over this number of + # rollouts. + eval_batch_size=-1, + ) + + +def update_hparams(hparams, other): + for key, value in six.iteritems(other): + if key in hparams.values(): + hparams.set_hparam(key, value) + else: + hparams.add_hparam(key, value) + + +@registry.register_hparams +def rlmb_ppo_base(): + """HParams for PPO base.""" + hparams = _rlmb_base() + ppo_params = dict( + base_algo="ppo", + base_algo_params="ppo_original_params", + # Number of real environments to train on simultaneously. + real_batch_size=1, + # Number of simulated environments to train on simultaneously. + simulated_batch_size=16, + eval_batch_size=32, + + # Unused; number of PPO epochs is calculated from the real frame limit. + real_ppo_epochs_num=0, + # Number of frames that can be taken from the simulated environment before + # it diverges, used for training the agent. + + ppo_epochs_num=1000, # This should be enough to see something + # Should be equal to simulated_rollout_length. + # TODO(koz4k): Uncouple this by outputing done from SimulatedBatchEnv. + ppo_epoch_length=hparams.simulated_rollout_length, + # Do not eval since simulated batch env does not produce dones + ppo_eval_every_epochs=0, + ppo_learning_rate_constant=1e-4, # Will be changed, just so it exists. + # This needs to be divisible by real_ppo_effective_num_agents. + real_ppo_epoch_length=16 * 200, + real_ppo_learning_rate_constant=1e-4, + real_ppo_effective_num_agents=16, + real_ppo_eval_every_epochs=0, + + simulation_flip_first_random_for_beginning=True, + ) + update_hparams(hparams, ppo_params) + return hparams + + +@registry.register_hparams +def rlmb_ppo_base_param_sharing(): + """HParams for PPO base with parameter sharing.""" + hparams = rlmb_ppo_base() + hparams.wm_policy_param_sharing = True + hparams.base_algo_params = "ppo_original_world_model" + return hparams + + +@registry.register_hparams +def rlmb_base(): + return rlmb_ppo_base() + + +@registry.register_hparams +def rlmb_dqn_base(): + """rlmb_dqn_base params.""" + hparams = _rlmb_base() + simulated_rollout_length = 10 + dqn_params = dict( + base_algo="dqn", + base_algo_params="dqn_original_params", + real_batch_size=1, + simulated_batch_size=16, + dqn_agent_generates_trainable_dones=False, + eval_batch_size=1, + # Must be equal to dqn_time_limit for now + simulated_rollout_length=simulated_rollout_length, + dqn_time_limit=simulated_rollout_length, + simulation_flip_first_random_for_beginning=False, + dqn_eval_episodes_num=3, + + # TODO(kc): only for model-free compatibility, remove this + epochs_num=-1, + ) + update_hparams(hparams, dqn_params) + return hparams + + +@registry.register_hparams +def rlmb_dqn_guess1(): + """DQN guess1 params.""" + hparams = rlmb_dqn_base() + hparams.set_hparam("base_algo_params", "dqn_guess1_params") + # At the moment no other option for evaluation, so we want long rollouts to + # not bias scores. 
+ hparams.set_hparam("eval_rl_env_max_episode_steps", 5000) + return hparams + + +@registry.register_hparams +def rlmb_dqn_guess1_rainbow(): + """Rainbow rlmb_dqn guess1 params.""" + hparams = rlmb_dqn_guess1() + hparams.set_hparam("base_algo_params", "dqn_guess1_rainbow_params") + return hparams + + +@registry.register_hparams +def rlmb_dqn_rainbow_large_epsilon(): + """Rainbow rlmb_dqn params.""" + hparams = rlmb_dqn_guess1() + hparams.set_hparam("base_algo_params", "dqn_rainbow_params") + hparams.set_hparam("dqn_agent_epsilon_train", 0.1) + hparams.add_hparam("real_dqn_agent_epsilon_train", 0.02) + simulated_rollout_length = 10 + hparams.set_hparam("simulated_rollout_length", simulated_rollout_length) + hparams.set_hparam("dqn_time_limit", simulated_rollout_length) + return hparams + + +@registry.register_hparams +def rlmb_dqn_guess1_2m_replay_buffer(): + """DQN guess1 params, 2M replay buffer.""" + hparams = rlmb_dqn_guess1() + hparams.set_hparam("base_algo_params", "dqn_2m_replay_buffer_params") + return hparams + + +@registry.register_hparams +def rlmb_dqn_guess1_10m_replay_buffer(): + """DQN guess1 params, 10M replay buffer.""" + hparams = rlmb_dqn_guess1() + hparams.set_hparam("base_algo_params", "dqn_10m_replay_buffer_params") + return hparams + + +@registry.register_hparams +def rlmb_basetest(): + """Base setting but quicker with only 2 epochs.""" + hparams = rlmb_base() + hparams.game = "pong" + hparams.epochs = 2 + hparams.num_real_env_frames = 3200 + hparams.model_train_steps = 100 + hparams.ppo_epochs_num = 2 + return hparams + + +@registry.register_hparams +def rlmb_noresize(): + hparams = rlmb_base() + hparams.resize_height_factor = 1 + hparams.resize_width_factor = 1 + return hparams + + +@registry.register_hparams +def rlmb_ppo_quick(): + """Base setting but quicker with only 2 epochs.""" + hparams = rlmb_ppo_base() + hparams.epochs = 2 + hparams.model_train_steps = 25000 + hparams.ppo_epochs_num = 700 + hparams.ppo_epoch_length = 50 + return hparams + + +@registry.register_hparams +def rlmb_quick(): + """Base setting but quicker with only 2 epochs.""" + return rlmb_ppo_quick() + + +@registry.register_hparams +def rlmb_ppo_quick_param_sharing(): + """HParams for PPO quick with parameter sharing.""" + hparams = rlmb_ppo_quick() + hparams.wm_policy_param_sharing = True + hparams.base_algo_params = "ppo_original_world_model" + return hparams + + +@registry.register_hparams +def rlmb_quick_noresize(): + hparams = rlmb_base() + hparams.resize_height_factor = 1 + hparams.resize_width_factor = 1 + return hparams + + +@registry.register_hparams +def rlmb_quick_sd(): + """Quick setting with stochastic discrete model.""" + hparams = rlmb_quick() + hparams.generative_model = "next_frame_basic_stochastic_discrete" + hparams.generative_model_params = "next_frame_basic_stochastic_discrete" + return hparams + + +@registry.register_hparams +def rlmb_sdtest(): + """Test setting with stochastic discrete model.""" + hparams = rlmb_basetest() + hparams.generative_model = "next_frame_basic_stochastic_discrete" + hparams.generative_model_params = "next_frame_basic_stochastic_discrete" + return hparams + + +@registry.register_hparams +def rlmb_quick_sm(): + """Quick setting with sampling.""" + hparams = rlmb_quick() + hparams.generative_model_params = "next_frame_sampling" + return hparams + + +@registry.register_hparams +def rlmb_base_stochastic(): + """Base setting with a stochastic next-frame model.""" + hparams = rlmb_base() + hparams.initial_epoch_train_steps_multiplier = 5 + 
hparams.generative_model = "next_frame_basic_stochastic"
+  hparams.generative_model_params = "next_frame_basic_stochastic"
+  return hparams
+
+
+@registry.register_hparams
+def rlmb_base_sampling_stochastic():
+  """Base setting with a stochastic next-frame model."""
+  hparams = rlmb_base()
+  hparams.generative_model = "next_frame_basic_stochastic"
+  hparams.generative_model_params = "next_frame_sampling_stochastic"
+  return hparams
+
+
+@registry.register_hparams
+def rlmb_base_stochastic_discrete():
+  """Base setting with stochastic discrete model."""
+  hparams = rlmb_base()
+  hparams.learning_rate_bump = 1.0
+  hparams.grayscale = False
+  hparams.generative_model = "next_frame_basic_stochastic_discrete"
+  hparams.generative_model_params = "next_frame_basic_stochastic_discrete"
+  # The parameters below are the same as base, but repeated for easier reading.
+  hparams.ppo_epoch_length = 50
+  hparams.simulated_rollout_length = 50
+  hparams.simulated_batch_size = 16
+  return hparams
+
+
+@registry.register_hparams
+def rlmb_base_stochastic_discrete_sticky_actions():
+  """Base setting, stochastic discrete model with sticky action environment."""
+  hparams = rlmb_base_stochastic_discrete()
+  hparams.sticky_actions = True
+  return hparams
+
+
+@registry.register_hparams
+def rlmb_base_stochastic_discrete_20k():
+  """Base setting with stochastic discrete model with 20k steps."""
+  hparams = rlmb_base_stochastic_discrete()
+  # Our num_real_env_frames should be divisible by real_ppo_epoch_length*epochs
+  # Here we decrease epochs to 6 and make this number 16*200*6.
+  hparams.num_real_env_frames = 19200
+  hparams.epochs = 6
+  hparams.ppo_epochs_num = 2000  # Increase PPO steps as we have fewer epochs.
+  return hparams
+
+
+@registry.register_hparams
+def rlmb_base_stochastic_discrete_50k():
+  """Base setting with stochastic discrete model with 50k steps."""
+  hparams = rlmb_base_stochastic_discrete()
+  hparams.num_real_env_frames = 48000
+  return hparams
+
+
+@registry.register_hparams
+def rlmb_base_stochastic_discrete_75k_model_steps():
+  """Base setting with stochastic discrete model with 75k WM steps."""
+  hparams = rlmb_base_stochastic_discrete()
+  hparams.model_train_steps = 15000 * 5
+  return hparams
+
+
+@registry.register_hparams
+def rlmb_base_stochastic_discrete_20k_model_steps():
+  """Base SD setting with 20k WM steps."""
+  hparams = rlmb_base_stochastic_discrete()
+  hparams.model_train_steps = 20000
+  return hparams
+
+
+@registry.register_hparams
+def rlmb_base_stochastic_discrete_30k_model_steps():
+  """Base SD setting with 30k WM steps."""
+  hparams = rlmb_base_stochastic_discrete()
+  hparams.model_train_steps = 30000
+  return hparams
+
+
+@registry.register_hparams
+def rlmb_base_stochastic_discrete_200k():
+  """Base setting with stochastic discrete model with 200k steps."""
+  hparams = rlmb_base_stochastic_discrete()
+  hparams.num_real_env_frames = 96000 * 2
+  return hparams
+
+
+@registry.register_hparams
+def rlmb_base_stochastic_discrete_500k():
+  """Base setting with stochastic discrete model with 500k steps."""
+  hparams = rlmb_base_stochastic_discrete()
+  hparams.num_real_env_frames = 96000 * 5
+  return hparams
+
+
+@registry.register_hparams
+def rlmb_base_stochastic_discrete_1m():
+  """Base setting with stochastic discrete model with 1M steps."""
+  hparams = rlmb_base_stochastic_discrete()
+  hparams.num_real_env_frames = 96000 * 10
+  return hparams
+
+
+@registry.register_hparams
+def rlmb_base_stochastic_discrete_param_sharing():
+  """Base setting with stochastic discrete 
model with parameter sharing.""" + hparams = rlmb_base_stochastic_discrete() + hparams.wm_policy_param_sharing = True + hparams.base_algo_params = "ppo_original_world_model_stochastic_discrete" + return hparams + + +@registry.register_hparams +def rlmb_long(): + """Long setting with base model.""" + hparams = rlmb_base() + hparams.generative_model_params = "next_frame_pixel_noise_long" + return hparams + + +@registry.register_hparams +def rlmb_long_stochastic_discrete(): + """Long setting with stochastic discrete model.""" + hparams = rlmb_base_stochastic_discrete() + hparams.generative_model_params = "next_frame_basic_stochastic_discrete_long" + hparams.ppo_epochs_num = 1000 + return hparams + + +@registry.register_hparams +def rlmb_long_stochastic_discrete_planner(): + hparams = rlmb_long_stochastic_discrete() + hparams.eval_batch_size = 1 + hparams.eval_sampling_temps = [3.0] + hparams.eval_max_num_noops = 0 + return hparams + + +@registry.register_hparams +def rlmb_long_stochastic_discrete_simulation_deterministic_starts(): + """Long setting with stochastic discrete model & deterministic sim starts.""" + hparams = rlmb_base_stochastic_discrete() + hparams.generative_model_params = "next_frame_basic_stochastic_discrete_long" + hparams.ppo_epochs_num = 1000 + hparams.simulation_random_starts = False + return hparams + + +@registry.register_hparams +def rlmb_long_stochastic_discrete_100steps(): + """Long setting with stochastic discrete model, changed ppo steps.""" + hparams = rlmb_long_stochastic_discrete() + hparams.ppo_epoch_length = 100 + hparams.simulated_rollout_length = 100 + hparams.simulated_batch_size = 8 + return hparams + + +@registry.register_hparams +def rlmb_long_stochastic_discrete_25steps(): + """Long setting with stochastic discrete model, changed ppo steps.""" + hparams = rlmb_long_stochastic_discrete() + hparams.ppo_epoch_length = 25 + hparams.simulated_rollout_length = 25 + hparams.simulated_batch_size = 32 + return hparams + + +@registry.register_hparams +def rlmb_long_stochastic_discrete_gamma95(): + """Long setting with stochastic discrete model, changed gamma.""" + hparams = rlmb_long_stochastic_discrete() + hparams.base_algo_params = "ppo_original_params_gamma95" + return hparams + + +@registry.register_hparams +def rlmb_long_stochastic_discrete_gamma90(): + """Long setting with stochastic discrete model, changed gamma.""" + hparams = rlmb_long_stochastic_discrete() + hparams.base_algo_params = "ppo_original_params_gamma90" + return hparams + + +@registry.register_hparams +def rlmb_base_stochastic_discrete_3epochs(): + """Long setting with stochastic discrete model, changed epochs.""" + hparams = rlmb_base_stochastic_discrete() + hparams.epochs = 3 + hparams.ppo_epochs_num = 2000 + return hparams + + +@registry.register_hparams +def rlmb_base_stochastic_discrete_1epoch(): + """Long setting with stochastic discrete model, changed epochs.""" + hparams = rlmb_base_stochastic_discrete() + hparams.epochs = 1 + hparams.ppo_epochs_num = 3000 + return hparams + + +@registry.register_hparams +def rlmb_base_recurrent(): + """Base setting with recurrent model.""" + hparams = rlmb_base() + hparams.generative_model = "next_frame_basic_recurrent" + hparams.generative_model_params = "next_frame_basic_recurrent" + return hparams + + +@registry.register_hparams +def rlmb_base_stochastic_discrete_noresize(): + """Base setting with stochastic discrete model.""" + hparams = rlmb_base() + hparams.generative_model = "next_frame_basic_stochastic_discrete" + 
hparams.generative_model_params = "next_frame_basic_stochastic_discrete" + hparams.resize_height_factor = 1 + hparams.resize_width_factor = 1 + return hparams + + +@registry.register_hparams +def rlmb_base_sv2p(): + """Base setting with sv2p as world model.""" + hparams = rlmb_base() + hparams.learning_rate_bump = 1.0 + hparams.generative_model = "next_frame_sv2p" + hparams.generative_model_params = "next_frame_sv2p_atari" + return hparams + + +@registry.register_hparams +def rlmb_base_sv2p_softmax(): + """Base setting with sv2p as world model with softmax.""" + hparams = rlmb_base_sv2p() + hparams.generative_model_params = "next_frame_sv2p_atari_softmax" + return hparams + + +@registry.register_hparams +def rlmb_base_sv2p_deterministic(): + """Base setting with deterministic sv2p as world model.""" + hparams = rlmb_base_sv2p() + hparams.generative_model_params = "next_frame_sv2p_atari_deterministic" + return hparams + + +@registry.register_hparams +def rlmb_base_sv2p_deterministic_softmax(): + """Base setting with deterministic sv2p as world model with softmax.""" + hparams = rlmb_base_sv2p_softmax() + hparams.generative_model_params = ( + "next_frame_sv2p_atari_softmax_deterministic") + return hparams + + +@registry.register_hparams +def rlmb_base_sampling(): + """Base setting with a stochastic next-frame model.""" + hparams = rlmb_base() + hparams.generative_model_params = "next_frame_sampling" + return hparams + + +@registry.register_hparams +def rlmb_base_sampling_noresize(): + hparams = rlmb_base_sampling() + hparams.resize_height_factor = 1 + hparams.resize_width_factor = 1 + return hparams + + +def _rlmb_tiny_overrides(): + """Parameters to override for tiny setting excluding agent-related hparams.""" + return dict( + epochs=1, + num_real_env_frames=128, + model_train_steps=2, + max_num_noops=1, + eval_max_num_noops=1, + generative_model_params="next_frame_tiny", + stop_loop_early=True, + resize_height_factor=2, + resize_width_factor=2, + wm_eval_rollout_ratios=[1], + rl_env_max_episode_steps=7, + eval_rl_env_max_episode_steps=7, + simulated_rollout_length=2, + eval_sampling_temps=[0.0, 1.0], + ) + + +@registry.register_hparams +def rlmb_ppo_tiny(): + """Tiny set for testing.""" + hparams = rlmb_ppo_base() + hparams = hparams.override_from_dict(_rlmb_tiny_overrides()) + update_hparams(hparams, dict( + ppo_epochs_num=2, + ppo_epoch_length=10, + real_ppo_epoch_length=36, + real_ppo_effective_num_agents=2, + real_batch_size=1, + eval_batch_size=1, + )) + return hparams + + +@registry.register_hparams +def rlmb_tiny(): + return rlmb_ppo_tiny() + + +@registry.register_hparams +def rlmb_dqn_tiny(): + """Tiny set for testing.""" + hparams = rlmb_dqn_base() + hparams = hparams.override_from_dict(_rlmb_tiny_overrides()) + update_hparams(hparams, dict( + base_algo_params="dqn_guess1_params", + simulated_rollout_length=2, + dqn_time_limit=2, + dqn_num_frames=128, + real_dqn_replay_buffer_replay_capacity=100, + dqn_replay_buffer_replay_capacity=100, + real_dqn_agent_min_replay_history=10, + dqn_agent_min_replay_history=10, + )) + return hparams + + +@registry.register_hparams +def rlmb_tiny_stochastic(): + """Tiny setting with a stochastic next-frame model.""" + hparams = rlmb_ppo_tiny() + hparams.epochs = 1 # Too slow with 2 for regular runs. 
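+  # Swap in the stochastic world model (the tiny default is deterministic).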
+ hparams.generative_model = "next_frame_basic_stochastic" + hparams.generative_model_params = "next_frame_basic_stochastic" + return hparams + + +@registry.register_hparams +def rlmb_tiny_recurrent(): + """Tiny setting with a recurrent next-frame model.""" + hparams = rlmb_ppo_tiny() + hparams.epochs = 1 # Too slow with 2 for regular runs. + hparams.generative_model = "next_frame_basic_recurrent" + hparams.generative_model_params = "next_frame_basic_recurrent" + return hparams + + +@registry.register_hparams +def rlmb_tiny_sv2p(): + """Tiny setting with a tiny sv2p model.""" + hparams = rlmb_ppo_tiny() + hparams.generative_model = "next_frame_sv2p" + hparams.generative_model_params = "next_frame_sv2p_tiny" + hparams.grayscale = False + return hparams + + +@registry.register_hparams +def rlmb_tiny_simulation_deterministic_starts(): + hp = rlmb_tiny() + hp.simulation_random_starts = False + return hp + + +# RangedHParams for tuning +# ============================================================================== +# Note that the items here must be scoped with one of +# HP_SCOPES={loop, model, ppo}, which set hyperparameters for the top-level +# hparams, hp.generative_model_params, and hp.ppo_params, respectively. +@registry.register_ranged_hparams +def rlmb_grid(rhp): + """Grid over games and frames, and 5 runs each for variance.""" + rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"]) + base = 100000 + medium = base // 2 + small = medium // 2 + rhp.set_discrete("loop.num_real_env_frames", [base, medium, small]) + + # Dummy parameter to get 5 runs for each configuration + rhp.set_discrete("model.moe_loss_coef", list(range(5))) + + +@registry.register_ranged_hparams +def rlmb_variance(rhp): + # Dummy parameter to get 5 runs for each configuration + rhp.set_discrete("model.moe_loss_coef", list(range(5))) + rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"]) + + +@registry.register_ranged_hparams +def rlmb_variance_nogame(rhp): + # Dummy parameter to get 20 runs for current configuration. 
+ rhp.set_discrete("model.moe_loss_coef", list(range(20))) + + +@registry.register_ranged_hparams +def rlmb_three(rhp): + rhp.set_discrete("model.moe_loss_coef", list(range(10))) + rhp.set_categorical("loop.game", ["breakout", "pong", "boxing"]) + + +@registry.register_ranged_hparams +def rlmb_test1(rhp): + rhp.set_discrete("model.moe_loss_coef", list(range(10))) + rhp.set_categorical("loop.game", ["breakout", "pong", "boxing"]) + rhp.set_discrete("loop.ppo_learning_rate_constant", [5e-5, 1e-4, 2e-4]) + rhp.set_discrete("ppo.optimization_batch_size", [20, 40]) + rhp.set_discrete("loop.epochs", [3, 6]) + + +@registry.register_ranged_hparams +def rlmb_scheduled_sampling(rhp): + rhp.set_float("model.scheduled_sampling_prob", 0.0, 1.0) + + +@registry.register_ranged_hparams +def rlmb_all_games(rhp): + rhp.set_discrete("model.moe_loss_coef", list(range(5))) + rhp.set_categorical("loop.game", gym_env.ATARI_GAMES) + + +@registry.register_ranged_hparams +def rlmb_whitelisted_games(rhp): + rhp.set_discrete("model.moe_loss_coef", list(range(10))) + rhp.set_categorical("loop.game", gym_env.ATARI_WHITELIST_GAMES) + + +@registry.register_ranged_hparams +def rlmb_human_score_games(rhp): + rhp.set_categorical("loop.game", + gym_env.ATARI_GAMES_WITH_HUMAN_SCORE_NICE) + rhp.set_discrete("model.moe_loss_coef", list(range(5))) + + +@registry.register_ranged_hparams +def rlmb_human_score_games_v100unfriendly(rhp): + """Games that for strange reasons often fail on v100s but work on p100s.""" + rhp.set_categorical("loop.game", + ["chopper_command", "boxing", "asterix", "seaquest"]) + rhp.set_discrete("model.moe_loss_coef", list(range(5))) + + +@registry.register_ranged_hparams +def rlmb_curious_games10(rhp): + rhp.set_discrete("model.moe_loss_coef", list(range(10))) + rhp.set_categorical("loop.game", gym_env.ATARI_CURIOUS_GAMES) + + +@registry.register_ranged_hparams +def rlmb_curious_games5(rhp): + rhp.set_discrete("model.moe_loss_coef", list(range(5))) + rhp.set_categorical("loop.game", gym_env.ATARI_CURIOUS_GAMES) + + +@registry.register_ranged_hparams +def rlmb_debug_games(rhp): + rhp.set_discrete("model.moe_loss_coef", list(range(10))) + rhp.set_categorical("loop.game", gym_env.ATARI_DEBUG_GAMES) + + +@registry.register_ranged_hparams +def rlmb_ae_variance(rhp): + # Dummy parameter to get 5 runs for each configuration + rhp.set_discrete("model.moe_loss_coef", list(range(5))) + rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"]) + base = 100000 + small = base // 4 + rhp.set_discrete("loop.num_real_env_frames", [base, small]) + + +@registry.register_ranged_hparams +def rlmb_ppolr_game(rhp): + rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"]) + base_lr = 1e-4 + rhp.set_float("loop.ppo_learning_rate_constant", base_lr / 2, base_lr * 2) + + +@registry.register_ranged_hparams +def rlmb_ppolr(rhp): + base_lr = 1e-4 + rhp.set_float("loop.ppo_learning_rate_constant", base_lr / 2, base_lr * 2) + + +@registry.register_ranged_hparams +def rlmb_ae_ppo_lr(rhp): + rhp.set_categorical("loop.game", ["breakout", "pong", "freeway"]) + base_lr = 1e-4 + rhp.set_float("loop.ppo_learning_rate_constant", base_lr / 2, base_lr * 2) + + +@registry.register_ranged_hparams +def rlmb_dropout_range(rhp): + rhp.set_float("model.dropout", 0.2, 0.4) + + +@registry.register_ranged_hparams +def rlmb_intrinsic_reward_scale(rhp): + rhp.set_float("loop.intrinsic_reward_scale", 0.01, 10.) 
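+
+
+# A minimal sketch of a custom search space (not one of the original sets),
+# illustrating the HP_SCOPES convention: "loop." keys override the top-level
+# loop hparams, "model." keys the generative-model hparams and "ppo." keys the
+# PPO hparams. The particular ranges below are illustrative only.
+@registry.register_ranged_hparams
+def rlmb_example_scoped_tuning(rhp):
+  """Example search space touching all three hparams scopes."""
+  rhp.set_categorical("loop.game", ["pong"])
+  rhp.set_discrete("loop.num_real_env_frames", [48000, 96000])
+  rhp.set_float("model.dropout", 0.1, 0.3)
+  rhp.set_discrete("ppo.optimization_batch_size", [10, 20])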
+ + +@registry.register_ranged_hparams +def rlmb_l1l2cutoff_range(rhp): + """Loss and loss-cutoff tuning grid.""" + rhp.set_float("model.video_modality_loss_cutoff", 1.4, 3.4) + + +@registry.register_ranged_hparams +def rlmb_xentcutoff_range(rhp): + """Cross entropy cutoff tuning grid.""" + rhp.set_float("model.video_modality_loss_cutoff", 0.01, 0.05) + + +@registry.register_ranged_hparams +def rlmb_pixel_noise(rhp): + """Input pixel noise tuning grid.""" + rhp.set_categorical("loop.generative_model_params", + ["next_frame_pixel_noise"]) + rhp.set_discrete("model.video_modality_input_noise", + [0.0025 * i for i in range(200)]) + + +@registry.register_ranged_hparams +def rlmb_dummy_range(rhp): + """Dummy tuning grid just to get the variance.""" + rhp.set_float("model.moe_loss_coef", 0.01, 0.02) + + +@registry.register_ranged_hparams +def rlmb_epochs_num(rhp): + rhp.set_categorical("loop.game", gym_env.ATARI_WHITELIST_GAMES) + rhp.set_discrete("model.moe_loss_coef", list(range(5))) + rhp.set_discrete("loop.epochs", [3, 6, 12]) + + +@registry.register_ranged_hparams +def rlmb_ppo_epochs_num(rhp): + rhp.set_categorical("loop.game", gym_env.ATARI_WHITELIST_GAMES) + rhp.set_discrete("model.moe_loss_coef", list(range(5))) + rhp.set_discrete("loop.ppo_epochs_num", [200, 1000, 2000, 4000]) + + +@registry.register_ranged_hparams +def rlmb_ppo_epoch_len(rhp): + rhp.set_categorical("loop.game", gym_env.ATARI_WHITELIST_GAMES) + rhp.set_discrete("model.moe_loss_coef", list(range(5))) + rhp.set_discrete("loop.ppo_epoch_length", [25, 50, 100]) + + +@registry.register_ranged_hparams +def rlmb_num_frames(rhp): + rhp.set_categorical("loop.game", gym_env.ATARI_WHITELIST_GAMES) + rhp.set_discrete("model.moe_loss_coef", list(range(5))) + rhp.set_discrete("loop.num_real_env_frames", + [1000*el for el in [30, 100, 500, 1000]]) + + +@registry.register_ranged_hparams +def rlmb_ppo_optimization_batch_size(rhp): + rhp.set_categorical("loop.game", ["pong", "boxing", "seaquest"]) + rhp.set_discrete("model.moe_loss_coef", list(range(10))) + rhp.set_discrete("ppo.optimization_batch_size", [4, 10, 20]) + + +@registry.register_ranged_hparams +def rlmb_logits_clip(rhp): + rhp.set_categorical("loop.game", ["pong", "boxing", "seaquest"]) + rhp.set_discrete("model.moe_loss_coef", list(range(10))) + rhp.set_discrete("ppo.logits_clip", [0., 5.]) + + +@registry.register_ranged_hparams +def rlmb_games_problematic_for_ppo(rhp): + games = [ + "alien", "boxing", "breakout", "ms_pacman", "video_pinball", + ] + rhp.set_categorical("loop.game", games) + rhp.set_categorical("loop.base_algo_params", ["ppo_original_params"]) + rhp.set_discrete("model.moe_loss_coef", list(range(10))) + rhp.set_discrete("ppo.logits_clip", [0., 4.0]) + + +@registry.register_ranged_hparams +def rlmf_proportional_epoch_length(rhp): + rhp.set_discrete("proportional_epoch_length", [10, 20, 50, 100, 200, 400]) + rhp.set_categorical("loop.game", gym_env.ATARI_GAMES_WITH_HUMAN_SCORE) + + +def merge_unscoped_hparams(scopes_and_hparams): + """Merge multiple HParams into one with scopes.""" + merged_values = {} + for (scope, hparams) in scopes_and_hparams: + for key, value in six.iteritems(hparams.values()): + scoped_key = "%s.%s" % (scope, key) + merged_values[scoped_key] = value + + return hparam.HParams(**merged_values) + + +def split_scoped_hparams(scopes, merged_hparams): + """Split single HParams with scoped keys into multiple.""" + split_values = {scope: {} for scope in scopes} + merged_values = merged_hparams.values() + for scoped_key, value in 
six.iteritems(merged_values): + scope = scoped_key.split(".")[0] + key = scoped_key[len(scope) + 1:] + split_values[scope][key] = value + + return [ + hparam.HParams(**split_values[scope]) for scope in scopes + ] + + +def training_loop_hparams_from_scoped_overrides(scoped_overrides, trial_id): + """Create HParams suitable for training loop from scoped HParams. + + Args: + scoped_overrides: HParams, with keys all scoped by one of HP_SCOPES. These + parameters are overrides for the base HParams created by + create_loop_hparams. + trial_id: str, trial identifier. This is used to register unique HParams + names for the underlying model and ppo HParams. + + Returns: + HParams suitable for passing to training_loop. + """ + trial_hp_overrides = scoped_overrides.values() + + # Create loop, model, and ppo base HParams + loop_hp = create_loop_hparams() + model_hp_name = trial_hp_overrides.get( + "loop.generative_model_params", loop_hp.generative_model_params) + model_hp = registry.hparams(model_hp_name).parse(FLAGS.hparams) + base_algo_params_name = trial_hp_overrides.get( + "loop.base_algo_params", loop_hp.base_algo_params) + algo_hp = registry.hparams(base_algo_params_name) + + # Merge them and then override with the scoped overrides + combined_hp = merge_unscoped_hparams( + zip(HP_SCOPES, [loop_hp, model_hp, algo_hp])) + combined_hp.override_from_dict(trial_hp_overrides) + + # Split out the component hparams + loop_hp, model_hp, algo_hp = ( + split_scoped_hparams(HP_SCOPES, combined_hp)) + + # Dynamic register the model hp and set the new name in loop_hp + model_hp_name = "model_hp_%s" % str(trial_id) + dynamic_register_hparams(model_hp_name, model_hp) + loop_hp.generative_model_params = model_hp_name + + # Dynamic register the algo hp and set the new name in loop_hp + algo_hp_name = "algo_hp_%s" % str(trial_id) + dynamic_register_hparams(algo_hp_name, algo_hp) + loop_hp.base_algo_params = algo_hp_name + + return loop_hp + + +def dynamic_register_hparams(name, hparams): + + @registry.register_hparams(name) + def new_hparams_set(): + return hparam.HParams(**hparams.values()) + + return new_hparams_set + + +def create_loop_hparams(): + hparams = registry.hparams(FLAGS.loop_hparams_set) + hparams.parse(FLAGS.loop_hparams) + return hparams diff --git a/tensor2tensor/rl/trainer_model_based_recurrent_test.py b/tensor2tensor/rl/trainer_model_based_recurrent_test.py new file mode 100644 index 000000000..74fc24220 --- /dev/null +++ b/tensor2tensor/rl/trainer_model_based_recurrent_test.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tiny run of trainer_model_based. 
Smoke test.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.rl import trainer_model_based + +import tensorflow.compat.v1 as tf + +FLAGS = tf.flags.FLAGS + + +class ModelRLExperimentRecurrentTest(tf.test.TestCase): + + def test_basic_recurrent(self): + FLAGS.output_dir = tf.test.get_temp_dir() + FLAGS.loop_hparams_set = "rlmb_tiny_recurrent" + FLAGS.schedule = "train" # skip evaluation for world model training + trainer_model_based.main(None) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/rl/trainer_model_based_stochastic_test.py b/tensor2tensor/rl/trainer_model_based_stochastic_test.py new file mode 100644 index 000000000..0e2c1fe31 --- /dev/null +++ b/tensor2tensor/rl/trainer_model_based_stochastic_test.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tiny run of trainer_model_based with stochastic model. Smoke test.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.rl import trainer_model_based + +import tensorflow.compat.v1 as tf + +FLAGS = tf.flags.FLAGS + + +class ModelRLExperimentStochasticTest(tf.test.TestCase): + + def test_basic_stochastic(self): + FLAGS.output_dir = tf.test.get_temp_dir() + FLAGS.loop_hparams_set = "rlmb_tiny_stochastic" + FLAGS.schedule = "train" # skip evaluation for world model training + trainer_model_based.main(None) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/rl/trainer_model_based_sv2p_test.py b/tensor2tensor/rl/trainer_model_based_sv2p_test.py new file mode 100644 index 000000000..420331a4f --- /dev/null +++ b/tensor2tensor/rl/trainer_model_based_sv2p_test.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tiny run of trainer_model_based with stochastic model. 
Smoke test.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.rl import trainer_model_based + +import tensorflow.compat.v1 as tf + +FLAGS = tf.flags.FLAGS + + +class ModelRLExperimentSv2pTest(tf.test.TestCase): + + def test_sv2p(self): + FLAGS.output_dir = tf.test.get_temp_dir() + FLAGS.loop_hparams_set = "rlmb_tiny_sv2p" + trainer_model_based.main(None) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/rl/trainer_model_based_test.py b/tensor2tensor/rl/trainer_model_based_test.py new file mode 100644 index 000000000..20fecc190 --- /dev/null +++ b/tensor2tensor/rl/trainer_model_based_test.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tiny run of trainer_model_based. Smoke test.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.rl import trainer_model_based + +import tensorflow.compat.v1 as tf + +FLAGS = tf.flags.FLAGS + + +class ModelRLExperimentTest(tf.test.TestCase): + + def _test_hparams_skip_evaluation(self, hparams_set): + FLAGS.output_dir = tf.test.get_temp_dir() + FLAGS.loop_hparams_set = hparams_set + FLAGS.schedule = "train" # skip evaluation for world model training + trainer_model_based.main(None) + + def test_basic(self): + self._test_hparams_skip_evaluation("rlmb_tiny") + + # TODO(kozak): enable when it works. + # def test_dqn_basic(self): + # self._test_hparams_skip_evaluation("rlmb_dqn_tiny") + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/rl/trainer_model_free.py b/tensor2tensor/rl/trainer_model_free.py new file mode 100644 index 000000000..543ac0655 --- /dev/null +++ b/tensor2tensor/rl/trainer_model_free.py @@ -0,0 +1,168 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Training of RL agent with PPO algorithm. 
+ +Example invocation: + +python -m tensor2tensor.rl.trainer_model_free \ + --output_dir=$HOME/t2t/rl_v1 \ + --hparams_set=pong_model_free \ + --hparams='batch_size=15' + +Example invocation with EnvProblem interface: + +python -m tensor2tensor.rl.trainer_model_free \ + --env_problem_name=tic_tac_toe_env_problem \ + --hparams_set=rlmf_tictactoe \ + --output_dir=${OUTPUTDIR} \ + --log_dir=${LOGDIR} \ + --alsologtostderr \ + --vmodule=*/tensor2tensor/*=2 \ +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import pprint + +from tensor2tensor.models.research import rl +from tensor2tensor.rl import rl_utils +from tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import +from tensor2tensor.utils import misc_utils +from tensor2tensor.utils import registry +from tensor2tensor.utils import trainer_lib + +import tensorflow.compat.v1 as tf + + +flags = tf.flags +FLAGS = flags.FLAGS + + +flags.DEFINE_string("env_problem_name", "", + "Which registered env_problem do we want?") + +# To maintain compatibility with some internal libs, we guard against these flag +# definitions possibly erring. Apologies for the ugliness. +try: + flags.DEFINE_string("output_dir", "", "Base output directory for run.") +except: # pylint: disable=bare-except + pass + + +def initialize_env_specs(hparams, env_problem_name): + """Initializes env_specs using the appropriate env.""" + if env_problem_name: + env = registry.env_problem(env_problem_name, batch_size=hparams.batch_size) + else: + env = rl_utils.setup_env(hparams, hparams.batch_size, + hparams.eval_max_num_noops, + hparams.rl_env_max_episode_steps, + env_name=hparams.rl_env_name) + env.start_new_epoch(0) + + return rl.make_real_env_fn(env) + + +step = 0 + + +def train(hparams, output_dir, env_problem_name, report_fn=None): + """Train.""" + env_fn = initialize_env_specs(hparams, env_problem_name) + + tf.logging.vlog(1, "HParams in trainer_model_free.train : %s", + misc_utils.pprint_hparams(hparams)) + tf.logging.vlog(1, "Using hparams.base_algo: %s", hparams.base_algo) + learner = rl_utils.LEARNERS[hparams.base_algo]( + hparams.frame_stack_size, output_dir, output_dir, total_num_epochs=1, + distributional_size=hparams.get("distributional_size", 1), + distributional_subscale=hparams.get("distributional_subscale", 0.04), + distributional_threshold=hparams.get("distributional_threshold", 0.0), + ) + + policy_hparams = trainer_lib.create_hparams(hparams.base_algo_params) + rl_utils.update_hparams_from_hparams( + policy_hparams, hparams, hparams.base_algo + "_" + ) + + tf.logging.vlog(1, "Policy HParams : %s", + misc_utils.pprint_hparams(policy_hparams)) + + # TODO(konradczechowski): remove base_algo dependance, when evaluation method + # will be decided + if hparams.base_algo == "ppo": + total_steps = policy_hparams.epochs_num + tf.logging.vlog(2, "total_steps: %d", total_steps) + + eval_every_epochs = policy_hparams.eval_every_epochs + tf.logging.vlog(2, "eval_every_epochs: %d", eval_every_epochs) + + if eval_every_epochs == 0: + eval_every_epochs = total_steps + policy_hparams.eval_every_epochs = 0 + + metric_name = rl_utils.get_metric_name( + sampling_temp=hparams.eval_sampling_temps[0], + max_num_noops=hparams.eval_max_num_noops, + clipped=False + ) + + tf.logging.vlog(1, "metric_name: %s", metric_name) + + eval_metrics_dir = os.path.join(output_dir, "eval_metrics") + eval_metrics_dir = os.path.expanduser(eval_metrics_dir) + tf.gfile.MakeDirs(eval_metrics_dir) + 
eval_metrics_writer = tf.summary.FileWriter(eval_metrics_dir) + + def evaluate_on_new_model(model_dir_path): + global step + eval_metrics = rl_utils.evaluate_all_configs(hparams, model_dir_path) + tf.logging.info( + "Agent eval metrics:\n{}".format(pprint.pformat(eval_metrics))) + rl_utils.summarize_metrics(eval_metrics_writer, eval_metrics, step) + if report_fn: + report_fn(eval_metrics[metric_name], step) + step += 1 + + policy_hparams.epochs_num = total_steps + policy_hparams.save_models_every_epochs = eval_every_epochs + else: + def evaluate_on_new_model(model_dir_path): + del model_dir_path + raise NotImplementedError( + "This function is currently implemented only for ppo") + + learner.train(env_fn, + policy_hparams, + simulated=False, + save_continuously=True, + epoch=0, + model_save_fn=evaluate_on_new_model) + + +def main(_): + hparams = trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams) + + tf.logging.info("Starting model free training.") + train(hparams, FLAGS.output_dir, FLAGS.env_problem_name) + tf.logging.info("Ended model free training.") + + +if __name__ == "__main__": + tf.app.run() diff --git a/tensor2tensor/rl/trainer_model_free_test.py b/tensor2tensor/rl/trainer_model_free_test.py new file mode 100644 index 000000000..372bd6497 --- /dev/null +++ b/tensor2tensor/rl/trainer_model_free_test.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests of basic flow of collecting trajectories and training PPO.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.rl import trainer_model_free +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +FLAGS = tf.flags.FLAGS + + +class TrainTest(tf.test.TestCase): + + def _test_hparams_set(self, hparams_set): + hparams = registry.hparams(hparams_set) + FLAGS.output_dir = tf.test.get_temp_dir() + trainer_model_free.train(hparams, FLAGS.output_dir, + env_problem_name=None) + + def test_train_pong(self): + self._test_hparams_set("rlmf_tiny") + + def test_train_pong_dqn(self): + self._test_hparams_set("rlmf_dqn_tiny") + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/rl/trainer_model_free_tictactoe_test.py b/tensor2tensor/rl/trainer_model_free_tictactoe_test.py new file mode 100644 index 000000000..c7f429d46 --- /dev/null +++ b/tensor2tensor/rl/trainer_model_free_tictactoe_test.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests of basic flow of collecting trajectories and training PPO.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.rl import trainer_model_free +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + +FLAGS = tf.flags.FLAGS + + +class TrainerModelFreeTicTacToeTest(tf.test.TestCase): + + def test_train_tictactoe(self): + hparams = registry.hparams("rlmf_tictactoe") + hparams.batch_size = 2 + hparams.eval_sampling_temps = [0.0, 1.0] + hparams.add_hparam("ppo_epochs_num", 2) + hparams.add_hparam("ppo_epoch_length", 3) + + hparams.epochs_num = 100 + hparams.eval_every_epochs = 25 + + FLAGS.output_dir = tf.test.get_temp_dir() + FLAGS.env_problem_name = "tic_tac_toe_env_problem" + trainer_model_free.train(hparams, FLAGS.output_dir, FLAGS.env_problem_name) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/serving/README.md b/tensor2tensor/serving/README.md new file mode 100644 index 000000000..8e3ca3350 --- /dev/null +++ b/tensor2tensor/serving/README.md @@ -0,0 +1,109 @@ +# Serving + +Tensor2Tensor and the TensorFlow ecosystem make it easy to serve a model once +trained. + +## 1. Export for Serving + +First, export it for serving: + +``` +t2t-exporter \ + --model=transformer \ + --hparams_set=transformer_tiny \ + --problem=translate_ende_wmt8k \ + --data_dir=~/t2t/data \ + --output_dir=/tmp/t2t_train +``` + +You should have an export directory in `output_dir` now. + +## 2. Launch a Server + +Install the `tensorflow-model-server` +([instructions](https://www.tensorflow.org/serving/setup#installing_the_modelserver)). + +Start the server pointing at the export: + +``` +tensorflow_model_server \ + --port=9000 \ + --model_name=my_model \ + --model_base_path=/tmp/t2t_train/export/Servo +``` + +## 3. Query the Server + +**Note**: The `t2t-query-server` is meant only as an example. You may need to +modify it to suit your needs. The exported model expects an input +example that is structured identically to what would be found on disk during +training (serialized `tf.train.Example`). For text problems, that means that +it expects the inputs to already be encoded as integers. You can see how the +`t2t-query-server` does this by reading the code. + +Install some dependencies: + +``` +pip install tensorflow-serving-api +``` + +Query: + +``` +t2t-query-server \ + --server=localhost:9000 \ + --servable_name=my_model \ + --problem=translate_ende_wmt8k \ + --data_dir=~/t2t/data +``` + + +## Serve Predictions with Cloud ML Engine + +Alternatively, you can deploy a model on Cloud ML Engine to serve predictions. +To do so, export the model as in Step 1, then do the following: + +[Install gcloud](https://cloud.google.com/sdk/downloads) + +#### Copy exported model to Google Cloud Storage + +``` +ORIGIN= +EXPORTS_PATH=/tmp/t2t_train/export/Servo +LATEST_EXPORT=${EXPORTS_PATH}/$(ls ${EXPORTS_PATH} | tail -1) +gsutil cp -r ${LATEST_EXPORT}/* $ORIGIN +``` + +#### Create a model + +``` +MODEL_NAME=t2t_test +gcloud ml-engine models create $MODEL_NAME +``` + +This step only needs to be performed once. + +#### Create a model version + +``` +VERSION=v0 +gcloud ml-engine versions create $VERSION \ + --model $MODEL_NAME \ + --origin $ORIGIN +``` + +**NOTE:** Due to overhead from VM warmup, prediction requests may timeout. 
To +mitigate this issue, provide a [YAML configuration +file](https://cloud.google.com/sdk/gcloud/reference/ml-engine/versions/create) +via the `--config flag`, with `minNodes > 0`. These nodes are always on, and +will be billed accordingly. + +#### Query Cloud ML Engine + +``` +t2t-query-server \ + --cloud_mlengine_model_name $MODEL_NAME \ + --cloud_mlengine_model_version $VERSION \ + --problem translate_ende_wmt8k \ + --data_dir ~/t2t/data +``` diff --git a/tensor2tensor/serving/__init__.py b/tensor2tensor/serving/__init__.py new file mode 100644 index 000000000..ff174dd63 --- /dev/null +++ b/tensor2tensor/serving/__init__.py @@ -0,0 +1,15 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + diff --git a/tensor2tensor/serving/export.py b/tensor2tensor/serving/export.py new file mode 100644 index 000000000..49e8dfb5b --- /dev/null +++ b/tensor2tensor/serving/export.py @@ -0,0 +1,223 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Export a trained model for serving.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from tensor2tensor.bin import t2t_trainer +from tensor2tensor.utils import decoding +from tensor2tensor.utils import t2t_model +from tensor2tensor.utils import trainer_lib +from tensor2tensor.utils import usr_dir + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator +import tensorflow_hub as hub + +FLAGS = tf.flags.FLAGS + +tf.flags.DEFINE_bool("export_as_tfhub", False, + "If True, the model will be exported as tfHub module.") + +tf.flags.DEFINE_string( + "export_dir", None, "Directory, where export model should be stored." + "If None, the model will be stored in subdirectory " + "where checkpoints are: --output_dir") + +tf.flags.DEFINE_string( + "checkpoint_path", None, "Which checkpoint to export." + "If None, we will use the latest checkpoint stored in the directory " + "specified by --output_dir") + +tf.flags.DEFINE_bool( + "as_text", True, + "Whether to write the SavedModel proto in text format. Defaults to `False`." 
+) + + +def _get_hparams_path(): + """Get hyper-parameters file path.""" + hparams_path = None + if FLAGS.output_dir: + hparams_path = os.path.join(FLAGS.output_dir, "hparams.json") + elif FLAGS.checkpoint_path: # Infer hparams.json from checkpoint path + hparams_path = os.path.join( + os.path.dirname(FLAGS.checkpoint_path), "hparams.json") + + # Check if hparams_path really exists + if hparams_path: + if tf.gfile.Exists(hparams_path): + tf.logging.info("hparams file %s exists", hparams_path) + else: + tf.logging.info("hparams file %s does not exist", hparams_path) + hparams_path = None + + # Can't find hparams_path + if not hparams_path: + tf.logging.warning( + "--output_dir not specified or file hparams.json does not exists. " + "Hyper-parameters will be infered from --hparams_set and " + "--hparams only. These may not match training time hyper-parameters.") + + return hparams_path + + +def create_estimator(run_config, hparams): + return trainer_lib.create_estimator( + FLAGS.model, + hparams, + run_config, + decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams), + use_tpu=FLAGS.use_tpu, + export_saved_model_api_version=FLAGS.export_saved_model_api_version, + use_guarantee_const_getter=FLAGS.use_guarantee_const_getter) + + +def create_hparams(): + """Create hyper-parameters object.""" + return trainer_lib.create_hparams( + FLAGS.hparams_set, + FLAGS.hparams, + data_dir=os.path.expanduser(FLAGS.data_dir), + problem_name=FLAGS.problem, + hparams_path=_get_hparams_path()) + + +# TODO(michalski): Move this method into tfhub utils. +def export_module_spec_with_checkpoint(module_spec, + checkpoint_path, + export_path, + scope_prefix=""): + """Exports given checkpoint as tfhub module with given spec.""" + + # The main requirement is that it is possible to know how to map from + # module variable name to checkpoint variable name. + # This is trivial if the original code used variable scopes, + # but can be messy if the variables to export are interwined + # with variables not export. + with tf.Graph().as_default(): + m = hub.Module(module_spec) + assign_map = { + scope_prefix + name: value for name, value in m.variable_map.items() + } + tf.train.init_from_checkpoint(checkpoint_path, assign_map) + init_op = tf.initializers.global_variables() + with tf.Session() as session: + session.run(init_op) + m.export(export_path, session) + + +def export_as_tfhub_module(model_name, + hparams, + decode_hparams, + problem, + checkpoint_path, + export_dir): + """Exports the last checkpoint from the directory as tfhub module. + + It creates the Module spec and signature (based on T2T problem information), + which is later used to create and export the hub module. + Module will be saved inside the ckpt_dir. + + Args: + model_name: name of the model to be exported. + hparams: T2T parameters, model graph will be based on them. + decode_hparams: T2T parameters for decoding. + problem: the name of the problem + checkpoint_path: path to the checkpoint to be exported. + export_dir: Directory to write the exported model to. + """ + + def hub_module_fn(): + """Creates the TF graph for the hub module.""" + model_fn = t2t_model.T2TModel.make_estimator_model_fn( + model_name, + hparams, + decode_hparams=decode_hparams, + use_tpu=FLAGS.use_tpu) + features = problem.serving_input_fn( + hparams, decode_hparams, use_tpu=FLAGS.use_tpu).features + + # we must do a copy of the features, as the model_fn can add additional + # entries there (like hyperparameter settings etc). 
+ original_features = features.copy() + spec = model_fn(features, labels=None, mode=tf_estimator.ModeKeys.PREDICT) + + hub.add_signature( + inputs=original_features, + outputs=spec.export_outputs["serving_default"].outputs) + + # TFHub doesn't support the following collections. + drop_collections = [tf.GraphKeys.LOSSES, + tf.GraphKeys.SUMMARIES, tf.GraphKeys.LOCAL_VARIABLES] + module_spec = hub.create_module_spec( + hub_module_fn, drop_collections=drop_collections) + # Loads the weights from the checkpoint using the model above + # and saves it in the export_path. + export_module_spec_with_checkpoint( + module_spec, + checkpoint_path=checkpoint_path, + export_path=export_dir, + scope_prefix="") + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + trainer_lib.set_random_seed(FLAGS.random_seed) + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + + if FLAGS.checkpoint_path: + checkpoint_path = FLAGS.checkpoint_path + ckpt_dir = os.path.dirname(checkpoint_path) + else: + ckpt_dir = os.path.expanduser(FLAGS.output_dir) + checkpoint_path = tf.train.latest_checkpoint(ckpt_dir) + + hparams = create_hparams() + hparams.no_data_parallelism = True # To clear the devices + problem = hparams.problem + decode_hparams = decoding.decode_hparams(FLAGS.decode_hparams) + + export_dir = FLAGS.export_dir or os.path.join(ckpt_dir, "export") + + if FLAGS.export_as_tfhub: + checkpoint_path = tf.train.latest_checkpoint(ckpt_dir) + export_as_tfhub_module(FLAGS.model, hparams, decode_hparams, problem, + checkpoint_path, export_dir) + return + + run_config = t2t_trainer.create_run_config(hparams) + + estimator = create_estimator(run_config, hparams) + + exporter = tf_estimator.FinalExporter( + "exporter", + lambda: problem.serving_input_fn(hparams, decode_hparams, FLAGS.use_tpu), + as_text=FLAGS.as_text) + + exporter.export( + estimator, + export_dir, + checkpoint_path=checkpoint_path, + eval_result=None, + is_the_final_export=True) + + +if __name__ == "__main__": + tf.logging.set_verbosity(tf.logging.INFO) + tf.app.run() diff --git a/tensor2tensor/serving/query.py b/tensor2tensor/serving/query.py new file mode 100644 index 000000000..f4d05ffd1 --- /dev/null +++ b/tensor2tensor/serving/query.py @@ -0,0 +1,119 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Query an exported model. Py2 only. 
Install tensorflow-serving-api.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from oauth2client.client import GoogleCredentials +from six.moves import input # pylint: disable=redefined-builtin + +from tensor2tensor import problems as problems_lib # pylint: disable=unused-import +from tensor2tensor.serving import serving_utils +from tensor2tensor.utils import hparam +from tensor2tensor.utils import registry +from tensor2tensor.utils import usr_dir +import tensorflow.compat.v1 as tf + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_string("server", None, "Address to Tensorflow Serving server.") +flags.DEFINE_string("servable_name", None, "Name of served model.") +flags.DEFINE_string("problem", None, "Problem name.") +flags.DEFINE_string("data_dir", None, "Data directory, for vocab files.") +flags.DEFINE_string("t2t_usr_dir", None, "Usr dir for registrations.") +flags.DEFINE_string("inputs_once", None, "Query once with this input.") +flags.DEFINE_integer("timeout_secs", 10, "Timeout for query.") + +# For Cloud ML Engine predictions. +flags.DEFINE_string("cloud_mlengine_model_name", None, + "Name of model deployed on Cloud ML Engine.") +flags.DEFINE_string( + "cloud_mlengine_model_version", None, + "Version of the model to use. If None, requests will be " + "sent to the default version.") + + +def validate_flags(): + """Validates flags are set to acceptable values.""" + if FLAGS.cloud_mlengine_model_name: + assert not FLAGS.server + assert not FLAGS.servable_name + else: + assert FLAGS.server + assert FLAGS.servable_name + + +def make_request_fn(): + """Returns a request function.""" + if FLAGS.cloud_mlengine_model_name: + request_fn = serving_utils.make_cloud_mlengine_request_fn( + credentials=GoogleCredentials.get_application_default(), + model_name=FLAGS.cloud_mlengine_model_name, + version=FLAGS.cloud_mlengine_model_version) + else: + + request_fn = serving_utils.make_grpc_request_fn( + servable_name=FLAGS.servable_name, + server=FLAGS.server, + timeout_secs=FLAGS.timeout_secs) + return request_fn + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + validate_flags() + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + problem = registry.problem(FLAGS.problem) + hparams = hparam.HParams( + data_dir=os.path.expanduser(FLAGS.data_dir)) + problem.get_hparams(hparams) + request_fn = make_request_fn() + while True: + inputs = FLAGS.inputs_once if FLAGS.inputs_once else input(">> ") + outputs = serving_utils.predict([inputs], problem, request_fn) + outputs, = outputs + output, score = outputs + if len(score.shape) > 0: # pylint: disable=g-explicit-length-test + print_str = """ +Input: +{inputs} + +Output (Scores [{score}]): +{output} + """ + score_text = ",".join(["{:.3f}".format(s) for s in score]) + print(print_str.format(inputs=inputs, output=output, score=score_text)) + else: + print_str = """ +Input: +{inputs} + +Output (Score {score:.3f}): +{output} + """ + print(print_str.format(inputs=inputs, output=output, score=score)) + + if FLAGS.inputs_once: + break + + +if __name__ == "__main__": + flags.mark_flags_as_required(["problem", "data_dir"]) + tf.app.run() diff --git a/tensor2tensor/serving/serving_utils.py b/tensor2tensor/serving/serving_utils.py new file mode 100644 index 000000000..264c51d68 --- /dev/null +++ b/tensor2tensor/serving/serving_utils.py @@ -0,0 +1,174 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities for serving tensor2tensor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import base64 +import functools +from googleapiclient import discovery +import grpc +import numpy as np + +from tensor2tensor import problems as problems_lib # pylint: disable=unused-import +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.utils import cloud_mlengine as cloud +from tensor2tensor.utils import contrib +import tensorflow.compat.v1 as tf + +from tensorflow_serving.apis import predict_pb2 +from tensorflow_serving.apis import prediction_service_pb2_grpc + + + +def _make_example(input_ids, problem, input_feature_name="inputs"): + """Make a tf.train.Example for the problem. + + features[input_feature_name] = input_ids + + Also fills in any other required features with dummy values. + + Args: + input_ids: list. + problem: Problem. + input_feature_name: name of feature for input_ids. + + Returns: + tf.train.Example + """ + features = { + input_feature_name: + tf.train.Feature(int64_list=tf.train.Int64List(value=input_ids)) + } + + # Fill in dummy values for any other required features that presumably + # will not actually be used for prediction. + data_fields, _ = problem.example_reading_spec() + for fname, ftype in data_fields.items(): + if fname == input_feature_name: + continue + if not isinstance(ftype, tf.FixedLenFeature): + # Only FixedLenFeatures are required + continue + if ftype.default_value is not None: + # If there's a default value, no need to fill it in + continue + num_elements = functools.reduce(lambda acc, el: acc * el, ftype.shape, 1) + if ftype.dtype in [tf.int32, tf.int64]: + value = tf.train.Feature( + int64_list=tf.train.Int64List(value=[0] * num_elements)) + if ftype.dtype in [tf.float32, tf.float64]: + value = tf.train.Feature( + float_list=tf.train.FloatList(value=[0.] 
* num_elements)) + if ftype.dtype == tf.bytes: + value = tf.train.Feature( + bytes_list=tf.train.BytesList(value=[""] * num_elements)) + tf.logging.info("Adding dummy value for feature %s as it is required by " + "the Problem.", fname) + features[fname] = value + return tf.train.Example(features=tf.train.Features(feature=features)) + + +def _create_stub(server): + channel = grpc.insecure_channel(server) + return prediction_service_pb2_grpc.PredictionServiceStub(channel) + + +def _encode(inputs, encoder, add_eos=True): + input_ids = encoder.encode(inputs) + if add_eos: + input_ids.append(text_encoder.EOS_ID) + return input_ids + + +def _decode(output_ids, output_decoder): + if len(output_ids.shape) > 1: + return [output_decoder.decode(o, strip_extraneous=True) for o in output_ids] + else: + return output_decoder.decode(output_ids, strip_extraneous=True) + + + + +def make_grpc_request_fn(servable_name, server, timeout_secs): + """Wraps function to make grpc requests with runtime args.""" + stub = _create_stub(server) + + def _make_grpc_request(examples): + """Builds and sends request to TensorFlow model server.""" + request = predict_pb2.PredictRequest() + request.model_spec.name = servable_name + request.inputs["input"].CopyFrom( + tf.make_tensor_proto( + [ex.SerializeToString() for ex in examples], shape=[len(examples)])) + response = stub.Predict(request, timeout_secs) + outputs = tf.make_ndarray(response.outputs["outputs"]) + scores = tf.make_ndarray(response.outputs["scores"]) + assert len(outputs) == len(scores) + return [{ # pylint: disable=g-complex-comprehension + "outputs": output, + "scores": score + } for output, score in zip(outputs, scores)] + + return _make_grpc_request + + +def make_cloud_mlengine_request_fn(credentials, model_name, version): + """Wraps function to make CloudML Engine requests with runtime args.""" + + def _make_cloud_mlengine_request(examples): + """Builds and sends requests to Cloud ML Engine.""" + api = discovery.build("ml", "v1", credentials=credentials) + parent = "projects/%s/models/%s/versions/%s" % (cloud.default_project(), + model_name, version) + input_data = { + "instances": [{ # pylint: disable=g-complex-comprehension + "input": { + "b64": base64.b64encode(ex.SerializeToString()) + } + } for ex in examples] + } + response = api.projects().predict(body=input_data, name=parent).execute() + predictions = response["predictions"] + for prediction in predictions: + prediction["outputs"] = np.array([prediction["outputs"]]) + prediction["scores"] = np.array(prediction["scores"]) + return predictions + + return _make_cloud_mlengine_request + + +def predict(inputs_list, problem, request_fn): + """Encodes inputs, makes request to deployed TF model, and decodes outputs.""" + assert isinstance(inputs_list, list) + fname = "inputs" if problem.has_inputs else "targets" + input_encoder = problem.feature_info[fname].encoder + input_ids_list = [ + _encode(inputs, input_encoder, add_eos=problem.has_inputs) + for inputs in inputs_list + ] + examples = [_make_example(input_ids, problem, fname) + for input_ids in input_ids_list] + predictions = request_fn(examples) + output_decoder = problem.feature_info["targets"].encoder + outputs = [ + (_decode(prediction["outputs"], output_decoder), + prediction["scores"]) + for prediction in predictions + ] + return outputs diff --git a/tensor2tensor/test_data/example_usr_dir/__init__.py b/tensor2tensor/test_data/example_usr_dir/__init__.py new file mode 100644 index 000000000..334f2b12b --- /dev/null +++ 
b/tensor2tensor/test_data/example_usr_dir/__init__.py @@ -0,0 +1,17 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Example T2T user directory.""" +from . import my_submodule diff --git a/tensor2tensor/test_data/example_usr_dir/my_submodule.py b/tensor2tensor/test_data/example_usr_dir/my_submodule.py new file mode 100644 index 000000000..c1133c895 --- /dev/null +++ b/tensor2tensor/test_data/example_usr_dir/my_submodule.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Example registrations for T2T.""" +import re + +from tensor2tensor.data_generators import problem +from tensor2tensor.data_generators import text_problems +from tensor2tensor.layers import common_hparams +from tensor2tensor.utils import registry + +# Use register_model for a new T2TModel +# Use register_problem for a new Problem +# Use register_hparams for a new hyperparameter set + + +@registry.register_hparams +def my_very_own_hparams(): + # Start with the base set + hp = common_hparams.basic_params1() + # Modify existing hparams + hp.num_hidden_layers = 2 + # Add new hparams + hp.add_hparam("filter_size", 2048) + return hp + + +@registry.register_problem +class PoetryLines(text_problems.Text2TextProblem): + """Predict next line of poetry from the last line. From Gutenberg texts.""" + + @property + def approx_vocab_size(self): + return 2**13 # ~8k + + @property + def is_generate_per_split(self): + # generate_data will shard the data into TRAIN and EVAL for us. 
+ return False + + @property + def dataset_splits(self): + """Splits of data to produce and number of output shards for each.""" + # 10% evaluation data + return [{ + "split": problem.DatasetSplit.TRAIN, + "shards": 9, + }, { + "split": problem.DatasetSplit.EVAL, + "shards": 1, + }] + + def generate_samples(self, data_dir, tmp_dir, dataset_split): + del data_dir + del tmp_dir + del dataset_split + + # pylint: disable=g-import-not-at-top + from gutenberg import acquire + from gutenberg import cleanup + # pylint: enable=g-import-not-at-top + + books = [ + # bookid, skip N lines + (19221, 223), + (15553, 522), + ] + + for (book_id, toskip) in books: + text = cleanup.strip_headers(acquire.load_etext(book_id)).strip() + lines = text.split("\n")[toskip:] + prev_line = None + ex_count = 0 + for line in lines: + # Any line that is all upper case is a title or author name + if not line or line.upper() == line: + prev_line = None + continue + + line = re.sub("[^a-z]+", " ", line.strip().lower()) + if prev_line and line: + yield { + "inputs": prev_line, + "targets": line, + } + ex_count += 1 + prev_line = line diff --git a/tensor2tensor/test_data/example_usr_dir/requirements.txt b/tensor2tensor/test_data/example_usr_dir/requirements.txt new file mode 100644 index 000000000..3678319be --- /dev/null +++ b/tensor2tensor/test_data/example_usr_dir/requirements.txt @@ -0,0 +1 @@ +gutenberg diff --git a/tensor2tensor/test_data/transformer_test_ckpt/checkpoint b/tensor2tensor/test_data/transformer_test_ckpt/checkpoint new file mode 100644 index 000000000..9877cc425 --- /dev/null +++ b/tensor2tensor/test_data/transformer_test_ckpt/checkpoint @@ -0,0 +1,2 @@ +model_checkpoint_path: "model.ckpt-1" +all_model_checkpoint_paths: "model.ckpt-1" diff --git a/tensor2tensor/test_data/transformer_test_ckpt/flags.txt b/tensor2tensor/test_data/transformer_test_ckpt/flags.txt new file mode 100644 index 000000000..2ecee8328 --- /dev/null +++ b/tensor2tensor/test_data/transformer_test_ckpt/flags.txt @@ -0,0 +1,49 @@ +--eval_steps=1 +--hparams_range= +--t2t_usr_dir= +--enable_graph_rewriter=False +--sync=False +--eval_run_autoregressive=False +--eval_use_test_set=False +--worker_id=0 +--eval_early_stopping_metric_minimize=True +--worker_replicas=1 +--random_seed=1234 +--worker_gpu_memory_fraction=0.95 +--train_steps=1 +--iterations_per_loop=1000 +--registry_help=False +--worker_gpu=1 +--keep_checkpoint_max=20 +--save_checkpoints_secs=0 +--gpu_order= +--master= +--generate_data=False +--local_eval_frequency=2000 +--export_saved_model=False +--eval_early_stopping_steps=None +--output_dir=/tmp/oss_train +--profile=False +--ps_job=/job:ps +--tmp_dir=/tmp/t2t_datagen +--schedule=continuous_train_and_eval +--problem=translate_ende_wmt8k +--hparams= +--use_tpu=False +--eval_early_stopping_metric_delta=0.1 +--ps_gpu=0 +--keep_checkpoint_every_n_hours=10000 +--decode_hparams= +--tfdbg=False +--data_dir=~/t2t/data +--ps_replicas=0 +--eval_early_stopping_metric=loss +--log_device_placement=False +--hparams_set=transformer_test +--dbgprofile=False +--timit_paths= +--tpu_num_shards=8 +--locally_shard_to_cpu=False +--worker_job=/job:localhost +--model=transformer +--parsing_path= diff --git a/tensor2tensor/test_data/transformer_test_ckpt/hparams.json b/tensor2tensor/test_data/transformer_test_ckpt/hparams.json new file mode 100644 index 000000000..1a6a97223 --- /dev/null +++ b/tensor2tensor/test_data/transformer_test_ckpt/hparams.json @@ -0,0 +1 @@ +{"daisy_chain_variables": true, "optimizer_adam_beta1": 0.9, "scheduled_sampling_prob": 
0.0, "num_hidden_layers": 2, "moe_loss_coef": 0.01, "max_target_seq_length": 0, "clip_grad_norm": 0.0, "pos": "timing", "scheduled_sampling_gold_mixin_prob": 0.5, "initializer": "uniform_unit_scaling", "grad_noise_scale": 0.0, "optimizer_momentum_momentum": 0.9, "nbr_decoder_problems": 1, "attention_key_channels": 0, "eval_drop_long_sequences": false, "learning_rate_cosine_cycle_steps": 250000, "prepend_mode": "none", "weight_decay": 0.0, "symbol_modality_skip_top": false, "weight_noise": 0.0, "target_modality": "default", "attention_dropout": 0.1, "parameter_attention_value_channels": 0, "factored_logits": false, "relu_dropout": 0.1, "no_data_parallelism": false, "layer_preprocess_sequence": "n", "sampling_method": "argmax", "learning_rate": 0.2, "num_heads": 2, "max_length": 256, "summarize_grads": false, "attention_value_channels": 0, "num_encoder_layers": 0, "label_smoothing": 0.1, "use_fixed_batch_size": false, "optimizer": "adam", "moe_k": 2, "self_attention_type": "dot_product", "learning_rate_decay_scheme": "noam", "sampling_temp": 1.0, "kernel_height": 3, "use_pad_remover": true, "batch_size": 4096, "max_relative_position": 0, "force_full_predict": false, "min_length_bucket": 8, "layer_prepostprocess_dropout": 0.1, "eval_run_autoregressive": false, "shared_embedding_and_softmax_weights": true, "symbol_modality_num_shards": 16, "dropout": 0.2, "compress_steps": 0, "parameter_attention_key_channels": 0, "length_bucket_step": 1.1, "kernel_width": 1, "hidden_size": 16, "num_decoder_layers": 0, "input_modalities": "default", "filter_size": 8, "optimizer_adam_beta2": 0.98, "scheduled_sampling_warmup_steps": 50000, "norm_type": "layer", "min_length": 0, "moe_num_experts": 64, "multiply_embedding_mode": "sqrt_depth", "max_input_seq_length": 0, "learning_rate_warmup_steps": 8000, "proximity_bias": false, "ffn_layer": "dense_relu_dense", "initializer_gain": 1.0, "layer_postprocess_sequence": "da", "moe_hidden_sizes": "2048", "optimizer_adam_epsilon": 1e-09, "norm_epsilon": 1e-06} diff --git a/tensor2tensor/test_data/transformer_test_ckpt/model.ckpt-1.data-00000-of-00002 b/tensor2tensor/test_data/transformer_test_ckpt/model.ckpt-1.data-00000-of-00002 new file mode 100644 index 000000000..119e367a3 Binary files /dev/null and b/tensor2tensor/test_data/transformer_test_ckpt/model.ckpt-1.data-00000-of-00002 differ diff --git a/tensor2tensor/test_data/transformer_test_ckpt/model.ckpt-1.data-00001-of-00002 b/tensor2tensor/test_data/transformer_test_ckpt/model.ckpt-1.data-00001-of-00002 new file mode 100644 index 000000000..7d3868985 Binary files /dev/null and b/tensor2tensor/test_data/transformer_test_ckpt/model.ckpt-1.data-00001-of-00002 differ diff --git a/tensor2tensor/test_data/transformer_test_ckpt/model.ckpt-1.index b/tensor2tensor/test_data/transformer_test_ckpt/model.ckpt-1.index new file mode 100644 index 000000000..f24748e8a Binary files /dev/null and b/tensor2tensor/test_data/transformer_test_ckpt/model.ckpt-1.index differ diff --git a/tensor2tensor/test_data/transformer_test_ckpt/model.ckpt-1.meta b/tensor2tensor/test_data/transformer_test_ckpt/model.ckpt-1.meta new file mode 100644 index 000000000..fef601895 Binary files /dev/null and b/tensor2tensor/test_data/transformer_test_ckpt/model.ckpt-1.meta differ diff --git a/tensor2tensor/test_data/vocab.translate_ende_wmt32k.32768.subwords b/tensor2tensor/test_data/vocab.translate_ende_wmt32k.32768.subwords new file mode 100644 index 000000000..8036581fe --- /dev/null +++ b/tensor2tensor/test_data/vocab.translate_ende_wmt32k.32768.subwords 
@@ -0,0 +1,33708 @@ +'' +'' +', _' +'._' +'the_' +'_' +'in_' +'of_' +'and_' +'to_' +'die_' +'der_' +'und_' +'a_' +'s_' +'-_' +'is_' +'that_' +'zu_' +'for_' +'den_' +'von_' +'n_' +'on_' +'ist_' +'an_' +'für_' +'. _' +'be_' +'The_' +'with_' +'en_' +'es_' +'are_' +'das_' +'as_' +'e_' +'des_' +'auf_' +'mit_' +'it_' +'eine_' +'dass_' +'nicht_' +'I_' +'im_' +'not_' +'have_' +'by_' +'this_' +' (_' +' – _' +'sich_' +'was_' +'ein_' +'werden_' +'Die_' +'will_' +'from_' +'we_' +'dem_' +'’_' +'t_' +': _' +'at_' +'or_' +'Sie_' +'which_' +'has_' +'er_' +'als_' +'auch_' +'you_' +'wir_' +'r_' +'In_' +'um_' +'sind_' +'wird_' +') _' +'so_' +'can_' +'sie_' +'ing_' +'all_' +''_' +' - _' +'einer_' +'hat_' +'wie_' +'also_' +'their_' +'European_' +'haben_' +'d_' +'would_' +'ed_' +'oder_' +'its_' +'more_' +'über_' +'but_' +'?_' +'einen_' +'ich_' +'y_' +'zur_' +'our_' +'they_' +'aus_' +'bei_' +'Das_' +'one_' +'been_' +'; _' +'nur_' +'Union_' +'should_' +'It_' +'EU_' +'einem_' +'/_' +'nach_' +'durch_' +'This_' +'können_' +'diese_' +'ung_' +'other_' +'zum_' +'noch_' +'only_' +'there_' +' , _' +'do_' +'am_' +'de_' +'countries_' +'1_' +'kann_' +'dieser_' +'war_' +'than_' +'We_' +'new_' +'o_' +'your_' +'Europe_' +'Der_' +'must_' +'Mr_' +'no_' +'vor_' +'were_' +'2_' +'like_' +'wenn_' +'man_' +'US_' +'Ich_' +'wurde_' +'- _' +'about_' +' "_' +'us_' +'President_' +'m_' +'time_' +'Es_' +'these_' +'if_' +'aber_' +'te_' +'sein_' +'who_' +'up_' +'very_' +'Hotel_' +'world_' +' ._' +'uns_' +'Commission_' +'when_' +'such_' +'A_' +'But_' +'Wir_' +'people_' +'müssen_' +' “_' +'into_' +'ten_' +'ng_' +'China_' +'out_' +'3_' +'mehr_' +'ihre_' +'his_' +'5_' +'now_' +'most_' +'some_' +'what_' +'sehr_' +'Kommission_' +'many_' +'!_' +'i_' +')._' +'l_' +'he_' +'any_' +'% _' +'had_' +' „_' +'States_' +'them_' +',_' +'eines_' +'4_' +'well_' +'Herr_' +'), _' +'" _' +'economic_' +'diesem_' +'need_' +'unter_' +'years_' +'political_' +'between_' +'ly_' +'zwischen_' +'first_' +'hotel_' +'alle_' +'even_' +'policy_' +'make_' +'bis_' +'two_' +'muss_' +'could_' +'over_' +'anderen_' +'use_' +'Parliament_' +'keine_' +'my_' +'work_' +'may_' +'way_' +'important_' +'Council_' +'gegen_' +'report_' +'Präsident_' +'0_' +'system_' +'Europäischen_' +'Europa_' +'gibt_' +'because_' +'If_' +'those_' +'just_' +'support_' +'vom_' +'seine_' +'sowie_' +'k_' +'country_' +'year_' +'much_' +'Wenn_' +'dieses_' +'after_' +'government_' +'Member_' +'al_' +'made_' +' _' +'ungen_' +'able_' +'take_' +'h_' +'möchte_' +'market_' +'being_' +'immer_' +'“ _' +'” _' +'public_' +'own_' +'long_' +'' _' +'Welt_' +'dies_' +'sondern_' +'Zeit_' +'Menschen_' +'Jahren_' +'international_' +'(_' +'where_' +'right_' +'good_' +'financial_' +'how_' +'ihrer_' +'da_' +'diesen_' +'USA_' +'wurden_' +'andere_' +'For_' +':_' +'g_' +'As_' +'Diese_' +'Jahr_' +'both_' +'information_' +'against_' +'Länder_' +'part_' +'same_' +'last_' +'Bericht_' +'unsere_' +'global_' +'dann_' +'z_' +'through_' +'würde_' +'re_' +'6_' +'then_' +'sollte_' +'There_' +'jedoch_' +'hier_' +'high_' +'does_' +'end_' +'damit_' +'Im_' +'10_' +'seiner_' +'heute_' +'S_' +'United_' +'Und_' +'under_' +'social_' +'too_' +'ion_' +'7_' +'national_' +'order_' +'growth_' +'8_' +'example_' +'still_' +'see_' +'me_' +'le_' +'neue_' +'ation_' +'ohne_' +'free_' +'europäischen_' +'Parlament_' +'Land_' +'number_' +'That_' +'Mitgliedstaaten_' +'rights_' +'place_' +'könnte_' +'development_' +'area_' +'And_' +'within_' +'power_' +'her_' +'course_' +'room_' +'point_' +'fact_' +'before_' +'bereits_' +'used_' +'Frage_' +'’ _' +'neuen_' +'while_' 
+'denen_' +'far_' +'possible_' +'Entwicklung_' +'Ein_' +'20_' +'selbst_' +'wieder_' +'economy_' +' [[_' +'want_' +'la_' +'future_' +'sollten_' +'You_' +'zwei_' +'dazu_' +'Europäische_' +'say_' +'Regierung_' +'great_' +'already_' +' | _' +'without_' +'c_' +'set_' +'Aber_' +'less_' +'9_' +'large_' +'während_' +'human_' +'here_' +'! _' +'weil_' +'today_' +'jetzt_' +'ihren_' +'30_' +'ihr_' +'So_' +'view_' +'se_' +'machen_' +'wäre_' +'therefore_' +'cannot_' +'believe_' +'problem_' +'liegt_' +'wo_' +'since_' +'go_' +'Lage_' +' '_' +'u_' +'crisis_' +'three_' +'state_' +'per_' +'find_' +'few_' +'down_' +'America_' +'et_' +'Ländern_' +'viele_' +'services_' +'000_' +'..._' +'st_' +'process_' +'issue_' +'help_' +'Unternehmen_' +'trade_' +'including_' +'available_' +'15_' +'level_' +'case_' +'Maßnahmen_' +'viel_' +'know_' +'geht_' +'einige_' +'Eine_' +'means_' +'darauf_' +'denn_' +'dafür_' +'00_' +'next_' +'mich_' +'different_' +'Jahre_' +'seit_' +'city_' +'change_' +'areas_' +'rs_' +'real_' +'get_' +'problems_' +'When_' +'Staaten_' +'health_' +'el_' +'Politik_' +'E_' +'C_' +'   _' +'making_' +'best_' +'Mit_' +'Ihnen_' +'however_' +'clear_' +'better_' +'allem_' +'politischen_' +'lassen_' +'finden_' +'x_' +'why_' +'habe_' +'gut_' +'Frau_' +'ers_' +'Dies_' +'B_' +'ne_' +'D_' +'energy_' +'during_' +'become_' +'allen_' +'They_' +'service_' +'access_' +'waren_' +'ganz_' +'Japan_' +'2009_' +'unserer_' +'etwas_' +'daß_' +'Committee_' +'current_' +'wollen_' +'question_' +'doch_' +'stellen_' +'politische_' +'com_' +'back_' +'To_' +'yang_' +'particular_' +'small_' +'ob_' +'did_' +'day_' +'Rat_' +'think_' +'These_' +'Israel_' +'based_' +'bar_' +'ve_' +'interest_' +'debate_' +'common_' +'beim_' +'Doch_' +'Commissioner_' +'letzten_' +'each_' +'again_' +'bietet_' +'Germany_' +'At_' +'security_' +'nun_' +'measures_' +'business_' +'Zimmer_' +'situation_' +'schon_' +'put_' +'offers_' +'p_' +'markets_' +'Wie_' +'ts_' +'tion_' +'taken_' +'seinen_' +'military_' +'major_' +'Ihre_' +'ge_' +'come_' +'another_' +'Bereich_' +'Arbeit_' +'stay_' +'Iran_' +'rate_' +'provide_' +'Ende_' +'tun_' +'result_' +'rather_' +'law_' +'continue_' +'citizens_' +'ce_' +'50_' +'said_' +'action_' +'page_' +'might_' +'House_' +'term_' +'American_' +'2008_' +'worden_' +'recent_' +'given_' +'Unterstützung_' +'Internet_' +'Euro_' +'whether_' +'ies_' +'every_' +'Probleme_' +'|_' +'ment_' +'give_' +'einfach_' +'2000_' +'steht_' +'rooms_' +'governments_' +'ersten_' +'debt_' +'called_' +'Teil_' +'Auch_' +'besteht_' +'Problem_' +'sten_' +'proposal_' +'needs_' +'gen_' +'budget_' +'Bank_' +'12_' +'private_' +'ns_' +'Herrn_' +'weniger_' +'lich_' +'institutions_' +'full_' +'erhalten_' +'On_' +'New_' +'Fall_' +'11_' +'non_' +'P_' +'vote_' +'terms_' +'os_' +'issues_' +'f_' +'data_' +'away_' +'Nach_' +'sehen_' +'little_' +'line_' +'least_' +'further_' +'around_' +'always_' +'Ziel_' +'Sicherheit_' +'Als_' +'que_' +'million_' +'großen_' +'du_' +'ch_' +'France_' +'ter_' +'open_' +'eigenen_' +'Russia_' +'Rolle_' +'systems_' +'life_' +'insbesondere_' +'hatte_' +'group_' +'geben_' +'close_' +'World_' +'sagen_' +'hope_' +'With_' +'Art_' +'All_' +'zwar_' +'using_' +'quality_' +'policies_' +'main_' +'legal_' +'ern_' +'deren_' +'ons_' +'off_' +'located_' +'ab_' +'System_' +'Bürger_' +'money_' +'mir_' +'kommen_' +'bin_' +'age_' +'What_' +'Namen_' +'risk_' +'investment_' +'foreign_' +'drei_' +'ble_' +'allerdings_' +'Zusammenarbeit_' +'Wirtschaft_' +'He_' +'Bedeutung_' +'2005_' +'role_' +'position_' +'große_' +']] _' +'Was_' +'Informationen_' +'würden_' +'women_' 
+'möglich_' +'form_' +'certain_' +'T_' +'2006_' +'etwa_' +'einmal_' +'done_' +'German_' +'weiter_' +'sector_' +'agreement_' +'2007_' +'rules_' +'increase_' +'ihnen_' +'dan_' +'companies_' +'basis_' +'b_' +'unseren_' +'particularly_' +'members_' +'local_' +'führen_' +'di_' +'create_' +'century_' +'öffentlichen_' +'pro_' +'old_' +'necessary_' +'erreichen_' +'Er_' +'Deutschland_' +'Bei_' +'.”_' +'start_' +'second_' +'once_' +'davon_' +'ago_' +'O_' +'yet_' +'together_' +'ry_' +'going_' +'soll_' +'others_' +'könnten_' +'dort_' +'staff_' +'\u_' +'Für_' +' & _' +'stehen_' +'needed_' +'location_' +'ensure_' +'dabei_' +'conditions_' +'Millionen_' +'Community_' +'Auf_' +'2001_' +'weitere_' +'though_' +'rn_' +'often_' +'key_' +'ins_' +'alles_' +'short_' +'price_' +'meisten_' +'him_' +'fully_' +'especially_' +'September_' +'Fragen_' +'rates_' +'control_' +'central_' +'Weise_' +'Weg_' +'One_' +'low_' +'kein_' +'itself_' +'greater_' +'gegenüber_' +'capital_' +'Rahmen_' +'nt_' +'modern_' +'decision_' +'aller_' +' _' +'sogar_' +'cooperation_' +'Recht_' +'However_' +'innerhalb_' +'costs_' +'Vorschlag_' +'reform_' +'led_' +'food_' +'Zukunft_' +'Wachstum_' +'Stadt_' +'Indeed_' +'2004_' +' $_' +'wissen_' +'third_' +'region_' +'account_' +' ..._' +'leaders_' +'among_' +'Seite_' +'M_' +'Beispiel_' +'whole_' +'tax_' +'protection_' +'present_' +'ic_' +'early_' +'aid_' +'Um_' +'Europas_' +'100_' +'vielen_' +'schen_' +'name_' +'meine_' +'ischen_' +'income_' +'following_' +'excellent_' +'darüber_' +'children_' +'banks_' +'25_' +'welche_' +'several_' +'reason_' +'nen_' +'having_' +'UN_' +'Seiten_' +'weit_' +'progress_' +'centre_' +'Chinese_' +'wirklich_' +'really_' +'möchten_' +'macht_' +'kommt_' +'ja_' +'europäische_' +'value_' +'resources_' +'environment_' +'bringen_' +'bieten_' +'Region_' +'Möglichkeit_' +'16_' +'water_' +'something_' +'left_' +'internationalen_' +'home_' +'democratic_' +'Zusammenhang_' +'Grund_' +'Frauen_' +'2003_' +'sowohl_' +'include_' +'democracy_' +'daher_' +'weiterhin_' +'stellt_' +'states_' +'production_' +'kan_' +'fast_' +'Bush_' +'wirtschaftlichen_' +'single_' +'shall_' +'nden_' +'longer_' +'efforts_' +'Mrs_' +'India_' +'working_' +'until_' +'ty_' +'poor_' +'matter_' +'land_' +'force_' +'chen_' +'French_' +'society_' +'indem_' +'2002_' +'unterstützen_' +'regard_' +'offer_' +'nd_' +'nationalen_' +'list_' +'likely_' +'halten_' +'No_' +'Group_' +'Central_' +'period_' +'never_' +'meiner_' +'gentlemen_' +'entfernt_' +'difficult_' +'beiden_' +'Kinder_' +'18_' +'un_' +'prices_' +'look_' +'fiscal_' +'besonders_' +'ar_' +'approach_' +'Parlaments_' +'Mittel_' +'views_' +'verschiedenen_' +'standards_' +'results_' +'respect_' +'resolution_' +'research_' +'les_' +'industry_' +'developing_' +'cost_' +'L_' +'Iraq_' +'International_' +'G_' +'Dollar_' +' % _' +'wichtig_' +'special_' +'member_' +'hand_' +'created_' +'U_' +'N_' +'Investitionen_' +'40_' +'wirtschaftliche_' +'show_' +'nehmen_' +'management_' +'interests_' +'enough_' +'breakfast_' +'State_' +'24_' +'wenig_' +'proposals_' +'parties_' +'nichts_' +'ihm_' +'experience_' +'etc_' +'almost_' +'Vereinigten_' +'R_' +'Obama_' +'Leben_' +' ' +'transport_' +'taking_' +'remain_' +'programme_' +'play_' +'near_' +'general_' +'family_' +'erste_' +'aufgrund_' +'address_' +'Ziele_' +'Fraktion_' +'Daten_' +'side_' +'ren_' +'products_' +'men_' +'ihn_' +'history_' +'hinaus_' +'easy_' +'billion_' +'Service_' +'Richtlinie_' +'Krise_' +'Grundlage_' +'thus_' +'things_' +'site_' +'sei_' +'questions_' +'ous_' +'nuclear_' +'known_' +'ierung_' +'feel_' +'call_' 
+'building_' +'Thema_' +'Russland_' +'Regierungen_' +'Kollegen_' +'towards_' +'seinem_' +'ner_' +'iert_' +'ieren_' +'hin_' +'dessen_' +'befindet_' +'Verfügung_' +'Landes_' +'An_' +'wish_' +'technology_' +'rt_' +'proposed_' +'natürlich_' +'keinen_' +'demand_' +'darin_' +'Lösung_' +'Kommissar_' +'F_' +'14_' +'workers_' +'themselves_' +'solution_' +'projects_' +'pay_' +'months_' +'ity_' +'Programm_' +'Macht_' +'version_' +'regional_' +'program_' +'past_' +'ness_' +'lichen_' +'ische_' +'hard_' +'ever_' +'eren_' +'ask_' +'adopted_' +'Gemeinschaft_' +'East_' +'By_' +'60_' +'strategy_' +'points_' +'personal_' +'lead_' +'ive_' +'ihrem_' +'ia_' +'higher_' +'gute_' +'directive_' +'cultural_' +'beispielsweise_' +'agree_' +'Man_' +'welcome_' +'ted_' +'software_' +'sicher_' +'serious_' +'sche_' +'liche_' +'levels_' +'gilt_' +'gehen_' +'found_' +'either_' +'effective_' +'education_' +'above_' +'Situation_' +'After_' +'19_' +'vielleicht_' +'various_' +'specific_' +'schnell_' +'schaffen_' +'related_' +'freedom_' +'deal_' +'che_' +'besser_' +'bedeutet_' +'South_' +'Institutionen_' +'21_' +'rapporteur_' +'ls_' +'enjoy_' +'economies_' +'ebenfalls_' +'direkt_' +'bleiben_' +'big_' +'authorities_' +'allow_' +'November_' +'Meinung_' +'Even_' +'Ansicht_' +'. _' +'à_' +'user_' +'seen_' +'remains_' +'reforms_' +'ra_' +'provided_' +'plan_' +'opportunity_' +'ladies_' +'keit_' +'impact_' +'groups_' +'framework_' +'ens_' +'comes_' +'Schutz_' +'th_' +'strong_' +'simply_' +'significant_' +'quite_' +'let_' +'leading_' +'language_' +'em_' +'concerns_' +'climate_' +'behalf_' +'X_' +'Restaurant_' +'Kosten_' +'Dieser_' +'Da_' +'Africa_' +'town_' +'ting_' +'party_' +'pages_' +'makes_' +'globalen_' +'due_' +'Our_' +'Hilfe_' +'Demokratie_' +'2010_' +'upon_' +'seems_' +'relations_' +'peace_' +'online_' +'oil_' +'forward_' +'effect_' +'W_' +'Treaty_' +'Of_' +'Menschenrechte_' +'Form_' +'zurück_' +'true_' +'total_' +'subject_' +'safety_' +'later_' +'jeder_' +'growing_' +'face_' +'appropriate_' +'amendments_' +'West_' +'Minister_' +'Geschichte_' +'Alle_' +';_' +'young_' +'top_' +'solche_' +'she_' +'recently_' +'kind_' +'internationale_' +'individual_' +'ig_' +'euro_' +'environmental_' +'ebenso_' +'daran_' +'concerned_' +'Ukraine_' +'Strategie_' +'Madam_' +'Gesellschaft_' +'GDP_' +'Dieses_' +'w_' +'spending_' +'share_' +'positive_' +'opinion_' +'ma_' +'light_' +'führt_' +'eurozone_' +'ert_' +'community_' +'care_' +'attention_' +'Some_' +'Rates_' +'Milliarden_' +'EUR_' +'sure_' +'success_' +'son_' +'sollen_' +'sen_' +'outside_' +'km_' +'improve_' +'huge_' +'half_' +'genau_' +'funds_' +'four_' +'content_' +'changes_' +'addition_' +'Western_' +'é_' +'words_' +'traditional_' +'text_' +'station_' +'setzen_' +'rise_' +'nächsten_' +'nutzen_' +'nothing_' +'ions_' +'idea_' +'gab_' +'fundamental_' +'bekannt_' +'V_' +'T' +'Krieg_' +'Italy_' +'Beziehungen_' +'stand_' +'legislation_' +'ging_' +'field_' +'currently_' +'art_' +'Turkey_' +'Paris_' +'Mitglieder_' +'London_' +'Ihr_' +'Hotels_' +'Geld_' +'Frankreich_' +'Bevölkerung_' +'v_' +'times_' +'thank_' +'sense_' +'restaurant_' +'required_' +'population_' +'person_' +'house_' +'held_' +'heart_' +'gemacht_' +'fall_' +'developed_' +'deutlich_' +'bzw_' +'amerikanischen_' +'Während_' +'Members_' +'British_' +'"_' +' . 
_' +'ur_' +'minutes_' +'living_' +'inflation_' +'ie_' +'forces_' +'facilities_' +'arbeiten_' +'actually_' +'Zugang_' +'UK_' +'Moreover_' +'M' +'Dienstleistungen_' +'Bedingungen_' +'Auswirkungen_' +'Aus_' +'"._' +'sea_' +'run_' +'product_' +'principle_' +'nämlich_' +'model_' +'majority_' +'ll_' +'jobs_' +'hätte_' +'hours_' +'gesagt_' +'erreicht_' +'date_' +'certainly_' +'alten_' +'across_' +'statt_' +'sch_' +'reduce_' +'potential_' +'los_' +'indeed_' +'hatten_' +'game_' +'exchange_' +'employment_' +'einigen_' +'deshalb_' +'days_' +'cases_' +'benefits_' +'Windows_' +'Schritt_' +'National_' +'Markt_' +'Greece_' +'Bezug_' +'1999_' +')_' +'wichtige_' +'whose_' +'via_' +'unterstützt_' +'unserem_' +'tes_' +'stability_' +'similar_' +'range_' +'keyword_' +'igen_' +'directly_' +'consider_' +'company_' +'beautiful_' +'along_' +'air_' +'Umsetzung_' +'Interesse_' +'Erfolg_' +'Entscheidung_' +'Asia_' +',” _' +'si_' +'self_' +'regulation_' +'pool_' +'parts_' +'natural_' +'media_' +'gerade_' +'five_' +'enden_' +'elections_' +'ca_' +'application_' +'anti_' +'TV_' +'Office_' +'Liste_' +'Ihrer_' +'Haus_' +'Berlin_' +'understand_' +'lot_' +'lediglich_' +'job_' +'ende_' +'emerging_' +'War_' +'Tatsache_' +'Personen_' +'April_' +' : _' +'size_' +'prevent_' +'opportunities_' +'nde_' +'na_' +'mean_' +'lange_' +'involved_' +'cy_' +'conflict_' +'co_' +'capacity_' +'bring_' +'bleibt_' +'bed_' +'achieve_' +'While_' +'Tag_' +'Nähe_' +'January_' +'Ergebnis_' +'Ebene_' +'Debatte_' +'&_' +'wide_' +'thing_' +'sprechen_' +'rule_' +'regions_' +'project_' +'meeting_' +'handelt_' +'eher_' +'complete_' +'body_' +'Deshalb_' +'13_' +'violence_' +'verwendet_' +'verhindern_' +'table_' +'ss_' +'response_' +'politik_' +'je_' +'heißt_' +'ger_' +'focus_' +'dürfen_' +'competition_' +'clearly_' +'check_' +'car_' +'ben_' +'ant_' +'add_' +'Verantwortung_' +'Präsidenten_' +'La_' +'Jahres_' +'Interessen_' +'Government_' +'Afghanistan_' +'17_' +' ( _' +'unser_' +'scheint_' +'move_' +'lässt_' +'jeden_' +'increasingly_' +'image_' +'großer_' +'gleichzeitig_' +'gemeinsame_' +'features_' +'existing_' +'everything_' +'event_' +'ent_' +'ds_' +'computer_' +'clean_' +'civil_' +'bereit_' +'amount_' +'administration_' +'Park_' +'Irak_' +'Gewalt_' +'ways_' +'ste_' +'sozialen_' +'provides_' +'popular_' +'obwohl_' +'nie_' +'negotiations_' +'nature_' +'fight_' +'direct_' +'del_' +'culture_' +'center_' +'came_' +'Z' +'Kampf_' +'II_' +'G' +'23_' +'verfügt_' +'training_' +'six_' +'road_' +'night_' +'network_' +'live_' +'internal_' +'instead_' +'file_' +'decisions_' +'betrifft_' +'balance_' +'ary_' +'Türkei_' +'North_' +'Hier_' +'Bereichen_' +'Banken_' +'Aussprache_' +' -_' +'ya_' +'space_' +'meet_' +'keep_' +'extremely_' +'effects_' +'ck_' +'chinesischen_' +'below_' +'activities_' +'Zu_' +'San_' +'Partei_' +'Möglichkeiten_' +'I' +'Einsatz_' +'BIP_' +'Allerdings_' +'", _' +' ''_' +'weiß_' +'weiteren_' +'week_' +'wahrscheinlich_' +'values_' +'unternehmen_' +'simple_' +'return_' +'rest_' +'perhaps_' +'notwendig_' +'net_' +'infrastructure_' +'increased_' +'included_' +'il_' +'contains_' +'commitment_' +'besten_' +'Staat_' +'Spain_' +'Richtung_' +'Ort_' +'Booking_' +'   – _' +'vergangenen_' +'turn_' +'try_' +'tragen_' +'toward_' +'took_' +'tatsächlich_' +'step_' +'reviews_' +'responsible_' +'poverty_' +'negara_' +'möglicherweise_' +'late_' +'importance_' +'ideal_' +'hätten_' +'hohen_' +'former_' +'favour_' +'essential_' +'doing_' +'design_' +'customers_' +'currency_' +'] _' +'Software_' +'80_' +'“, _' +'zusammen_' +'ziehen_' +'wären_' +'spielen_' +'soziale_' 
+'performance_' +'oft_' +'moment_' +'lack_' +'kleinen_' +'klar_' +'fünf_' +'erst_' +'derzeit_' +'dar_' +'brauchen_' +'befinden_' +'beach_' +'ally_' +'Wert_' +'Party_' +'Großbritannien_' +'Grenzen_' +'Chinas_' +' ‘_' +'wichtigen_' +'weltweit_' +'wegen_' +'responsibility_' +'require_' +'reasons_' +'ngen_' +'negative_' +'liegen_' +'integration_' +'ings_' +'ian_' +'hen_' +'größten_' +'geführt_' +'external_' +'develop_' +'credit_' +'bank_' +'York_' +'Vor_' +'Today_' +'Preis_' +'Außerdem_' +'-, _' +'ta_' +'successful_' +'red_' +'protect_' +'president_' +'places_' +'largest_' +'implementation_' +'heit_' +'friendly_' +'double_' +'decades_' +'darf_' +'challenges_' +'Von_' +'Tatsächlich_' +'S' +'O' +'My_' +'Aufgabe_' +'Am_' +'ure_' +'sub_' +'stark_' +'soon_' +'rich_' +'pressure_' +'option_' +'neu_' +'jedes_' +'ien_' +'glaube_' +'events_' +'established_' +'despite_' +'comfortable_' +'cause_' +'built_' +'board_' +'benefit_' +'although_' +'Wahl_' +'Verfahren_' +'Regionen_' +'May_' +'Mal_' +'K_' +'Ihren_' +'Enterprise_' +'Britain_' +'Behörden_' +'Ausschuss_' +'Amerika_' +'ändern_' +'verschiedene_' +'takes_' +'strategic_' +'steps_' +'status_' +'len_' +'labor_' +'guarantee_' +'gehört_' +'einzelnen_' +'designed_' +'ces_' +'card_' +'cal_' +'behind_' +'agenda_' +'Website_' +'Verbindung_' +'Russian_' +'Rechte_' +'Presidency_' +'Initiative_' +'F' +'Court_' +'City_' +'Abstimmung_' +'�_' +'wichtigsten_' +'walk_' +'video_' +'type_' +'terrorism_' +'stop_' +'standard_' +'schließlich_' +'risks_' +'regime_' +'post_' +'nya_' +'nor_' +'internet_' +'gehören_' +'ermöglichen_' +'bisher_' +'beginning_' +'became_' +'Zum_' +'Verfassung_' +'Uhr_' +'Spanien_' +'Platz_' +'First_' +'Fed_' +'D' +'Bemühungen_' +'Armut_' +'. - (_' +'zeit_' +'zeigen_' +'wohl_' +'visit_' +'verbunden_' +'threat_' +'thought_' +'receive_' +'reach_' +'probably_' +'practice_' +'official_' +'nice_' +'mal_' +'lower_' +'looking_' +'ler_' +'gemeinsam_' +'gar_' +'findet_' +'fen_' +'complex_' +'committee_' +'closed_' +'angesichts_' +'ability_' +'Zahl_' +'Yet_' +'Vorschläge_' +'Version_' +'Verhandlungen_' +'Politiker_' +'More_' +'Mehrheit_' +'How_' +'Führung_' +'Eurozone_' +'Ergebnisse_' +'Bar_' +'B' +'Ansatz_' +'70_' +'200_' +'ze_' +'wobei_' +'study_' +'started_' +'rund_' +'reality_' +'purpose_' +'programs_' +'plans_' +'music_' +'monetary_' +'limited_' +'konnte_' +'ke_' +'isch_' +'highly_' +'guests_' +'falls_' +'enable_' +'confidence_' +'bad_' +'according_' +'accept_' +'V' +'Reformen_' +'Prozess_' +'Nationen_' +'NATO_' +'Kunden_' +'K' +'Indien_' +'Handel_' +'From_' +'Druck_' +'Dabei_' +'Antwort_' +'..." 
_' +' / _' +'vier_' +'sustainable_' +'style_' +'shown_' +'raise_' +'previous_' +'matters_' +'lives_' +'ken_' +'industrial_' +'helfen_' +'creating_' +'context_' +'consumers_' +'consequences_' +'con_' +'basic_' +'answer_' +'Prozent_' +'Only_' +'June_' +'English_' +'Development_' +', “_' +'zehn_' +'werde_' +'unique_' +'ton_' +'setting_' +'seines_' +'presented_' +'ors_' +'lost_' +'konnten_' +'knowledge_' +'ihres_' +'gegeben_' +'gebracht_' +'gas_' +'erforderlich_' +'effort_' +'creation_' +'cht_' +'choose_' +'caused_' +'categories_' +'bus_' +'beyond_' +'asked_' +'active_' +'Wort_' +'Seit_' +'Punkt_' +'Now_' +'Gruppe_' +'Entscheidungen_' +'Berichterstatter_' +'Artikel_' +'Arab_' +'öffentliche_' +'zen_' +'zeigt_' +'ties_' +'seem_' +'saying_' +'politicians_' +'partner_' +'note_' +'nahe_' +'latest_' +'ks_' +'hohe_' +'guten_' +'gewährleisten_' +'gesamten_' +'finance_' +'failure_' +'evidence_' +'entwickeln_' +'enthält_' +'darum_' +'dadurch_' +'challenge_' +'alone_' +'act_' +'Spiel_' +'Putin_' +'P' +'Hinblick_' +'General_' +'Gelegenheit_' +'Gefahr_' +'Gebiet_' +'Förderung_' +'Europeans_' +'Darüber_' +'Dank_' +'Damit_' +'Beginn_' +'Barcelona_' +'August_' +'Abkommen_' +'Öffentlichkeit_' +'verwenden_' +'unemployment_' +'treatment_' +'source_' +'sound_' +'sometimes_' +'solutions_' +'quickly_' +'programmes_' +'please_' +'objective_' +'lines_' +'larger_' +'ker_' +'guest_' +'damage_' +'build_' +'aware_' +'average_' +'aktuellen_' +'agricultural_' +'achieved_' +'University_' +'St_' +'Schlusselwortern_' +'Regeln_' +'Produkte_' +'Middle_' +'March_' +'H' +'Datei_' +'CD_' +'Bildung_' +'500_' +'1990_' +' " _' +'“._' +'äußerst_' +'zone_' +'ves_' +'v' +'throughout_' +'t' +'ssen_' +'request_' +'politics_' +'movement_' +'mentioned_' +'leben_' +'jede_' +'independent_' +'gleichen_' +'gleich_' +'ganzen_' +'fragen_' +'fest_' +'fair_' +'failed_' +'ermöglicht_' +'equal_' +'enlargement_' +'distribution_' +'direction_' +'ding_' +'danken_' +'coming_' +'choice_' +'cally_' +'Terrorismus_' +'Palestinian_' +'Minuten_' +'IMF_' +'Herren_' +'Funktion_' +'Anfang_' +'Abgeordneten_' +'zed_' +'völlig_' +'verstehen_' +'test_' +'supported_' +'shows_' +'setzt_' +'recht_' +'procedure_' +'principles_' +'lt_' +'lose_' +'ini_' +'includes_' +'ht_' +'hold_' +'gestellt_' +'gemeinsamen_' +'final_' +'fear_' +'e' +'domestic_' +'deficit_' +'consumer_' +'cher_' +'charge_' +'book_' +'base_' +'anything_' +'akan_' +'advanced_' +'X' +'W' +'Since_' +'Ressourcen_' +'Notwendigkeit_' +'Natürlich_' +'Kraft_' +'Korea_' +'Kontrolle_' +'Israeli_' +'Hand_' +'Fortschritte_' +'Erweiterung_' +'Debian_' +'Ausdruck_' +'Aufmerksamkeit_' +'übernachten_' +'x' +'web_' +'verfügen_' +'submitted_' +'speed_' +'reached_' +'produce_' +'perfect_' +'objectives_' +'mind_' +'ments_' +'initiative_' +'i' +'hoffe_' +'ground_' +'goods_' +'giving_' +'famous_' +'fallen_' +'entwickelt_' +'don_' +'considered_' +'class_' +'ck' +'cities_' +'bekommen_' +'additional_' +'accommodation_' +'Y_' +'Wann_' +'Viele_' +'Tage_' +'Security_' +'Rights_' +'Many_' +'Lisbon_' +'Folgen_' +'Federal_' +'E' +'Damen_' +'Blick_' +'Bild_' +'Bekämpfung_' +'Ausgaben_' +'Anwendung_' +'Angesichts_' +'Americans_' +'90_' +'45_' +'27_' +'22_' +'%._' +' % _' +'zudem_' +'wrong_' +'worked_' +'weder_' +'ut_' +'untuk_' +'tell_' +'später_' +'speak_' +'situated_' +'richtig_' +'restaurants_' +'res_' +'produced_' +'p' +'news_' +'m' +'ige_' +'häufig_' +'größte_' +'globale_' +'est_' +'enthalten_' +'emissions_' +'decided_' +'death_' +'completely_' +'brought_' +'au_' +'annual_' +'added_' +'Veränderungen_' +'Umwelt_' +'Services_' 
+'Schaffung_' +'Reihe_' +'Reform_' +'Instead_' +'Here_' +'Gesamt' +'Fund_' +'Finally_' +'Einfluss_' +'Durch_' +'December_' +'Dazu_' +'1791_' +'Änderungsantrag_' +'zahlen_' +'weapons_' +'voted_' +'technologies_' +'target_' +'secure_' +'requirements_' +'partners_' +'package_' +'options_' +'massive_' +'ism_' +'increasing_' +'goal_' +'files_' +'extent_' +'erung_' +'erster_' +'eigene_' +'contact_' +'consumption_' +'ber_' +'allows_' +'aim_' +'agreements_' +'Zentrum_' +'Text_' +'Schließlich_' +'Qualität_' +'Mitglied_' +'L' +'Kosovo_' +'Its_' +'Frieden_' +'During_' +'Chance_' +'300_' +'zweite_' +'won_' +'trotz_' +'tions_' +'technical_' +'students_' +'send_' +'prepared_' +'original_' +'mobile_' +'mail_' +'item_' +'function_' +'front_' +'f' +'extra_' +'entire_' +'election_' +'eben_' +'dialogue_' +'critical_' +'changed_' +'ang_' +'allowed_' +'Wettbewerb_' +'Verwendung_' +'So' +'Nutzung_' +'Nations_' +'Märkte_' +'Kultur_' +'Jahrhundert_' +'Italien_' +'Gästebewertungen_' +'Furthermore_' +'Erklärung_' +'Daher_' +'DE_' +'Beitrag_' +': „_' +'28_' +'1781_' +'zweiten_' +'wenige_' +'website_' +'wealth_' +'voll_' +'versuchen_' +'team_' +'supply_' +'stärker_' +'sorgen_' +'solidarity_' +'scale_' +'ring_' +'providing_' +'players_' +'paid_' +'opposition_' +'ling_' +'lang_' +'kam_' +'influence_' +'ier_' +'geworden_' +'genug_' +'gain_' +'ft_' +'forms_' +'follow_' +'erte_' +'erklären_' +'einschließlich_' +'distance_' +'concern_' +'concept_' +'carried_' +'campaign_' +'borders_' +'began_' +'ate_' +'aspects_' +'allein_' +'[_' +'WTO_' +'Server_' +'Programme_' +'Meer_' +'July_' +'Forschung_' +'Fehler_' +'Familie_' +'Ausschusses_' +'Although_' +'African_' +'? _' +': ' +'story_' +'stage_' +'server_' +'officials_' +'office_' +'offered_' +'nis_' +'legen_' +'leave_' +'jene_' +'insgesamt_' +'immigration_' +'hinter_' +'genannten_' +'fördern_' +'ful_' +'erwartet_' +'erwarten_' +'erneut_' +'doubt_' +'digital_' +'dari_' +'concerning_' +'bitte_' +'bevor_' +'apartment_' +'anderer_' +'Verbraucher_' +'Unsere_' +'Portugal_' +'Person_' +'Pakistan_' +'Organisation_' +'Opfer_' +'Ko' +'Idee_' +'H_' +'Griechenland_' +'Gesundheit_' +'EN_' +'DVD_' +'C' +' [_' +' ) _' +'zumindest_' +'z' +'writing_' +'worldwide_' +'verbessern_' +'uses_' +'users_' +'treffen_' +'tidak_' +'sed_' +'search_' +'save_' +'reports_' +'quiet_' +'professional_' +'privaten_' +'parking_' +'month_' +'map_' +'kosten_' +'jedem_' +'historical_' +'head_' +'gt_' +'generation_' +'funding_' +'einzige_' +'disease_' +'d' +'construction_' +'connection_' +'committed_' +'code_' +'child_' +'airport_' +'Werte_' +'Wasser_' +'Vergangenheit_' +'Unter_' +'Themen_' +'Stunden_' +'Prime_' +'Obwohl_' +'Most_' +'Mediterranean_' +'Linux_' +'Le_' +'Italian_' +'Information_' +'Herausforderung_' +'Flughafen_' +'Dialog_' +'Anti' +'Afrika_' +'übernehmen_' +'Änderungsanträge_' +'ying_' +'usually_' +'tic_' +'seek_' +'practical_' +'nimmt_' +'mus_' +'ms_' +'morning_' +'meinen_' +'material_' +'links_' +'kleine_' +'ja' +'implemented_' +'hoch_' +'helpful_' +'glauben_' +'getan_' +'geschaffen_' +'fishing_' +'erklärt_' +'effectively_' +'dollar_' +'deutschen_' +'demokratischen_' +'demands_' +'decline_' +'communication_' +'ch' +'benötigen_' +'applied_' +'angenommen_' +'amerikanische_' +'alternative_' +'Zeitpunkt_' +'Wahlen_' +'Tat_' +'Stelle_' +'Room_' +'Risiken_' +'People_' +'Parteien_' +'Lösungen_' +'Let_' +'Industrie_' +'Ihrem_' +'Hamas_' +'Fällen_' +'Frühstück_' +'Erstens_' +'Einkommen_' +'Dinge_' +'Dezember_' +'Center_' +'Austria_' +'Affairs_' +': "_' +'   ._' +'   . 
_' +'zunehmend_' +'zentrale_' +'works_' +'warum_' +'wants_' +'wanted_' +'vermeiden_' +'ver_' +'statement_' +'served_' +'series_' +'safe_' +'relationship_' +'provisions_' +'police_' +'neuer_' +'neben_' +'nce_' +'leadership_' +'leader_' +'join_' +'illegal_' +'gewesen_' +'ess_' +'eigentlich_' +'cs_' +'cken_' +'businesses_' +'border_' +'avoid_' +'authority_' +'applications_' +'appears_' +'agreed_' +'actions_' +'Zudem_' +'Schwierigkeiten_' +'Republic_' +'Präsidentin_' +'Please_' +'Juni_' +'J' +'IT_' +'Höhe_' +'Heute_' +'Greek_' +'Google_' +'George_' +'Egypt_' +'Economic_' +'Convention_' +'Amerikas_' +'2013_' +'1998_' +'160_' +'.  _' +' "..._' +'tra' +'structural_' +'star_' +'stable_' +'speech_' +'somit_' +'solchen_' +'schützen_' +'regards_' +'received_' +'read_' +'property_' +'powerful_' +'politischer_' +'path_' +'overall_' +'nearly_' +'n' +'method_' +'meinem_' +'lle_' +'legislative_' +'ine_' +'igkeit_' +'ideas_' +'getting_' +'folgt_' +'everyone_' +'establish_' +'ell_' +'drive_' +'cut_' +'competitive_' +'compared_' +'chinesische_' +'bit_' +'beitragen_' +'bare_' +'b' +'ations_' +'Zeiten_' +'Tagen_' +'Such_' +'Station_' +'Sozial' +'R' +'Position_' +'Nachfrage_' +'Management_' +'Latin_' +'Kingdom_' +'Integration_' +'Herzen_' +'Globalisierung_' +'Financial_' +'Club_' +'Bestimmungen_' +'Aktivitäten_' +'31_' +'3' +''' _' +'с' +'Änderungen_' +'zahlreiche_' +'word_' +'variety_' +'union_' +'trading_' +'talk_' +'serve_' +'rising_' +'requires_' +'reducing_' +'reduced_' +'mehrere_' +'leicht_' +'jüngsten_' +'joint_' +'instruments_' +'immediately_' +'ierte_' +'hinsichtlich_' +'geschlossen_' +'folgen_' +'erhöhen_' +'ere_' +'equipped_' +'elsewhere_' +'efficient_' +'durchgeführt_' +'discussion_' +'difference_' +'developments_' +'comprehensive_' +'bringt_' +'bewusst_' +'beide_' +'attacks_' +'anders_' +'Vertrauen_' +'Revolution_' +'Plan_' +'PC_' +'Konferenz_' +'Japanese_' +'Ireland_' +'Great_' +'Centre_' +'CO2_' +'Bitte_' +'Anzahl_' +'.' +'wichtiger_' +'weltweiten_' +'types_' +'train_' +'tools_' +'thousands_' +'suggest_' +'stock_' +'sectors_' +'school_' +'sagte_' +'representatives_' +'reichen_' +'promote_' +'productivity_' +'priority_' +'possibility_' +'park_' +'nationale_' +'mit' +'message_' +'medical_' +'las_' +'instrument_' +'initiatives_' +'ierten_' +'ial_' +'genommen_' +'ga_' +'frei_' +'farmers_' +'expected_' +'elements_' +'elected_' +'easily_' +'degree_' +'deficits_' +'chance_' +'bestehen_' +'ausgestattet_' +'attack_' +'ated_' +'affected_' +'Woche_' +'Web_' +'Vertrag_' +'Tu' +'Syria_' +'Stabilität_' +'Pro' +'Preise_' +'Policy_' +'Nicht_' +'Neu' +'März_' +'Microsoft_' +'Markt' +'Mail_' +'Lissabon_' +'Land' +'Jo' +'His_' +'Global_' +'Finanz' +'Energie_' +'Design_' +'Constitution_' +'Brazil_' +'Besuch_' +'Bereiche_' +'Bad_' +'A' +'. 
' +'whom_' +'ums_' +'ue_' +'tät_' +'turned_' +'relativ_' +'refugees_' +'reduction_' +'played_' +'para_' +'nächste_' +'ning_' +'ni_' +'middle_' +'mein_' +'letzte_' +'leider_' +'kaum_' +'k' +'ismus_' +'institutional_' +'forced_' +'expect_' +'erfolgreich_' +'enter_' +'diejenigen_' +'crucial_' +'commercial_' +'circumstances_' +'carry_' +'becoming_' +'bald_' +'aufgenommen_' +'activity_' +'Why_' +'Vereinten_' +'Verbesserung_' +'Technologie_' +'Te' +'Systems_' +'Standards_' +'Site_' +'Personal_' +'Osten_' +'Oktober_' +'October_' +'Not_' +'Küche_' +'Just_' +'Infrastruktur_' +'High_' +'Guest_' +'Grand_' +'Freiheit_' +'Free_' +'Finanzierung_' +'Directive_' +'CA' +'Auffassung_' +'According_' +'35_' +'29_' +'1980_' +'., _' +'* _' +')' +'® _' +'y' +'went_' +'travel_' +'ten' +'task_' +'sieht_' +'ship_' +'review_' +'religious_' +'relevant_' +'record_' +'procedures_' +'precisely_' +'pleased_' +'paar_' +'minute_' +'minister_' +'mention_' +'maintain_' +'leisten_' +'jeweiligen_' +'island_' +'investors_' +'improving_' +'hour_' +'hotels_' +'h' +'größere_' +'gesamte_' +'gekommen_' +'firms_' +'ence_' +'dringend_' +'dangerous_' +'conference_' +'colleagues_' +'c' +'break_' +'betrachtet_' +'bereich_' +'apply_' +'ance_' +'akzeptieren_' +'Währung_' +'Waffen_' +'Umgebung_' +'Trade_' +'Therefore_' +'Star_' +'Sicht_' +'N' +'IWF_' +'England_' +'Einführung_' +'Do_' +'Conference_' +'Co' +'Auswahl_' +'Asien_' +'Arbeitnehmer_' +'4' +', ' +'). _' +'zusätzliche_' +'ze' +'written_' +'white_' +'weise_' +'walking_' +'unbedingt_' +'trust_' +'tor_' +'tabled_' +'sts_' +'sse_' +'sign_' +'schaft_' +'sa_' +'s' +'round_' +'reserves_' +'regulations_' +'raised_' +'presence_' +'ped_' +'organisation_' +'neither_' +'namely_' +'mag_' +'länger_' +'ku' +'justice_' +'holiday_' +'historischen_' +'hands_' +'gives_' +'genießen_' +'ganze_' +'feature_' +'facing_' +'equipment_' +'draw_' +'documents_' +'denke_' +'deine_' +'boost_' +'banking_' +'attempt_' +'atmosphere_' +'assistance_' +'aimed_' +'agriculture_' +'advantage_' +'Verordnung_' +'Transparenz_' +'Tagesordnung_' +'Spanish_' +'Sorge_' +'Social_' +'Secondly_' +'Sea_' +'Rooms_' +'Robert_' +'Restaurants_' +'Nur_' +'Mai_' +'Linie_' +'Gründen_' +'Erfahrung_' +'Den_' +'Code_' +'Asian_' +'Also_' +'2' +'.)_' +'--_' +'weeks_' +'voting_' +'votes_' +'unterschiedlichen_' +'trying_' +'stets_' +'sm_' +'shift_' +'section_' +'sechs_' +'recovery_' +'programm_' +'pro' +'press_' +'pre_' +'phone_' +'ory_' +'oben_' +'networks_' +'ned_' +'nations_' +'nation_' +'modified_' +'merely_' +'membership_' +'meines_' +'lösen_' +'ley_' +'largely_' +'keiten_' +'kannst_' +'implement_' +'historic_' +'happen_' +'gs_' +'grounds_' +'goes_' +'gesetzt_' +'friends_' +'fort' +'floor_' +'eten_' +'establishment_' +'erkennen_' +'erfordert_' +'efficiency_' +'draft_' +'der' +'daily_' +'conclusion_' +'ches_' +'changing_' +'carbon_' +'buy_' +'burden_' +'bathroom_' +'assessment_' +'Wettbewerbsfähigkeit_' +'Schulden_' +'Rezession_' +'Regierungs' +'Raum_' +'Punkte_' +'Per' +'Museum_' +'Monaten_' +'Methode_' +'Jahrhunderts_' +'Islam_' +'Inter' +'Innovation_' +'Human_' +'Gruppen_' +'Groß' +'Europäer_' +'Diskussion_' +'Both_' +'Bedrohung_' +'Arbeitslosigkeit_' +'Al_' +'Airport_' +'8' +'2012_' +'Änderung_' +'zuletzt_' +'ys_' +'unten_' +'sta' +'sing_' +'ses_' +'scientific_' +'schwierig_' +'running_' +'rten_' +'regarding_' +'plus_' +'plant_' +'participation_' +'output_' +'normal_' +'nbsp_' +'mission_' +'lag_' +'ko' +'j_' +'ität_' +'innovation_' +'innen_' +'improved_' +'impossible_' +'hält_' +'hol' +'haus_' +'gezeigt_' +'ges_' +'gen' +'financing_' 
+'fe_' +'express_' +'export_' +'entsprechende_' +'ei_' +'deep_' +'decade_' +'contribution_' +'considerable_' +'competitiveness_' +'bodies_' +'bilden_' +'begin_' +'außerhalb_' +'Zweitens_' +'Zweifel_' +'Wirtschaftswachstum_' +'Wer_' +'Warum_' +'Vorteile_' +'Unterkategorien_' +'She_' +'Resort_' +'Republik_' +'Ra' +'Projekt_' +'Produktion_' +'Partner_' +'No' +'Mitte_' +'Lo' +'Investoren_' +'Forum_' +'Erfahrungen_' +'Energie' +'Einigung_' +'Du' +'Article_' +'Angebot_' +'... _' +'überzeugt_' +'ß_' +'zing_' +'za' +'vision_' +'versucht_' +'up' +'treten_' +'transparent_' +'told_' +'ti' +'spend_' +'speaking_' +'sites_' +'shopping_' +'sh_' +'screen_' +'says_' +'refer_' +'reading_' +'rd_' +'raum_' +'post' +'policymakers_' +'outcome_' +'operations_' +'operation_' +'opening_' +'ol' +'nach' +'multi_' +'mass_' +'manufacturing_' +'lies_' +'king_' +'ir_' +'intended_' +'insurance_' +'hu' +'hin' +'highest_' +'happened_' +'handeln_' +'gewinnen_' +'film_' +'families_' +'exports_' +'erfüllen_' +'ellen_' +'easier_' +'document_' +'derartige_' +'defense_' +'darstellt_' +'darstellen_' +'controls_' +'congratulate_' +'compromise_' +'clients_' +'braucht_' +'betrachten_' +'bestimmten_' +'bar' +'au' +'appear_' +'ans_' +'ale_' +'addressed_' +'Westen_' +'Welt' +'Wasser' +'Vielfalt_' +'Technologien_' +'Su' +'Street_' +'Spa_' +'RE' +'Public_' +'Privat' +'Poland_' +'Online_' +'Musik_' +'Kommissarin_' +'Kolleginnen_' +'John_' +'Inflation_' +'Handels' +'Folge_' +'Erholung_' +'ECB_' +'Da' +'Costa_' +'Because_' +'A5_' +'26_' +'2011_' +'überhaupt_' +'überall_' +'ßen_' +'zunächst_' +'ya' +'wählen_' +'wing_' +'wider_' +'waste_' +'vital_' +'victims_' +'useful_' +'urban_' +'theory_' +'structure_' +'ster_' +'staatlichen_' +'schwer_' +'saw_' +'sales_' +'relation_' +'rapidly_' +'profitieren_' +'primary_' +'presidency_' +'pre' +'powers_' +'planning_' +'offen_' +'numerous_' +'neues_' +'muß_' +'moral_' +'mainly_' +'lagen_' +'kurz_' +'ka_' +'investments_' +'inter' +'innovative_' +'heard_' +'gern_' +'generally_' +'gelegen_' +'fund_' +'freien_' +'finally_' +'establishing_' +'entspricht_' +'entscheiden_' +'eiten_' +'eindeutig_' +'details_' +'desire_' +'dengan_' +'core_' +'calls_' +'bestimmte_' +'bessere_' +'analysis_' +'amendment_' +'alt_' +'aktuelle_' +']], _' +'Zentralbank_' +'Vorschriften_' +'Volkswirtschaften_' +'Unser_' +'Tri' +'Time_' +'Team_' +'Strand_' +'Stimme_' +'Sinne_' +'Sicherheits' +'Sektor_' +'See' +'Schritte_' +'Reserve_' +'Re' +'Maße_' +'Juli_' +'Installation_' +'Herausforderungen_' +'Haushalts' +'Euro' +'En' +'Despite_' +'Dateien_' +'Congress_' +'Bundes' +'Brexit_' +'Brasilien_' +'Berichte_' +'Benutzer_' +'Arbeitsplätze_' +'Anteil_' +'Annahme_' +'Angelegenheiten_' +'Amsterdam_' +'Amendment_' +'worth_' +'worse_' +'vorgeschlagen_' +'transfer_' +'tool_' +'territory_' +'taxes_' +'steigen_' +'stated_' +'spirit_' +'spa' +'sent_' +'richtige_' +'release_' +'reference_' +'ping_' +'phase_' +'paper_' +'o' +'numbers_' +'moderne_' +'met_' +'menu_' +'me' +'linked_' +'limits_' +'learn_' +'interesting_' +'interested_' +'icht_' +'hoch' +'helped_' +'halte_' +'gesehen_' +'genuine_' +'gefunden_' +'g' +'freie_' +'fordern_' +'fixed_' +'fine_' +'fail_' +'extension_' +'examples_' +'erzielt_' +'enormous_' +'endlich_' +'encourage_' +'else_' +'eit_' +'dly_' +'district_' +'criteria_' +'continued_' +'consensus_' +'candidate_' +'buffet_' +'britischen_' +'books_' +'bonds_' +'bestimmt_' +'becomes_' +'baren_' +'apartments_' +'animals_' +'adopt_' +'accepted_' +'Worten_' +'Weltwirtschaft_' +'Wege_' +'Vor' +'Vertreter_' +'Urlaub_' +'Um' +'Turkish_' +'Ste' 
+'Status_' +'Selbst_' +'Sache_' +'Red_' +'RI' +'Nice_' +'NEW_' +'Muslim_' +'Meine_' +'Lebens' +'Last_' +'Konzept_' +'Januar_' +'Golf_' +'Gaza_' +'Einige_' +'Behandlung_' +'Alternative_' +'Agenda_' +'400_' +'0' +', „_' +'“_' +'у' +'Über' +'wo' +'willing_' +'weisen_' +'warm_' +'verbundenen_' +'understanding_' +'ul_' +'tried_' +'traffic_' +'tomorrow_' +'to' +'ter' +'television_' +'targets_' +'suggests_' +'sufficient_' +'stärken_' +'stronger_' +'spread_' +'signed_' +'shared_' +'separate_' +'seeking_' +'scope_' +'ro' +'respond_' +'released_' +'regionale_' +'ready_' +'putting_' +'published_' +'pass_' +'owing_' +'org_' +'modernen_' +'mittel_' +'minimum_' +'managed_' +'lu' +'log_' +'lernen_' +'kommenden_' +'kennen_' +'integrated_' +'improvement_' +'immediate_' +'identity_' +'hear_' +'green_' +'governance_' +'got_' +'games_' +'flight_' +'fellow_' +'exactly_' +'evening_' +'europäischer_' +'el' +'ehemaligen_' +'earlier_' +'difficulties_' +'damals_' +'dagegen_' +'cross_' +'crime_' +'comfort_' +'character_' +'camera_' +'box_' +'bezüglich_' +'beste_' +'behavior_' +'aten_' +'approved_' +'anti' +'acht_' +'abge' +'Zustimmung_' +'Za' +'Vielleicht_' +'USS_' +'Tra' +'Sterne_' +'Solidarität_' +'Sinn_' +'Risiko_' +'Regime_' +'Rechts' +'Prodi_' +'Nachbarn_' +'Monat_' +'Modell_' +'Ku' +'J_' +'In' +'III_' +'Her' +'Health_' +'Eindruck_' +'EZB_' +'Do' +'Clinton_' +'Business_' +'Bis_' +'Bilder_' +'Bau' +'Barack_' +'Au' +'+_' +'о' +'ät_' +'Überwachung_' +'www_' +'wirtschaftlicher_' +'wert_' +'welfare_' +'voters_' +'vo' +'verringern_' +'verpflichtet_' +'van_' +'unge' +'une_' +'unable_' +'umgesetzt_' +'ultimately_' +'summer_' +'street_' +'specifically_' +'sort_' +'sicherzustellen_' +'ser_' +'revolution_' +'resolve_' +'rer_' +'reflect_' +'quote_' +'ps_' +'protected_' +'port_' +'planet_' +'placed_' +'pada_' +'otherwise_' +'ones_' +'offering_' +'morgen_' +'millions_' +'mer' +'measure_' +'machine_' +'licher_' +'letztlich_' +'konzentrieren_' +'je' +'j' +'io_' +'host_' +'holding_' +'größeren_' +'greatest_' +'gleiche_' +'gelangen_' +'ga' +'führte_' +'faces_' +'expressed_' +'expensive_' +'era_' +'entweder_' +'entsprechend_' +'ensuring_' +'durch' +'download_' +'divided_' +'discussions_' +'discussed_' +'described_' +'dennoch_' +'deliver_' +'continues_' +'continent_' +'conclude_' +'comments_' +'color_' +'click_' +'broad_' +'bestand_' +'berg_' +'begrüße_' +'beginnen_' +'bedarf_' +'außerdem_' +'aus' +'arbeitet_' +'anderem_' +'alte_' +'accession_' +'abzu' +'Vorteil_' +'Video_' +'Versuch_' +'Trotz_' +'Tre' +'Standard_' +'Saudi_' +'Polen_' +'Pe' +'Or' +'Open_' +'Nachdem_' +'NA' +'Mo' +'Mit' +'Michael_' +'James_' +'Ist_' +'Haltung_' +'Gäste_' +'Gegenteil_' +'Entschließung_' +'Ent' +'El_' +'Bürgern_' +'Ben' +'Beitritt_' +'Arbeits' +'Anstieg_' +'75_' +'33_' +'32_' +'30' +'.._' +'%, _' +'” – _' +'ı' +'Ä' +'­_' +'window_' +'widely_' +'west_' +'wer_' +'vollständig_' +'veröffentlicht_' +'ve' +'ufen_' +'tte_' +'tradition_' +'thereby_' +'tan_' +'spent_' +'southern_' +'sources_' +'skills_' +'sanctions_' +'rural_' +'root_' +'reception_' +'profit_' +'priorities_' +'player_' +'partly_' +'oc' +'obligations_' +'nachdem_' +'militärische_' +'mar' +'läuft_' +'loss_' +'lo_' +'llen_' +'liberal_' +'ite_' +'it' +'industrie_' +'individuals_' +'höhere_' +'himself_' +'heutigen_' +'granted_' +'gi' +'format_' +'firm_' +'ff_' +'fand_' +'fahren_' +'expectations_' +'exclusive_' +'erlaubt_' +'entsprechenden_' +'eln_' +'einiger_' +'dürfte_' +'doesn_' +'detailed_' +'denken_' +'default_' +'cuts_' +'cover_' +'communities_' +'claim_' +'britische_' +'außer_' 
+'associated_' +'article_' +'ahead_' +'actual_' +'absolutely_' +'Wohn' +'Uni' +'UNO_' +'Trump_' +'Teilen_' +'Systeme_' +'Strategien_' +'Square_' +'Secretary_' +'Schulden' +'Regel_' +'Präsidentschaft_' +'Po' +'Männer_' +'Ma' +'Leute_' +'Kindern_' +'Kapital' +'Justice_' +'Ja' +'Islamic_' +'Homepage_' +'Geschäfts' +'Ger' +'Gebäude_' +'Frankfurt_' +'Firmen_' +'Erachtens_' +'Einrichtungen_' +'Ebenso_' +'Di' +'Christian_' +'Breakfast_' +'Bio' +'Ausland_' +'Argentina_' +'Ad' +'2014_' +'" (_' +' -, _' +'—_' +'ó' +'ä' +'Öl' +'wieder' +'whatever_' +'vorhanden_' +'ures_' +'unver' +'uner' +'ul' +'tt_' +'treated_' +'sun_' +'suchen_' +'stra' +'someone_' +'so' +'smaller_' +'slow_' +'sides_' +'seven_' +'ro_' +'represents_' +'relating_' +'regionalen_' +'rapid_' +'r' +'pour_' +'permanent_' +'pe_' +'payments_' +'parliamentary_' +'oren_' +'operating_' +'ons' +'nie' +'ne' +'möglichkeiten_' +'monitoring_' +'miteinander_' +'mark_' +'loans_' +'lo' +'listed_' +'link_' +'limit_' +'leads_' +'languages_' +'land' +'kt_' +'ju' +'ji' +'ization_' +'iten_' +'inzwischen_' +'introduced_' +'ians_' +'hören_' +'höher_' +'geschehen_' +'gel' +'garden_' +'funktioniert_' +'fuel_' +'französischen_' +'ff' +'expansion_' +'enti' +'discuss_' +'cutting_' +'corporate_' +'contemporary_' +'connected_' +'combination_' +'causes_' +'benutzt_' +'begann_' +'bear_' +'battle_' +'bars_' +'auszu' +'ating_' +'as' +'ana' +'Zur_' +'Zinsen_' +'Währungs' +'Volk_' +'U' +'Thus_' +'Their_' +'Standpunkt_' +'Se' +'Realität_' +'Prioritäten_' +'Nutzen_' +'Netherlands_' +'Natur_' +'MySQL_' +'Mer' +'Leistungen_' +'Krankheiten_' +'Klima' +'Klicken_' +'Insel_' +'Hoffnung_' +'Gre' +'God_' +'Gesellschaften_' +'Gegensatz_' +'Film_' +'Fe' +'Falle_' +'Eastern_' +'Dr_' +'Denn_' +'Democrats_' +'Car' +'Bewegung_' +'Best_' +'Augen_' +'Atmosphäre_' +'Abschluss_' +'2015_' +'. 
(_' +', "_' +'„_' +'über' +'äu' +'  _' +'wrote_' +'write_' +'wodurch_' +'win_' +'wesentlich_' +'weg_' +'verlieren_' +'verfahren_' +'va' +'unto_' +'unless_' +'ungefähr_' +'unacceptable_' +'umfassende_' +'turning_' +'trägt_' +'truth_' +'transparency_' +'transition_' +'traditionellen_' +'thinking_' +'thanks_' +'terrorist_' +'technological_' +'talks_' +'swimming_' +'suffer_' +'strategies_' +'stimmen_' +'stellte_' +'starke_' +'st' +'schließen_' +'russischen_' +'resulting_' +'represent_' +'relate_' +'regulatory_' +'ran_' +'punkt_' +'presidential_' +'picture_' +'oni' +'ok_' +'offiziellen_' +'offensichtlich_' +'ns' +'nis' +'nachhaltige_' +'models_' +'migration_' +'mid_' +'meaning_' +'maßnahmen_' +'materials_' +'lebih_' +'laut_' +'laid_' +'komplett_' +'ing' +'industries_' +'ical_' +'ha' +'großes_' +'gleich' +'gezwungen_' +'getroffen_' +'geo' +'functions_' +'followed_' +'folgenden_' +'figures_' +'faced_' +'fa' +'ey_' +'extensive_' +'eu' +'erhöht_' +'equally_' +'enz' +'entstehen_' +'ele' +'einander_' +'directives_' +'determined_' +'ded_' +'debates_' +'deaths_' +'daten_' +'contain_' +'closer_' +'cheap_' +'che' +'besondere_' +'berücksichtigt_' +'behandelt_' +'auf' +'arms_' +'arbeit_' +'announced_' +'Wochen_' +'Vergleich_' +'Unabhängigkeit_' +'Un' +'Umständen_' +'Two_' +'Stimmen_' +'Steuer' +'Sta' +'Spieler_' +'Shi' +'Second_' +'SA' +'Royal_' +'Pri' +'Op' +'Old_' +'Ohne_' +'Nun_' +'Nachfolgend_' +'Mitteilung_' +'Me' +'Lu' +'Le' +'Kritik_' +'Ke' +'Is_' +'Instrument_' +'ID_' +'Home_' +'Hinsicht_' +'Haupt' +'Gra' +'Gold_' +'Go' +'Given_' +'Gesetz_' +'Forschungs' +'Engagement_' +'Einrichtung_' +'EADS_' +'Dennoch_' +'Bildungs' +'Beschäftigung_' +'Beschreibung_' +'Apartments_' +'Amt_' +'Alliance_' +'Air_' +'AIDS_' +'9' +'5' +'1997_' +'1996_' +'1995_' +'150_' +'01_' +'и' +'е' +'á_' +'Über_' +'zimmer_' +'wollte_' +'weltweite_' +'vertreten_' +'verloren_' +'unseres_' +'unlikely_' +'track_' +'tischen_' +'the' +'supporting_' +'suffering_' +'sub' +'stress_' +'strengthen_' +'starting_' +'stands_' +'standing_' +'signal_' +'selbstverständlich_' +'sea' +'saving_' +'rt' +'ries_' +'restrictions_' +'radical_' +'proper_' +'politisch_' +'piece_' +'physical_' +'persönlichen_' +'perspective_' +'per' +'olitik_' +'older_' +'moving_' +'mis' +'min_' +'medium_' +'manage_' +'maintained_' +'laws_' +'keits' +'ked_' +'kaufen_' +'jährlich_' +'ischer_' +'introduction_' +'introduce_' +'inside_' +'independence_' +'increases_' +'imports_' +'ik_' +'humanitarian_' +'housing_' +'historische_' +'guidelines_' +'gs' +'gold_' +'gerecht_' +'gave_' +'gar' +'fo' +'flexible_' +'fire_' +'fields_' +'falsch_' +'expression_' +'exist_' +'except_' +'eventually_' +'euch_' +'erten_' +'ep' +'entry_' +'employees_' +'emphasis_' +'eingeführt_' +'ee_' +'duty_' +'dir_' +'dia' +'delegation_' +'criminal_' +'collapse_' +'coffee_' +'claims_' +'chi' +'chaft_' +'cat' +'carefully_' +'car' +'bottom_' +'bestehenden_' +'begrüßen_' +'barkeit_' +'ausge' +'armed_' +'anyone_' +'angeht_' +'ah_' +'ad_' +'Wissenschaft_' +'Wein' +'Verb' +'Ungleichheit_' +'Teile_' +'THE_' +'Stärkung_' +'Staats' +'Staates_' +'Sehr_' +'Sehenswürdigkeiten_' +'Rotary_' +'Ro' +'Reaktion_' +'Produktions' +'Ph' +'Ne' +'Name_' +'Na' +'NI_' +'Mi' +'La' +'Kunst_' +'Kompromiss_' +'Ka' +'Israelis_' +'Irish_' +'Initiativen_' +'IS' +'Hintergrund_' +'Forderung_' +'Ereignisse_' +'Copenhagen_' +'Chi' +'Can_' +'Bur' +'Binnenmarkt_' +'Beispiele_' +'Bau_' +'Basis_' +'Barroso_' +'Bar' +'Aufbau_' +'Aspekt_' +'Anfrage_' +'36_' +'14' +''._' +' = _' +' ... 
_' +'ö' +'ée_' +'zusätzlich_' +'zation_' +'welches_' +'weiterer_' +'victory_' +'vergessen_' +'ver' +'unbe' +'ub' +'truly_' +'teilweise_' +'tar' +'suffered_' +'struggle_' +'ski_' +'shops_' +'seriously_' +'selected_' +'riesigen_' +'resort_' +'remember_' +'pursue_' +'purchase_' +'playing_' +'phrase_' +'ourselves_' +'ort_' +'oral_' +'on' +'no' +'möglichen_' +'ming_' +'mehreren_' +'lt' +'looks_' +'lichkeit_' +'leistungen_' +'ld_' +'ld' +'launch_' +'laufen_' +'lan' +'laden_' +'kürzlich_' +'kle' +'jo' +'ium_' +'ish_' +'ire_' +'intervention_' +'implementing_' +'he' +'hauptsächlich_' +'happy_' +'grundlegende_' +'geändert_' +'gerne_' +'fähigkeit_' +'fresh_' +'flexibility_' +'fish_' +'erinnern_' +'erhält_' +'equivalent_' +'enterprises_' +'ene_' +'email_' +'dynamic_' +'diplomatic_' +'declaration_' +'database_' +'counter_' +'contrast_' +'conflicts_' +'completed_' +'combat_' +'collective_' +'calling_' +'ber' +'benutzen_' +'automatisch_' +'asylum_' +'asset_' +'anstatt_' +'animal_' +'angezeigt_' +'ach_' +'Zi' +'Y' +'Volks' +'Vo' +'Vereinbarung_' +'Verbindungen_' +'Unter' +'Soviet_' +'Sorgen_' +'Ri' +'Projekte_' +'Pro_' +'Private_' +'Post' +'Pi' +'Organisationen_' +'Mitarbeiter_' +'Krieges_' +'Korruption_' +'Investitions' +'Institute_' +'Informations' +'IP_' +'He' +'Haushalt_' +'Gesetze_' +'Front_' +'Foundation_' +'Fortschritt_' +'Fort' +'February_' +'Familien' +'Entwicklungsländern_' +'Dra' +'Computer_' +'Ca' +'CAMBRIDGE_' +'Bre' +'Board_' +'Bo' +'Beziehung_' +'Aufgrund_' +'Another_' +'48_' +'13' +' /_' +'”, _' +'т' +'р' +'és_' +'är' +'Ökonomen_' +'zufolge_' +'zahlreichen_' +'warming_' +'wa' +'verlangen_' +'ven_' +'under' +'tz_' +'tu' +'translation_' +'tests_' +'terrace_' +'tasks_' +'ständig_' +'stations_' +'starken_' +'staatliche_' +'spacious_' +'sofort_' +'sin' +'siehe_' +'shower_' +'selection_' +'seemed_' +'science_' +'rn' +'ri_' +'responsibilities_' +'relax_' +'relatively_' +'prosperity_' +'promoting_' +'por_' +'platz_' +'partnership_' +'parliament_' +'opened_' +'ongoing_' +'obvious_' +'nf' +'nennen_' +'methods_' +'meetings_' +'mechanism_' +'les' +'langen_' +'labour_' +'ise_' +'internationaler_' +'installation_' +'ina' +'ill_' +'ierungs' +'ier' +'ide_' +'ice_' +'houses_' +'ha_' +'gutes_' +'größer_' +'goals_' +'gemäß_' +'gegenwärtig_' +'französische_' +'faster_' +'erzielen_' +'ergreifen_' +'erfolgen_' +'entwickelten_' +'entirely_' +'entered_' +'eingesetzt_' +'economists_' +'du' +'driving_' +'dollars_' +'display_' +'defined_' +'darunter_' +'danger_' +'danach_' +'dalam_' +'crimes_' +'corruption_' +'contract_' +'constitution_' +'charged_' +'cer' +'cancer_' +'bu' +'bre' +'bly_' +'biggest_' +'beruht_' +'benötigt_' +'believed_' +'beds_' +'ausschließlich_' +'assets_' +'ans' +'agen_' +'advance_' +'administrative_' +'ade' +'achten_' +'accordance_' +'a' +']] [[_' +'\\_' +'Zunächst_' +'Your_' +'Würde_' +'Wissen_' +'Waren_' +'Vietnam_' +'Verpflichtungen_' +'Verpflichtung_' +'Verhältnis_' +'Verfassungs' +'Unterschied_' +'Unfortunately_' +'TA' +'Syrien_' +'Straße_' +'San' +'SI' +'SE' +'Ru' +'Q_' +'Pre' +'Pacific_' +'Neben_' +'Mor' +'Monetary_' +'Miss' +'Mexiko_' +'Mexico_' +'Men' +'Medien_' +'Mal' +'Live_' +'Landwirtschaft_' +'Königreich_' +'Kultur' +'Kopf_' +'Je' +'Irland_' +'Internationalen_' +'Hotel' +'Hong_' +'Hoch' +'Hause_' +'Han' +'HIV_' +'HA' +'Geld' +'Formen_' +'Fahr' +'Every_' +'Einklang_' +'EC_' +'Dar' +'DI' +'Click_' +'Cha' +'Ce' +'Bus' +'Bra' +'Bi' +'Atom' +'Arten_' +'Angriff_' +'Abend_' +'95_' +'64_' +'1989_' +'', _' +'”._' +'ст' +'а' +'ür' +'ß' +'zuvor_' +'wi' +'weg' +'wages_' +'w' +'verfolgen_' 
+'umzusetzen_' +'trend_' +'tre' +'tly_' +'ti_' +'tel_' +'teilen_' +'summit_' +'significantly_' +'sets_' +'sektor_' +'scha' +'sagt_' +'sa' +'rr' +'rin_' +'reservation_' +'reported_' +'rely_' +'rejected_' +'recognize_' +'rechts' +'rasch_' +'qua' +'prime_' +'pri' +'plants_' +'pictures_' +'persons_' +'peaceful_' +'par' +'ou_' +'ou' +'opposite_' +'op' +'obviously_' +'nu' +'north_' +'ni' +'ng' +'nes_' +'nder_' +'nationaler_' +'nar' +'named_' +'moved_' +'mm_' +'mer_' +'manchmal_' +'machte_' +'ll' +'lessons_' +'learning_' +'krise_' +'ki_' +'initial_' +'igung_' +'iger_' +'ied_' +'hn_' +'helping_' +'hei' +'guaranteed_' +'gesprochen_' +'gender_' +'genannt_' +'gelten_' +'geleistet_' +'formal_' +'fisheries_' +'finanziellen_' +'finanzielle_' +'figure_' +'fat' +'extended_' +'extend_' +'explain_' +'experts_' +'enen_' +'dy_' +'durchaus_' +'drug_' +'dra' +'do' +'diseases_' +'deutsche_' +'cuisine_' +'courses_' +'couple_' +'cor' +'contrary_' +'constitutional_' +'commitments_' +'charges_' +'cast_' +'capable_' +'candidates_' +'bound_' +'beachten_' +'ban_' +'balanced_' +'außerordentlich_' +'argue_' +'appeal_' +'anzu' +'ang' +'allowing_' +'alliance_' +'allgemeine_' +'ages_' +'absolute_' +'abhängig_' +'Wähler_' +'Wegen_' +'Verhalten_' +'Umwelt' +'Transport_' +'Tradition_' +'Städte_' +'Stadt' +'Sol' +'Si' +'Schäden_' +'Schule_' +'SS_' +'Rezeption_' +'Report_' +'Q' +'Perhaps_' +'Paul_' +'Pa' +'PHP_' +'Niveau_' +'Ni' +'Nahen_' +'NE' +'Mu' +'Mitgliedschaft_' +'Militär' +'Merkel_' +'Mat' +'Located_' +'Lin' +'Leistung_' +'Las_' +'LI' +'Kong_' +'Klein' +'Kern' +'Jetzt_' +'Instrumente_' +'Hälfte_' +'Generation_' +'Gegen' +'Flug' +'Finanzkrise_' +'Far' +'Familien_' +'Erde_' +'Du_' +'Daten' +'Chancen_' +'Cameron_' +'Berichts_' +'BMW_' +'Auto' +'Ausweitung_' +'Ausbildung_' +'Aufenthalt_' +'Anstrengungen_' +'Anforderungen_' +'Am' +'Altstadt_' +'AR' +'25' +'15' +'11' +' — _' +' –, _' +'ы' +'é' +'è' +'Öl_' +'Ägypten_' +'yourself_' +'wine_' +'wiederum_' +'wenigen_' +'welt' +'weight_' +'vulnerable_' +'voice_' +'verlassen_' +'verantwortlich_' +'vast_' +'urgent_' +'ual_' +'te' +'tan' +'tahun_' +'supports_' +'studies_' +'structures_' +'ss' +'species_' +'south_' +'solve_' +'smoking_' +'sitting_' +'sion_' +'sierung_' +'ships_' +'sharing_' +'severe_' +'session_' +'select_' +'seien_' +'season_' +'schlagen_' +'remove_' +'relative_' +'recommend_' +'recession_' +'reaching_' +'race_' +'pu' +'provision_' +'proved_' +'prospects_' +'promotion_' +'promise_' +'practices_' +'positions_' +'photos_' +'photo_' +'pension_' +'owned_' +'out' +'organisations_' +'nisse_' +'names_' +'nahme_' +'mutual_' +'mountain_' +'minority_' +'micro' +'memory_' +'love_' +'ln_' +'lesen_' +'langfristige_' +'la' +'kitchen_' +'ker' +'kar' +'ir' +'improvements_' +'images_' +'hy' +'hundreds_' +'honourable_' +'gre' +'garantiert_' +'führenden_' +'fällt_' +'funktionieren_' +'frühen_' +'founded_' +'fighting_' +'felt_' +'eye_' +'exists_' +'exercise_' +'ethnic_' +'essentially_' +'equality_' +'entschieden_' +'entscheidender_' +'enk' +'elegant_' +'einzigen_' +'einge' +'dritten_' +'dinner_' +'defend_' +'defence_' +'currencies_' +'criticism_' +'crises_' +'compatible_' +'closely_' +'cha' +'budgetary_' +'bt_' +'ble' +'berücksichtigen_' +'automatically_' +'austerity_' +'arrangements_' +'arabischen_' +'anderes_' +'and' +'amp' +'ag_' +'Wunsch_' +'Worte_' +'West' +'Washington_' +'Wahl' +'Wachstums' +'Voraussetzungen_' +'Vielzahl_' +'Verwaltungs' +'Untersuchung_' +'Treffen_' +'Times_' +'Teil' +'Süd' +'Sweden_' +'Steuern_' +'Stand_' +'Sprache_' +'Sohn_' +'Serbia_' +'See_' +'Schlüssel' 
+'Safety_' +'Rome_' +'Regulation_' +'Rechtsvorschriften_' +'Phase_' +'PT_' +'Ost' +'NT' +'Monate_' +'Mindest' +'MO' +'Liberalisierung_' +'Krisen' +'Kontakt_' +'Kind_' +'Kar' +'Ju' +'Jean_' +'Iraqi_' +'Inseln_' +'Größe_' +'Grund' +'Grenze_' +'Gleichzeitig_' +'Gi' +'Gesundheits' +'Friedens' +'Erhöhung_' +'El' +'Ei' +'Durchführung_' +'Drittens_' +'De_' +'David_' +'Dann_' +'Buch_' +'Bel' +'Bank' +'Bal' +'Bahn_' +'Aspekte_' +'Anwendungen_' +'Anspruch_' +'Angst_' +'Anerkennung_' +'Al' +'Agreement_' +'65_' +'39_' +'з' +'überprüfen_' +'ón_' +'í' +'á' +'   . – _' +'za_' +'wor' +'wirksam_' +'weak_' +'war' +'wagen_' +'vieler_' +'verstanden_' +'usw_' +'us' +'universal_' +'unabhängig_' +'uf' +'tz' +'turns_' +'tung_' +'tourist_' +'touch_' +'tive_' +'tis' +'testing_' +'surrounding_' +'sugar_' +'subsidies_' +'stre' +'sti' +'ssi' +'sonst_' +'soft_' +'societies_' +'serves_' +'ser' +'schreiben_' +'schneller_' +'scheme_' +'russische_' +'run' +'rit' +'richten_' +'representative_' +'remained_' +'reich_' +'recognise_' +'rechts_' +'ra' +'prüfen_' +'productive_' +'pra' +'po' +'pick_' +'peoples_' +'pen' +'payment_' +'participate_' +'parliaments_' +'parents_' +'over' +'origin_' +'organizations_' +'onen_' +'ogen_' +'occur_' +'musste_' +'mu' +'motion_' +'mode_' +'mo' +'maximum_' +'manner_' +'mandate_' +'mal' +'lä' +'losses_' +'lokalen_' +'lity_' +'lesson_' +'legt_' +'leaving_' +'le' +'kurzem_' +'killed_' +'kept_' +'jener_' +'ized_' +'ionen_' +'institution_' +'ig' +'ici' +'ia' +'hostels_' +'hit_' +'gung_' +'griechischen_' +'gla' +'gestimmt_' +'gegenwärtigen_' +'focused_' +'father_' +'factors_' +'extreme_' +'expense_' +'expenditure_' +'exit_' +'enorme_' +'emergency_' +'element_' +'einfache_' +'ehen_' +'east_' +'drugs_' +'discovered_' +'differences_' +'destruction_' +'demokratische_' +'dank_' +'da' +'cu' +'coordination_' +'consideration_' +'confirmed_' +'command_' +'choices_' +'ce' +'buildings_' +'brings_' +'bility_' +'bi' +'bestätigen_' +'belief_' +'bath_' +'außen_' +'ausreichend_' +'ausdrücklich_' +'aufzu' +'aufnehmen_' +'attractive_' +'arrival_' +'ard_' +'apart_' +'aims_' +'ably_' +'abe_' +'Ziel' +'Zeitraum_' +'Weltbank_' +'War' +'Vom_' +'Verwaltung_' +'Venezuela_' +'Var' +'Umfang_' +'Tour_' +'Tor' +'To' +'Thank_' +'Stu' +'Stil_' +'Stellung_' +'Sat' +'Santa_' +'Sanktionen_' +'Sand' +'Saddam_' +'SQL_' +'Rück' +'Russlands_' +'Roman_' +'Roma_' +'Richtlinien_' +'Rede_' +'Rechts_' +'Quelle_' +'Prozesses_' +'Pen' +'Patienten_' +'Other_' +'Option_' +'Mas' +'Mar' +'Luft' +'Leit' +'Leistungs' +'Lebanon_' +'Kyoto_' +'Krankheit_' +'Konflikt_' +'Klimawandel_' +'Kinder' +'Kan' +'Jeder_' +'Hel' +'Hauptstadt_' +'Ha' +'Gründe_' +'Green_' +'Gespräche_' +'Gemeinsamen_' +'Gebieten_' +'Führer_' +'Fähigkeit_' +'Funktionen_' +'Fonds_' +'Find_' +'Faktoren_' +'FR_' +'Emissionen_' +'Ein' +'Each_' +'Dadurch_' +'Charakter_' +'Brüssel_' +'Berichterstatterin_' +'Beide_' +'Begriff_' +'Beach_' +'BA' +'Auf' +'Administration_' +'80' +'23' +'21' +'1' +'.“ _' +' (' +'ären_' +'äre_' +'äge_' +'Übereinstimmung_' +'zählen_' +'zuge' +'yesterday_' +'wirtschaftlich_' +'wichtigste_' +'westlichen_' +'weiteres_' +'wachsende_' +'vous_' +'verstärken_' +'verfolgt_' +'van' +'unter' +'uni' +'ungs' +'und' +'umfassenden_' +'um' +'tzen_' +'trotzdem_' +'tor' +'talking_' +'systeme_' +'surrounded_' +'supposed_' +'super' +'succeed_' +'substances_' +'su' +'stru' +'spielt_' +'sovereignty_' +'sor' +'sit' +'sell_' +'seitens_' +'schools_' +'ru' +'rte' +'rs' +'rly_' +'rental_' +'remote_' +'reicht_' +'referred_' +'records_' +'recognition_' +'radio_' +'quick_' +'properly_' 
+'producers_' +'processes_' +'prevention_' +'pp' +'patients_' +'pan' +'packages_' +'pa' +'ot_' +'osi' +'ordinary_' +'müssten_' +'myself_' +'mittlerweile_' +'miss' +'militärischen_' +'mas' +'markt_' +'malaria_' +'losing_' +'looked_' +'ler' +'len' +'legitimate_' +'langsam_' +'l' +'kulturelle_' +'kt' +'konkrete_' +'keinerlei_' +'keiner_' +'intellectual_' +'informiert_' +'informed_' +'imposed_' +'impose_' +'import_' +'immigrants_' +'imagine_' +'households_' +'her' +'har' +'hand' +'han_' +'guide_' +'gruppe_' +'grow_' +'golf_' +'ght_' +'geschützt_' +'geb' +'gap_' +'ft' +'früher_' +'freiheit_' +'flows_' +'flat_' +'fit_' +'fell_' +'explanation_' +'ex_' +'ets_' +'est' +'eri' +'ended_' +'ek_' +'eight_' +'economics_' +'dunia_' +'division_' +'discover_' +'devices_' +'device_' +'detail_' +'derzeitigen_' +'depends_' +'del' +'definition_' +'deeply_' +'cycle_' +'cri' +'covered_' +'consultation_' +'conducted_' +'concluded_' +'compensation_' +'colleague_' +'coal_' +'cies_' +'cars_' +'bringing_' +'born_' +'bor' +'bon' +'blocks_' +'block_' +'bildet_' +'beziehen_' +'bezeichnet_' +'bestimmen_' +'beschlossen_' +'bemüht_' +'beigetragen_' +'beaches_' +'ban' +'ball_' +'back' +'ba' +'ausgesetzt_' +'attempts_' +'ati' +'at' +'assume_' +'asking_' +'arguments_' +'appeared_' +'andererseits_' +'an' +'allgemeinen_' +'allgemein_' +'al' +'ai' +'ahmen_' +'agency_' +'Wohlstand_' +'Will_' +'Widerstand_' +'Villa_' +'Very_' +'VO' +'Trek_' +'Ton' +'Tod_' +'Test' +'Ta' +'Streit' +'Straßen' +'Standard' +'Sprachen_' +'Speicher' +'Skype_' +'Sieg_' +'Sa' +'Rückgang_' +'Risiko' +'Regelung_' +'Real' +'Que' +'Pu' +'Produkt' +'Problemen_' +'Praxis_' +'Partnerschaft_' +'Ordnung_' +'OS_' +'OR_' +'Not' +'Nobel_' +'Nevertheless_' +'Media_' +'Mann_' +'Macht' +'MI' +'Leider_' +'Lei' +'Lebens_' +'Kriterien_' +'Kommunikation_' +'Kombination_' +'Karte_' +'Inhalt_' +'Industrie' +'Identität_' +'IT' +'Hä' +'Hostel_' +'Handels_' +'Geschäftsordnung_' +'Geldpolitik_' +'Geb' +'Fra' +'Foto_' +'Foreign_' +'Forderungen_' +'Februar_' +'Fax_' +'Experten_' +'Entwurf_' +'Entwicklungs' +'End' +'Ed' +'ER' +'Download_' +'Direkt' +'Dimension_' +'DE' +'Control_' +'Bos' +'Balkan_' +'Austrian_' +'Aussicht_' +'Aufgaben_' +'Arm' +'Analyse_' +'Allgemeinen_' +'Ale' +'Ala' +'Absch' +'AN' +'192' +'™ _' +'– _' +'м' +'zurückge' +'zero_' +'work' +'weit' +'vorge' +'virtually_' +'village_' +'ur' +'unterschiedliche_' +'unternommen_' +'unmittelbar_' +'tut_' +'ts' +'trifft_' +'trans' +'title_' +'temporary_' +'telephone_' +'substantial_' +'stance_' +'square_' +'sprach_' +'sports_' +'spa_' +'sovereign_' +'sized_' +'sieben_' +'sicherlich_' +'sha' +'sensitive_' +'senior_' +'schönen_' +'sage_' +'returns_' +'represented_' +'relaxing_' +'registered_' +'reflects_' +'referendum_' +'reden_' +'rag' +'quantitative_' +'profits_' +'producing_' +'print_' +'pi' +'perfectly_' +'pan_' +'overcome_' +'ord' +'or' +'onto_' +'olo' +'nts_' +'ngs_' +'newly_' +'nan' +'mussten_' +'multi' +'mor' +'ministers_' +'meist_' +'match_' +'marketing_' +'macroeconomic_' +'länder_' +'lten_' +'lovely_' +'lim' +'launched_' +'kraft_' +'klare_' +'kla' +'ki' +'keeping_' +'itu' +'isierung_' +'ise' +'ip' +'instance_' +'install_' +'inequality_' +'il' +'identify_' +'ian' +'hot_' +'ho' +'hinweisen_' +'heiten_' +'head' +'hardly_' +'groß_' +'globalization_' +'gli' +'gewählt_' +'gewisse_' +'gestalten_' +'ged_' +'ge' +'furniture_' +'formed_' +'forget_' +'flow_' +'fel' +'federal_' +'farming_' +'et' +'erstellt_' +'ernst_' +'erle' +'ergeben_' +'erfahren_' +'entschlossen_' +'enabling_' +'emphasise_' +'elle_' +'ek' +'ehr' +'edi' +'ear' 
+'distributed_' +'disputes_' +'destroyed_' +'deserves_' +'demanding_' +'decide_' +'dealing_' +'crew_' +'contribute_' +'continuing_' +'concrete_' +'comment_' +'combined_' +'combating_' +'cohesion_' +'cards_' +'button_' +'bul' +'broader_' +'briefly_' +'boom_' +'blood_' +'bezahlen_' +'bewegen_' +'bee' +'background_' +'auto' +'ausgaben_' +'aufge' +'atau_' +'argument_' +'ara_' +'ar' +'angeboten_' +'ancient_' +'ana_' +'am' +'aktiv_' +'afternoon_' +'ae' +'ada_' +'ad' +'ach' +'accounts_' +'accompanied_' +'accessible_' +'Zusammenbruch_' +'Zentralbanken_' +'Ze' +'Wo' +'Wirkung_' +'Verlust_' +'Unternehmens' +'Texte_' +'TI' +'Studie_' +'Sprach' +'Sport_' +'Spar' +'Sonder' +'Selbst' +'Sein_' +'School_' +'Schluss_' +'Schaden_' +'Runde_' +'Reform' +'Priorität_' +'Politik' +'Over_' +'Nord' +'Nach' +'Musik' +'Menschenrechts' +'Menge_' +'Madrid_' +'MA' +'Li' +'Law_' +'Lateinamerika_' +'Kredit' +'Kon' +'Justiz_' +'IN' +'Hinweis_' +'Hill_' +'Grundrechte_' +'Grunde_' +'Grad_' +'Good_' +'Gerichtshof_' +'Gemeinschafts' +'Enjoy_' +'Earth_' +'EL' +'Deutsche_' +'Dass_' +'Dan' +'DA' +'Cre' +'Con' +'Cho' +'Charta_' +'Cap' +'CO' +'Bus_' +'Budget_' +'Book_' +'Bon' +'Beihilfen_' +'Bay_' +'BR' +'Ausnahme_' +'Armee_' +'Apple_' +'Antrag_' +'Anreize_' +'Akteure_' +'Airbus_' +'AG_' +'@_' +': “_' +'44_' +'42_' +'1945_' +'194' +'190' +'188' +'12' +' €_' +'“' +'де' +'д' +'übernommen_' +'ör' +'zurück' +'zufrieden_' +'zeit' +'ysteme_' +'yo' +'wünschen_' +'wirtschaft_' +'wind_' +'wie' +'werk_' +'vorschlagen_' +'vorher_' +'vorgelegt_' +'vor' +'verändert_' +'verdient_' +'verbessert_' +'usual_' +'usi' +'una_' +'umfasst_' +'ue' +'täglich_' +'tting_' +'tten_' +'transfers_' +'ther_' +'th' +'tera' +'technische_' +'survive_' +'super_' +'sun' +'suite_' +'stone_' +'statements_' +'spring_' +'sold_' +'sobald_' +'sit_' +'secret_' +'seats_' +'schwe' +'runs_' +'roads_' +'rein_' +'regardless_' +'refugee_' +'recognized_' +'rch' +'rat_' +'rat' +'railway_' +'rage_' +'purposes_' +'protecting_' +'promised_' +'processing_' +'primarily_' +'precise_' +'politisches_' +'platform_' +'permitted_' +'paragraph_' +'organization_' +'offizielle_' +'occasions_' +'ob' +'nötig_' +'nten_' +'nte_' +'nta' +'notwendigen_' +'normalerweise_' +'nord' +'nit' +'niemand_' +'nge_' +'nearby_' +'ndo_' +'naturally_' +'na' +'museums_' +'mostly_' +'mini_' +'mini' +'metro_' +'metres_' +'menschlichen_' +'mechanisms_' +'luxurious_' +'liquidity_' +'leisure_' +'learned_' +'lay_' +'latter_' +'lage_' +'kh' +'journalists_' +'itt_' +'issued_' +'involve_' +'initially_' +'incentives_' +'ina_' +'impressive_' +'implications_' +'ik' +'id_' +'höheren_' +'höchsten_' +'ht' +'household_' +'hostel_' +'hoher_' +'hn' +'hip_' +'hinzu' +'heits' +'guter_' +'gun' +'gu' +'go' +'gewährleistet_' +'gewa' +'gespielt_' +'gelungen_' +'gel_' +'gegründet_' +'gegenwärtige_' +'gefallen_' +'garantieren_' +'gained_' +'führung_' +'fun_' +'forum_' +'fordert_' +'for' +'finding_' +'finanziert_' +'fied_' +'feststellen_' +'festgelegt_' +'feed_' +'fantastic_' +'existence_' +'exclusively_' +'excessive_' +'erwiesen_' +'erleben_' +'erklärte_' +'ering_' +'erfolgreichen_' +'erfolg' +'engineering_' +'endorse_' +'end' +'electronic_' +'electricity_' +'einzusetzen_' +'einsetzen_' +'eingerichtet_' +'eingehen_' +'effectiveness_' +'dé' +'dri' +'diversity_' +'disaster_' +'determine_' +'danke_' +'correct_' +'convenient_' +'communications_' +'coast_' +'club_' +'cho' +'chief_' +'chen' +'centuries_' +'cation_' +'category_' +'bur' +'brachte_' +'booking_' +'bla' +'bitten_' +'besseren_' +'bedroom_' +'availability_' +'aufzunehmen_' +'ationen_' 
+'army_' +'ari' +'appreciate_' +'apa' +'ante_' +'anbieten_' +'ama' +'ai_' +'agencies_' +'af' +'addressing_' +'ace' +'aba' +'ab' +']]_' +'Zweiten_' +'Zeit' +'Z_' +'YORK_' +'Within_' +'Willen_' +'Wieder' +'White_' +'Wahrheit_' +'WI' +'Verteidigung_' +'Vereinbarungen_' +'Verbrechen_' +'Val' +'Ur' +'Unterschiede_' +'Trans' +'Those_' +'Test_' +'Tau' +'Taiwan_' +'Tages_' +'TNG_' +'Städten_' +'Studien_' +'Spiele_' +'Son' +'Sommer_' +'Sind_' +'Sin' +'She' +'Sha' +'Seine_' +'Schul' +'Sarkozy_' +'SP' +'Regimes_' +'RA' +'Prä' +'Problems_' +'Prinzipien_' +'Premierminister_' +'Preis' +'Plaza_' +'Pin' +'Perspektive_' +'Page_' +'PS' +'Opposition_' +'ON' +'Nor_' +'Nein_' +'Neben' +'Muslims_' +'Multi' +'Meiner_' +'Meanwhile_' +'Location_' +'Libanon_' +'Lassen_' +'Lang' +'Kriegs' +'Kredite_' +'Kor' +'Kontroll' +'Konsens_' +'Klimaanlage_' +'Kirk_' +'King_' +'Kenntnis_' +'Jedes_' +'JavaScript_' +'Jahrzehnten_' +'Jahres' +'Island_' +'Iranian_' +'Ideen_' +'IC' +'Holz' +'Gefahren_' +'Finanzierungs' +'Fest' +'Facilities_' +'FI' +'Executive_' +'Erwartungen_' +'Erd' +'Entwicklungsländer_' +'Einzel' +'Einwohner_' +'Eigen' +'Doha_' +'Des' +'Depression_' +'Data_' +'Cuba_' +'Chile_' +'Bä' +'Bri' +'Ber' +'Bas' +'Ban' +'BE' +'AP' +'AM' +'> _' +'800_' +'52_' +'1991_' +'10' +'05_' +'.” _' +'."_' +') ' +' € _' +' > _' +'қа' +'я' +'к' +'в' +'б' +'č_' +'übertragen_' +'überrascht_' +'Österreich_' +'zugleich_' +'zo' +'zer' +'zentralen_' +'yr' +'winter_' +'widespread_' +'wesentlichen_' +'welcher_' +'weiter' +'wars_' +'warning_' +'wage_' +'volle_' +'vic' +'verwende_' +'verurteilt_' +'vehicles_' +'vehicle_' +'uss_' +'update_' +'unit_' +'unde' +'u' +'tä' +'troops_' +'tro' +'traditionelle_' +'tourism_' +'tori' +'tisch_' +'tend_' +'temp' +'technischen_' +'taste_' +'tal' +'swa' +'sustainability_' +'surplus_' +'sur' +'strengthening_' +'store_' +'sterben_' +'sport_' +'spoke_' +'spectrum_' +'sought_' +'solcher_' +'smus_' +'sme' +'situations_' +'sing' +'signs_' +'sel' +'schwierigen_' +'sar' +'sam' +'ré' +'rung_' +'rum' +'rti' +'rio' +'rin' +'rf' +'returned_' +'resource_' +'resolved_' +'replaced_' +'rei' +'regular_' +'raten_' +'push_' +'ption_' +'propose_' +'proc' +'prior_' +'preis' +'pleasure_' +'planned_' +'pie' +'phi' +'persönlich_' +'personally_' +'ought_' +'ora' +'opposed_' +'opinions_' +'ome' +'ok' +'offenen_' +'of' +'oe' +'nte' +'niemals_' +'ngs' +'nen' +'müsste_' +'möglichst_' +'mö' +'motor' +'mont' +'mon' +'mix_' +'mental_' +'meant_' +'mat' +'ma' +'luxury_' +'lp' +'li' +'letzter_' +'law' +'künftige_' +'kulturellen_' +'kostenlos_' +'ko_' +'knows_' +'kleiner_' +'kamen_' +'jeweils_' +'jam' +'intensive_' +'im' +'ile_' +'ie' +'ichen_' +'hrt_' +'hof_' +'hel' +'gruppen_' +'grundlegenden_' +'greenhouse_' +'greatly_' +'gra' +'gone_' +'glo' +'gewissen_' +'gan' +'führende_' +'fruit_' +'freue_' +'fi' +'fairly_' +'eyes_' +'essen_' +'erscheinen_' +'ersch' +'erg' +'erforderlichen_' +'era' +'enormen_' +'eng_' +'enabled_' +'elite_' +'electoral_' +'durchzuführen_' +'drei' +'dishes_' +'dining_' +'dienen_' +'derart_' +'depend_' +'definiert_' +'ct_' +'convinced_' +'condition_' +'cing_' +'chl' +'cher' +'cas' +'ca' +'burg_' +'budgets_' +'break' +'black_' +'bie' +'beschäftigt_' +'beobachten_' +'bela' +'bekämpfen_' +'basiert_' +'ay_' +'author_' +'ange' +'ambitious_' +'ale' +'ain_' +'afrikanischen_' +'aff' +'adoption_' +'acts_' +'acceptable_' +'absence_' +'abgeschlossen_' +'[[_' +'Zug' +'Ziffer_' +'Zer' +'Zentral' +'Wirklichkeit_' +'Win' +'Weitere_' +'Wandel_' +'WLAN_' +'Völker_' +'Vorbereitung_' +'Volkes_' +'Vienna_' +'Verkehrs' +'Verkehr_' +'Us' 
+'Umfeld_' +'Tele' +'Teilnahme_' +'Tar' +'Swedish_' +'Summit_' +'Suche_' +'Strukturen_' +'Straf' +'Stellen_' +'Stadtzentrum_' +'Sport' +'Spitze_' +'Spiel' +'Special_' +'Sollte_' +'Sel' +'Science_' +'Sche' +'Samsung_' +'Saint_' +'Saa' +'Rou' +'Rest' +'Research_' +'Rahmen' +'Protokoll_' +'Pay' +'Pan' +'Paket_' +'Optionen_' +'ON_' +'Nutzer_' +'Mon' +'Mitteln_' +'Menschen' +'Maß_' +'Martin_' +'Mac' +'MEPs_' +'Los_' +'LONDON_' +'Körper_' +'Kräfte_' +'Konvent_' +'Kommunikations' +'Kil' +'Kandidaten_' +'Java_' +'Japans_' +'Gu' +'GmbH_' +'General' +'Gefühl_' +'Gedanken_' +'Gebiete_' +'Gas' +'Garten_' +'Fähigkeiten_' +'For' +'Firstly_' +'Fach' +'Export' +'Ex' +'Erklärungen_' +'Eisenbahn' +'ES' +'EA' +'Druck' +'Dor' +'Der' +'DS' +'Czech_' +'Ci' +'Chris_' +'Chinesen_' +'Check_' +'Che' +'CIA_' +'CH' +'Bor' +'Beweis_' +'Beschäftigungs' +'Bei' +'Ball' +'Bade' +'BI' +'Atlantic_' +'Argentinien_' +'Apartment_' +'Angelegenheit_' +'Amerikaner_' +'Aktien' +'Ag' +'Absicht_' +'Ab' +'AC' +'?' +'47_' +'16' +'06_' +'03_' +'00' +'...' +',' +'!' +'і' +'п' +'ни' +'н' +'л' +'ück' +'übrigen_' +'öl' +'öffentlich_' +'ía_' +'än' +'Überprüfung_' +'Ü' +'zweifellos_' +'zentrum_' +'zahl' +'ystem_' +'xi' +'wirken_' +'welt_' +'wall_' +'waiting_' +'wait_' +'wachsenden_' +'wa_' +'vorgesehenen_' +'vorgeschlagenen_' +'visitors_' +'vir' +'verstärkt_' +'verleihen_' +'urs_' +'unmöglich_' +'unity_' +'unfortunately_' +'ug' +'uer' +'uel' +'typical_' +'tt' +'tit' +'tionen_' +'threats_' +'threatened_' +'tet_' +'teile_' +'sustained_' +'sur_' +'suitable_' +'submit_' +'strongly_' +'strength_' +'stimulus_' +'steuer' +'ster' +'steigern_' +'stattfinden_' +'sozial' +'son' +'sky_' +'ski' +'sinnvoll_' +'sight_' +'settlement_' +'sen' +'selbst' +'seite_' +'sei' +'seat_' +'schriftlich_' +'schlecht_' +'scher_' +'scheinen_' +'rou' +'rk' +'ride_' +'rge' +'reply_' +'rep' +'rend' +'remaining_' +'relief_' +'reliable_' +'regimes_' +'reb' +'reagieren_' +'raising_' +'quarter_' +'prove_' +'prospect_' +'proposing_' +'prison_' +'praktisch_' +'possibly_' +'plenty_' +'pattern_' +'outdoor_' +'operate_' +'ola' +'obtain_' +'nsi' +'nommen_' +'nom' +'nk_' +'neighbors_' +'necessarily_' +'nda' +'mögen_' +'mp' +'movements_' +'monitor_' +'mm' +'meter_' +'med_' +'managing_' +'maintenance_' +'lor' +'loo' +'lokale_' +'lly_' +'liked_' +'lier' +'lie_' +'leu' +'length_' +'legitimacy_' +'lad' +'kti' +'kostet_' +'konzentriert_' +'ite' +'islands_' +'invest_' +'interface_' +'ini' +'ingly_' +'immt_' +'ih' +'iel' +'identified_' +'hundred_' +'hr' +'hoffen_' +'hing_' +'highlight_' +'hi' +'heißen_' +'hearing_' +'harus_' +'han' +'halt_' +'globalisation_' +'gerichtet_' +'geraten_' +'generated_' +'gelegt_' +'gehalten_' +'freundlich_' +'frei' +'foot_' +'fonds_' +'festgestellt_' +'false_' +'falling_' +'experienced_' +'estate_' +'eröffnet_' +'erwähnt_' +'erstellen_' +'erfolgt_' +'ered_' +'entscheidenden_' +'entscheidende_' +'enten_' +'ent' +'enhance_' +'ene' +'ending_' +'enables_' +'eg' +'edited_' +'ed' +'ect' +'drop_' +'drink_' +'dramatic_' +'didn_' +'demonstrated_' +'delivered_' +'delay_' +'dal' +'customer_' +'ction_' +'credibility_' +'counter' +'controlled_' +'contracts_' +'considering_' +'confirm_' +'conclusions_' +'client_' +'cit' +'church_' +'chosen_' +'chain_' +'browser_' +'bo' +'bin' +'bezahlt_' +'betroffen_' +'besuchen_' +'besonderen_' +'besitzt_' +'beschränkt_' +'bequem_' +'behalten_' +'begonnen_' +'bedeuten_' +'bas' +'bahwa_' +'aut_' +'ausgesprochen_' +'audio_' +'ative_' +'association_' +'argued_' +'approximately_' +'applies_' +'applicable_' +'ano' +'alt' +'allies_' +'alb' 
+'akzeptiert_' +'aka' +'aircraft_' +'afford_' +'advantages_' +'adults_' +'adjustment_' +'absch' +']]._' +'Zahlen_' +'Yu' +'Währungsunion_' +'Women_' +'Without_' +'Wien_' +'Webseite_' +'Web' +'Vorsitzenden_' +'Vorschlägen_' +'Voraussetzung_' +'Verhandlungs' +'Verg' +'Verbreitung_' +'Verbraucher' +'Unternehmens_' +'USD_' +'USB_' +'UR' +'Tro' +'Then_' +'Thailand_' +'Termin' +'Ter' +'Technik_' +'Tal' +'Tages' +'TE' +'Symbol_' +'Sub' +'Start_' +'Sitzung_' +'Sim' +'Schutz' +'Schau' +'Reise_' +'Rechnung_' +'Ratsvorsitz_' +'Pool_' +'Player_' +'Plan' +'Partei' +'Par' +'Palästinenser_' +'Palace_' +'Pact_' +'PPE_' +'Ob' +'OECD_' +'Nr_' +'Note_' +'Nos_' +'Nigeria_' +'Nacht' +'Märkten_' +'Myanmar_' +'Munich_' +'Mittelpunkt_' +'Mitgliedern_' +'Mission_' +'Ministerpräsident_' +'Minderheiten_' +'Micro' +'Market_' +'Mag' +'Len' +'Laufe_' +'Labour_' +'Kunst' +'Kooperation_' +'Kontinent_' +'Kim_' +'Israels_' +'Indian_' +'Hussein_' +'Hostelsclub_' +'Handeln_' +'Hamburg_' +'Hal' +'Gründung_' +'Glaubwürdigkeit_' +'Gesch' +'Geh' +'Frühstücksbuffet_' +'Frei' +'Fi' +'Fernsehen_' +'Fal' +'Faktor_' +'Express_' +'Environment_' +'Entwicklungen_' +'Energy_' +'Eltern_' +'Due_' +'Dienste_' +'Die' +'Dep' +'Dem_' +'Datenbank_' +'Cyprus_' +'Cor' +'Company_' +'Cold_' +'Charter_' +'Charles_' +'Char' +'CI' +'CAN' +'Buch' +'Brussels_' +'Blog_' +'Bild' +'Bewertung_' +'Belarus_' +'Before_' +'Bedenken_' +'Back' +'Audio_' +'Association_' +'Arbeitskräfte_' +'Alt' +'Aktionen_' +'Adresse_' +'Abgeordnete_' +'AT' +'3D_' +'37_' +'24' +'20' +'193' +'191' +'09_' +'03' +'/ _' +'/' +') - _' +'() _' +'"' +' -- _' +'” (_' +'н_' +'е_' +'überzeugen_' +'Öffnung_' +'zusätzlichen_' +'zi' +'yn' +'yes_' +'ye_' +'wächst_' +'wood_' +'wonderful_' +'wishes_' +'wireless_' +'weist_' +'weather_' +'wave_' +'wahr' +'vorzu' +'vorgesehen_' +'visible_' +'vis_' +'verändern_' +'verteidigen_' +'vert' +'verlangt_' +'verhindert_' +'ven' +'uren_' +'una' +'uk' +'tzung_' +'tur' +'trouble_' +'trip' +'trillion_' +'treaty_' +'totally_' +'top' +'tele' +'teil_' +'tas_' +'symbol_' +'swi' +'sum_' +'successfully_' +'streets_' +'strategischen_' +'strategische_' +'ste' +'stag' +'sso' +'spricht_' +'spot_' +'somewhat_' +'solely_' +'sodass_' +'sichern_' +'scho' +'schlägt_' +'schi' +'sch' +'sbe' +'sauna_' +'sale_' +'rta' +'roughly_' +'rischen_' +'rg_' +'restore_' +'residential_' +'rescue_' +'removed_' +'religion_' +'regularly_' +'reflection_' +'ree' +'rechte_' +'reaction_' +'rare_' +'qualität_' +'preparation_' +'preise_' +'populist_' +'pollution_' +'pli' +'pleasant_' +'pit' +'pin' +'personnel_' +'perform_' +'patent_' +'passiert_' +'passed_' +'outstanding_' +'ott' +'orm' +'op_' +'ology_' +'olin' +'ol_' +'od' +'noise_' +'nke' +'nights_' +'nicht' +'ndi' +'muslimischen_' +'mpe' +'motor_' +'momentan_' +'ml' +'mindestens_' +'meaningfully_' +'mba' +'markt' +'manche_' +'maintaining_' +'lla' +'lived_' +'linie_' +'liches_' +'liberalisation_' +'let' +'lar' +'lan_' +'kur' +'kontrollieren_' +'kom' +'kin' +'ken' +'karte_' +'jenen_' +'jemand_' +'jegliche_' +'japanische_' +'ita' +'iso' +'installed_' +'ins' +'ingen_' +'inform_' +'inevitable_' +'index_' +'inden_' +'incomes_' +'imbalances_' +'ien' +'ida' +'hä' +'humanity_' +'http_' +'hoo' +'hingegen_' +'hi_' +'heavy_' +'hardware_' +'guarantees_' +'grammatically_' +'gradually_' +'gr' +'gezogen_' +'gestärkt_' +'genügend_' +'genutzt_' +'generations_' +'gene' +'gem' +'gefordert_' +'gan_' +'fördert_' +'free' +'frage_' +'forest_' +'flo' +'firstly_' +'fest' +'fer' +'fen' +'female_' +'fehlt_' +'fees_' +'fears_' +'fe' +'ez_' +'explained_' +'excess_' 
+'exception_' +'ex' +'estimated_' +'es' +'erreichbar_' +'erinnert_' +'erf' +'er' +'entstanden_' +'entlang_' +'entitled_' +'enst' +'eni' +'ender_' +'ende' +'ences_' +'en' +'eliminate_' +'ektor_' +'einerseits_' +'ei' +'egen_' +'echte_' +'dro' +'driver_' +'driven_' +'door_' +'discussing_' +'discrimination_' +'dis' +'direkten_' +'diplomacy_' +'dimension_' +'dent' +'deiner_' +'declared_' +'daraus_' +'dapat_' +'dans_' +'cru' +'cou' +'copy_' +'convergence_' +'conventional_' +'contextually_' +'containing_' +'contained_' +'cons' +'connections_' +'conce' +'computers_' +'col' +'co' +'classic_' +'ckt_' +'civilian_' +'chten_' +'charming_' +'channels_' +'cash_' +'capitalism_' +'cad' +'blue_' +'bislang_' +'bildung_' +'bewältigen_' +'betreffen_' +'beschleunigen_' +'ben' +'bel' +'bei' +'beginnt_' +'bef' +'barriers_' +'bal' +'baby_' +'ba_' +'ausgezeichneten_' +'aufzubauen_' +'aufgezahlt_' +'attitude_' +'assure_' +'asi_' +'arbeits' +'appointed_' +'amounts_' +'alter_' +'alle' +'album_' +'ala' +'ag' +'affects_' +'adi' +'acknowledge_' +'achieving_' +'acce' +'abuse_' +'Zustand_' +'Zeichen_' +'Ya' +'Wohl' +'Wird_' +'Wiki' +'Wettbewerbs' +'Wa' +'Volkswirtschaft_' +'Veränderung_' +'Verträge_' +'Verf' +'Verbot_' +'Verbesserungen_' +'Up' +'Universität_' +'Under_' +'Tun' +'Ti' +'The' +'Süden_' +'Sy' +'Sur' +'Stärke_' +'Straßen_' +'Stellungnahme_' +'Stability_' +'Staats_' +'Spe' +'Slo' +'Ski' +'Sitz_' +'Schulen_' +'SM' +'SA_' +'Rund' +'Rom' +'Regional_' +'Regierungschefs_' +'Ratschlag_' +'Quellen_' +'Puerto_' +'Programms_' +'Prinzip_' +'Pra' +'Port' +'Para' +'Paper_' +'Palestine_' +'PL' +'Os' +'Orten_' +'Open' +'OK_' +'Norden_' +'Non_' +'Netz_' +'Nacht_' +'NO' +'Mont' +'Ministers_' +'Mel' +'Mehr_' +'May' +'Man' +'Malaysia_' +'Lord_' +'Likewise_' +'Like_' +'Lebensmittel' +'LE' +'LA' +'Konsum_' +'Komfort_' +'Klima_' +'KDE_' +'Jahrzehnt_' +'Islands_' +'Innen' +'Herstellung_' +'Hersteller_' +'Herrschaft_' +'Grün' +'Gro' +'Gesetzgebung_' +'Gerichts' +'Geist_' +'Gegner_' +'Ge' +'Gast' +'Gang_' +'GE' +'Fotos_' +'Foto' +'Flüchtlinge_' +'Finland_' +'Finde_' +'Falls_' +'Est' +'Einwanderung_' +'Einstellung_' +'EC' +'Does_' +'Dienstleistungs' +'Dia' +'Demokraten_' +'Deb' +'Datei' +'DO' +'DC_' +'Class_' +'Canada_' +'C5_' +'Botschaft_' +'Beim_' +'BO' +'Avi' +'Auto_' +'Aufstieg_' +'As' +'Arbeitsmarkt_' +'Arbeiter_' +'Amendments_' +'Alter_' +'Ai' +'Agency_' +'Adobe_' +'Action_' +'Abschließend_' +'AU' +'://_' +'700_' +'49_' +'250_' +'1979_' +'1970_' +'196' +'186' +'08_' +'07_' +'.“_' +'-/_' +'," _' +'): _' +') (_' +'(' +'' ' +''' +' ….. 
_' +' )._' +'š' +'überlassen_' +'ös' +'öffnen_' +'ès_' +'ätzen_' +'äng' +'ähnlich_' +'Übersetzung_' +'Übersch' +'Ökonomie_' +'Ö' +'zy' +'zustimmen_' +'zusammenge' +'zusammen' +'zugrunde_' +'zuerst_' +'zeichen_' +'ys' +'yp' +'yer_' +'ws_' +'worst_' +'wonder_' +'wollten_' +'windows_' +'willkommen_' +'wesentliche_' +'warten_' +'wal' +'wahren_' +'vors' +'visited_' +'vis' +'versions_' +'verschärft_' +'verehrte_' +'vere' +'va_' +'ux_' +'ute' +'ut' +'ust' +'use' +'ursprünglichen_' +'ursprünglich_' +'uan_' +'twenty_' +'tti' +'tter_' +'travel' +'towns_' +'topic_' +'time' +'tersebut_' +'teachers_' +'taxpayers_' +'tag_' +'ta' +'sz' +'substance_' +'stun' +'stocks_' +'stem_' +'staying_' +'stayed_' +'statistics_' +'stadt_' +'sse' +'speakers_' +'spe' +'soil_' +'sla' +'sis_' +'sicherheit_' +'shed_' +'shares_' +'serviert_' +'sensible_' +'seg' +'see' +'schä' +'savings_' +'saved_' +'satellite_' +'sal' +'ry' +'rund' +'rst' +'rre' +'rot' +'roll_' +'ris' +'richtlinie_' +'richtigen_' +'revolutionary_' +'retten_' +'retirement_' +'reserve_' +'remind_' +'religiösen_' +'reject_' +'ref' +'reasonable_' +'ram' +'que' +'prä' +'prepare_' +'praktische_' +'pr' +'ports_' +'porta' +'politically_' +'phenomenon_' +'pem' +'pass' +'parallel_' +'par_' +'pal' +'oy' +'operational_' +'oma' +'offices_' +'offene_' +'obstacles_' +'näher_' +'nz' +'ntr' +'nto' +'nt' +'notwendige_' +'noted_' +'nochmals_' +'nc' +'nahm_' +'nachhaltig_' +'my' +'mut' +'mother_' +'mixed_' +'mittel' +'mic' +'messen_' +'marks_' +'mand' +'manager_' +'machines_' +'ländlichen_' +'ländern_' +'ls' +'listen_' +'lin_' +'ließen_' +'li_' +'lei' +'lea' +'lawyers_' +'langfristig_' +'lam' +'kurze_' +'kov' +'kor' +'knew_' +'kinds_' +'kel' +'jungen_' +'jpg_' +'joined_' +'items_' +'interessiert_' +'intention_' +'input_' +'innovations_' +'indicated_' +'inde' +'inc' +'in' +'ili' +'igt_' +'ified_' +'ideological_' +'ich' +'ible_' +'höchste_' +'hren_' +'hole_' +'hm' +'hilft_' +'hier' +'hervor_' +'heraus_' +'heil' +'heavily_' +'ham_' +'gäbe_' +'gro' +'gets_' +'gesellschaft_' +'ges' +'gera' +'ger' +'genauso_' +'früheren_' +'frag' +'ford_' +'flights_' +'fla' +'finde_' +'ffer' +'fertig_' +'failures_' +'expand_' +'ese' +'escape_' +'ern' +'erheblich_' +'erb' +'epi' +'entwickelte_' +'ents_' +'ente_' +'enhanced_' +'engagement_' +'engage_' +'encouraging_' +'emi' +'emerged_' +'ema' +'eite_' +'einzu' +'einzigartigen_' +'einfachen_' +'eigener_' +'ef' +'echten_' +'eat_' +'drawn_' +'down' +'disc' +'died_' +'democracies_' +'dee' +'court_' +'corner_' +'convert_' +'contributions_' +'consequence_' +'conditioning_' +'components_' +'collection_' +'cold_' +'coalition_' +'ci_' +'ci' +'careful_' +'cal' +'cable_' +'bri' +'brand_' +'betreiben_' +'besitzen_' +'berg' +'beinahe_' +'behaupten_' +'behandeln_' +'begrenzt_' +'bathrooms_' +'attractions_' +'ator_' +'ates_' +'applying_' +'angesprochen_' +'angen_' +'amenities_' +'alen_' +'ah' +'affairs_' +'advertising_' +'adequate_' +'abroad_' +'Zugriff_' +'Zug_' +'Zu' +'Yes_' +'Währungsfonds_' +'Wor' +'Wirtschaftspolitik_' +'Who_' +'Where_' +'Weltkrieg_' +'Wall_' +'Videos_' +'Verh' +'Ve' +'Valencia_' +'Ursachen_' +'UN' +'Truppen_' +'Todes' +'Tiere_' +'Though_' +'Tests_' +'Terroristen_' +'Telefon' +'Tab' +'TER' +'System' +'Stimm' +'Stein' +'Stan' +'Sony_' +'Sho' +'Set_' +'Scho' +'Ruhe_' +'Rest_' +'Reise' +'Regulierung_' +'Regelungen_' +'Ratspräsidentschaft_' +'Rats' +'RD' +'Prüfung_' +'Produkt_' +'Polizei_' +'Polish_' +'Place_' +'Pla' +'Peter_' +'Palestinians_' +'Pala' +'Or_' +'Ol' +'OS' +'Nas' +'My' +'Moscow_' +'Moo' +'Moment_' +'Mittelmeer_' +'Meeres' +'Medien' 
+'Mass' +'Marktes_' +'Mad' +'MS_' +'MP' +'Licht_' +'Kurz_' +'Kr' +'Konsum' +'Konsequenzen_' +'Konjunktur' +'Komponenten_' +'Kilometer_' +'Ker' +'Ken' +'Keine_' +'Katastrophe_' +'Kap' +'Joseph_' +'Institution_' +'Insbesondere_' +'Inn_' +'Immobilien' +'Ihres_' +'IA' +'Ho' +'Having_' +'Haushalte_' +'Hauptbahnhof_' +'Gut' +'Golden_' +'Glück_' +'Gleichgewicht_' +'Gipfel_' +'Gew' +'Gemeinden_' +'Gegenstand_' +'GNU_' +'Fälle_' +'Fu' +'Freunde_' +'Freuen_' +'Fla' +'Fisch' +'Firma_' +'Fer' +'Fenster_' +'FA' +'Einheit_' +'Einhaltung_' +'ES_' +'Dutch_' +'Deutschlands_' +'Details_' +'Definition_' +'Day_' +'Dam' +'Cu' +'Common_' +'Comm' +'College_' +'Cam' +'CE_' +'CE' +'CD' +'Burg' +'Bulgaria_' +'Boden_' +'Bla' +'Bis' +'Betriebs' +'Besides_' +'Beschluss_' +'Berücksichtigung_' +'Berufs' +'Bedarf_' +'Bars_' +'Banken' +'Ba' +'Auge_' +'Auftrag_' +'Aufst' +'Aufnahme_' +'Are_' +'Arbeiten_' +'Ar' +'Ant' +'Anpassung_' +'Angela_' +'Angaben_' +'Anbetracht_' +'Amtszeit_' +'Ambiente_' +'All' +'AS' +'A3' +':' +'85_' +'60' +'34_' +'26' +'1993_' +'1992_' +'02' +'-' +') – _' +'({{_' +'  _' +'’._' +'у_' +'ж' +'во' +'ün' +'übertr' +'überaus_' +'ña_' +'ätze_' +'änder_' +'älteren_' +'Übergang_' +'Ärzte_' +'´_' +' – _' +'zuständig_' +'zin' +'ziemlich_' +'zieht_' +'zer_' +'zar' +'ystems_' +'wissenschaftlichen_' +'whereas_' +'western_' +'wel' +'wei' +'wachsen_' +'vorstellen_' +'vorlegen_' +'voller_' +'ving_' +'vieles_' +'vi' +'vern' +'verfügbar_' +'verbindet_' +'urge_' +'unterzeichnet_' +'unt' +'une' +'undoubtedly_' +'uncertainty_' +'unab' +'umzu' +'ult' +'uh' +'ug_' +'uf_' +'uen_' +'tätig_' +'twice_' +'tri' +'trends_' +'tours_' +'tough_' +'tle' +'tische_' +'tin' +'thy_' +'terrorists_' +'telah_' +'tea_' +'tackle_' +'syn' +'swe' +'surface_' +'subsequent_' +'su_' +'storage_' +'stimmt_' +'stic_' +'stein_' +'spaces_' +'soweit_' +'solid_' +'sm' +'sion' +'sh' +'se' +'schweren_' +'sche' +'san' +'rück' +'rte_' +'route_' +'robust_' +'rm' +'rkt_' +'rip' +'ringen_' +'rig' +'revenue_' +'reu' +'respected_' +'rent_' +'renewable_' +'rem' +'regeln_' +'reduzieren_' +'raw_' +'ratio_' +'rap' +'ral_' +'qualified_' +'puts_' +'pun' +'pt_' +'providers_' +'promises_' +'profile_' +'produktion_' +'pred' +'potentially_' +'possibilities_' +'pose_' +'pointed_' +'piel_' +'pic' +'ph' +'periods_' +'percentage_' +'percent_' +'passengers_' +'passenger_' +'para' +'oz' +'ow_' +'our' +'organised_' +'ordered_' +'ona' +'occurred_' +'obligation_' +'object_' +'nötigen_' +'nst' +'nsch' +'novel_' +'none_' +'nk' +'nine_' +'nic' +'ney_' +'newspaper_' +'neu' +'nder' +'nces_' +'ms' +'mountains_' +'mn' +'miles_' +'mes' +'mere_' +'menschliche_' +'medicines_' +'meals_' +'marquis_' +'lug' +'los' +'logical_' +'locations_' +'lobby_' +'ller_' +'lit' +'leaves_' +'layer_' +'lac' +'ky_' +'krieg_' +'klein_' +'kit' +'ize_' +'ists_' +'iron_' +'integriert_' +'informieren_' +'ine' +'incentive_' +'ij' +'ignored_' +'ifi' +'if' +'ics_' +'hs_' +'hot' +'hoped_' +'herrscht_' +'heim' +'happens_' +'handle_' +'haften_' +'grown_' +'griechische_' +'gri' +'grateful_' +'grad' +'gg' +'geöffnet_' +'gestaltet_' +'gering_' +'genannte_' +'gegen' +'gedr' +'gardens_' +'gang_' +'fying_' +'fu' +'fri' +'fourth_' +'foundation_' +'finanzieren_' +'financed_' +'feeling_' +'facto_' +'exciting_' +'everywhere_' +'eur_' +'eta' +'esta' +'erte' +'erscheint_' +'ero' +'erm' +'erlauben_' +'erk' +'erfüllt_' +'erfordern_' +'entw' +'enr' +'engine_' +'engaged_' +'enb' +'ely_' +'ell' +'eli' +'ein' +'eht_' +'eh' +'educational_' +'eco' +'eau_' +'eastern_' +'earth_' +'earned_' +'ea' +'don' +'distinguish_' +'difficulty_' 
+'dient_' +'di' +'describes_' +'deflation_' +'dedicated_' +'de' +'dates_' +'creative_' +'cre' +'constantly_' +'consistent_' +'clubs_' +'cleaning_' +'cla' +'ción_' +'citizen_' +'chal' +'centres_' +'causing_' +'castle_' +'cas_' +'bs' +'breath' +'boot_' +'boot' +'blind_' +'bild_' +'bilateral_' +'betroffenen_' +'betonen_' +'beschäftigen_' +'beschränken_' +'besch' +'berühmten_' +'beraten_' +'behörde_' +'begins_' +'beendet_' +'bedeutenden_' +'bea' +'bau_' +'backed_' +'awareness_' +'ausschuss_' +'art' +'ari_' +'architecture_' +'anzunehmen_' +'ani' +'angemessene_' +'angegebenen_' +'anc' +'anb' +'amongst_' +'alp' +'aks' +'air' +'affordable_' +'affect_' +'admit_' +'ade_' +'adalah_' +'accepting_' +']]' +'Zwischen_' +'Zweck_' +'Zwar_' +'Zusammenhalt_' +'Zivil' +'Zinssätze_' +'Zimmern_' +'Zei' +'Year_' +'Wirtschafts' +'William_' +'Wesentlichen_' +'Werk_' +'Weiß' +'Vol' +'Vision_' +'Vice_' +'Verd' +'Vel' +'Va' +'VI' +'Trotzdem_' +'Titel_' +'Thanks_' +'Terror' +'Terrasse_' +'Technology_' +'TO' +'Swiss_' +'Support_' +'Suite_' +'Such' +'Steuer_' +'Statt_' +'Staff_' +'Souveränität_' +'Sogar_' +'Signal_' +'Sierra_' +'Serie_' +'Sektoren_' +'Schwellenländer_' +'Schiff_' +'Sc' +'Sar' +'Resolution_' +'Referendum_' +'Ram' +'Rad' +'Profi' +'Produkten_' +'Poker_' +'Past' +'Pap' +'Pakete_' +'PRO' +'PE' +'Organe_' +'Oder_' +'Ob_' +'Nicht' +'News_' +'Mädchen_' +'Mubarak_' +'Mou' +'Module_' +'Mitgliedsstaaten_' +'Met' +'Menü_' +'Menschheit_' +'Maria_' +'Mac_' +'Life_' +'Lern' +'Küsten' +'Kur' +'Kra' +'Koordinierung_' +'Konvents_' +'Konflikte_' +'Kommunismus_' +'Komm' +'Kluft_' +'Kla' +'Kapital_' +'Kampagne_' +'Ji' +'Jerusalem_' +'Jede_' +'Is' +'IM' +'Haus' +'Hand' +'Haftung_' +'Großteil_' +'Gri' +'Grenz' +'Gewinne_' +'Geschäfte_' +'Georgia_' +'Garden_' +'GA' +'Fuß_' +'Form' +'Fischerei' +'Film' +'Ferner_' +'Federation_' +'Fair' +'Entscheidungsträger_' +'Entscheidungs' +'Ents' +'Effizienz_' +'EN' +'Dynamik_' +'Distribution_' +'Din' +'Digital_' +'Desk_' +'Delegation_' +'Del' +'Darum_' +'Cro' +'Colo' +'Climate_' +'Chairman_' +'Cat' +'Card_' +'Captain_' +'Call' +'CF' +'CC' +'Bug' +'Blue_' +'Blair_' +'Black_' +'Bewältigung_' +'Bevölkerungs' +'Beschlüsse_' +'Berg' +'Bereitschaft_' +'Bemerkungen_' +'Bemerkung_' +'Beiträge_' +'Beh' +'Baltic_' +'Balkans_' +'Außenpolitik_' +'Authority_' +'Arabia_' +'Any_' +'Anst' +'Anleger_' +'Anlagen_' +'Agrar' +'Agentur_' +'AI' +'; ' +'98_' +'97_' +'79' +'63_' +'59_' +'55_' +'41_' +'38_' +'237' +'2020_' +'182' +'.  
_' +'...._' +'''_' +'''' +' !_' +'‘_' +'ра' +'но' +'ме' +'ма' +'ла' +'й' +'übers' +'ú' +'ände_' +'zugunsten_' +'zierung_' +'zieren_' +'zeug' +'yt' +'ye' +'yar' +'wunder' +'wu' +'woman_' +'wirksame_' +'wir' +'wife_' +'wiederholen_' +'weitgehend_' +'waters_' +'vorliegenden_' +'vorbei_' +'volume_' +'vollständige_' +'vollen_' +'violent_' +'vin' +'verursacht_' +'versetzt_' +'verpflichten_' +'verk' +'veri' +'verabschiedet_' +'variable_' +'unterstütze_' +'untergraben_' +'ungsp' +'underway_' +'understood_' +'unbea' +'ums' +'umgeben_' +'ular_' +'uk_' +'tte' +'trip_' +'tions' +'tief_' +'terror_' +'tennis_' +'tea' +'tat' +'stärkere_' +'stä' +'stylish_' +'stu' +'stellung_' +'speichern_' +'sou' +'solches_' +'smooth_' +'sleep_' +'showed_' +'shape_' +'securities_' +'secular_' +'schöne_' +'schu' +'schrieb_' +'schnelle_' +'schie' +'sake_' +'sah_' +'ruling_' +'ruhig_' +'rl' +'rich' +'ric' +'rg' +'resulted_' +'restructuring_' +'rest' +'ress_' +'respects_' +'respective_' +'resistance_' +'requirement_' +'renowned_' +'regionaler_' +'regarded_' +'reflected_' +'reduziert_' +'red' +'recorded_' +'reconstruction_' +'recommendations_' +'realen_' +'rc' +'raph' +'rant' +'ranging_' +'rain' +'rah' +'purchases_' +'ps' +'präsentiert_' +'proportion_' +'profound_' +'produktiv' +'produces_' +'previously_' +'preis_' +'practically_' +'positiv_' +'pol' +'plenary_' +'ple' +'plays_' +'piel' +'persistent_' +'peri' +'pea' +'pays_' +'password_' +'passieren_' +'partnerships_' +'palästinensischen_' +'ow' +'overlooking_' +'ous' +'ote' +'orts_' +'organized_' +'organisiert_' +'organisationen_' +'organis' +'ordnung_' +'opponents_' +'ond' +'odu' +'observers_' +'oa' +'nz_' +'ny_' +'nse' +'notice_' +'notably_' +'nnen_' +'nn' +'nken_' +'niveau_' +'niedrigen_' +'new' +'ner' +'neighbouring_' +'nch' +'nb' +'nab' +'mpo' +'mount_' +'modest_' +'mod' +'mil' +'mi' +'mend' +'marked_' +'mani' +'male_' +'ly' +'lounge_' +'lots_' +'logic_' +'lle' +'lität_' +'lin' +'liefern_' +'leg' +'langfristigen_' +'landscape_' +'lands_' +'konkreten_' +'komp' +'kin_' +'keineswegs_' +'ka' +'jährigen_' +'justified_' +'ji_' +'japanischen_' +'isn_' +'ions' +'involvement_' +'introducing_' +'installiert_' +'ino_' +'informal_' +'ines_' +'illi' +'id' +'ichts' +'iche_' +'ica' +'hör' +'hr_' +'home' +'hl_' +'hl' +'hilfreich_' +'herzlich_' +'hed_' +'harm_' +'happening_' +'größerer_' +'großem_' +'groß' +'graphics_' +'gh' +'gestiegen_' +'gestern_' +'geschieht_' +'geringere' +'gemein' +'gele' +'geeignet_' +'gains_' +'furnished_' +'freuen_' +'frequently_' +'fort_' +'folgende_' +'floors_' +'finanzierung_' +'fill_' +'fewer_' +'feld_' +'fate_' +'fashion_' +'fare_' +'faith_' +'failing_' +'fach' +'explore_' +'existieren_' +'exi' +'evaluation_' +'euros_' +'esti' +'erstmals_' +'erst' +'ers' +'error_' +'erhielt_' +'ergriffen_' +'entsprechen_' +'enterprise_' +'enf' +'enc' +'ements_' +'einzig_' +'einzelne_' +'einheitlichen_' +'edge_' +'economically_' +'ebene_' +'eas' +'durchführen_' +'druck_' +'drivers_' +'dor_' +'dnung_' +'diverse_' +'dite' +'diskutieren_' +'disco' +'discipline_' +'directed_' +'dig' +'dien' +'destination_' +'designs_' +'demonstrate_' +'demokratischer_' +'define_' +'decisive_' +'deals_' +'dead_' +'dea' +'dam' +'cti' +'ct' +'creates_' +'cosy_' +'cop' +'contributing_' +'constitute_' +'conse' +'commodity_' +'com' +'colours_' +'collaboration_' +'clo' +'cin' +'chtlich_' +'certainty_' +'cameras_' +'bs_' +'broken_' +'brief_' +'blu' +'beteiligt_' +'bet' +'besorgt_' +'bers' +'bericht_' +'bemühen_' +'bell' +'bekommt_' +'beinhaltet_' +'behaviour_' +'beha' +'begun_' +'begr' +'begegnen_' 
+'bedrooms_' +'bed' +'baru_' +'ay' +'avoided_' +'ausgezeichnete_' +'ausger' +'ausgehen_' +'ausgegeben_' +'aufmerksam_' +'assi' +'aspect_' +'aside_' +'asiatischen_' +'arrested_' +'array_' +'aro' +'arkt' +'are' +'arc' +'ara' +'apparent_' +'ap_' +'ant' +'ank' +'angel' +'amo' +'ament' +'ambi' +'altung_' +'ail' +'agi' +'aggressive_' +'adapt_' +'abkommen_' +'Zuständigkeit_' +'Zusammen' +'Zivilgesellschaft_' +'Zins' +'Zeitung_' +'Wikitravel_' +'Whi' +'Whether_' +'Welche_' +'Wald' +'Wachstums_' +'Vorsch' +'Vorbe' +'View_' +'Vier' +'Verständnis_' +'Vater_' +'Van_' +'Update_' +'Untersuchungen_' +'Universitäten_' +'Ums' +'Transport' +'Tat' +'TH' +'Switzerland_' +'Super' +'Stunde_' +'Studio_' +'Strukturreformen_' +'Strukturfonds_' +'Struktur_' +'Strom' +'Strategy_' +'Strasbourg_' +'Steigerung_' +'Spezialitäten_' +'Sowjetunion_' +'Similarly_' +'Siege' +'Show_' +'Set' +'Sep' +'Selbstverständlich_' +'Sec' +'Sea' +'Schweden_' +'Schlussfolgerungen_' +'Schicksal_' +'Schi' +'Schatten_' +'Sau' +'SO' +'Rückkehr_' +'Ruh' +'Romania_' +'Rob' +'Road_' +'River_' +'Richard_' +'Renten' +'Ren' +'Religion_' +'Regierungskonferenz_' +'Regi' +'Red' +'Raum' +'Projekt' +'Premier_' +'Point_' +'PSE_' +'PL_' +'Nu' +'Nonetheless_' +'Nie' +'Nero_' +'Nazi_' +'National' +'Nachrichten_' +'Nachricht_' +'Monopol' +'Mol' +'Mitgliedstaat_' +'Minute_' +'Mikro' +'Methoden_' +'Mein_' +'Maßnahme_' +'Massen' +'ME' +'MB_' +'League_' +'Laut_' +'LabVIEW_' +'Küste_' +'Künstler_' +'König_' +'Kä' +'Kulturen_' +'Krisen_' +'Kit' +'Kirche_' +'Kategorie_' +'Kas' +'Karten_' +'Kampf' +'Jones_' +'Je_' +'Ins' +'Innenstadt_' +'Indiens_' +'Index_' +'IR' +'IL' +'IG' +'IF' +'Has' +'Har' +'Hai' +'HTML_' +'Grundsatz_' +'Glauben_' +'Gerechtigkeit_' +'Gegens' +'Gas_' +'GS' +'GI' +'Fun' +'Früh' +'Format_' +'Fo' +'Flugzeuge_' +'Finnish_' +'Festlegung_' +'Fat' +'Errichtung_' +'Erm' +'Entw' +'Einbeziehung_' +'Ehe' +'Effekt_' +'Education_' +'ET' +'EP' +'Dritte_' +'Donald_' +'Diskussionen_' +'Diskriminierung_' +'Disk' +'Dis' +'Demokratien_' +'Clo' +'Chávez_' +'Cas' +'Cal' +'Cab' +'Budgets_' +'Budapest_' +'Box_' +'Block' +'Bill_' +'Betr' +'Besucher_' +'Besch' +'Belgium_' +'Beijing_' +'Be' +'Bahnhof_' +'B5_' +'Außen' +'Ausnahme' +'Arbeitgeber_' +'Anlass_' +'Anders_' +'An' +'Alm' +'Alles_' +'Alexander_' +'Abe_' +'Abe' +'AB' +'78_' +'7' +'28' +'2030_' +'1990er_' +'189' +'183' +'… _' +'• _' +'я_' +'ы_' +'ч' +'те' +'п_' +'не' +'ž' +'üsse_' +'üge_' +'üche_' +'überwinden_' +'überprüft_' +'ön' +'ó_' +'ét' +'ég' +'Ökonomien_' +'· _' +' ' +'{{_' +'zählt_' +'zurückzu' +'zunehmende_' +'wünsche_' +'wre' +'wohnen_' +'wohn' +'wohl' +'widerspr' +'wider' +'werte_' +'wer' +'wenden_' +'welcoming_' +'weiterge' +'web' +'water' +'wart' +'ware_' +'vorgenommen_' +'voraus_' +'virus_' +'verspricht_' +'verkaufen_' +'vent' +'veni' +'vel' +'valid_' +'ution' +'uss' +'usa' +'upgrade_' +'unlike_' +'university_' +'universities_' +'ungsm' +'umfangreiche_' +'ultra' +'ud' +'uch_' +'twin_' +'twe' +'tude_' +'tru' +'treat_' +'tobacco_' +'threshold_' +'tha' +'ters_' +'teilnehmen_' +'tech_' +'teaching_' +'targeted_' +'tap' +'tabl' +'supporters_' +'supplies_' +'sul' +'suites_' +'suggested_' +'successes_' +'studio_' +'string_' +'stories_' +'stor' +'stops_' +'sser' +'spüren_' +'spezifische_' +'spezielle_' +'solange_' +'sofern_' +'sma' +'sichtig' +'showing_' +'shortage_' +'shock_' +'sheet_' +'sharp_' +'sexual_' +'setzte_' +'serving_' +'senken_' +'selten_' +'seeks_' +'schön_' +'schnellen_' +'sb' +'sala' +'rä' +'ruhigen_' +'rob' +'rib' +'resolutions_' +'requested_' +'representation_' +'reporting_' +'replace_' 
+'repeatedly_' +'repeated_' +'repeat_' +'remarks_' +'reli' +'rein' +'regulated_' +'regret_' +'registration_' +'refused_' +'recommended_' +'rechtlichen_' +'recall_' +'rd' +'rb' +'rational_' +'ration_' +'rapporteurs_' +'ran' +'quo_' +'q_' +'q' +'proposes_' +'produziert_' +'privileged_' +'privacy_' +'premi' +'prefer_' +'ppe' +'populations_' +'pon' +'plu' +'phon' +'peak_' +'patterns_' +'parks_' +'oth' +'ose_' +'oro' +'orders_' +'opt_' +'ong_' +'oli' +'olen_' +'oh' +'offiziell_' +'ock' +'occasion_' +'nunmehr_' +'notion_' +'noti' +'not' +'nnte' +'nds_' +'natürlichen_' +'natur' +'nan_' +'namen_' +'mü' +'multiple_' +'multilateral_' +'mpa' +'movie_' +'moralische_' +'mon_' +'moments_' +'mo_' +'minimal_' +'mine_' +'meters_' +'merk' +'medi' +'meal_' +'mbi' +'mber' +'mas_' +'maritime_' +'mann_' +'man' +'magnificent_' +'lte_' +'losigkeit_' +'literature_' +'lis' +'lebt_' +'lebens' +'lat' +'kriege' +'kop' +'konfrontiert_' +'klicken_' +'killing_' +'key' +'kernel_' +'kern_' +'kation_' +'junge_' +'joining_' +'jahr_' +'itte' +'italienischen_' +'istischen_' +'islamischen_' +'is' +'ious_' +'ior' +'involving_' +'invested_' +'inva' +'intentions_' +'intelligence_' +'insufficient_' +'ino' +'inis' +'inf' +'indischen_' +'indi' +'impr' +'implies_' +'ils_' +'igkeiten_' +'ight_' +'idi' +'ica_' +'hängt_' +'humans_' +'hoping_' +'homes_' +'holds_' +'hit' +'hinter' +'hingewiesen_' +'hero' +'hergestellt_' +'helps_' +'hat' +'handel_' +'halbe' +'größeres_' +'grant_' +'grand' +'gor' +'gle' +'glass_' +'gewähren_' +'gesetz' +'gesellschaften_' +'gese' +'geplant_' +'geltenden_' +'gefährdet_' +'gefährden_' +'gebunden_' +'gal' +'fühlen_' +'fy_' +'fte_' +'fs_' +'forthcoming_' +'foot' +'follows_' +'fleet_' +'fitness_' +'fication_' +'fall' +'factor_' +'extraordinary_' +'extra' +'exclusion_' +'eva' +'etz' +'ete_' +'estr' +'esse' +'esi' +'ese_' +'erweisen_' +'erlangen_' +'erheblichen_' +'erhebliche_' +'equity_' +'entscheidend_' +'entgegen_' +'endo' +'emerge_' +'elt_' +'ela' +'eist' +'eintr' +'einfacher_' +'eilung_' +'eil' +'ehe' +'effektiv_' +'eer' +'eda' +'ea_' +'dus' +'dt_' +'drawing_' +'dos_' +'dominant_' +'dl' +'dit' +'disk_' +'direkte_' +'devoted_' +'det' +'derer_' +'dependent_' +'departure_' +'dep' +'den' +'delicious_' +'dealt_' +'cur' +'ctive_' +'corresponding_' +'copyright_' +'converted_' +'contents_' +'conta' +'constant_' +'considerably_' +'conservative_' +'concentrate_' +'con' +'composed_' +'component_' +'comply_' +'comp' +'colour' +'class' +'claimed_' +'cker_' +'cker' +'cht' +'chemical_' +'chel' +'charm' +'caught_' +'capabilities_' +'can' +'calendar_' +'bä' +'bun' +'buchen_' +'bubble_' +'bridge_' +'breiten_' +'breite_' +'boo' +'boa' +'bo_' +'blieb_' +'birth_' +'bild' +'betreffenden_' +'bestehende_' +'beseitigen_' +'beschließen_' +'bekannten_' +'bek' +'beige' +'befassen_' +'beauty_' +'beantworten_' +'bat' +'basieren_' +'badly_' +'autumn_' +'autonomy_' +'auswählen_' +'auswirken_' +'ausländische_' +'ause' +'aufrechtzuerhalten_' +'atz_' +'atz' +'attempted_' +'ath' +'articles_' +'arten_' +'arrived_' +'armen_' +'arise_' +'arian_' +'arge' +'anzuzeigen_' +'anywhere_' +'anf' +'ami' +'amerikanischer_' +'alo' +'alcohol_' +'ak_' +'agr' +'adopting_' +'acting_' +'aci' +'abwe' +']' +'Zusammens' +'Zo' +'Yo' +'Währungen_' +'Would_' +'Work_' +'Wladimir_' +'Winter_' +'Wild' +'Wikicars_' +'Wi_' +'Wel' +'Vorstellung_' +'Vorausschau_' +'Vielmehr_' +'Vielen_' +'Video' +'Verteidigungs' +'Versuche_' +'Vermi' +'Verm' +'Verlauf_' +'Verfolgung_' +'Verein' +'Valley_' +'VE_' +'Use_' +'Uns_' +'Umgang_' +'Treaties_' +'Tochter_' +'Tip_' +'Tim' +'Tier' 
+'Ticket_' +'Texas_' +'Syn' +'Sudan_' +'Stä' +'Stattdessen_' +'Start' +'Spannungen_' +'Sor' +'Sonnen' +'Sid' +'Shanghai_' +'Several_' +'Serbien_' +'Sei' +'Schweiz_' +'Schuld' +'Sauna_' +'SD' +'Roman' +'Republicans_' +'Rec' +'Punkten_' +'Presse' +'Preisen_' +'Pop' +'Politikern_' +'Pläne_' +'Planung_' +'PP' +'PI' +'Otherwise_' +'Opera_' +'Once_' +'Ober' +'Noch_' +'Niemand_' +'Natur' +'Nat' +'Mur' +'Mos' +'Morgen_' +'Mobile_' +'Migranten_' +'Messe' +'Mensch_' +'Mechanismen_' +'Marketing_' +'Mark_' +'Mario_' +'Mari' +'Manhattan_' +'Mangel_' +'Mala' +'Luc' +'Links_' +'Libya_' +'Lea' +'LateRooms_' +'Ladies_' +'Lab' +'LL' +'LE_' +'Können_' +'Kurz' +'Kurs_' +'Kunden' +'Kuba_' +'Konf' +'Klasse_' +'Keynes_' +'Key' +'Kauf_' +'Kat' +'Kal' +'KO' +'Journalisten_' +'Ja_' +'Interventionen_' +'Internationale_' +'Insgesamt_' +'Ing' +'Infrastruktur' +'Info' +'IS_' +'ISO_' +'Hu' +'Helsinki_' +'Hauses_' +'Handelss' +'Ham' +'Haft' +'Guests_' +'Großen_' +'Grab' +'Ges' +'Gentoo_' +'Generationen_' +'Gelder_' +'Gef' +'Garantie_' +'Ga' +'GB_' +'Führungs' +'Funds_' +'Frank_' +'Formular_' +'Florida_' +'Fischerei_' +'Fi_' +'Feld_' +'Farb' +'Europa' +'Empire_' +'Empfehlungen_' +'Emissions' +'Em' +'Elemente_' +'Element_' +'Einst' +'Eg' +'Dorf_' +'Disc' +'Deutschen_' +'Des_' +'Demo' +'Declaration_' +'Darin_' +'DER_' +'Cra' +'Constitutional_' +'Chu' +'CON' +'Bud' +'Branchen_' +'Bl' +'Bestandteil_' +'Bes' +'Beitritts' +'Beispielsweise_' +'Band' +'Ave' +'Ava' +'Australia_' +'Ausführungen_' +'Ausführung_' +'Arc' +'Arbeitslosen' +'Anschluss_' +'Ann' +'Anlage_' +'Anlage' +'Andernfalls_' +'Alli' +'Alle' +'Aktion_' +'Airlines_' +'Afrikas_' +'Abhängigkeit_' +'ASEAN_' +'AL' +'72_' +'600_' +'6' +'50' +'22' +'200' +'1994_' +'1971_' +'1967_' +'187' +'184' +'120_' +'1000_' +'04_' +'01' +'. „_' +''' (_' +'$_' +'’, _' +'қ' +'да' +'ł' +'ütte' +'üh' +'übersch' +'überdenken_' +'üb' +'öffentlicher_' +'äten_' +'änge_' +'ähnliche_' +'ão_' +'» _' +'  ' +'}} **{{_' +'zweit' +'zwei' +'zunehmenden_' +'zun' +'zugänglich_' +'zuges' +'zione_' +'zentren_' +'youth_' +'yma' +'yi' +'yard_' +'wonach_' +'wis' +'wirkt_' +'wirkliche_' +'whilst_' +'whi' +'wesen_' +'we' +'wan' +'wake_' +'voran' +'voluntary_' +'vollkommen_' +'vier' +'vielmehr_' +'verteilt_' +'verringert_' +'vermutlich_' +'vermitteln_' +'ury_' +'urgently_' +'urf_' +'unte' +'unsch' +'uno' +'united_' +'ungss' +'ungs_' +'undermine_' +'unange' +'umwelt' +'umso_' +'ultimate_' +'uct' +'uchen_' +'uche_' +'tzt_' +'typically_' +'ture_' +'tritt_' +'trial_' +'trag' +'tik_' +'tier' +'ticket_' +'territories_' +'tern' +'tely_' +'teilt_' +'tall' +'tal_' +'tag' +'süd' +'sé' +'sätze_' +'surprised_' +'surely_' +'sure' +'stützen_' +'ständigen_' +'struck_' +'strike_' +'stri' +'strategie_' +'sto' +'stie' +'stell' +'steel_' +'starts_' +'stammen_' +'stal' +'staatlicher_' +'staaten_' +'sst_' +'spoken_' +'specified_' +'sp' +'sozialer_' +'sons_' +'some' +'soldiers_' +'sna' +'sn' +'ska' +'shop_' +'sho' +'shadow_' +'senden_' +'scu' +'sci' +'schr' +'schla' +'schemes_' +'schaden_' +'sauber_' +'san_' +'rweise_' +'rop' +'rooted_' +'ron' +'rle' +'ris_' +'rightly_' +'rie_' +'ria' +'revealed_' +'reta' +'ret' +'reputation_' +'repair_' +'renoviert_' +'ren' +'reg' +'rede' +'recover_' +'recommendation_' +'rechten_' +'reas' +'reactions_' +'rea' +'rating_' +'radi' +'qui_' +'qui' +'question' +'quest_' +'quel' +'qu' +'punkte_' +'psycho' +'prose' +'prop' +'promoted_' +'proceedings_' +'pris' +'presents_' +'posed_' +'poorest_' +'poly' +'play' +'pho' +'pf' +'perubahan_' +'pert' +'permanently_' +'path' +'participants_' +'pace_' +'ously_' 
+'os' +'orte' +'organ' +'optional_' +'optimal_' +'oppose_' +'opi' +'oo' +'ona_' +'off' +'ode' +'occupation_' +'obtained_' +'objects_' +'ntu' +'nti' +'nswerte' +'normalen_' +'niedriger_' +'nger' +'neun_' +'netz_' +'nent_' +'ndes_' +'narrow_' +'nachhaltigen_' +'nachge' +'müsse_' +'mächtig' +'mä' +'myth' +'modell_' +'mitten_' +'mittels_' +'mistake_' +'missing_' +'minorities_' +'mid' +'messages_' +'mess' +'mes_' +'menye' +'menu' +'menjadi_' +'mela' +'mein' +'mea' +'manufacturers_' +'manchen_' +'makers_' +'lung_' +'lunch_' +'loved_' +'logy_' +'lm' +'lling_' +'lla_' +'lk' +'lists_' +'linguistic_' +'ling' +'light' +'ließ_' +'lieb' +'lev' +'lets_' +'lem' +'leistung_' +'lee' +'lde' +'lau' +'lasting_' +'last' +'lake_' +'kümmern_' +'kämpfen_' +'kä' +'kurzfristig_' +'kurzer_' +'ksi' +'kosten' +'kontrolliert_' +'kon' +'komme_' +'km' +'klär' +'kita_' +'ket' +'ke' +'kan' +'justify_' +'jemals_' +'jederzeit_' +'iter' +'irre' +'ique_' +'interessen_' +'intend_' +'instability_' +'ink' +'indicate_' +'inder' +'inadequate_' +'impression_' +'impa' +'imagin' +'ika' +'ignore_' +'ign' +'iges_' +'iesen_' +'ide' +'ichten_' +'icher' +'ib' +'holidays_' +'hohem_' +'hinder' +'hielt_' +'heritage_' +'herauszu' +'hence_' +'hau' +'handling_' +'haft_' +'gy_' +'gy' +'gui' +'granting_' +'gn' +'gh_' +'gewiss_' +'getragen_' +'geschrieben_' +'geringe_' +'genetic_' +'gelöst_' +'gefa' +'geboren_' +'gallery_' +'gaben_' +'förder' +'functioning_' +'ften_' +'friend_' +'freundliche_' +'franc' +'fossil_' +'fore' +'fon' +'focusing_' +'flying_' +'fly_' +'flug' +'flu' +'filled_' +'fic' +'favor_' +'farm_' +'familiar_' +'exploitation_' +'expert_' +'exceptional_' +'ewa' +'evi' +'eure_' +'etzen_' +'essi' +'erlassen_' +'erkennt_' +'erkannt_' +'eric' +'eo' +'entsteht_' +'entf' +'enge' +'enforcement_' +'ends_' +'empfangen_' +'els_' +'eit' +'eins_' +'einh' +'eingegangen_' +'eingebracht_' +'eignet_' +'eigenes_' +'egal_' +'een_' +'educated_' +'ede' +'economist_' +'echen_' +'duties_' +'dul' +'dte' +'dritte_' +'doctors_' +'doctor_' +'displayed_' +'disasters_' +'ding' +'dialog_' +'des' +'derzeitige_' +'dens' +'demo' +'demi' +'delighted_' +'defeat_' +'decorated_' +'debian_' +'deadline_' +'dd' +'davor_' +'dat' +'dar' +'dahin_' +'cted_' +'creditors_' +'covering_' +'countless_' +'correctly_' +'coordinated_' +'cooperate_' +'compact_' +'codes_' +'cle' +'clause_' +'classes_' +'chef_' +'checking_' +'channel_' +'chances_' +'centr' +'centers_' +'cast' +'cart' +'career_' +'cam' +'buses_' +'bug_' +'bright_' +'bond_' +'bol' +'bit' +'bisa_' +'bis' +'bill_' +'bilateralen_' +'bi_' +'bewirken_' +'bewahren_' +'besar_' +'beruhen_' +'bert' +'bel_' +'behoben_' +'behauptet_' +'beg' +'beantwortet_' +'bauen_' +'basi' +'bahn' +'aver' +'ausländischen_' +'ausgedrückt_' +'aufgeb' +'auff' +'aub' +'ate' +'assen_' +'assa' +'artists_' +'artistic_' +'appropriations_' +'annehmen_' +'angewiesen_' +'anerkannt_' +'anda' +'amp_' +'ambition_' +'alts' +'aktive_' +'ak' +'agen' +'aged_' +'ado_' +'adjustments_' +'actor_' +'achen_' +'acc' +'Zwei' +'Zuge_' +'Zen' +'Zahlungen_' +'Wit' +'Werke_' +'Wal' +'WE' +'Vorstellungen_' +'Vorsitz_' +'Vors' +'Vorlage_' +'Vorgehen_' +'Viertel_' +'Vertretern_' +'Verteidigungspolitik_' +'Verst' +'Using_' +'Unterkunft_' +'Ukrainian_' +'Tunisia_' +'Tru' +'Traum_' +'Transfer' +'Traditionen_' +'Town_' +'Tonnen_' +'Tok' +'Teilnehmer_' +'Tausende_' +'Taliban_' +'TO_' +'Sunday_' +'Straßburg_' +'Stra' +'Stimmung_' +'Steuerzahler_' +'Star' +'Staatsanleihen_' +'Soldaten_' +'Ska' +'Simbabwe_' +'Schätzungen_' +'Schnell' +'Schlüssel_' +'Schlag' +'Schie' +'Satz_' +'SU' +'SG' 
+'Russians_' +'Rot' +'Roll' +'Rock_' +'Regional' +'Ree' +'Rand_' +'Radio_' +'Qualitäts' +'Professor_' +'Power_' +'Portuguese_' +'Politiken_' +'Playa_' +'Planeten_' +'Pl' +'Pis' +'Philosophie_' +'Pf' +'Personal' +'Performance_' +'Pas' +'Partnership_' +'Partnerschaften_' +'PA' +'Orte_' +'Orient' +'Ok' +'Offenheit_' +'Notes_' +'Northern_' +'Normen_' +'Nizza_' +'Nation_' +'NGOs_' +'NG' +'NCC_' +'Mot' +'Mobil' +'Mittelmeer' +'Mitglieds' +'Missbrauch_' +'Minutes_' +'Mess' +'Menschenhandel_' +'Mei' +'Mau' +'Mark' +'Mallorca_' +'Mach' +'Maastricht_' +'Lö' +'Luft_' +'Ltd_' +'Lounge_' +'Lockerung_' +'Lizenz' +'Lim' +'Liberal' +'Legitimität_' +'Lee_' +'Landwirte_' +'Lake_' +'Lag' +'LO' +'Kritiker_' +'Korean_' +'Klimawandels_' +'Kir' +'Ki' +'Kara' +'Jugend' +'Jahrzehnte_' +'Invasion_' +'Internet' +'Indonesien_' +'Indonesia_' +'Indem_' +'Imp' +'Image_' +'Il_' +'Höchst' +'Hostels_' +'Hor' +'Hit' +'Hin' +'Hilfs' +'Hezbollah_' +'Have_' +'Hall_' +'Hafen_' +'Gua' +'Gil' +'Gewicht_' +'Geschwindigkeit_' +'Germans_' +'Gerichte_' +'Gericht_' +'Gegend_' +'Gard' +'Ganz_' +'Gal' +'GH' +'Friendly_' +'Freude_' +'Freiheiten_' +'Fre' +'Franc' +'Ford_' +'Flüchtlings' +'Fluss_' +'Fischer_' +'Finanzmärkte_' +'Fin' +'Fil' +'Festival_' +'Ferienwohnung_' +'Feld' +'FO' +'Erstellung_' +'Entschließungsantrag_' +'Ele' +'Einsch' +'Einkommens' +'Einer_' +'Eigenschaften_' +'EL_' +'Dun' +'Drogen_' +'Dreh' +'Dokument_' +'Di_' +'Description_' +'Democracy_' +'Dec' +'Dauer_' +'DS9_' +'Cons' +'Conf' +'Com' +'Cit' +'Church_' +'Christ_' +'Chamber_' +'Canon_' +'Camp' +'Cambridge_' +'CS' +'Bürgerinnen_' +'Bürger' +'Börse_' +'Bulgarien_' +'Briten_' +'Blut' +'Blo' +'Ble' +'Betrieb_' +'Beteiligung_' +'Besitz' +'Bereitstellung_' +'Beratung_' +'Begleit' +'Bedürfnisse_' +'Bed' +'Be_' +'Bauern_' +'BS' +'BB' +'Außenminister_' +'Aussch' +'Ausl' +'Assad_' +'Armen_' +'Ari' +'Arch' +'Anzeichen_' +'Anti_' +'Annehmlichkeiten_' +'Anl' +'Anhänger_' +'Angebote_' +'Andere_' +'Ana' +'Amb' +'Ak' +'Air' +'Af' +'Acc' +'Abwe' +'Abr' +'Abd' +'Ab_' +'AV' +'92_' +'68_' +'51_' +'43_' +'40' +'32' +'31' +'19th_' +'02_' +'" ' +' -' +' ,,_' +' ), _' +'ң_' +'та' +'й_' +'и_' +'ды_' +'ε' +'übernimmt_' +'ökonomischen_' +'éa' +'ätig' +'ändische' +'ägen_' +'án_' +'ßt_' +'Überzeugung_' +'Überwachungs' +'Übers' +'Überleben_' +'Übergangs' +'Überblick_' +'zusammenarbeiten_' +'zug_' +'zuf' +'zub' +'zerstört_' +'zerstören_' +'zel' +'zeiten_' +'zahl_' +'younger_' +'wr' +'worrying_' +'win' +'wiederholt_' +'wichtiges_' +'welchen_' +'weiten_' +'watch_' +'wasser' +'warn' +'wahr_' +'wahl_' +'vorauss' +'voices_' +'vit' +'viertel_' +'viable_' +'verwaltung_' +'verr' +'verkehr_' +'verhalten_' +'verboten_' +'vely_' +'var' +'ute_' +'urn' +'untersuchen_' +'unterscheiden_' +'unta' +'unc' +'unan' +'unabhängigen_' +'umge' +'umb' +'uit' +'uar' +'uan' +'türkischen_' +'tsche' +'tsch' +'tro_' +'trib' +'treiben_' +'trees_' +'transportation_' +'transitional_' +'transferred_' +'tranquil' +'tragic_' +'tracks_' +'tra_' +'tour_' +'topics_' +'tone_' +'ton' +'tlich_' +'tion' +'ting' +'till_' +'tig' +'tifi' +'tie' +'tickets_' +'tic' +'tes' +'territorial_' +'terra' +'teils_' +'tehen_' +'taxi_' +'tables_' +'systemen_' +'systematic_' +'sy' +'switch_' +'sustain_' +'surf' +'supplementary_' +'summe' +'suited_' +'sucht_' +'subs' +'strict_' +'stream_' +'strange_' +'stood_' +'sting_' +'stimme_' +'steigenden_' +'stei' +'stars_' +'stammt_' +'stad' +'staat_' +'spät_' +'spiel_' +'spezifischen_' +'socio' +'sli' +'sis' +'significance_' +'sicheren_' +'sicher' +'shocks_' +'shel' +'settlements_' +'seri' +'selben_' +'sehe_' 
+'seh' +'seconds_' +'schutz_' +'schrittweise_' +'schme' +'schm' +'satz_' +'sation_' +'sand' +'safer_' +'sad' +'sab' +'rv' +'rut' +'rum_' +'row_' +'roots_' +'rme' +'risk' +'rie' +'rian_' +'ri' +'rhetoric_' +'residence_' +'rese' +'reproduction_' +'rene' +'remo' +'remarkable_' +'refers_' +'reductions_' +'recognised_' +'rechtliche_' +'rec' +'realistic_' +'realer_' +'react_' +'re' +'rbe' +'rau' +'rati' +'rail_' +'ragen_' +'racism_' +'quent' +'purchasing_' +'pur' +'prozess_' +'provider_' +'proven_' +'progressive_' +'professor_' +'prisoners_' +'pride_' +'predicted_' +'praktischen_' +'ppi' +'potenzielle_' +'pledge' +'plan' +'pipeline_' +'pioneer' +'pil' +'photograph' +'phones_' +'persönliche_' +'personenbezogene' +'perfekt_' +'pen_' +'painful_' +'pain_' +'overn' +'ov_' +'ost' +'ort' +'originally_' +'organiz' +'ore' +'optimale_' +'ommen_' +'omi' +'om_' +'oll' +'og' +'oftmals_' +'offen' +'odi' +'nü' +'nter' +'notes_' +'northern_' +'nj' +'nischen_' +'nische_' +'niedrig_' +'nieder' +'ngi' +'negativen_' +'nci' +'nche' +'natürliche' +'mögliche_' +'märkten_' +'murder_' +'mou' +'motiv' +'mona' +'mol' +'modify_' +'moderate' +'mode' +'mittleren_' +'minor_' +'minds_' +'min' +'migrants_' +'mi_' +'mete' +'mereka_' +'memper' +'mel' +'medicine_' +'meat_' +'measured_' +'mail' +'magic_' +'lös' +'lum' +'llo' +'lled_' +'lig' +'lift_' +'lieber_' +'lid' +'library_' +'lenken_' +'leidet_' +'leich' +'legte_' +'legally_' +'leak' +'lbe' +'lav' +'lateral' +'langem_' +'lang' +'lai' +'kürze' +'künftigen_' +'könne_' +'kö' +'ktor' +'korrekt_' +'klaren_' +'kill_' +'kesehatan_' +'kas' +'kana' +'jüngste_' +'jedenfalls_' +'iz' +'iv' +'ition_' +'itali' +'isation_' +'ional_' +'involves_' +'invite_' +'investigation_' +'intervene_' +'inten' +'institutionellen_' +'inl' +'incredible_' +'incon' +'immense_' +'illegalen_' +'illegale_' +'illa' +'ility_' +'iel_' +'ideale_' +'hyper' +'hundert_' +'hopes_' +'hood_' +'holders_' +'histori' +'hinzufügen_' +'hinge' +'hierbei_' +'hes' +'hervorragende_' +'heraus' +'hera' +'hee' +'heads_' +'handels' +'halt' +'hal' +'hab' +'günstig_' +'gültige' +'gä' +'gte_' +'größtenteils_' +'grundsätzlich_' +'gramm' +'governing_' +'gos' +'gleichermaßen_' +'gkeit_' +'gi_' +'gesunde' +'gesch' +'genuinely_' +'geni' +'gelingt_' +'gefährliche_' +'gee' +'gebiet_' +'gate' +'fällen_' +'fur' +'fundamental' +'fuels_' +'fro' +'freely_' +'fre' +'fortsetzen_' +'forschung_' +'forests_' +'football_' +'folge' +'flee' +'flash_' +'flag_' +'fire' +'finish_' +'fin' +'figur' +'ffe' +'festgelegten_' +'fanden_' +'fam' +'fails_' +'fahrt_' +'expenses_' +'expanded_' +'exp' +'examine_' +'exa' +'eti' +'esch' +'erzeugt_' +'erwähnen_' +'erreichte_' +'erne' +'erl' +'erfolgreiche_' +'entre' +'enthalt_' +'enth' +'entertainment_' +'entering_' +'ensch' +'ens' +'ener_' +'encouraged_' +'ena' +'employed_' +'emergence_' +'ement_' +'embrace_' +'em' +'eller_' +'elektronischen_' +'electric_' +'eld_' +'eite' +'einzuführen_' +'einverstanden_' +'eintreten_' +'einr' +'einl' +'einheitliche_' +'eingerichtete_' +'ego' +'egen' +'efe' +'ec' +'easing_' +'dw' +'durchzusetzen_' +'dung_' +'dun' +'ds' +'drängen_' +'dream_' +'dot' +'dor' +'doors_' +'distinction_' +'dispute_' +'disposal_' +'diskutiert_' +'dise' +'directory_' +'director_' +'dim' +'dieselbe_' +'dictatorship_' +'determination_' +'deswegen_' +'destroy_' +'dependence_' +'departments_' +'department_' +'demonstrations_' +'definitely_' +'dating_' +'dark_' +'cz' +'cut' +'critique_' +'credits_' +'crat' +'cra' +'count_' +'corporations_' +'conventions_' +'convenience_' +'contributed_' +'contextual_' +'constructive_' 
+'considerations_' +'conduct_' +'cond' +'compete_' +'coherent_' +'codecision_' +'chtig' +'chte' +'chau' +'characterized_' +'cel' +'cc' +'catch_' +'carrying_' +'busy_' +'bre_' +'bought_' +'bot_' +'boards_' +'black' +'bio' +'binding_' +'bigger_' +'bezieht_' +'beträgt_' +'bestätigt_' +'bes_' +'bere' +'bera' +'bene' +'bele' +'beings_' +'begründet_' +'beglückwünschen_' +'begl' +'beeinflussen_' +'bedroh' +'bedeutende_' +'barer_' +'barely_' +'band_' +'backing_' +'az' +'aw' +'authoritarian_' +'ausb' +'auftreten_' +'aufgeführt_' +'atte' +'att_' +'att' +'ato_' +'asi' +'arte' +'aria' +'argumentieren_' +'apparently_' +'api' +'ap' +'ants_' +'ano_' +'anhaltenden_' +'angebracht_' +'ands_' +'anderswo_' +'alternatives_' +'alongside_' +'ali' +'aktuell_' +'ahm' +'aga' +'adverse_' +'adds_' +'adaptation_' +'acknowledged_' +'ack' +'accused_' +'acceptance_' +'Zwecke_' +'Zusätzlich_' +'Zusatz' +'Zimmerservice_' +'Zielen_' +'Zerstörung_' +'You' +'Wirksamkeit_' +'Wir' +'Werten_' +'Werkzeuge_' +'Wellness_' +'Welcome_' +'Weiter' +'We' +'Wander' +'Wall' +'Wahrscheinlichkeit_' +'WHO_' +'Vorbild_' +'Virus_' +'Vir' +'Vic' +'Verz' +'Versprechen_' +'Verringerung_' +'Vermögenswerte_' +'Verkauf_' +'Verhaltens' +'Vereinigte_' +'Vene' +'Urteil_' +'Umge' +'UE' +'Tätigkeit_' +'Typ_' +'True_' +'Tool_' +'Tibet_' +'Three_' +'Ten' +'Temple_' +'Tee' +'Teams_' +'Südkorea_' +'Sus' +'Sul' +'Structural_' +'Stre' +'Still_' +'Stahl' +'Spo' +'Spielen_' +'Spezial' +'Spa' +'Sound_' +'Sky' +'Situationen_' +'Sit' +'Sir' +'Siehe_' +'Sicherheitspolitik_' +'Service' +'Section_' +'Scotland_' +'Schwäche_' +'Schwerpunkt_' +'Schreiben_' +'Schon_' +'Schli' +'Schle' +'Schiff' +'Scheitern_' +'Schalt' +'Sach' +'SE_' +'Rä' +'Ruf_' +'Rub' +'Rom_' +'Ret' +'Republican_' +'Rechtsstaatlichkeit_' +'Rechten_' +'Rechnungs' +'Rat' +'Rand' +'RO' +'RM' +'Qui' +'Prozess' +'Projekts_' +'Procedure_' +'Presse_' +'Potenzial_' +'Pos' +'Pol' +'Pflicht_' +'Passwort_' +'Parlaments' +'Pakt_' +'PRI' +'PR' +'PO' +'PD' +'Out_' +'Orl' +'Organization_' +'Offen' +'Off' +'Od' +'Nichtraucherzimmer_' +'Netzwerk_' +'Net' +'NI' +'Modern_' +'Mittel' +'Mir_' +'Mini' +'Minderheit_' +'Min' +'Mil' +'Mechanismus_' +'Mc' +'Maschinen_' +'Martin' +'Mar_' +'Mandat_' +'Manchmal_' +'Mak' +'Mai' +'MPEG_' +'MP3_' +'Löhne_' +'Long_' +'Lie' +'Leitung_' +'Lehr' +'Later_' +'Lac' +'LIN' +'Kru' +'Konvention_' +'Konto_' +'Kons' +'Kongress_' +'Kompromiss' +'Kompetenzen_' +'Komp' +'Kernel_' +'Kauf' +'Kai' +'KORE_' +'KE' +'KA' +'Jung' +'Jak' +'Jackson_' +'Jack' +'Internal_' +'Instruments_' +'Innovationen_' +'Inf' +'IP' +'ION_' +'ING_' +'ID' +'Hinzu' +'Herz' +'Henry_' +'Heil' +'Harvard_' +'HI' +'Gästen_' +'Gott_' +'Gor' +'Gleich' +'Glas' +'Gipfeltreffen_' +'Gh' +'Gewerkschaften_' +'Gener' +'Gem' +'Gel' +'Gehminuten_' +'Gebrauch_' +'Gar' +'Fuß' +'Fur' +'Friedens_' +'Fremd' +'Free' +'Frankreichs_' +'Frank' +'Framework_' +'Flughäfen_' +'Flash_' +'Firefox_' +'Feier' +'Fehl' +'Fast_' +'Farben_' +'Fahrzeuge_' +'Fa' +'Exp' +'Esta' +'Er' +'Energies' +'Elite_' +'Einwanderer_' +'Einstellungen_' +'Einnahmen_' +'Einb' +'Egyptian_' +'Edition_' +'EX' +'ER_' +'EM' +'EIB_' +'EI' +'EG_' +'ED' +'Dy' +'Dublin_' +'Drogen' +'Drive' +'Dritten_' +'Drittel_' +'Don_' +'Deswegen_' +'Department_' +'Denmark_' +'Dek' +'Debatten_' +'Cur' +'Cruz_' +'Country_' +'Cou' +'Colombia_' +'Col' +'Client_' +'Chef_' +'Chal' +'Ch' +'Card' +'Can' +'California_' +'Burma_' +'Bu' +'Brok_' +'Bro' +'Br' +'Beweise_' +'Beurteilung_' +'Bett' +'Berichten_' +'Beobachter_' +'Befürworter_' +'Befugnisse_' +'Beachtung_' +'Bea' +'Based_' +'BU' +'BC_' +'Ausz' 
+'Ausstattung_' +'Ausst' +'Ausr' +'Aus' +'At' +'Asyl' +'Aspekten_' +'Architektur_' +'Arafat_' +'Ansichten_' +'Ans' +'Anonymous_' +'Anliegen_' +'Ange' +'Andererseits_' +'Allein_' +'Act' +'Ac' +'Abschnitt_' +'Absatz_' +'Abf' +'AD' +'66_' +'62_' +'54_' +'46_' +'35' +'29' +'1985_' +'1980s_' +'180_' +'17' +'.) _' +'. - _' +'. &#_' +''', _' +' ?_' +' ; _' +' +_' +' + _' +' "' +'…"..._' +'” ' +'’' +'ül' +'üg' +'ü' +'év' +'éc' +'äußern_' +'äts' +'ät' +'ähl' +'äh' +'Übrigen_' +'Übertragung_' +'Übernachtung_' +'Übereinkommen_' +'Überdies_' +'Ära_' +'zweitens_' +'zuzu' +'zusch' +'zung_' +'zulassen_' +'zug' +'zu' +'ziel_' +'zie' +'zeigte_' +'zb' +'zahlungen_' +'yu' +'you' +'ym' +'yield_' +'yc' +'yan' +'würdigen_' +'wü' +'worthy_' +'wonder' +'wise_' +'willingness_' +'werke_' +'werfen_' +'welcomes_' +'weis' +'wards_' +'ward_' +'waffen_' +'vulnerability_' +'vorhandenen_' +'vollständigen_' +'viv' +'vil' +'vielfältige' +'vid' +'vessels_' +'veröffentlichte_' +'verä' +'verst' +'versorgung_' +'versichern_' +'versch' +'verlor_' +'verkauft_' +'verg' +'verbreitet_' +'venture_' +'ved_' +'vas' +'vacation_' +'uung_' +'uta' +'unterstützte_' +'unterge' +'unsustainable_' +'unions_' +'unin' +'ungsf' +'ung' +'unf' +'unentgeltlich_' +'underlying_' +'underground_' +'umfassen_' +'uis' +'ui' +'ugs' +'uga' +'ues_' +'ude' +'uch' +'tätigen_' +'täglichen_' +'tw' +'ture' +'trick' +'tras' +'trained_' +'traditions_' +'trace' +'tr' +'tolerance_' +'tip' +'thumbnail_' +'throw_' +'texts_' +'teuer_' +'tested_' +'termin' +'tene' +'tendency_' +'techniques_' +'tec' +'tau' +'tari' +'tab' +'sämtliche_' +'sympathy_' +'sver' +'survival_' +'surveillance_' +'suppliers_' +'supervision_' +'superior_' +'succ' +'subsequently_' +'strategi' +'straightforward_' +'str' +'stored_' +'steigt_' +'starten_' +'stand' +'stan_' +'stake_' +'stabiliz' +'stab' +'ssu' +'ssion_' +'sr' +'späten_' +'spl' +'spiegelt_' +'spiegel' +'speziell_' +'sole_' +'ske' +'situ' +'silence_' +'sil' +'side' +'sicherstellen_' +'shot_' +'sf' +'settings_' +'seperti_' +'sens' +'sending_' +'selling_' +'sekarang_' +'sek' +'screens_' +'score_' +'scientists_' +'schätzen_' +'schwer' +'schutz' +'schlimmsten_' +'schlechten_' +'schaften_' +'sama_' +'sai' +'rücken_' +'rü' +'rze' +'ruktur' +'rp' +'rose_' +'ront' +'rken_' +'rium_' +'rif' +'rier' +'rid' +'rh' +'rgi' +'revenues_' +'reve' +'restricted_' +'responses_' +'respect' +'reserved_' +'rer' +'render' +'religiöse_' +'rel' +'reich' +'regulate_' +'regionen_' +'reforme' +'recruit' +'receiving_' +'rece' +'reagiert_' +'read' +'rder' +'rans' +'rai' +'rage' +'queries_' +'qualifi' +'qi' +'pue' +'publicly_' +'proximity_' +'province_' +'promising_' +'prominent_' +'professionals_' +'producer_' +'problem' +'prinzip_' +'preserve_' +'preferred_' +'port' +'politi' +'poli' +'planen_' +'pillar_' +'performed_' +'perceived_' +'pe' +'paying_' +'past' +'parameters_' +'pala' +'pack' +'pa_' +'ov' +'orn' +'orit' +'ores_' +'order' +'ora_' +'opti' +'onne' +'onia_' +'ong' +'omm' +'oleh_' +'oke' +'ogen' +'og_' +'officers_' +'offens' +'offenbar_' +'occupied_' +'observed_' +'observations_' +'obliged_' +'nützlich_' +'nutz' +'nung_' +'nowadays_' +'novel' +'noticed_' +'nominal_' +'nne' +'nn_' +'nh' +'ngan_' +'net' +'neo' +'negotiate_' +'ndung_' +'ncy_' +'nati' +'nant_' +'nal' +'nachfrage_' +'märkte_' +'mächtigen_' +'má' +'mus' +'mot' +'mono' +'moderate_' +'mobility_' +'mmer_' +'minder' +'mier' +'met' +'merc' +'mente_' +'menc' +'mena' +'men' +'mema' +'melt' +'mehr' +'medieval_' +'maßen_' +'max_' +'mate' +'mam' +'machten_' +'lösung_' +'läge_' +'lut' +'lon_' +'logi' +'load_' 
+'llte_' +'listings_' +'lip' +'life' +'lic' +'leichter_' +'leib' +'legacy_' +'leb' +'lautet_' +'laufenden_' +'langer_' +'lah' +'lag' +'künftig_' +'kü' +'ktion_' +'kritisiert_' +'kri' +'kostenlose_' +'kontextuellen_' +'kontextuell_' +'konst' +'konse' +'komplexe_' +'kommunistischen_' +'komm' +'kne' +'kli' +'kleineren_' +'klarer_' +'kette_' +'kes_' +'kers_' +'kennt_' +'kem' +'kel_' +'keen_' +'kat' +'jährige_' +'jara' +'jar' +'ivi' +'ively_' +'iti' +'israelischen_' +'isi' +'isen_' +'iri' +'iranische_' +'ira' +'investiert_' +'interpretation_' +'internationales_' +'intends_' +'intelligent_' +'instructions_' +'instantly_' +'inspired_' +'inspection_' +'initiated_' +'inhabitants_' +'inflows_' +'indicators_' +'inclusion_' +'importantly_' +'ille' +'ill' +'ification_' +'ideology_' +'ic' +'hältnis' +'hydro' +'hor' +'honour_' +'honest_' +'hoff' +'hire_' +'hinzufugen_' +'hilfe_' +'hidden_' +'hes_' +'hervorragenden_' +'hervorheben_' +'hervorgehoben_' +'herunterladen_' +'herunter' +'heat_' +'healthy_' +'hd' +'hast_' +'hart_' +'harmful_' +'harder_' +'hamm' +'halte' +'hack' +'guided_' +'gua' +'gründen_' +'gru' +'grati' +'grammatisch_' +'governmental_' +'gne_' +'gis' +'gier' +'ghe' +'gewählten_' +'gewonnen_' +'gewicht' +'get' +'gesto' +'gest' +'geschw' +'gerü' +'geprüft_' +'gep' +'gens' +'generous_' +'generell_' +'gend' +'gelang_' +'gefördert_' +'gefragt_' +'gebildet_' +'gear' +'gat' +'gaining_' +'ful' +'fueled_' +'friedlichen_' +'frequency_' +'fran' +'frame_' +'fra' +'foundations_' +'foster_' +'fordere_' +'forcing_' +'fli' +'fle' +'flaw' +'flags_' +'fix_' +'firmly_' +'finanz' +'films_' +'fie' +'festival_' +'ferner_' +'fee_' +'fans_' +'falsche_' +'facts_' +'facilitate_' +'extremists_' +'expressing_' +'explo' +'experiences_' +'expenditures_' +'expe' +'expanding_' +'executive_' +'ew' +'evident_' +'everybody_' +'estimates_' +'ess' +'erzeugen_' +'ery_' +'erwä' +'erweitern_' +'ersetzen_' +'erkunden_' +'erhöhte' +'erholen_' +'erforderliche_' +'erd' +'erarbeitet_' +'eo_' +'environmentally_' +'entst' +'entdeckst_' +'ensures_' +'ensi' +'enn' +'enjoying_' +'enjoyed_' +'ening_' +'englische' +'enger_' +'enge_' +'enemy_' +'endet_' +'endes_' +'emotional_' +'eme' +'embark' +'eingel' +'eingehalten_' +'ege' +'ega' +'effektive_' +'echt' +'ebe' +'eb' +'eate' +'earn_' +'ean' +'durchs' +'dungen_' +'dru' +'drives_' +'dramatically_' +'dol' +'dm' +'disa' +'dil' +'dignity_' +'digit' +'die' +'df' +'desktop_' +'desk_' +'designer_' +'deserve_' +'description_' +'describe_' +'ders_' +'dern_' +'derl' +'derartigen_' +'depth_' +'depending_' +'denk' +'dem' +'def' +'das' +'dak' +'customs_' +'cua' +'creat' +'courts_' +'count' +'corrupt_' +'cope_' +'conveniently_' +'consult' +'constraints_' +'connect_' +'confronted_' +'confrontation_' +'confront_' +'concentration_' +'complicated_' +'comparison_' +'communism_' +'commit_' +'combine_' +'collect_' +'closing_' +'clarity_' +'civilians_' +'cit_' +'circle_' +'chtung_' +'chne' +'chant' +'chan' +'challenging_' +'cepti' +'cell_' +'cand' +'camp_' +'bö' +'broadcast' +'brechen_' +'brauch' +'bot' +'bon_' +'blog_' +'blind' +'billig' +'bevölkerung_' +'betrieben_' +'bes' +'bert_' +'berge' +'berat' +'belegt_' +'beitr' +'beibehalten_' +'behe' +'beenden_' +'bau' +'band' +'ball' +'bail' +'bai' +'bags_' +'bagi_' +'bag' +'bad' +'backs_' +'awa' +'authors_' +'aut' +'ausschusses_' +'aussch' +'ausgeschlossen_' +'ausgeführt_' +'ausführliche' +'ausführ' +'aufgegeben_' +'aufgefordert_' +'asso' +'ass_' +'arti' +'arrest_' +'aris' +'arg' +'arbitrary_' +'appearance_' +'app' +'antr' +'annt' +'anlagen_' +'anischen_' +'angs_' 
+'angewandt_' +'angest' +'anger_' +'angepasst_' +'anerkennen_' +'anerkannte' +'ander' +'andel' +'all' +'alism_' +'alisierung_' +'akt' +'aki' +'ake' +'aine' +'aft_' +'advances_' +'adv' +'achtet_' +'accurate_' +'abzusch' +'abst' +'abi' +'aben_' +'abandon_' +'Zucker' +'Zone_' +'Zimmer' +'Zimbabwe_' +'Ye' +'Wr' +'Wort' +'Wissenschaft' +'Widerstands' +'Wi' +'Werbung_' +'Wende' +'Weg' +'Wechselkurs_' +'Webseiten_' +'Watson_' +'Water' +'Ware_' +'Wahr' +'Waffen' +'WASHINGTON_' +'WAR' +'Vorgehensweise_' +'Voll' +'Vi' +'Vert' +'Verluste_' +'Verl' +'Verge' +'Veranstaltungen_' +'Veranstaltung_' +'Vera' +'Ver' +'VER' +'User_' +'Up_' +'Umweltschutz_' +'US' +'UM' +'Transa' +'Trag' +'Tourismus_' +'Todesstrafe_' +'Thu' +'Tha' +'Textil' +'Temperaturen_' +'Tas' +'Tarifa_' +'Tan' +'TE_' +'Sä' +'Subventionen_' +'Str' +'Stor' +'Stockholm_' +'Stell' +'Stau' +'Stat' +'Sports_' +'Spielraum_' +'Spi' +'Space_' +'Sound' +'Sol_' +'Sof' +'Sign' +'Siemens_' +'Sicherheitsrat_' +'Shar' +'Serb' +'Ser' +'Senkung_' +'Sen' +'Sektors_' +'Schwer' +'Schwellenländern_' +'Schwei' +'Schw' +'Schu' +'Schlusselwort_' +'Schlusselphrase_' +'Schloss_' +'Rücks' +'Russ' +'Rumänien_' +'Rules_' +'Rubrik_' +'Rose' +'Right_' +'Ric' +'Rela' +'Rei' +'Ref' +'Rather_' +'RT' +'Provence_' +'Project_' +'Prize_' +'Praktiken_' +'Positionen_' +'Porto_' +'Pod' +'Plus_' +'Plat' +'Phänomen_' +'Pers' +'Pen_' +'Peace_' +'Park' +'Papa' +'Pana' +'Palma_' +'Palm' +'Pal' +'Pad' +'PC' +'PARIS_' +'Original' +'On' +'OP' +'OF_' +'Null' +'Notenbank_' +'Nordkorea_' +'Nord_' +'Niederlage_' +'Netto' +'Nationalismus_' +'Nar' +'Mutter_' +'Mut_' +'Muslime_' +'Morocco_' +'Monitor' +'Modelle_' +'Milosevic_' +'Mid' +'Metro_' +'Meter_' +'Medizin_' +'Material_' +'Marina_' +'Mani' +'Manager_' +'Manage' +'Mana' +'Malaria_' +'Mah' +'Magi' +'MM' +'Ly' +'Logik_' +'Lit' +'Listings_' +'Link_' +'Light' +'Liefer' +'Lib' +'Let' +'Lernen_' +'Leitlinien_' +'Lehren_' +'Laun' +'Lastly_' +'Laeken_' +'Kürze_' +'Kö' +'Kopenhagen_' +'Komplex' +'Kle' +'Kie' +'Kata' +'Kapazitäten_' +'Kandidat' +'Kanada_' +'Jun' +'Jer' +'Iran' +'Irak' +'Interessen' +'Install' +'Industrieländern_' +'Indi' +'Import' +'Immobilien_' +'IBM_' +'IB' +'Hunger_' +'Hunde' +'Hongkong_' +'Hol' +'Hitler_' +'Histori' +'Hei' +'Haushaltsdefizit' +'Haupts' +'Halb' +'Had' +'HD' +'Güter_' +'Gulf_' +'Guide_' +'Grünbuch_' +'Große_' +'Großbritanniens_' +'Growth_' +'Griechenlands_' +'Governments_' +'Golf' +'Gleichstellung_' +'Gla' +'Get_' +'Gestaltung_' +'Gesicht_' +'Genu' +'Gene' +'Geber' +'Games_' +'GP' +'Funktions' +'Fro' +'Friday_' +'Fri' +'Freunden_' +'Freizügigkeit_' +'Fraktionen_' +'Fortunately_' +'Force_' +'Food_' +'Fol' +'Florence_' +'Fle' +'Fischler_' +'Fisch_' +'Finnland_' +'Finanzsystem_' +'Finanzs' +'Finanzen_' +'Fernseh' +'FT' +'FS' +'FE' +'Export_' +'Existenz_' +'Exchange_' +'Eta' +'Este' +'Erwachsene_' +'Ep' +'Entwicklungsp' +'Einfluss' +'Eigentum_' +'ED_' +'Dä' +'Durchsetzung_' +'Dort_' +'Dokumente_' +'Dim' +'Dienst_' +'Det' +'Design' +'Democratic_' +'Def' +'Dea' +'De' +'Davos_' +'Darfur_' +'Daniel_' +'Cy' +'Creati' +'Content_' +'Consumer_' +'Comp' +'Communist_' +'Clubs_' +'Cla' +'Civil_' +'Chan' +'Casino_' +'Café_' +'CT' +'CA_' +'Bürokratie_' +'Bun' +'Bucht_' +'Bol' +'Black' +'Billionen_' +'Bez' +'Bewohner_' +'Betten_' +'Berlusconi_' +'Berg_' +'Belgien_' +'Bele' +'Beifall_' +'Beha' +'Bedürfnissen_' +'Bearbeitung_' +'Bay' +'Baum' +'Base_' +'Band_' +'Bahn' +'Az' +'Außen_' +'Autonomie_' +'Automobil' +'Ausmaß_' +'Ausgangspunkt_' +'Aufforderung_' +'Ass' +'Argument_' +'Antworten_' +'Ansätze_' +'Anleihen_' +'Angriffe_' +'Angl' 
+'Andr' +'And' +'Ami' +'Alters' +'Alta' +'Ali' +'Album_' +'Aktien_' +'Ah' +'Agrarpolitik_' +'Age_' +'Ablehnung_' +'Ablauf_' +'AS_' +'AC_' +'? ' +'96_' +'88_' +'86_' +'67_' +'61_' +'57_' +'53' +'33' +'27' +'236' +'1990s_' +'1960_' +'185' +'04' +'001' +',/_' +', ‘_' +'+ _' +'%-_' +'%' +'#_' +'! ' +' ("_' +'�' +'“ – _' +'–_' +'ш' +'по' +'м_' +'ли' +'ко' +'ка' +'ер' +'ва' +'ар' +'üstung_' +'ührung_' +'übrigens_' +'überwiegend_' +'überw' +'übernahm_' +'österreichischen_' +'öffentlich' +'ô' +'í_' +'ë' +'è_' +'ç' +'ätz' +'ärmsten_' +'änden_' +'ält_' +'ähr' +'äd' +'Übernahme_' +'°_' +' –, _' +'}}) _' +'| _' +'{_' +'zö' +'zust' +'zukommen_' +'zivil' +'zins' +'ziert_' +'zi_' +'zes_' +'zentral_' +'zen' +'zahlt_' +'yields_' +'yea_' +'xo' +'xe' +'xa' +'wort_' +'worried_' +'worker_' +'witness_' +'wissenschaftliche_' +'winning_' +'will' +'wild_' +'widmen_' +'whereby_' +'wh' +'weshalb_' +'werken_' +'welchem_' +'weite_' +'weigh' +'weapon_' +'weaker_' +'was' +'warme' +'wander' +'walt' +'walls_' +'wahlen_' +'vs' +'vorzunehmen_' +'vorsieht_' +'vorschlag_' +'vorn' +'vore' +'voran_' +'vollst' +'voll' +'volatility_' +'villages_' +'via' +'vez_' +'verz' +'versuch' +'verse' +'verschiedener_' +'verschaffen_' +'verme' +'verliert_' +'verletzt_' +'verhe' +'verh' +'vergeben_' +'verge' +'verfügbaren_' +'verd' +'verbr' +'verbinden_' +'vegeta' +'vari' +'valuable_' +'vak' +'uto' +'uti' +'urs' +'uri' +'urf' +'updated_' +'unzureichend_' +'unw' +'unusual_' +'unterwegs_' +'untern' +'unprecedented_' +'unpa' +'ungew' +'ungen' +'undi' +'unders' +'undergo' +'unden_' +'unau' +'un' +'umr' +'ump' +'uli' +'ukan_' +'ui_' +'uge' +'ucht_' +'ucht' +'ually_' +'ua_' +'ua' +'u0027s_' +'türkische_' +'tät' +'tänd' +'tube_' +'tter' +'très_' +'träge' +'triumph' +'treaties_' +'travellers_' +'transformation_' +'trafficking_' +'tow' +'tors_' +'tons_' +'toll_' +'tol' +'tl' +'tionally_' +'tional_' +'tim' +'til' +'thumb_' +'ths_' +'thousand_' +'thirds_' +'thir' +'thi' +'theo' +'tensions_' +'tens_' +'tellung_' +'teams_' +'taxation_' +'tax' +'tas' +'tana' +'sus_' +'surroundings_' +'surprising_' +'suddenly_' +'substantially_' +'stü' +'studied_' +'student_' +'stoppen_' +'stoff' +'stimulate_' +'stimmung' +'stil' +'stic' +'stelle_' +'steigende_' +'stehenden_' +'steer' +'sted_' +'stattfindet_' +'stages_' +'stabilize_' +'sta_' +'sste' +'ssa' +'spra' +'spor' +'spiel' +'spi' +'spect' +'specifications_' +'speaker_' +'spart' +'sparen_' +'spar' +'spanischen_' +'sow' +'sounds_' +'sorgt_' +'som' +'sol' +'slowdown_' +'slopes_' +'slight_' +'slave' +'skin_' +'sixt' +'simultaneously_' +'sieren_' +'sichere_' +'si' +'shrink' +'sharply_' +'sex' +'ses' +'servers_' +'sent' +'semi' +'segment' +'sec' +'sd' +'schönsten_' +'schwieriger_' +'schwerer_' +'schwach_' +'scht_' +'schri' +'schlu' +'schlimm' +'schlicht_' +'schl' +'schau' +'scene_' +'sav' +'saubere_' +'sat' +'sant' +'sand_' +'rz' +'rufen_' +'rse' +'rsch' +'rov' +'routes_' +'ros' +'rom' +'roll' +'roles_' +'rma' +'rku' +'reverse_' +'retain_' +'resse' +'res' +'requiring_' +'requests_' +'rent' +'rena' +'rek' +'reinforcing_' +'reine_' +'reiben_' +'regulat' +'regelung_' +'refuse_' +'referring_' +'rechtzeitig_' +'reali' +'rasche' +'rar' +'racing_' +'quin' +'quil' +'quantities_' +'qualitative_' +'pä' +'put' +'pursued_' +'punkt' +'proud_' +'protest' +'prote' +'profession' +'private' +'prim' +'preventing_' +'prevented_' +'prevailing_' +'pressures_' +'presenting_' +'predict_' +'ppl' +'ppen_' +'pou' +'posts_' +'positiven_' +'pools_' +'polnischen_' +'polls_' +'pm_' +'plötzlich_' +'ple_' +'platforms_' +'planung_' +'plant' +'pick' 
+'pflicht' +'pet_' +'pers' +'permit_' +'periphery_' +'pens' +'pel_' +'patio_' +'passing_' +'pas_' +'pas' +'partition_' +'part' +'palästinensische_' +'pakete_' +'pag' +'oß_' +'owners_' +'overseas_' +'oto' +'ossen_' +'ose' +'ories_' +'orientierte' +'oriented_' +'org' +'orc' +'ops_' +'operated_' +'opera' +'ony_' +'ono' +'ole' +'ois' +'oi' +'oga' +'officially_' +'od_' +'nos_' +'normale_' +'non' +'nobody_' +'nnung_' +'nni' +'nin' +'nig' +'nem' +'nell' +'neighbourhood_' +'negotiation_' +'nau' +'native_' +'nai' +'nahmen_' +'m²_' +'museum_' +'moderner_' +'mobilis' +'mmi' +'mme_' +'missions_' +'missile_' +'miss_' +'minu' +'milk_' +'militärischer_' +'merit' +'menja' +'mengen' +'mene' +'medizinischen_' +'mediat' +'meaningful_' +'mb' +'mati' +'massiven_' +'maschine_' +'mart' +'mar_' +'manu' +'mano' +'mals_' +'m2_' +'lässig' +'lur' +'lter_' +'lter' +'lst' +'lowest_' +'low' +'losen_' +'loi' +'logische' +'llt_' +'llers_' +'llen' +'linear_' +'lie' +'lichkeiten_' +'leng' +'leiden_' +'legend' +'lament' +'lalu_' +'lak' +'lacks_' +'lab' +'kund' +'kun' +'kredit' +'konzipiert_' +'kontra' +'konf' +'kommerziellen_' +'klima' +'kek' +'kehren_' +'kata' +'kat_' +'kandidat' +'juga_' +'judicial_' +'journey_' +'jointly_' +'jer' +'jahr' +'itu_' +'ith' +'isten_' +'ising_' +'isiert_' +'isch' +'isa' +'irgend' +'ione' +'investing_' +'interven' +'internen_' +'interessante_' +'institut' +'inso' +'insist_' +'inos_' +'inn' +'ining_' +'incidents_' +'inci' +'impos' +'ime_' +'iklim_' +'ika_' +'igra' +'iger' +'iew' +'ierungen_' +'iegen_' +'ieben_' +'hur' +'hung_' +'human' +'htig' +'hte' +'hro' +'host' +'hom' +'hochwertige' +'hochge' +'hlt_' +'hinaus' +'hill_' +'heu' +'herstell' +'hen' +'harten_' +'harmonisation_' +'happiness_' +'hall_' +'hair_' +'hai' +'guard_' +'gt' +'gründe' +'grew_' +'grenzen_' +'greifen_' +'gratulieren_' +'gran' +'grab' +'gon' +'gol' +'glaub' +'gk' +'geäußert_' +'gewinn' +'gewachsen_' +'getötet_' +'gestr' +'gestartet_' +'gesellschaftliche_' +'geschäft' +'geräumige' +'gerät_' +'gers_' +'geringer_' +'gepr' +'genes_' +'generate_' +'gemeinschaftlichen_' +'gemeinsamer_' +'geli' +'gelassen_' +'gek' +'geistigen_' +'gegangen_' +'gefährlich_' +'gefolgt_' +'gefe' +'gebühr' +'geboten_' +'gd' +'gari' +'gam' +'führer_' +'führe' +'fte' +'frische' +'fried' +'fragment' +'forums_' +'forth_' +'formation_' +'forge_' +'folder_' +'fläche_' +'fiskal' +'fine' +'finds_' +'finances_' +'fft_' +'ffic' +'ffi' +'fei' +'feet_' +'featuring_' +'fault_' +'fan' +'ey' +'expos' +'exporting_' +'exhibit' +'execution_' +'exe' +'excuse_' +'exceptions_' +'exc' +'exact_' +'everyday_' +'eun' +'eth' +'eten' +'essen' +'esca' +'erzielte_' +'ersi' +'ersetzt_' +'erse' +'err' +'erode' +'ernst' +'erlebt_' +'erhoben_' +'erh' +'ergänzen_' +'erer_' +'erende' +'erei' +'ere' +'enze' +'entr' +'entdecken_' +'ense' +'enha' +'energie_' +'enemies_' +'endgültigen_' +'empfängt_' +'empfehlen_' +'emp' +'emo' +'emission' +'embedded_' +'elli' +'elegante_' +'electrical_' +'ekonomi_' +'einzusch' +'einstellen_' +'einst_' +'einrichtungen_' +'einkommen_' +'eingeschränkt_' +'eingesch' +'eindeutige_' +'egte' +'effizienter_' +'ees_' +'eba' +'dürften_' +'dynamism_' +'durchge' +'dur' +'dramatisch_' +'dr' +'downturn_' +'dorthin_' +'domin' +'dle' +'dism' +'discovery_' +'disagree_' +'disabled_' +'direkt' +'dige' +'differ' +'dieselben_' +'dienste_' +'dic' +'deutliche_' +'deuten_' +'desto_' +'denkt_' +'deni' +'demanded_' +'delivery_' +'defining_' +'defi' +'deckt_' +'deck' +'decent_' +'deb' +'dau' +'darstell' +'cycli' +'cyber' +'curs' +'cro' +'credit' +'credible_' +'covers_' +'costly_' 
+'controlling_' +'contract' +'consolidation_' +'consists_' +'conscious' +'conc' +'como_' +'committees_' +'comme' +'colors_' +'cm_' +'clock_' +'cler' +'cken' +'cke_' +'cia_' +'chts' +'chs' +'chri' +'ched_' +'characters_' +'chaos_' +'champion' +'chair_' +'cet' +'ceme' +'cara_' +'capita_' +'calm_' +'büro' +'bus' +'bury_' +'bungen_' +'broadly_' +'brit' +'bric' +'breed' +'bracht_' +'boundaries_' +'boosting_' +'boat_' +'blow' +'bing_' +'binde' +'big' +'bha' +'bezahl' +'bez' +'bewiesen_' +'beweisen_' +'betont_' +'best' +'besonderer_' +'benutzer' +'benefited_' +'believes_' +'bekannte_' +'bekam_' +'behindert_' +'bedroht_' +'beding' +'bedeutsame' +'be' +'batt' +'bah' +'award_' +'avi' +'avec_' +'ava' +'aux_' +'auszuüben_' +'aussehen_' +'ausgewogene' +'ausgew' +'ausgeb' +'auseinander_' +'aufw' +'auftr' +'aufb' +'attraktive_' +'attract_' +'attended_' +'atr' +'asymmetri' +'ast' +'aspirations_' +'ash' +'ase_' +'artificial_' +'arn' +'arme_' +'arm' +'argues_' +'ard' +'archive_' +'arat' +'appl' +'apo' +'anzuwenden_' +'anzupassen_' +'anwenden_' +'answers_' +'ansehen_' +'anonym' +'anis' +'angesehen_' +'angemessen_' +'angeb' +'andes_' +'amt_' +'ame_' +'allzu_' +'allerg' +'alität_' +'alis' +'ales_' +'ald' +'aku' +'aj' +'airline_' +'ahn' +'ahme' +'aha' +'agieren_' +'afrika_' +'affecting_' +'aer' +'advised_' +'advise' +'adult_' +'ada' +'actors_' +'acted_' +'accounting_' +'accelerating_' +'aca' +'abgesehen_' +'abgelehnt_' +'abg' +'abend' +'aan_' +']], [[_' +'] ' +'Zust' +'Zus' +'Zur' +'Zugleich_' +'Youth_' +'Yas' +'XP_' +'XI' +'Wäscheservice_' +'Wärme' +'Wä' +'Wunder' +'Wissenschaftler_' +'Wirtschaftsp' +'Wirtschaftsf' +'Wirtschafts_' +'Wireless_' +'Wiederherstellung_' +'Wie' +'Widerspruch_' +'Wichtigkeit_' +'Wettbewerbs_' +'Wesen_' +'Werbe' +'Wer' +'Wen' +'Weit' +'Wein_' +'Wei' +'Wea' +'WC_' +'Vorschlags_' +'Vork' +'Vorhaben_' +'Vorf' +'Vollbeschäftigung_' +'Vertrags' +'Verteilung_' +'Versorgung_' +'Verletzung_' +'Verle' +'Verla' +'Verk' +'VI_' +'Urlaubs' +'Unternehmer_' +'Unterkünfte_' +'Unt' +'Typ' +'Twitter_' +'Trend_' +'Tot' +'Top' +'Thursday_' +'Third_' +'Thai_' +'Telefon_' +'Take_' +'Tagung_' +'Systemen_' +'Sun' +'Summe_' +'Suites_' +'Sturz_' +'Studenten_' +'Strom_' +'Strand' +'Stoffe_' +'Stabilitäts_' +'Spring_' +'Spitzen' +'Solange_' +'Smoking_' +'Situated_' +'Sie' +'Sicherheits_' +'Sicher' +'Shuttle_' +'Should_' +'Shop_' +'Sevilla_' +'Seba' +'Schrift' +'Schmerz' +'Schlag_' +'Schl' +'Schei' +'Salz' +'Salo' +'Sala' +'Sai' +'Russen_' +'Ross' +'Rit' +'Ris' +'Rhetorik_' +'Rettungs' +'Rese' +'Reg' +'Real_' +'Reagan_' +'Ratifizierung_' +'Rap' +'Radio' +'RER_' +'Provinz_' +'Programm' +'Prognosen_' +'Produzenten_' +'Produktivität_' +'Press_' +'Presidents_' +'Post_' +'Pont' +'Poll' +'Pole' +'Poker' +'Plenum_' +'Piazza_' +'Patten_' +'Patri' +'Pat' +'Papier' +'Panorama' +'Pale' +'Pak' +'Out' +'Oh' +'OC' +'Nummer_' +'Nuklear' +'None_' +'Nielson_' +'Nichtraucherzonen_' +'Next_' +'Netzwerk' +'Net_' +'Nei' +'Natura_' +'NS' +'NC' +'München_' +'Mul' +'Much_' +'Moskau' +'Mona' +'Moment' +'Modernisierung_' +'Modell' +'Mio_' +'Mini_' +'Millennium_' +'Mill' +'Migration_' +'Mic' +'Meinungs' +'Med' +'Mart' +'Marktwirtschaft_' +'Marg' +'Mao_' +'Mail' +'MD' +'Lü' +'Lärm' +'Lobby' +'Liu_' +'Libyen_' +'Les_' +'Lehrer_' +'Legal_' +'Lebensmittel_' +'Lau' +'Lap' +'Language_' +'Labor_' +'LU' +'LS_' +'LG' +'Kriminalität_' +'Kriege_' +'Kremlin_' +'Kontrollen_' +'Kontext_' +'Kont' +'Konkurrenz_' +'Kollegin_' +'Klaus_' +'Kl' +'Key_' +'Kern_' +'Kenntnisse_' +'Kart' +'Karriere_' +'Kann_' +'KI' +'Jü' +'Justiz' +'Jur' +'Juan_' +'Jordanien_' 
+'Jordan_' +'Jahrestag' +'Investition_' +'Institut_' +'Ingenieur' +'Industry_' +'Industrieländer_' +'Inc_' +'Ill' +'Id' +'IK' +'Hände_' +'Hypotheken' +'Human' +'Hour_' +'Holiday_' +'Hohe_' +'Hir' +'Hind' +'Herzlich' +'Heraus' +'Haut' +'Hau' +'Hart' +'Harmonisierung_' +'Haar' +'HO' +'HD_' +'Güter' +'Guan' +'Grundsätze_' +'Grundlagen_' +'Gross' +'Governance_' +'Gottes_' +'Gold' +'Global' +'Gestatten_' +'Geschlechter' +'Gen' +'Gemeinschaften_' +'Gefängnis_' +'Gebi' +'Gay_' +'Gates_' +'Gat' +'Gast_' +'Gara' +'GN' +'Führungsrolle_' +'Fü' +'Freizeit' +'Fr' +'Fou' +'Flu' +'Fire' +'Ferienwohnungen_' +'Ferien' +'Feinde_' +'Farm' +'Family_' +'Fallout_' +'FL' +'FC_' +'Excellent_' +'Everything_' +'Europol_' +'Etwa' +'Et' +'Essen_' +'Esp' +'Erwerbs' +'Erne' +'Erk' +'Erfolge_' +'Engine_' +'Energien_' +'Empf' +'Einschätzung_' +'Einschränkung_' +'Eingabe' +'Effektivität_' +'Economy_' +'Ebola_' +'Eb' +'EP_' +'Doppel' +'Dom' +'Dol' +'District_' +'Disp' +'Diplomatie_' +'Deutsch_' +'Deposit_' +'Denken_' +'Defence_' +'Deep_' +'Danach_' +'DL' +'DD' +'DA_' +'Culture_' +'Cru' +'Corb' +'Consensus_' +'Commissioners_' +'Come_' +'Code' +'Coa' +'Churchill_' +'Children_' +'Chat' +'Chancellor_' +'Catal' +'Castle_' +'Cast' +'Cand' +'CP' +'CM' +'Büro' +'Böse' +'Börsen' +'Butt' +'Bru' +'Brand' +'Boy' +'Bord_' +'Boot_' +'Bolkestein_' +'Blin' +'Binnen' +'Bilanz' +'Big_' +'Bie' +'Bibliothek_' +'Bewegungen_' +'Bew' +'Beu' +'Betracht_' +'Beteiligten_' +'Bet' +'Best' +'Besondere' +'Besitz_' +'Benjamin_' +'Bene' +'Ben_' +'Begründung_' +'Beendigung_' +'Bee' +'Bedrohungen_' +'Bedien' +'Beamten_' +'Basel_' +'Barón_' +'Badezimmer_' +'Back_' +'BRICS_' +'Avenue_' +'Automati' +'Ausgangs' +'Ausgabe_' +'Ausflüge_' +'Augen' +'Aufzug_' +'Aufnahme' +'Ast' +'Assembly_' +'Arte' +'Argumente_' +'Arabs_' +'Aqua' +'Ap' +'Anwendungs' +'Anträge_' +'Allianz_' +'Alex' +'Aktivität_' +'Aktionäre_' +'Act_' +'Absichten_' +'Abschaffung_' +'Abg' +'Abendessen_' +'AGE' +'AAA_' +'= _' +'900_' +'90' +'56_' +'2050_' +'1986_' +'1950_' +'195' +'181' +'176' +'.&_' +'. 
“_' +'+' +'))._' +'", "_' +' {{_' +' ,_' +' &_' +' $ _' +'”' +'“-_' +'“ (_' +'א' +'ө' +'ұ' +'с_' +'ры' +'ны' +'нд' +'в_' +'še' +'üssen_' +'üssel' +'ühren_' +'überschuss_' +'überlegen_' +'überf' +'öm' +'én' +'él' +'äus' +'äufe' +'äst' +'äss' +'ändert_' +'ämpf' +'äl' +'Überlegungen_' +'Überl' +'Überg' +'Är' +'Änderungs' +'zzi' +'zwingen_' +'zweiter_' +'zweifel' +'zutiefst_' +'zuständigen_' +'zus' +'zurückzuführen_' +'zurückf' +'zunehmen_' +'zul' +'zugute_' +'zue' +'zt_' +'zog_' +'zige' +'ziele_' +'zial' +'zentraler_' +'würdig_' +'worry_' +'wood' +'withdrawal_' +'wirklichen_' +'wherever_' +'whenever_' +'wheel_' +'wettbewerbsfähige' +'westliche_' +'wesens_' +'werde' +'wenigsten_' +'wealthy_' +'waves_' +'warrant' +'wann_' +'wand' +'wai' +'wahrscheinliche' +'wachstum_' +'wachs' +'vorzulegen_' +'vorw' +'vorn_' +'vorgeschlagene_' +'vorbe' +'volatile_' +'vist' +'visa_' +'virtue_' +'virtual_' +'villa_' +'videos_' +'victim_' +'vest' +'verwiesen_' +'verwei' +'verwe' +'verwandelt_' +'verwandeln_' +'verwa' +'vertr' +'versteht_' +'versetzen_' +'versa' +'vermittelt_' +'verkehrs_' +'verifi' +'verf' +'verbieten_' +'verbesserte_' +'verband_' +'vera' +'vene' +'varia' +'val_' +'val' +'utz_' +'uted_' +'uste' +'ust_' +'usage_' +'ure' +'uranium_' +'ura' +'upaya_' +'unterstützten_' +'unterst' +'unterschiedlich_' +'unterbreitet_' +'unstable_' +'uns' +'unre' +'unn' +'universe_' +'ungsbe' +'ungsa' +'ungeachtet_' +'unemployed_' +'undenen_' +'unclear_' +'uncertain_' +'unce' +'unabhängige_' +'umgekehrt_' +'umgehen_' +'umen_' +'ull' +'ulat' +'ula_' +'uj' +'uck' +'uchs_' +'uc' +'ubi' +'tätigkeit_' +'tyr' +'typisch_' +'twentieth_' +'tut' +'turno' +'tun' +'tum_' +'tua' +'tta' +'tsunami_' +'träger_' +'tragedy_' +'traf_' +'toxi' +'tot' +'tom' +'tn' +'tliche' +'tigen_' +'tige' +'tien' +'tiefer_' +'tie_' +'threaten_' +'thin' +'thes' +'thermal_' +'there' +'theme_' +'thek' +'theater_' +'tf' +'terrible_' +'terr' +'teri' +'terhadap_' +'tent' +'temperature_' +'tells_' +'teiln' +'teil' +'teh' +'technologischen_' +'techno' +'teacher_' +'tatsächlichen_' +'taten_' +'taste' +'südlichen_' +'sus' +'surprise_' +'supp' +'suggestions_' +'suggestion_' +'sufficiently_' +'succeeded_' +'suc' +'stärksten_' +'stunning_' +'strukturelle_' +'strictly_' +'strengthened_' +'streit' +'stores_' +'stopp' +'stimulat' +'sth' +'steiger' +'steady_' +'station' +'startet_' +'star' +'ssp' +'ssl' +'ssel' +'ssch' +'split_' +'spielte_' +'specialities_' +'sowjetischen_' +'sonn' +'songs_' +'solch_' +'soa' +'smo' +'slow' +'sle' +'sko' +'sk' +'sitze' +'sische' +'sinken_' +'sin_' +'shipping_' +'shing_' +'sher' +'sheer_' +'shareholders_' +'sey_' +'setz' +'sett' +'servi' +'sene' +'sem' +'sely_' +'seits_' +'seit' +'seemingly_' +'seeing_' +'sed' +'sebagai_' +'seas' +'script_' +'schwere_' +'schwachen_' +'schuld' +'schlimmer_' +'schiff_' +'schien_' +'scher' +'sau' +'sarge' +'sanit' +'saf' +'sache' +'räume_' +'rus' +'ruins_' +'ruck' +'rub' +'rts_' +'rtet_' +'roof_' +'rolle_' +'rock_' +'roc' +'rnen_' +'rmo' +'rkte' +'river_' +'riv' +'ring' +'rik_' +'rien' +'rieg' +'restored_' +'responsi' +'reso' +'resist_' +'resign' +'researchers_' +'repression_' +'renewed_' +'relevante' +'reiten_' +'reite' +'reif' +'reichsten_' +'rei_' +'referen' +'redu' +'reco' +'reck' +'realise_' +'real' +'rchi' +'rauch' +'rated_' +'rami' +'radical' +'rab' +'quit' +'quer' +'quelle_' +'pushing_' +'pus' +'pursuing_' +'pure_' +'pto' +'präg' +'provo' +'propaganda_' +'projekte_' +'prohibited_' +'programming_' +'produzieren_' +'proceed_' +'privatis' +'printed_' +'principal_' +'prevail_' +'prev' +'preserv' +'pres' 
+'preference_' +'preferable_' +'prefer' +'precious_' +'prec' +'popula' +'poorer_' +'politician_' +'pola' +'plä' +'plug_' +'plat' +'plane_' +'pla' +'pilot_' +'pig' +'pieces_' +'physi' +'phy' +'perm' +'performing_' +'penalty_' +'pemerintah_' +'ped' +'peacefully_' +'parteien_' +'parlamentarischen_' +'param' +'pane' +'paket_' +'pak' +'pain' +'overwhelming_' +'overview_' +'overs' +'overr' +'ours_' +'ots_' +'oten_' +'ost_' +'oss' +'osa' +'orthodox' +'orn_' +'orge' +'organic_' +'ore_' +'ordnungsgemäß_' +'ordnete' +'oran' +'ont' +'oned_' +'one' +'onder' +'omis' +'om' +'oliti' +'oldest_' +'ohn' +'ohl_' +'ograph' +'officer_' +'occurs_' +'obe' +'nächster_' +'nze' +'num' +'ntly_' +'nth' +'nso' +'nr' +'nowhere_' +'normally_' +'nm' +'nissen_' +'nga' +'neig' +'nehme_' +'neck' +'ndel' +'nd' +'nationalism_' +'national' +'nat' +'nahe' +'nah' +'nachzu' +'nachh' +'nachdrücklich_' +'multimedia_' +'mul' +'mouth_' +'mounting_' +'mos' +'mood_' +'monopoly_' +'modification_' +'modes_' +'mk' +'mittelalterliche' +'mite' +'mist' +'missbrauch' +'mild' +'mie' +'mehrs' +'mega' +'mee' +'medizinische' +'measur' +'mble_' +'maß' +'maybe_' +'maximi' +'mant' +'manage' +'mana' +'mala' +'mailing_' +'mage' +'mach' +'lö' +'lé' +'lusi' +'loyalty_' +'loyal' +'lowering_' +'lou' +'loose_' +'log' +'locker' +'loan_' +'llig' +'literally_' +'linken_' +'line' +'like' +'liefert_' +'lick' +'lichsten_' +'lich' +'liation_' +'lgen' +'letter_' +'leiten_' +'legitime' +'lays_' +'laste' +'lass' +'lant' +'lance_' +'labelling_' +'kurs' +'ktur' +'ktive' +'kte_' +'ks' +'kritische_' +'konsum' +'konstruktive_' +'komplette' +'kommuni' +'kol' +'kno' +'kni' +'knapp_' +'kleinere_' +'klar' +'kit_' +'kilo' +'kehrt_' +'keb' +'kau' +'jä' +'jum' +'jo_' +'jetzigen_' +'jet' +'ject' +'jan_' +'jah' +'izing_' +'its' +'iter_' +'iss' +'israelische_' +'isolation_' +'ishing_' +'ish' +'ised_' +'ironi' +'iro_' +'irischen_' +'invi' +'investieren_' +'intr' +'internasional_' +'interm' +'intensi' +'intense_' +'installieren_' +'insta' +'innocent_' +'initiat' +'ingung_' +'infolge_' +'info_' +'inflation' +'infected_' +'iner' +'indoor_' +'individuellen_' +'individuelle_' +'individuali' +'indig' +'indicates_' +'ind_' +'incorporated_' +'inad' +'implied_' +'imperative_' +'imper' +'imo' +'imme' +'iller' +'ile' +'iko' +'igen' +'ically_' +'höchst_' +'hy_' +'hs' +'hren' +'hou' +'holes_' +'hob' +'hne' +'hly_' +'hinnehmen_' +'hide_' +'hide' +'heutige_' +'hervorgebracht_' +'herum' +'herself_' +'herausge' +'hem' +'hegemony_' +'headquarters_' +'harte' +'harsh_' +'halben_' +'hal_' +'günstige_' +'gues' +'grünen_' +'grund' +'grosse_' +'gross_' +'grand_' +'graf' +'grade_' +'governed_' +'golden_' +'gnen_' +'gm' +'gio' +'gil' +'ggl' +'gewählte_' +'getr' +'gespeichert_' +'gespa' +'geräte_' +'geringen_' +'geregelt_' +'genießt_' +'geniessen_' +'geneti' +'generi' +'geholfen_' +'geführten_' +'gefährlichen_' +'geeigneten_' +'gedacht_' +'gebi' +'gas' +'ganze' +'fünf' +'führten_' +'füg' +'fü' +'fähr' +'funded_' +'functional_' +'fs' +'freundliches_' +'freige' +'freier_' +'fraud_' +'fragt_' +'formuliert_' +'format' +'forderte_' +'focuses_' +'flicht' +'flexib' +'fits_' +'finished_' +'finanzieller_' +'fina' +'fin_' +'fil' +'fifth_' +'festzustellen_' +'festen_' +'fertig' +'ferr' +'fern_' +'feelings_' +'fan_' +'fak' +'fait' +'fahr' +'factory_' +'fac' +'extremen_' +'extrem_' +'explicitly_' +'experiment_' +'existed_' +'exce' +'ewi' +'ever' +'eut' +'europäisches_' +'etzung_' +'ett' +'etic_' +'ethnischen_' +'ethical_' +'essa' +'esc' +'erwerben_' +'erste' +'ernsthafte_' +'ernsthaft_' +'erleichtern_' +'erhältlich_' 
+'erhielten_' +'erfreut_' +'erbe' +'erba' +'erati' +'episode_' +'eous_' +'enve' +'entwurf_' +'enthaltenen_' +'entfernen_' +'entdeckt_' +'entar' +'enta' +'engen_' +'empty_' +'employers_' +'empire_' +'emb' +'elte' +'elo' +'eliminating_' +'elf' +'elect' +'ekt' +'einziges_' +'einschl' +'einladende' +'eingestellt_' +'eing' +'eine' +'eind' +'eig' +'eib' +'ehemalige_' +'egung_' +'egt_' +'egi' +'effektiven_' +'edition_' +'ech' +'ebo' +'ease_' +'dynami' +'dscha' +'dry_' +'druck' +'drew_' +'downtown_' +'doubts_' +'donor_' +'domain_' +'disp' +'disorder_' +'disk' +'discharge_' +'disastrous_' +'direct' +'diplomats_' +'dine' +'dienst_' +'dian' +'diam' +'diagnose' +'detaillierte' +'desirable_' +'desi' +'deri' +'depart' +'deny_' +'denselben_' +'denied_' +'demonstrates_' +'demnächst_' +'deli' +'delegate' +'dein' +'dei_' +'degradation_' +'defin' +'deemed_' +'decrease_' +'deco' +'declining_' +'debts_' +'debating_' +'daz' +'dauert_' +'dauerhafte_' +'dasselbe_' +'dangers_' +'dance_' +'damaged_' +'cyclical_' +'cuti' +'ctions_' +'crowd' +'crack' +'countryside_' +'cos' +'cook' +'conte' +'constructed_' +'connect' +'confusion_' +'confirmation_' +'configuration_' +'confident_' +'condemned_' +'comprises_' +'compliance_' +'complement' +'commande' +'coloni' +'collaborat' +'coincide' +'codi' +'coc' +'clip' +'clarify_' +'cks_' +'cio_' +'cio' +'cian' +'cia' +'choosing_' +'chet_' +'cheaper_' +'char' +'centrally_' +'cells_' +'celebrate_' +'ced_' +'catalog' +'capitalist_' +'cancelled_' +'campaigns_' +'by' +'buying_' +'brutale' +'brutal_' +'bru' +'bro' +'breit' +'breakdown_' +'brain_' +'bra' +'boy' +'bou' +'bold_' +'bod' +'boasts_' +'blo' +'blic' +'bles_' +'blank' +'bitter' +'bisherigen_' +'bir' +'biodiversity_' +'bill' +'bic' +'bewegt_' +'betreu' +'betre' +'besuchte_' +'beste' +'beschä' +'beschreibt_' +'beri' +'berechtig' +'berechnet_' +'bemerkenswert_' +'beliebte' +'beizutragen_' +'begrüßt_' +'begab_' +'beeinträchtigt_' +'bedürf' +'bedingten_' +'bedienen_' +'bedauer' +'beda' +'bec' +'bearing_' +'beantragen_' +'battery_' +'bankers_' +'bahn_' +'bab' +'aß' +'az_' +'awarded_' +'aute' +'ausste' +'ausgestattete' +'ausgesch' +'ausgerichtet_' +'ausgelöst_' +'aufweist_' +'aufs' +'aufn' +'aufh' +'aufgerufen_' +'aufgeh' +'aufbauen_' +'audience_' +'attraction_' +'attend_' +'atten' +'attempting_' +'ator' +'ath_' +'atas_' +'assurance_' +'assuming_' +'assumed_' +'assist_' +'assigned_' +'asiatische_' +'ars_' +'arrogant_' +'arro' +'arin' +'argumentiert_' +'approve_' +'approaches_' +'appreciated_' +'appr' +'appe' +'anzeigen_' +'anwe' +'anste' +'anscheinend_' +'annimmt_' +'anne' +'anlage_' +'anhand_' +'angefangen_' +'angef' +'ane_' +'ane' +'amm' +'amend_' +'ama_' +'allocated_' +'allgemeinem_' +'ality_' +'alist' +'alismus_' +'aktivieren_' +'airports_' +'ahl' +'aggregate_' +'ager' +'agents_' +'age' +'ado' +'adequately_' +'aden_' +'addresses_' +'addict' +'activists_' +'achievements_' +'achievement_' +'accus' +'accelerate_' +'abzielt_' +'abt' +'abstimmen_' +'absolut_' +'abrupt' +'abr' +'abo' +'abhängen_' +'abgew' +'abgestimmt_' +'abgest' +'abf' +'abd' +'abandoned_' +'aa' +'Zuständigkeiten_' +'Zugeständnisse_' +'Zugangs' +'Zoll' +'Zeitalter_' +'Zeichen' +'Yugoslavia_' +'Yan' +'Word_' +'Wo_' +'Westens_' +'Wes' +'Weltk' +'Weil_' +'Websites_' +'Watch_' +'Wart' +'Waren' +'Warcraft_' +'Vorz' +'Vorsitzende_' +'Volksgesundheit_' +'Voi' +'Vista_' +'Veröffentlichung_' +'Versionen_' +'Verschl' +'Versammlung_' +'Verkehrsmittel_' +'Verfügbarkeit_' +'Verfahrens' +'Verbündeten_' +'Vac' +'Until_' +'Unterzeichnung_' +'Unterstütz' +'Unterhaltungs' +'Ungarn_' 
+'Unf' +'Umweltfragen_' +'Umf' +'UT' +'UNG_' +'UB' +'Tö' +'Turn' +'Turk' +'Turb' +'Tschetschenien_' +'Ts' +'Trends_' +'Tourism_' +'Tour' +'Torre' +'Todesfälle_' +'Thomas_' +'Theorie_' +'Tempo_' +'TR' +'TB_' +'Syrian_' +'Swi' +'Swa' +'Störungen_' +'Studium_' +'Struktur' +'Strei' +'Stream' +'Storage_' +'Starfleet_' +'Standort_' +'Spur' +'Spieler' +'Speisen_' +'Sowohl_' +'Southern_' +'Sonne_' +'Sommer' +'Sollten_' +'Smith_' +'Singapore_' +'Sicherheitsa' +'Shin' +'Serbi' +'Select_' +'Sekunde_' +'Scott_' +'Sco' +'Schüler_' +'Schwarz' +'Schulz_' +'Schr' +'Schne' +'Schm' +'Schlafzimmer_' +'Schlaf' +'Sad' +'Sachen_' +'SPA' +'SER' +'Rü' +'Roosevelt_' +'Ronald_' +'Roh' +'Richter_' +'Rhein' +'Rettung_' +'Respekt_' +'Resi' +'Reservierung_' +'Rent' +'Renaissance_' +'Rena' +'Remo' +'Reduzierung_' +'Reaktionen_' +'Ratspräsident_' +'Railway_' +'Rahmens_' +'Rahmenbedingungen_' +'Raf' +'Rab' +'ROM_' +'RG' +'Quick' +'Question_' +'Queen_' +'Qua' +'Qaeda_' +'Pul' +'Prozesse_' +'Protocol_' +'Proteste_' +'Projekten_' +'Privatisierung_' +'Prague_' +'Pr' +'Port_' +'Por' +'Play' +'Pierre_' +'Pha' +'Pflanzen' +'Pfa' +'Peking_' +'Partnern_' +'Paradi' +'Panel_' +'Pack_' +'Pac' +'PS_' +'Over' +'Orts' +'Organ' +'Opti' +'Oper' +'Olympic_' +'Oc' +'OU' +'OT' +'OD' +'Nä' +'Nutz' +'Nor' +'Nokia_' +'Nikon_' +'Niederlanden_' +'Niederlande_' +'Nicolas_' +'Nichts' +'New' +'Never_' +'Napoleon_' +'Nachhaltigkeit_' +'NT_' +'NICHT_' +'ND' +'Mö' +'Muster_' +'Mugabe_' +'Mozilla_' +'Motto_' +'Motor' +'Motiv' +'Moral' +'Monte' +'Mond' +'Modus_' +'Mode_' +'Mitt' +'Milliarde_' +'Mili' +'Michel' +'Metro' +'Merk' +'Ment' +'Mengen_' +'Medic' +'Media' +'Maßstab_' +'Marx_' +'Mars' +'Marokko_' +'Marine_' +'Marc' +'Luxembourg_' +'Love' +'Lou' +'Log' +'List_' +'Liebe_' +'Library_' +'Less' +'Lese' +'Leicht' +'Lehre_' +'Late' +'Lange' +'Lama_' +'Lam' +'LA_' +'Ky' +'Kreuz' +'Kreditkarte_' +'Krediten_' +'Krebs' +'Kranken' +'Kosten' +'Konzept' +'Konvergenz_' +'Kontakt' +'Kongo_' +'Konflikten_' +'Kommissionspräsident_' +'Kommissions' +'Kol' +'Kohlen' +'Koalition_' +'Klar' +'Kei' +'Katastrophen_' +'Katalog_' +'Karls' +'Kamera_' +'Kalten_' +'Jewish_' +'Jesus_' +'Jere' +'Jen' +'Jan_' +'Jacques_' +'Jac' +'Isa' +'Irr' +'Ironi' +'Intervention_' +'Internetzugang_' +'Intergovernmental_' +'Intel_' +'Installer_' +'Innerhalb_' +'Inha' +'Inflationsrate_' +'Immo' +'Images_' +'Ima' +'Identifi' +'Ibiza_' +'IE' +'IC_' +'Hü' +'Hy' +'Hungary_' +'Hum' +'Hub' +'Hot' +'Host' +'Hon' +'Hollande_' +'Hilfen_' +'Hi' +'Herb' +'Heizung_' +'Hea' +'Hauptver' +'Hass' +'Hardware_' +'Harbour_' +'Handlungs' +'HT' +'HS' +'Gö' +'Gäste' +'Gun' +'Griff_' +'Graf' +'Gläubiger_' +'Gläubige' +'Glo' +'Gewiss' +'Geschäft_' +'Geräte_' +'Gepäckraum_' +'Georgien_' +'Geo' +'Gender_' +'Gegenstände_' +'Gefolge_' +'Garantien_' +'Gan' +'Galileo_' +'Freund_' +'Freund' +'Freiheits' +'Frauen' +'Franzosen_' +'Francisco_' +'Forscher_' +'Flugzeug' +'Flor' +'Flo' +'Fleisch_' +'Five_' +'File_' +'Fett' +'Festplatte_' +'Fern' +'Feed' +'Fas' +'Farbe_' +'Fang' +'Fahrrad' +'Extra' +'Exporte_' +'Explorer_' +'Ever' +'Euros_' +'Erz' +'Erst_' +'Erst' +'Erreichung_' +'Erinnerung_' +'Ergebnissen_' +'Englisch_' +'Energiequellen_' +'Energieeffizienz_' +'Enc' +'Emi' +'Elevator_' +'Einsatz' +'Eines_' +'Effekte_' +'Eco' +'EU' +'ECH' +'EB' +'Dubai_' +'Dro' +'Drittländern_' +'Dos' +'Domin' +'Display_' +'Director_' +'Dir' +'Dingen_' +'Dilemma_' +'Diesel' +'Diagnose' +'Desktop_' +'Den' +'Dem' +'Defizit' +'Dau' +'Darstellung_' +'Danish_' +'DAT' +'Custom' +'Crespo_' +'Create_' +'Count' +'Corporation_' +'Consider_' +'Computers' 
+'Computer' +'Clearly_' +'Cle' +'Christmas_' +'Christine_' +'Chin' +'Child_' +'Cher' +'Chen' +'Chaos_' +'Change_' +'Chancengleichheit_' +'Cent' +'Celsius_' +'Cardassian' +'Cannes_' +'Cala_' +'Cai' +'CU' +'CR' +'COM_' +'CC_' +'CB' +'Bü' +'Brust' +'Brief_' +'Brennstoffe_' +'Brennstoff' +'Branche_' +'Brad' +'Botschaften_' +'Borg_' +'Boo' +'Blue' +'Bilanz_' +'Bil' +'Bier' +'Bezahlung_' +'Betreiber' +'Bestimm' +'Beste' +'Besorgnis_' +'Beschäftigten_' +'Beschr' +'Bern' +'Bericht' +'Bereits_' +'Beobachtung' +'Behörde_' +'Bef' +'Balkon_' +'Bai' +'Bag' +'BM' +'Ayatollah_' +'Ay' +'Autor_' +'Austr' +'Auslands' +'Ausf' +'Ausbruch_' +'Ausbeutung_' +'Aufklärung_' +'Aufhebung_' +'Aufenthalts' +'Att' +'Atomwaffen_' +'Ash' +'Arzneimittel' +'Arti' +'Area_' +'Architekt' +'Arbeitsplätzen_' +'Arbeitsmarkt' +'Ara' +'Apo' +'Anspr' +'Ansehen_' +'Anschläge_' +'Anschl' +'Annan_' +'Ank' +'Anfragen_' +'Andre' +'Anbindung_' +'Among_' +'Ameri' +'Alpen_' +'Alp' +'Allow_' +'Allah_' +'Albert_' +'Albani' +'Aktions' +'Adria' +'Abu_' +'Abteilung_' +'About_' +'Abk' +'Abb' +'AVI_' +'AT_' +'AM_' +'AF' +'ACP_' +'?”_' +'== _' +'="_' +': '_' +'99_' +'93_' +'77_' +'76_' +'68' +'65' +'64' +'58_' +'53_' +'51' +'47' +'350_' +'1st_' +'1956_' +'1930_' +'150' +'.&#_' +'. )' +'. "_' +'--' +'- (_' +', '_' +'); _' +'( _' +'% ' +'!!_' +' – ' +' »_' +' «_' +' - ' +' * _' +' %._' +' %' +'€' +'ә' +'ғ' +'ір' +'э' +'ты' +'ки' +'жа' +'ен' +'ге' +'г' +'ал' +'а_' +'α_' +'α' +'ğ' +'č' +'ültig' +'ückte' +'ücher_' +'überz' +'überwachen_' +'überst' +'übermäßige' +'überge' +'überd' +'ø' +'öße' +'ötig' +'ösen_' +'ökonomische_' +'öh' +'öf' +'ín' +'éta' +'ér' +'ça_' +'å_' +'å' +'äußerster_' +'ärkte' +'är_' +'änglich' +'änger' +'änd' +'ällig' +'äft' +'ächtig' +'án' +'ßig' +'ße_' +'Äußerungen_' +'Änderungsanträgen_' +'®' +'« _' +'«' +'}} ' +'zügig' +'züge' +'zz' +'zusammenbr' +'zurzeit_' +'zum' +'zukünftige_' +'zuh' +'zogen_' +'zl' +'zier' +'zia' +'zh' +'zerstör' +'zers' +'zeichne' +'zei' +'zehn' +'zan' +'ypt' +'yl_' +'yacht' +'xp' +'wünscht_' +'wünschenswert_' +'wäch' +'wusste' +'wur' +'worte' +'word' +'wohin_' +'woch' +'wn_' +'wn' +'with' +'wit' +'wisdom_' +'wirk' +'wines_' +'wettbewerbs' +'wett' +'wes' +'wertung_' +'wellness_' +'wellbeing_' +'weißen_' +'weiterzu' +'weitem_' +'watching_' +'watch' +'wasn_' +'warf' +'wahre_' +'wahl' +'wachsende' +'vorschl' +'vornehmen_' +'vorliegt_' +'vorgeschrieben' +'voranzutreiben_' +'voneinander_' +'volunteers_' +'vollz' +'vol' +'virtuelle' +'vin_' +'vielf' +'vie' +'vi_' +'veto_' +'veränderte' +'verweisen_' +'vertrete' +'verständlich_' +'versi' +'verschwinden_' +'verschle' +'verschieden_' +'verschieben_' +'verschie' +'vermieden_' +'vermi' +'verlängern_' +'verlust' +'verletz' +'verlager' +'verkehrs' +'verha' +'verglichen_' +'vergleich' +'verfassungs' +'verdienen_' +'verdi' +'verbindlich_' +'verbesserten_' +'verantwort' +'veran' +'variations_' +'uve' +'usse' +'usel' +'usch' +'urm' +'url' +'uring_' +'urgency_' +'ups_' +'upcoming_' +'uous_' +'unzählige_' +'unweit_' +'unwahrscheinlich_' +'untersucht_' +'unterm' +'unpro' +'unmittelbaren_' +'unmi' +'unknown_' +'univers' +'unhe' +'ungssystem' +'ungsre' +'ungan_' +'unft' +'unfair_' +'unen' +'uneingeschränkt_' +'understands_' +'underscore' +'unbr' +'unanimously_' +'ume' +'ulen' +'ule' +'uer_' +'ud_' +'tür' +'té' +'tungen_' +'tum' +'tub' +'ttel' +'tron' +'tremendous_' +'treatments_' +'traten_' +'transmission_' +'transit' +'transformed_' +'transc' +'transatlantischen_' +'transactions_' +'transaction_' +'trains_' +'trad' +'tou' +'total' +'tory_' +'tolle' +'todo' +'tod' +'tive' 
+'tischer_' +'tire' +'tir' +'tieren_' +'tiefe_' +'tib' +'throw' +'thor' +'tho' +'tge' +'teuer' +'terminal_' +'teria' +'tens' +'tende' +'tema' +'technische' +'techni' +'tc' +'tasty_' +'tar_' +'tankers_' +'talent_' +'tak' +'tai' +'tage_' +'tage' +'systematically_' +'symbolic_' +'sym' +'sy_' +'suspension_' +'surpluses_' +'surg' +'supplement' +'suff' +'sudah_' +'subway_' +'substan' +'stück_' +'stö' +'stuff_' +'stuf' +'studie' +'struktur_' +'stro' +'strikte' +'strikes_' +'stretch' +'streng_' +'straight_' +'straf' +'stoffe_' +'stle' +'stieg_' +'stick_' +'stet_' +'stes_' +'steam_' +'steadily_' +'stea' +'statt' +'statisti' +'stamm' +'stakes_' +'stabile_' +'ssen' +'sprachliche' +'sprachen_' +'spell' +'speed' +'speeches_' +'spectacular_' +'specify_' +'sound' +'soul_' +'sorgfältige' +'sorgfältig_' +'sophisticated_' +'solved_' +'sogenannten_' +'sofa_' +'smi' +'smart_' +'slowly_' +'sive_' +'sinnvolle' +'simpli' +'sim' +'sili' +'signing_' +'signature_' +'sierungs' +'sierte' +'sieg' +'sie' +'sic' +'shut_' +'shortcomings_' +'ship' +'shifts_' +'shee' +'sge' +'sexuelle_' +'settled_' +'sema' +'secondly_' +'seba' +'scrutin' +'screening_' +'scrap' +'schwächere' +'schwäch' +'schwedischen_' +'scholar' +'schließt_' +'sches_' +'schauen_' +'sca' +'saubere' +'satisfied_' +'sak' +'safeguard_' +'rückg' +'rö' +'räge_' +'rui' +'ruh' +'ruch_' +'rter' +'roo' +'romantic_' +'roman' +'roa' +'rnähr' +'rna' +'rm_' +'rka' +'ritten_' +'risiko_' +'rige_' +'richtet_' +'ria_' +'rhe' +'reward_' +'revive' +'revers' +'reveal_' +'returning_' +'retro' +'retr' +'restoration_' +'ress' +'responded_' +'residents_' +'reside' +'reproduc' +'repr' +'repli' +'repa' +'renov' +'renminbi_' +'removing_' +'remind' +'remark' +'relocat' +'reinforce_' +'reiche_' +'register_' +'regierung_' +'regelmäßig_' +'refusal_' +'redistribution_' +'recon' +'recht' +'rechnen_' +'receives_' +'reben_' +'realistische' +'realisier' +'realised_' +'reader_' +'rator' +'ratifiziert_' +'ratification_' +'rapide_' +'ranks_' +'rank' +'rall' +'rahmen' +'ract' +'quoten_' +'quisit' +'quir' +'quie' +'ques_' +'quanti' +'qualifizierte_' +'qualifications_' +'pushed_' +'pup' +'präsentier' +'prozesses_' +'prove' +'protektionistische' +'prosperous_' +'proof_' +'prol' +'projekt_' +'programmen_' +'professionelle' +'prob' +'prize_' +'privilege_' +'printing_' +'preventive_' +'prevail' +'prestigious_' +'preserved_' +'presentation_' +'prescri' +'premature_' +'pragmatic_' +'potenziellen_' +'poss' +'poses_' +'pollut' +'pole' +'polar_' +'po_' +'plural' +'pill' +'pier' +'philosophy_' +'phas' +'pflege' +'pfl' +'pfel' +'pfe_' +'petit' +'pes_' +'persönliche' +'persuade_' +'persone' +'persist' +'perpet' +'permi' +'periode_' +'pere' +'perat' +'pensions_' +'penda' +'pemba' +'pel' +'peacekeeping_' +'patient_' +'passes_' +'partitions_' +'parlamentarische_' +'parity_' +'paren' +'papers_' +'panel_' +'pana' +'painting_' +'own' +'owe_' +'overw' +'oversight_' +'overe' +'outs_' +'outs' +'outl' +'oti' +'ote_' +'oste' +'osen_' +'osc' +'ori' +'operates_' +'opens_' +'openly_' +'opa' +'onym' +'onat' +'onal_' +'ome_' +'ologie_' +'ologi' +'oil' +'ofi' +'offensichtliche' +'odie' +'ock_' +'ochen_' +'och_' +'occupation' +'oca' +'obst' +'observation_' +'obli' +'ober' +'nzi' +'nutrition_' +'ntw' +'nsu' +'nous_' +'nost' +'nos' +'nomin' +'noi' +'nnt_' +'nner' +'nne_' +'nlage' +'nko' +'niu' +'nity_' +'nio' +'nik_' +'nightlife_' +'nien_' +'niedrigere' +'niedrige_' +'ngst' +'ngn' +'nglich_' +'ngel' +'neuro' +'neuesten_' +'nett' +'ness' +'nesian' +'ners_' +'neglig' +'neglect_' +'negativ_' +'necessity_' +'ndete' +'nden' +'naval_' 
+'namens_' +'nah_' +'nada_' +'männ' +'mutige' +'musik_' +'multilateralen_' +'mpin' +'mpf' +'mp_' +'movies_' +'mov' +'mounted_' +'mortgage_' +'monument' +'moment' +'modules_' +'moderni' +'modern' +'mixture_' +'mitge' +'missi' +'mism' +'misc' +'mir' +'minist' +'mining_' +'minimi' +'mina' +'militärisch' +'militant' +'meta' +'mengu' +'mengak' +'menga' +'membu' +'med' +'mechanismen_' +'measurement_' +'matche' +'master' +'massiv_' +'massage_' +'marine_' +'marginal_' +'mapp' +'mali' +'makroökonomische' +'mak' +'mai' +'mah' +'magne' +'magazine_' +'ländliche_' +'lve' +'lv' +'lungs' +'lue' +'luc' +'lua' +'lu_' +'loses_' +'logo_' +'loc' +'lobb' +'lize' +'liza' +'litik' +'liti' +'literar' +'listening_' +'liste' +'list' +'limiting_' +'liebe_' +'licherweise_' +'license_' +'lib' +'lf' +'leute_' +'letting_' +'lending_' +'lem_' +'leitung_' +'leitete_' +'leitet_' +'leis' +'legislat' +'leere' +'lect' +'laute' +'lati' +'latein' +'lager_' +'künstlich' +'käm' +'kta' +'kritisch' +'kris' +'kre' +'kopieren_' +'kontrollierte' +'kontrolle_' +'konkret_' +'konflikt' +'komplette_' +'kommunistische_' +'kolle' +'kenne_' +'kebijakan_' +'karten_' +'kampf' +'kam' +'kali' +'kale' +'kai' +'jüngere' +'jährliche_' +'judgment_' +'judge_' +'judge' +'jegliche' +'jed' +'jas' +'itze' +'itution_' +'ities_' +'istisch' +'ister' +'issen_' +'irt' +'irgendwann_' +'iranischen_' +'ira_' +'ip_' +'iot' +'io' +'invade' +'interpreti' +'interprete' +'interim_' +'interessanten_' +'intellektuelle' +'integrierten_' +'integrieren_' +'integral' +'institutionelle_' +'installer_' +'insch' +'inner_' +'inj' +'inie' +'inglich' +'ingen' +'infringement_' +'informati' +'influential_' +'ineffective_' +'industrielle_' +'induce' +'incredibly_' +'inan' +'imposing_' +'immer' +'ily_' +'illo' +'ildung_' +'ihrerseits_' +'ignoriert_' +'ignorieren_' +'ift' +'iete' +'iere_' +'iere' +'ielen_' +'iehen_' +'iegel' +'idor' +'identical_' +'idealer_' +'icul' +'ichte' +'ices_' +'ican' +'ias_' +'hü' +'hôtel_' +'hunting_' +'hunderte' +'hrung_' +'hre' +'hosted_' +'hospitality_' +'hone' +'holen_' +'hol_' +'hochwertige_' +'hne_' +'hme' +'hm_' +'hle' +'historisch_' +'his' +'hinzugefügt_' +'hint' +'hil' +'hike' +'hierzu_' +'hic' +'heutzutage_' +'hersteller_' +'heri' +'here' +'herausragende' +'height_' +'hearts_' +'health' +'heading_' +'hbo' +'haushalt' +'has' +'harg' +'hard' +'handled_' +'handed_' +'ham' +'hall' +'halb' +'hafte_' +'had' +'habit' +'gutem_' +'gten_' +'greift_' +'grasp_' +'good' +'gni' +'gn_' +'globale' +'gleicher_' +'git' +'gewünschten_' +'gespe' +'geru' +'gericht_' +'gerei' +'gerechter' +'gerechten_' +'geprägt_' +'gepflegt' +'geographical_' +'gent' +'gena' +'gemäßigte' +'gelo' +'geklärt_' +'geist' +'gehandelt_' +'gehabt_' +'gegens' +'geblieben_' +'gebeten_' +'geber' +'gation_' +'ganis' +'gangen_' +'fäll' +'fä' +'fut' +'fus' +'fung_' +'friedliche_' +'freundlicher_' +'freu' +'freez' +'fossile' +'formula_' +'formats_' +'formally_' +'forever_' +'fores' +'foods_' +'fond' +'fließen_' +'fl' +'finnische' +'findings_' +'fig' +'fiel_' +'fi_' +'ffs' +'ffen_' +'fet' +'fern' +'fer_' +'fel_' +'feind' +'fehler' +'federa' +'favourite_' +'favour' +'fassen_' +'fascinating_' +'fantas' +'fals' +'fairen_' +'faire_' +'fahrzeuge_' +'facility_' +'eßen_' +'extremism_' +'externen_' +'externe_' +'express' +'explicit_' +'expertise_' +'experimental_' +'exhibition_' +'executi' +'executed_' +'excluded_' +'exclude' +'ewe' +'evol' +'evo' +'eventuelle' +'eve' +'etzt_' +'eter_' +'ete' +'este_' +'esan' +'erweiterten_' +'erwe' +'ervi' +'ertrag' +'erstklassige' +'erson' +'erschienen' +'errichtete' 
+'errichten_' +'erra' +'erp' +'ernste_' +'erneuer' +'ermutigen_' +'erla' +'erka' +'erin' +'erheben_' +'ergibt_' +'erfü' +'erarbeiten_' +'entscheide' +'entl' +'ensu' +'enm' +'eng' +'ened_' +'enco' +'emphasis' +'emm' +'els' +'ellt_' +'elit' +'eliminated_' +'elf_' +'einse' +'einsch' +'eins' +'einmalige_' +'einig' +'eingereicht_' +'eingeg' +'eingeb' +'einf' +'einbr' +'einbezogen_' +'eigentlichen_' +'eigentliche_' +'eich' +'egel' +'effektiver_' +'ee' +'ecological_' +'echn' +'eche' +'ebu' +'eat' +'earn' +'durchd' +'dunkle' +'duc' +'drittens_' +'dre' +'draw' +'drag_' +'dos' +'donat' +'dominate_' +'dlich' +'dle_' +'disturb' +'distribut' +'distr' +'diss' +'disrupti' +'disi' +'discretion_' +'disappear' +'dip' +'dina' +'digitalen_' +'digitale_' +'dig_' +'differently_' +'dier' +'dich_' +'dge_' +'dge' +'devastating_' +'dete' +'destabili' +'desp' +'design' +'dero' +'derjenigen_' +'derive_' +'dera' +'deposit_' +'deport' +'deployment_' +'deployed_' +'denjenigen_' +'demonstrators_' +'demographic_' +'delle_' +'delicate_' +'defending_' +'defect' +'defeat' +'declined_' +'declare_' +'declar' +'decken_' +'dad_' +'dac' +'cus' +'cultures_' +'cts_' +'crystal' +'cry_' +'courage_' +'coordinat' +'convey' +'convert' +'conv' +'controller_' +'contaminated_' +'consistently_' +'cong' +'conf_' +'comput' +'compulsory_' +'complaints_' +'competent_' +'competences_' +'compatibility_' +'command' +'column' +'colo' +'collected_' +'clothing_' +'clim' +'clicking_' +'clarification_' +'citizenship_' +'cil' +'cigarette' +'cien' +'cial_' +'chung_' +'chte_' +'chs_' +'chronische' +'chro' +'christliche' +'chlich' +'chlag' +'chir' +'chic' +'chemische_' +'chee' +'chart' +'chairman_' +'chafts' +'ces' +'cent_' +'cen' +'cement_' +'cea' +'carrier_' +'cap' +'camp' +'cali' +'burned_' +'bung_' +'buch' +'bt' +'brother_' +'broker' +'breiter_' +'breast_' +'bran' +'brachten_' +'boy_' +'boom' +'booked_' +'blow_' +'blis' +'blick_' +'bless' +'blame_' +'bili' +'bike_' +'bien_' +'bewährte' +'beweist_' +'bewaffneten_' +'bevorzugte' +'betragen_' +'betrachte_' +'bet_' +'beständig' +'bestre' +'beschrieben_' +'beschleunigt' +'besagt_' +'berl' +'berichtet_' +'bereitgestellt_' +'bereich' +'berechtigt_' +'beneficial_' +'benefi' +'benannt' +'benachrichtigt_' +'bemerkt_' +'beliebtes' +'bekämpfung_' +'behörden_' +'befürwortet_' +'befreien_' +'befehl' +'befasst_' +'beeinflusst_' +'bedingungen_' +'bede' +'bedanken_' +'beck_' +'beberapa_' +'bbi' +'baut_' +'bathing_' +'bath' +'basically_' +'banyak_' +'bankr' +'bahan_' +'azi' +'aya_' +'automatic_' +'auto_' +'authorise' +'auszusch' +'auswärtige_' +'ausschl' +'ausreichende_' +'ausreichen_' +'ausgi' +'ausgel' +'aufweisen_' +'aufste' +'aufrecht_' +'aufges' +'auffordern_' +'auber' +'attribute' +'attending_' +'attacking_' +'attached_' +'attach' +'ats_' +'atis' +'atic_' +'aster' +'aste' +'assessments_' +'arts_' +'armo' +'ark' +'aries_' +'ards_' +'archa' +'arbeiter_' +'approval_' +'approaching_' +'appointment_' +'ape' +'anzus' +'anzug' +'anxiety_' +'anu' +'anticipated_' +'anschließend_' +'anschl' +'ansa' +'anr' +'annually_' +'annten_' +'anle' +'anhalten' +'angka' +'anger' +'angenehme' +'anfangen_' +'ando' +'andern' +'ande' +'ances_' +'analyze' +'analyst' +'amtierende' +'ame' +'ambitions_' +'amazing_' +'albeit_' +'alarm' +'ala_' +'akzept' +'aktionen_' +'airlines_' +'ahren_' +'ahr' +'ahl_' +'agte' +'aging_' +'affi' +'advocate_' +'advice_' +'adidas_' +'adhere' +'adapti' +'adapted_' +'adapt' +'acy_' +'actively_' +'act' +'acr' +'acquired_' +'acknowledg' +'achts' +'accura' +'accountable_' +'accountability_' +'accomplished_' 
+'accidents_' +'accident_' +'academic_' +'ac' +'abzulehnen_' +'absolute' +'ablehnen_' +'abh' +'abges' +'abe' +'abba' +'abb' +']] | _' +'Zwischenzeit_' +'Zwei_' +'Zustellbetten_' +'Zusammenfassung_' +'Zusagen_' +'Zugriff' +'Zuge' +'Zimmerbeschreibung_' +'Zertifikat' +'Zeitungen_' +'Zeitplan_' +'Zauber' +'Zahlreiche_' +'Young_' +'Xi' +'XML_' +'Wur' +'Wu' +'Wohnungs' +'Wissenschaftlern_' +'Wirtschaftss' +'Wirtschaftsr' +'Wirtschaftsm' +'Wirtschaftskrise_' +'Wirtschaftsa' +'Wirtschaftlich' +'Winter' +'Wilhelm_' +'Wiederg' +'Wichtig_' +'Who' +'Werte' +'Weltraum' +'Wellness' +'Well_' +'Weiteren_' +'Wechselkurs' +'Wachstumspakt' +'WO' +'WM' +'Voyager_' +'Votum_' +'Vorsorge' +'Vorgänger' +'Vordergrund_' +'Vorbereitungen_' +'Visionen_' +'Vin' +'Village_' +'Vil' +'Viertens_' +'Vid' +'Verwe' +'Vertretung_' +'Versorgungs' +'Versicherungen_' +'Versi' +'Verr' +'Verordnungen_' +'Vern' +'Veri' +'Verheugen_' +'Vergnügen_' +'Vereinigung_' +'Verbrauch_' +'Verantwortungs' +'Ven' +'VP_' +'Ut' +'Untersuchungs' +'Unterricht' +'Unterdrückung_' +'Unsere' +'Unruhen_' +'Unr' +'Unm' +'Unlike_' +'Ungl' +'Una' +'Umstrukturierung_' +'UNESCO_' +'Trä' +'Tower_' +'Touristen_' +'Tourismus' +'Tos' +'Tools_' +'Tom' +'Together_' +'Tisch_' +'Tickets_' +'Throughout_' +'Through_' +'Thro' +'Thi' +'Theater_' +'Terror_' +'Tempora' +'Tel_' +'Tech_' +'Tea' +'Taxi' +'Taten_' +'Tap' +'Tao' +'Tak' +'Tag' +'Tabelle_' +'TU' +'TC' +'Sü' +'Szen' +'Swoboda_' +'Sun_' +'Suche' +'Subject_' +'Stü' +'Sturm_' +'Stri' +'Streben_' +'Stiftung_' +'Steuerung_' +'Sternen' +'Staatsverschuldung_' +'Staatsschulden_' +'Staatss' +'Spiels' +'Spezie' +'Speci' +'Spani' +'Spaltung_' +'Sp' +'Sometimes_' +'Solid' +'Solche_' +'Solarium_' +'Solar' +'Soci' +'Sm' +'Slow' +'Six_' +'Sisko_' +'Single_' +'Singapur_' +'Similar_' +'Signale_' +'Shops_' +'Server' +'Series_' +'Sende' +'Senate_' +'Senat' +'Semi' +'Scien' +'Schöne' +'Schweizer_' +'Schre' +'Schlu' +'Scha' +'Say' +'Saturday_' +'SanDisk_' +'Sam' +'Salzburg_' +'ST_' +'STO' +'STE' +'ST' +'Rüstungs' +'Rule_' +'Rohstoff' +'Rock' +'Robert' +'River' +'Rica_' +'Republikaner_' +'Repo' +'Rental_' +'Renn' +'Rem' +'Rekord' +'Registrierung_' +'Regions_' +'Reduc' +'Rede' +'Recent_' +'Rea' +'RS_' +'RI_' +'RC_' +'Quo' +'Quest' +'QE_' +'Putins_' +'Publik' +'Prävention_' +'Program_' +'Political_' +'Polit' +'Poi' +'Plu' +'Platz' +'Picard_' +'Photo' +'Philip' +'Phasen_' +'Pflege_' +'Petr' +'Pet' +'Pes' +'Personen' +'Perl' +'Pent' +'Peer_' +'Patt' +'Patent' +'Patch' +'Parlament' +'Parks_' +'Parag' +'Palästina_' +'Palacio_' +'Paar_' +'PXI_' +'PM' +'PH' +'PDF_' +'Ozean' +'Ot' +'Oslo_' +'Original_' +'Ordner_' +'Ora' +'Operation_' +'Online' +'Ombudsman_' +'Olympus_' +'Og' +'Offizier' +'Obamas_' +'ODER_' +'Nue' +'Nova' +'Normal' +'Nob' +'Nik' +'Niger' +'Nichts_' +'Neue_' +'Netzwerke' +'Neigung' +'Nehmen_' +'Nan' +'Nahrungsmittel_' +'Nachrichten' +'Nachfolger_' +'Nachf' +'NP' +'NH' +'NF' +'Mü' +'Möglich' +'Männern_' +'Mund' +'Multi_' +'Mountain' +'Motion_' +'Mord_' +'Monats_' +'Moldova_' +'Moham' +'Modul_' +'Mode' +'Mod' +'Mittag' +'Mitgliedsländer_' +'Mitentscheidung' +'Michel_' +'Messen' +'Messe_' +'Mes' +'Mem' +'Meinungsumfragen_' +'Medi' +'Mea' +'McC' +'Mazedonien_' +'Maus' +'Materialien_' +'Marco_' +'Map' +'Manu' +'Manche_' +'Male' +'Make_' +'Maha' +'MH' +'ME_' +'MED' +'Lösungs' +'Länge_' +'Län' +'Luxus_' +'Lula_' +'Lui' +'Luftverschmutzung_' +'Luftraum' +'Low_' +'Louis_' +'Look_' +'Logo_' +'Loc' +'Lobby_' +'Lis' +'Linke_' +'Lif' +'Leuten_' +'Lesung_' +'Leone_' +'Lektion_' +'Leiden' +'Leid_' +'Leader' +'Lauf' +'Last' +'Lands' +'Lah' 
+'LT' +'LD' +'Körper' +'König' +'Kurd' +'Kreis' +'Kre' +'Kontinents_' +'Kontin' +'Konsumenten_' +'Konst' +'Konflikts_' +'Konferenz' +'Kom' +'Kolonial' +'Koll' +'Kohle' +'Know_' +'Know' +'Klin' +'Keynesian_' +'Kel' +'Kein_' +'Kau' +'Kath' +'Kapitalismus_' +'Kanal' +'Kam' +'Kaffee' +'Kabel_' +'KON' +'Juli' +'Jugendlichen_' +'Jud' +'Jews_' +'Jakob' +'Ist' +'Islamist_' +'Ir' +'Investment_' +'Intera' +'Instanz_' +'Instan' +'Installations' +'Inhalte_' +'Inflations' +'Industriep' +'Impl' +'Impf' +'Immunität_' +'Imagin' +'Illusion' +'Il' +'IV_' +'ISIS_' +'IN_' +'ICEcat_' +'IA_' +'Höhen' +'Häufig' +'Häfen_' +'Hyatt_' +'Hop' +'Hollywood_' +'Holland_' +'Hohen_' +'Hisbollah_' +'Hinweise_' +'Hinter' +'Hindernis_' +'Hil' +'High' +'Herz_' +'Herrsch' +'Herbst_' +'Her_' +'Hen' +'Heinrich_' +'Hat_' +'Hard' +'Hannover_' +'HI_' +'Guinea_' +'Guatemala_' +'Grupp' +'Greens_' +'Greeks_' +'Glücklicherweise_' +'Gibt_' +'Gewinner_' +'Gewinn_' +'Gewalt' +'Gesetzes' +'Gerichten_' +'Generalsekretär_' +'Genau_' +'Gemeinw' +'Gemeinschaftsp' +'Gemein' +'Geiste' +'Gegenwärtig_' +'Gegenwart_' +'Gee' +'Gaz' +'Gaulle_' +'Ganze_' +'Gall' +'G20_' +'Förder' +'Föderation_' +'Fußball' +'Funktionsweise_' +'Fundamental_' +'Frie' +'Freundschaft_' +'Freitag_' +'Freihandelsabkommen_' +'Freihandels' +'Franklin_' +'Format' +'Folgendes_' +'Folge' +'Flug_' +'Flexibilität_' +'Fis' +'Find' +'Finance_' +'Fehler' +'Fee' +'Fau' +'Fass' +'Fam' +'Fakten_' +'Fahrer_' +'Face' +'FM' +'FD' +'Explosi' +'Experience_' +'Evans_' +'Eva' +'Eu' +'Ethi' +'Estonia_' +'Esc' +'Erwärmung_' +'Erste' +'Erscheinung_' +'Erl' +'Erkenntnis_' +'Erfind' +'Erdgas' +'Erbe_' +'Entschlossenheit_' +'Entr' +'Entlastung_' +'Ens' +'End_' +'Empfänger_' +'Emp' +'Elf' +'Electronic' +'Einzelnen_' +'Einw' +'Einladung_' +'Einigkeit_' +'Eigentümer' +'Ehre_' +'Ehr' +'Ecuador_' +'Ebenen_' +'EWG_' +'ENT_' +'Dé' +'Dusche_' +'Durch' +'Dur' +'Drei_' +'Dre' +'Donnerstag_' +'Dona' +'Don' +'Disabled_' +'Differenzen_' +'Differenz_' +'Diesen_' +'Diag' +'Demonstranten_' +'Demokrati' +'Deflation_' +'Defense_' +'Deck' +'Davi' +'Darau' +'DP' +'Cup_' +'Crew_' +'Countries_' +'Cooperation_' +'Converter_' +'Continental_' +'Cont' +'Congo_' +'Conce' +'Compa' +'Come' +'Colon' +'Citizens_' +'Cin' +'Christ' +'Chr' +'Chicago_' +'Chemi' +'Cer' +'Cau' +'Carolin' +'Carlo_' +'Car_' +'CEO_' +'CDs_' +'Bühne_' +'Bushs_' +'Bull' +'Bulgarian_' +'Buchung_' +'Brut' +'Browser_' +'Brit' +'Boutique_' +'Bot' +'Boh' +'Binnenmarktes_' +'Bin' +'Beweg' +'Betrug_' +'Betroffenen_' +'Besteuerung_' +'Beschwerden_' +'Besatzung_' +'Bernanke_' +'Berliner_' +'Belo' +'Bekanntlich_' +'Beit' +'Beförderung_' +'Beamte_' +'Basic_' +'Bara' +'BL' +'Azer' +'Autor' +'Australien_' +'Austausch_' +'Ausgaben' +'Ausb' +'Augenmerk_' +'Auftr' +'Aufs' +'Aufr' +'Assa' +'Asiens_' +'Asi' +'Archer_' +'Arbeitss' +'Arbeitskräften_' +'Arabischen_' +'Arabien_' +'Arabi' +'Araber_' +'Appe' +'Anstatt_' +'Anna' +'Anh' +'Angola_' +'Ander' +'Amts' +'Amm' +'American' +'Alternativen_' +'Alpe' +'Akzeptanz_' +'Aktionsplan_' +'Akte' +'Airways_' +'Addi' +'Achtung_' +'Account' +'Abz' +'Abst' +'Abs' +'Able' +'Abgesehen_' +'Abfall' +'Abenteuer_' +'Abbau_' +'AND_' +'AG' +'AB_' +'A6_' +'>' +'==' +'87_' +'83_' +'76' +'74' +'69_' +'66' +'44' +'41' +'38' +'360_' +'1988_' +'1984_' +'1980er_' +'198' +'1978_' +'1975_' +'1970er_' +'1940_' +'1920' +'178' +'002' +') ._' +'%_' +'") _' +' “ _' +' » _' +' ``_' +' = [[_' +' --> _' +'…' +'”)' +'ү' +'ын' +'ыл' +'ті' +'ти' +'са' +'рі' +'ро' +'ре' +'ос' +'ле' +'ке' +'ес' +'да_' +'бол' +'ба' +'ай' +'Б' +'ι' +'üs' +'ürd' +'ündig' +'üllung' +'ühr' 
+'üge' +'ückt_' +'übt_' +'üblichen_' +'überzeugende' +'überwacht_' +'überschreiten_' +'überleben_' +'überh' +'überb' +'öss' +'öse_' +'ór' +'ña' +'ê' +'éri' +'äußere' +'äuft_' +'ätzung_' +'ätte' +'ärk' +'älteste_' +'älter' +'ält' +'äisch' +'ähnlichen_' +'ächte' +'äc' +'ä_' +'â' +'ár' +'ße' +'Übersetzungs' +'Überschuss_' +'Übereinkunft_' +'° _' +'}} _' +'{' +'zza_' +'zwölf_' +'zweite' +'zusätzliche' +'zustellen_' +'zustande_' +'zurückz' +'zurecht' +'zukünftig' +'zufrieden' +'zte' +'zones_' +'zon' +'zis' +'zip_' +'zio' +'zil' +'zig_' +'zierungs' +'zielen_' +'ziel' +'zeug_' +'zersch' +'zent' +'zeitge' +'zauber' +'yo_' +'yn_' +'xy' +'wäh' +'wunderschönen_' +'writ' +'worauf_' +'wooden_' +'wm' +'witnessing_' +'witnessed_' +'withdraw_' +'wissens' +'wirtschaftspolitische' +'wirtschaftliches_' +'wirt' +'winner_' +'wind' +'width_' +'widerspiegelt_' +'wi_' +'whol' +'westlich_' +'wertvollen_' +'werten_' +'wert' +'weltweite' +'welle' +'welcomed_' +'weiß' +'weich' +'wede' +'wed_' +'wechseln_' +'weakened_' +'weak' +'wd' +'wartet_' +'warned_' +'wan_' +'wage' +'wachstums_' +'vr' +'vorschlägt_' +'vorschläge_' +'vorsch' +'vorr' +'vorherige_' +'vorgegeben' +'vorgeb' +'vorg' +'vora' +'volks' +'void' +'vity_' +'visits_' +'visitor_' +'violations_' +'violation_' +'ville_' +'viewed_' +'view' +'vierte' +'vet' +'veröffentlichten_' +'verwu' +'verwirklichen_' +'verwendeten_' +'verträge' +'vertraut_' +'vertrauen_' +'vertrag_' +'verteilen_' +'verstr' +'versammlung_' +'verp' +'vernichte' +'vermögen_' +'verlä' +'verliehen_' +'verheerende' +'vergi' +'vereint_' +'vereinfacht' +'verde' +'verbreiteten_' +'verbreite' +'verbrauche' +'verbrauch' +'verbl' +'verbindung' +'verarbeitung' +'verantwortungsvolle' +'verabschieden_' +'vention' +'veness_' +'vegetables_' +'vat' +'vary_' +'variant' +'valley_' +'uß_' +'uß' +'utzte' +'utiliz' +'utilit' +'uten_' +'usu' +'uropa_' +'uro' +'urg_' +'urce' +'uran' +'ural' +'upt' +'upgrading_' +'upgraded_' +'updates_' +'unvermeidlich_' +'unv' +'unum' +'unterliegt_' +'unsp' +'unmittelbare_' +'unm' +'unli' +'unl' +'unktion' +'unkt' +'unkonventionelle' +'unklar_' +'unk' +'universelle' +'unif' +'ungsw' +'ungsr' +'ungsprogramm' +'ungsmaßnahmen_' +'ungsk' +'ungleiche' +'unfa' +'unein' +'undertaking_' +'undertaken_' +'understandable_' +'underp' +'undermining_' +'uncon' +'unch' +'unbekannte' +'umu' +'umi' +'umfassende' +'umer' +'umbu' +'ult_' +'uldig' +'uku' +'uin' +'uh_' +'uft_' +'uen' +'uble_' +'uber' +'uat' +'uali' +'ual' +'tzten_' +'typ' +'tv' +'turm_' +'tune' +'tu_' +'tst' +'trum' +'truck_' +'trocken' +'triggered_' +'trigger_' +'tric' +'treffen' +'treat' +'trea' +'traum' +'transportier' +'transnational_' +'translated_' +'translate_' +'transform_' +'tragische' +'traditionell_' +'traditionally_' +'tract' +'tour' +'tos_' +'tm' +'tlin' +'tipp' +'tili' +'tighte' +'tight' +'thriv' +'threatening_' +'thread_' +'thought' +'thick' +'therapie' +'theoretische' +'themes_' +'thanking_' +'testen_' +'teru' +'terk' +'terie' +'tere' +'tends_' +'tenden' +'tell' +'tel' +'teilzunehmen_' +'technisch_' +'technik_' +'teach_' +'tausende_' +'taus' +'tatsächliche_' +'tation_' +'tat_' +'tant' +'tank_' +'tangible_' +'tande' +'tand_' +'tam' +'taatliche' +'szen' +'symptom' +'sympath' +'switched_' +'swee' +'suspicion_' +'surveys_' +'superb_' +'summ' +'suit_' +'suicide_' +'sui' +'subsid' +'stärkt_' +'städte' +'stunde_' +'strukturen_' +'strongest_' +'stressed_' +'strengths_' +'strecke' +'streben_' +'stischen_' +'stige' +'stete' +'stern_' +'steri' +'stereo' +'stepp' +'stens_' +'stellten_' +'stel' +'stehende_' +'state' +'starb' +'stands' 
+'stair' +'stagnation_' +'stabilen_' +'sst' +'ssions' +'ssion' +'squa' +'squ' +'späte' +'spre' +'spo' +'spite_' +'spezifisch' +'speziellen_' +'spezi' +'specific' +'specialize' +'spec' +'spare_' +'soziales_' +'south' +'somewhere_' +'sociali' +'sno' +'smart' +'sman' +'slu' +'slogan' +'slo' +'slightly_' +'sleeping_' +'sku' +'situationen_' +'sions_' +'sinkende' +'simulation_' +'simi' +'signat' +'sicherer_' +'shu' +'shortly_' +'shoe' +'shelter' +'shell_' +'shame' +'shake' +'sg' +'sexuellen_' +'sex_' +'sevent' +'sessions_' +'separate' +'sep' +'sentence_' +'sende' +'sel_' +'sektors_' +'sees_' +'sections_' +'secondary_' +'secara_' +'script' +'schützt_' +'schwächen_' +'schwi' +'schwarz' +'schnelles_' +'schloss_' +'schließe_' +'schlich' +'schlechte_' +'schle' +'schic' +'schem_' +'schein' +'schei' +'schalte' +'schaftliche' +'schafft_' +'scare' +'sc' +'sba' +'sat_' +'santa_' +'sang' +'sammeln_' +'samkeit' +'salt_' +'sailing_' +'sagten_' +'safely_' +'safeguard' +'saat_' +'räng' +'räger' +'räft' +'rup' +'rud' +'rtung_' +'rteil' +'rounds_' +'ros_' +'rop_' +'rogramm' +'rod' +'robot' +'robe' +'road' +'rne' +'rke' +'riskier' +'riots_' +'rim' +'right' +'rig_' +'richte' +'rian' +'rez' +'reveals_' +'retail' +'resum' +'restrict_' +'restraint_' +'resso' +'ression' +'respektieren_' +'resiste' +'resilien' +'reservations_' +'republic' +'representing_' +'renovated_' +'reno' +'remuneration_' +'remit' +'reminiscent_' +'reminded_' +'reliability_' +'relative' +'relationships_' +'relat' +'rejection_' +'reit' +'reisen_' +'reine' +'reihe' +'reifen_' +'reichlich' +'reha' +'regen' +'regelungen_' +'rega' +'reformist' +'reformi' +'reflecting_' +'refle' +'referendums_' +'recourse_' +'reconciliation_' +'reconcile' +'reckung' +'recipient' +'rechung' +'reche' +'recal' +'rebel' +'reasonably_' +'reap' +'ream' +'realistische_' +'reale_' +'readily_' +'rbeit_' +'ratione' +'rational' +'ras_' +'ras' +'rangi' +'raises_' +'radar_' +'rachte' +'rach' +'race' +'rac' +'rable_' +'quotas_' +'quip' +'quest' +'py_' +'py' +'pursuit_' +'punish' +'pun_' +'pull' +'pte' +'psychische' +'prüf' +'präsident' +'protest_' +'protects_' +'pros' +'prophe' +'properties_' +'produkte_' +'produkt' +'produk' +'proceed' +'prisons_' +'printer_' +'prevents_' +'pretty_' +'presidents_' +'preparing_' +'premium_' +'prejudice_' +'predictable_' +'prechen_' +'preach' +'power' +'poten' +'pot_' +'possession_' +'posi' +'populär' +'popularity_' +'pop' +'polo' +'poll' +'polic' +'poker_' +'pointing_' +'pocket' +'poc' +'plätze_' +'pläne_' +'pity_' +'pis' +'pipe' +'pine' +'pilot' +'pi_' +'physici' +'pharmaceutical_' +'phan' +'pha' +'ph_' +'pflanz' +'personen_' +'personali' +'permission_' +'perception_' +'perc' +'pent' +'pene' +'pemimpin_' +'peer_' +'pedia_' +'pear' +'pazi' +'patr' +'pat_' +'participated_' +'partei_' +'park' +'panoramic_' +'panisch' +'panic_' +'ows_' +'owner_' +'owed_' +'ove' +'outset_' +'outline_' +'outcomes_' +'outbreak_' +'ound' +'otr' +'otel_' +'ota' +'ossene' +'osition' +'orth' +'orm_' +'orium_' +'orientiert_' +'organisieren_' +'ordnungsgemäße' +'ordert_' +'ord_' +'orbit' +'oppress' +'opp' +'operator_' +'opera_' +'ont_' +'onisch' +'omp' +'omin' +'olt' +'ological_' +'olge' +'ole_' +'oku' +'oin' +'oid_' +'oi_' +'ohner' +'ogi' +'offset_' +'odo' +'oci' +'obtaining_' +'obi' +'oat' +'nützliche_' +'näh' +'näch' +'nza' +'nutzt_' +'nutzbar_' +'nummer_' +'nuklearen_' +'nuestr' +'ntsch' +'nting' +'ntin' +'notification_' +'nor' +'nomi' +'noisy_' +'nna_' +'nka' +'nitte' +'nisses_' +'ning' +'niederländischen_' +'ngly_' +'ngka' +'nger_' +'nge' +'newspapers_' +'newsletter_' 
+'neue' +'nera' +'neo_' +'nel_' +'nein' +'neighbor' +'neigen_' +'nehm' +'negotiating_' +'negotiated_' +'nego' +'neg' +'nec' +'ndige' +'nders' +'nded_' +'ndan' +'nca' +'nba' +'navi' +'nationalist_' +'nary_' +'nal_' +'nacht' +'nachfolgenden_' +'nac' +'müt' +'mündlichen_' +'müh' +'möge_' +'mäßige' +'mäßig_' +'myster' +'mysql' +'music' +'muscle' +'mung' +'multinational_' +'multinational' +'muda' +'mt' +'mpt' +'mouse_' +'mos_' +'moon_' +'moo' +'monopol' +'monitored_' +'moni' +'modula' +'modernisier' +'mmt_' +'mmen' +'mma' +'mix' +'mitzu' +'mitteln_' +'mitteilen_' +'mitte' +'mitglieder_' +'misu' +'mischen_' +'mirror' +'ming' +'mineral_' +'mind' +'mill' +'migra' +'mien' +'midst_' +'micro_' +'methoden_' +'mesis' +'ment' +'mening' +'menge' +'memba' +'melalui_' +'mel_' +'meister' +'meistens_' +'mei' +'mehrfach_' +'mehrerer_' +'media' +'mechanismus_' +'meantime_' +'mbur' +'mayor_' +'mature_' +'master_' +'masse' +'maschinen_' +'masa_' +'market' +'marken' +'march' +'maps_' +'manufacturer_' +'manipulation_' +'manifest' +'mangelnde_' +'mangel' +'mandatory_' +'mall' +'malig' +'maler' +'mais_' +'maintain' +'mainstream_' +'magni' +'mac' +'lüge' +'lü' +'löst_' +'längst_' +'läng' +'ländische' +'länd' +'lz' +'lw' +'luxuriöse_' +'lust' +'lungen_' +'luen' +'luar_' +'ltung_' +'loud_' +'lone' +'lon' +'lokal_' +'loka' +'logisti' +'logie_' +'logg' +'locked_' +'lock_' +'lob' +'loaded_' +'ln' +'lli_' +'lively_' +'lis_' +'liquid_' +'liquid' +'likelihood_' +'lik' +'liegenden_' +'liefer' +'liederung_' +'licht_' +'libr' +'liberalization_' +'liability_' +'lfe' +'leverage_' +'level' +'lette_' +'lend_' +'leichter' +'leicht' +'legitimier' +'lec' +'lebenslange' +'lds_' +'lava' +'laud' +'lateralism' +'lasse' +'lase' +'lar_' +'langjährigen_' +'langfristige' +'lamp' +'label_' +'kürz' +'künstlerische' +'kühne' +'kör' +'kuli' +'ktions' +'kräftig' +'krä' +'kritischen_' +'kraft' +'kr' +'kos' +'korrigieren_' +'konzept' +'kontrollen_' +'kontro' +'kontinuierliche' +'kont' +'konsolidier' +'konservativen_' +'konkurr' +'konkreter_' +'kong' +'kondi' +'komplizierte' +'kommerzielle_' +'kommen' +'komfortablen_' +'komfortable_' +'kombiniert_' +'kollektiven_' +'knowing_' +'klingt_' +'klassischen_' +'klassische_' +'klasse_' +'kki' +'kis_' +'kirche_' +'kins_' +'kingdom_' +'kilometers_' +'kie' +'kg_' +'keys_' +'keu' +'kes' +'kere' +'kenn' +'kemajuan_' +'keinesfalls_' +'keinem_' +'keeps_' +'kb' +'kate' +'kapazität' +'kap' +'kalte' +'jüng' +'jährlichen_' +'just' +'jurisdiction' +'junger_' +'junct' +'jug' +'judiciary_' +'judges_' +'ju_' +'jekt' +'jak' +'jahres' +'ié' +'izier' +'ix_' +'ives_' +'iven_' +'itten_' +'ito_' +'ition' +'istung' +'ista' +'iss_' +'isla' +'isierte' +'isches_' +'irtschaft' +'irakischen_' +'ionary_' +'ional' +'ion' +'investitionen_' +'investigate_' +'invention' +'inv' +'inu' +'interp' +'interna' +'intern' +'interinstitutional_' +'interact' +'intensiven_' +'integrity_' +'integrierte_' +'integrate_' +'insurgents_' +'insure' +'insu' +'insti' +'installi' +'inspections_' +'insecurity_' +'innovat' +'inmitten_' +'inkl' +'inhalt' +'inhal' +'inhaftiert' +'inge' +'influenced_' +'inen_' +'inefficient_' +'industri' +'indispensable_' +'indirekt_' +'indebted' +'ind' +'incident_' +'inch_' +'inacti' +'improv' +'imported_' +'immune_' +'immo' +'immens' +'imm' +'imi' +'ilt' +'illusion' +'ille_' +'ilig' +'ilfe_' +'ild_' +'ild' +'ila' +'iki' +'ikan_' +'ii' +'iha' +'igung' +'igu' +'ignoring_' +'igi' +'iff' +'iet' +'iest_' +'ies' +'ierter_' +'ierende' +'ieht_' +'ieb' +'ids' +'ideologi' +'icon_' +'ico' +'ickl' +'ichtung_' +'ichtung' +'ibt_' +'iati' 
+'iPod_' +'iPhone_' +'höhere' +'hängige' +'händler_' +'hypo' +'hybrid_' +'hunger_' +'hung' +'humanitäre_' +'hum' +'hub' +'hu_' +'hrte_' +'hospital_' +'horror' +'hop_' +'honor' +'hon' +'holdings_' +'ho_' +'hnung' +'hlen_' +'hle_' +'hir' +'hinweg_' +'hinein_' +'hind' +'hiking_' +'hielten_' +'hidup_' +'herzustellen_' +'herunter_' +'herrliche' +'herr' +'herbei' +'herb' +'heran' +'henden_' +'hell_' +'heli' +'heiz' +'heim_' +'heed_' +'heating_' +'heart' +'hazardous_' +'harmoni' +'happi' +'handful_' +'hamper' +'hak_' +'gänger' +'guard' +'gründlich_' +'größter_' +'grundlegender_' +'grundlegend_' +'grouping' +'grobe' +'grip' +'grenzüberschreitenden_' +'grenzt_' +'grenze_' +'greifende' +'gratis_' +'graphi' +'graph_' +'got' +'gna' +'gly' +'glu' +'globally_' +'globaler_' +'glieder' +'gliche' +'glaubt_' +'glad_' +'gipfel' +'gingen_' +'ging' +'gien' +'ghter_' +'ghte' +'ggi' +'gezielte' +'gewährt_' +'gewo' +'gewisser' +'gewidmet_' +'gewer' +'gew' +'gesundheitliche' +'gestü' +'gestattet_' +'gestatten_' +'gesorgt_' +'gesetzgeb' +'geschwächt_' +'geschlossene' +'geschafft_' +'gesamt' +'gentl' +'genetische' +'generating_' +'genera' +'genauer_' +'gemütliche_' +'gemi' +'gemeinschaft_' +'geln_' +'gelegentlich_' +'gelder_' +'gelangt_' +'geko' +'gekennzeichnet_' +'gegenseitige' +'gefährlicher_' +'gefr' +'gebühren_' +'gebra' +'gebaut_' +'gases_' +'gard' +'garage_' +'gangs_' +'gabe_' +'fünfzig_' +'fühl' +'fällig_' +'fähig_' +'futures_' +'funktion_' +'funk' +'functionality_' +'frühere_' +'früh_' +'freilich_' +'freedoms_' +'frau' +'fraction_' +'fr' +'founding_' +'fought_' +'fossilen_' +'forming_' +'formen_' +'formation' +'form' +'forgotten_' +'forge' +'foreigners_' +'followers_' +'folg' +'fol' +'flood_' +'fizier' +'fixes_' +'fitt' +'fit' +'firm' +'finali' +'film' +'file' +'fh' +'fg' +'fetch' +'festzulegen_' +'fests' +'fenster_' +'felder_' +'feier' +'fehlen_' +'feels_' +'feedback_' +'fax_' +'favourable_' +'faszinierend' +'fas' +'farms_' +'far' +'familiengeführte' +'famili' +'falschen_' +'faktor' +'eye' +'extr' +'extending_' +'expulsion_' +'export' +'exploit_' +'expla' +'experiment' +'existiert_' +'exhaust' +'exemption_' +'exacerbate' +'evolution_' +'eventuell_' +'evan' +'evaluate_' +'euren_' +'eu_' +'etu' +'eto' +'etch' +'eta_' +'esten_' +'este' +'ession_' +'espa' +'esk' +'esen_' +'esen' +'erö' +'erzeug' +'erz' +'erwi' +'erweitert_' +'ert' +'ersucht_' +'ersuchen_' +'erstreckt_' +'erstre' +'erschw' +'erregend' +'erre' +'ernannt_' +'ermutigt_' +'erleb' +'erlangt_' +'erklärung_' +'erkennbar' +'erke' +'eria' +'ergänzt_' +'erge' +'erg_' +'erfolgte_' +'erfa' +'erbringen_' +'equi' +'equ' +'enw' +'entwicklung_' +'entstand_' +'entsprechende' +'entspannen_' +'entsch' +'ently_' +'entlich' +'entlassen_' +'entgegenzu' +'entertain' +'ente' +'ental' +'enor' +'eno' +'enme' +'enjoys_' +'enjoyable_' +'enheit_' +'engi' +'enforce_' +'endgültig_' +'endeavour' +'enda' +'encourages_' +'enburg_' +'employ' +'emie_' +'ement' +'embr' +'embo' +'ember' +'embargo_' +'ella_' +'ella' +'elites_' +'eligible_' +'eless' +'elektrische' +'electronic' +'elb' +'eise_' +'einzelner_' +'eint' +'einnehmen_' +'einiges_' +'eingetr' +'eingebe' +'eile' +'eight' +'eidung' +'eide' +'ehnen_' +'eg_' +'efizit' +'effizienten_' +'een' +'eel' +'edu' +'edl' +'eden_' +'ecken_' +'ecke' +'eben' +'earnings_' +'ean_' +'eager_' +'dys' +'dy' +'dt' +'dropped_' +'droht_' +'dringende' +'dream' +'drau' +'drafted_' +'downward_' +'dow' +'doubled_' +'donors_' +'donations_' +'dokument' +'doctrine_' +'dit_' +'districts_' +'distortion' +'distant_' +'disruption_' +'displays_' +'dismantl' 
+'dish' +'disen' +'discret' +'discredit' +'diplomatische_' +'din_' +'din' +'differentia' +'diesbezüglichen_' +'did' +'dicht_' +'dial' +'dhi' +'dha' +'dez_' +'deux_' +'deutet_' +'deteriorati' +'detected_' +'desired_' +'descent_' +'desa' +'derselben_' +'derivative' +'deregulation_' +'deprived_' +'deno' +'dene' +'dema' +'delivers_' +'delivering_' +'defende' +'deepening_' +'deckung' +'dec' +'debtor' +'debatte' +'deadl' +'daten' +'dateien_' +'databases_' +'dara' +'dan' +'damp' +'damages_' +'daf' +'cy' +'cult' +'cul' +'criticized_' +'criminals_' +'creature_' +'cow' +'counterparts_' +'counterfeit' +'council_' +'cost' +'coole' +'cool_' +'conversion_' +'convention_' +'contradict' +'contin' +'conten' +'construct_' +'constrain' +'consiste' +'considers_' +'conservation_' +'consequently_' +'confused_' +'confront' +'config' +'confi' +'conduc' +'condemn_' +'condemn' +'concludes_' +'conci' +'comprehen' +'compound' +'compli' +'complexity_' +'completion_' +'complain' +'compile' +'competing_' +'competence_' +'compari' +'compare_' +'compar' +'compa' +'communist_' +'communicate_' +'communi' +'commonly_' +'comment' +'combines_' +'college_' +'coherence_' +'cog' +'coa' +'closest_' +'cliff' +'cleane' +'clau' +'classical_' +'clara_' +'claiming_' +'cks' +'ckel' +'cked_' +'citi' +'circum' +'ciones_' +'cier' +'cide' +'chä' +'chsen' +'chose_' +'chnungen_' +'chil' +'chief' +'chest' +'chemicals_' +'checks_' +'chauen' +'chaften_' +'ceremon' +'cer_' +'cease_' +'caution_' +'catastrophe_' +'casinos_' +'cari' +'care' +'capt' +'capitals_' +'capitali' +'cano' +'cac' +'cabin' +'bürgerliche' +'butt' +'bust' +'burn_' +'bureaucrats_' +'bull' +'broke_' +'brochure' +'brilliant_' +'bread_' +'branche_' +'bour' +'borrowing_' +'bombing' +'bombe' +'bolster' +'blocked_' +'block' +'bl' +'biot' +'billions_' +'bezug_' +'bewusste' +'bewegung_' +'bewa' +'bevorzug' +'beu' +'betriebe_' +'betrieb_' +'betrachtete' +'betra' +'besucht_' +'bestä' +'bestimmter_' +'besticht_' +'beso' +'beschaff' +'berufen_' +'berp' +'bereiten_' +'bereiche_' +'berei' +'bemerkenswerte' +'beliebige' +'belie' +'bekannter' +'beizu' +'beis' +'behold_' +'beh' +'begleiten_' +'beginne' +'begehen_' +'began' +'befr' +'beeinträchtigen_' +'bedeutendsten_' +'beauftragt' +'bears_' +'beanspruch' +'bble' +'bay_' +'basierte_' +'bargain' +'baltischen_' +'balcony_' +'balancing_' +'bak' +'bach' +'ax_' +'avo' +'außer' +'außenpolitische' +'automo' +'automatische' +'auszur' +'auszuführen_' +'auswe' +'auslösen_' +'ausgewählt_' +'ausgenutzt_' +'auft' +'aufrechterhalten_' +'aufl' +'aufkommen_' +'aufget' +'aufgebaut_' +'auen_' +'audit' +'attracti' +'ats' +'ata' +'astr' +'assignment' +'asser' +'asia' +'ase' +'ars' +'arrive_' +'arre' +'arme' +'arit' +'arises_' +'arische' +'aren_' +'architekt' +'architect' +'arche' +'arb' +'appro' +'appar' +'apologi' +'anzuh' +'anz' +'anyway_' +'any' +'anxi' +'anwesend_' +'antwortete_' +'antworten_' +'anten_' +'anteil_' +'ante' +'anstreben_' +'ansteigen_' +'ansprechen_' +'anschließen_' +'ansch' +'annu' +'announce_' +'anno' +'anme' +'anlässlich_' +'anl' +'ani_' +'angenehm_' +'angemessenen_' +'angelegt_' +'angegriffen_' +'angebot_' +'angebliche' +'angan_' +'anes' +'anen' +'anderweitig_' +'anden_' +'andauern' +'anbelangt_' +'analyse_' +'amplif' +'amme' +'ami_' +'amended_' +'amen_' +'amb' +'alter' +'alte' +'allocat' +'allo' +'alliances_' +'alleged_' +'alleg' +'alla' +'alit' +'alike_' +'alig' +'alie' +'alia' +'alg' +'ain' +'agog' +'agent_' +'aft' +'afrikanische' +'affirm' +'advertisement' +'adventur' +'admit' +'activat' +'aco' +'achung_' +'accr' +'accord_' 
+'accompanying_' +'accommodati' +'ac_' +'abw' +'absurd_' +'abse' +'abschließen' +'abs' +'able' +'abl' +'abhängt_' +'abhängig' +'abgele' +'abgehalten_' +'abgeben_' +'aat' +'`_' +']]''' +'] ._' +'Zypern_' +'Zwangs' +'Zunahme_' +'Zulassung_' +'Zukunfts' +'Zivilisten_' +'Zie' +'Zh' +'Zentrala' +'Zeita' +'Zealand_' +'Zahlungs' +'Zahlung_' +'YO' +'Xi_' +'XL' +'Würden_' +'Wünsche_' +'Wäre' +'Wurzel' +'Wri' +'Works' +'Wood' +'Wolf_' +'Wol' +'Wohl_' +'Wis' +'Wirtschaftsl' +'Wind' +'Willi' +'Wies' +'Wiederaufbau_' +'Wider' +'WiFi_' +'Wetter_' +'Wett' +'Werk' +'Werden_' +'Weltb' +'Weiterhin_' +'Weis' +'Wechselkurse_' +'Wechsel_' +'Wars' +'Wand' +'Walt' +'Wai' +'Wah' +'Wach' +'WA' +'Vorw' +'Vorteil' +'Vorr' +'Vorfeld_' +'Volle' +'Vogel' +'Vit' +'Visual_' +'Visit' +'Ville' +'Villa' +'Viel' +'Vet' +'Verzögerungen_' +'Verzeichnis_' +'Verwa' +'Vertrauens' +'Versicherungs' +'Versand' +'Versa' +'Vermögens' +'Vermittlungs' +'Verlängerung_' +'Vergleich' +'Vergessen' +'Verfasser' +'Vereinfachung_' +'Verda' +'Veranstaltungsräume_' +'Vegas_' +'Varia' +'Van' +'Vale' +'VAT_' +'VA' +'Users_' +'Ursprung_' +'Urheber' +'Unterschied' +'Untere' +'Unsicherheit_' +'Uns' +'Universum' +'Unge' +'Unbe' +'Unabhängig_' +'Un_' +'Umstände_' +'Uganda' +'Ub' +'UL' +'UC' +'Tür_' +'Tätigkeiten_' +'Twin' +'Tul' +'Tuesday_' +'Treiber_' +'Treib' +'Travel_' +'Translat' +'Track' +'Total_' +'Tor_' +'Top_' +'Tochter' +'Tob' +'Tir' +'Tibe' +'Theater' +'Text' +'Teufel' +'Territori' +'Terminal_' +'Techno' +'Tarif' +'Tanz' +'TT' +'TS' +'TEC' +'Südafrika_' +'Szenario_' +'Synchron' +'Suppo' +'Suiten_' +'Stütz' +'Studie' +'Strände_' +'Strahlung_' +'Straftat' +'Store_' +'Steuersystem' +'Statistik_' +'Stand' +'Stalin_' +'Stagnation_' +'Spri' +'Sprech' +'Spre' +'Spl' +'Spiegel' +'Sozialversicherung' +'Sozialdemokrati' +'Source_' +'Sou' +'Sonnens' +'Solo' +'Solana_' +'Software' +'Soft_' +'Socialist_' +'Sno' +'Sn' +'Small_' +'Sla' +'Sky_' +'SilverFast_' +'Sigma' +'Sicherung_' +'Sicherheitsfragen_' +'Sh' +'Seve' +'Settings_' +'Sem' +'Sek' +'Seen_' +'Seele_' +'Script' +'Schwan' +'Schulungs' +'Schuld_' +'Schuh' +'Schriftsteller_' +'Schriften_' +'Schreib' +'Schluss' +'Schiffs' +'Schic' +'Schach' +'Satellite' +'Santiago_' +'Sanc' +'Sammlung_' +'Sale' +'SW' +'SV_' +'STA' +'SS' +'SP_' +'SLR_' +'SL' +'SH' +'SD_' +'SCO' +'SCH' +'Rö' +'Routine' +'Route' +'Round_' +'Roten_' +'Rosa' +'Rod' +'Richtlinien' +'Rich' +'Rezept' +'Revolution' +'Revision_' +'Reserven_' +'Res' +'Repr' +'Renminbi_' +'Religio' +'Relax_' +'Reisende' +'Reisen_' +'Reinigung_' +'Reichen_' +'Regulierungs' +'Registr' +'Redner_' +'Record' +'Rechtsg' +'Rechtsetzung' +'Ratsvorsitzes_' +'Rate' +'Randlage_' +'Rande_' +'Ramblas_' +'Rahmenprogramm' +'RSS_' +'RGB_' +'RA_' +'Quer' +'Quark' +'Pun' +'Prüf' +'Präsidenten' +'Präsenz_' +'Prozent' +'Protest' +'Protection_' +'Pros' +'Promo' +'Progress' +'Programmen_' +'Prof_' +'Product_' +'Process_' +'Prob' +'Priv' +'Princes' +'Prince' +'Premi' +'Prad' +'Power' +'Pou' +'Portal' +'Polizei' +'Poli' +'Plätze_' +'Pil' +'Pie' +'Physi' +'Photos' +'Phone' +'Philosoph' +'Philadelphia_' +'Petersburg_' +'Peru_' +'Pensions' +'Pel' +'Paul' +'Partners' +'Parliamentary_' +'Parlamentswahlen_' +'Parkplätze_' +'Parameter_' +'Papier_' +'Pakistani_' +'Ox' +'Outs' +'Others_' +'Ost_' +'Os_' +'Ori' +'Options' +'Optimismus_' +'Olive' +'Oli' +'Offensi' +'Objektiv' +'Oberfläche_' +'OLAF_' +'OL' +'OG' +'Nü' +'Nuevo_' +'Nothing_' +'Notenbanken_' +'Norm' +'Nordic_' +'Nin' +'Nieder' +'News' +'Nepal' +'Nelson_' +'Neg' +'Near_' +'Nap' +'Nahrungsmittel' +'Nachdruck_' +'NS_' +'NRO_' +'NOT' 
+'NE_' +'NB' +'Müll' +'Mä' +'Mut' +'Music_' +'Mus' +'Monte_' +'Mitter' +'Mittelschicht_' +'Mitarbeitern_' +'Ministeri' +'Mine' +'Milo' +'Mille' +'Militar' +'Milan_' +'Migrations' +'Miet' +'Mexik' +'Metall' +'Meta' +'Meinungsäußerung_' +'Meines_' +'Meilen' +'Mehrere_' +'McCa' +'Maz' +'Mauer_' +'Master_' +'Marken' +'Marke_' +'Mang' +'Malta_' +'MC' +'M5' +'Lösch' +'Luxemburg_' +'Lux' +'Lunch' +'Luggage_' +'Lud' +'Lot' +'Lor' +'Lohn_' +'Lohn' +'Lock' +'Loca' +'Lizenz_' +'Literatur_' +'Linien_' +'Line_' +'Lind' +'Liikanen_' +'Lich' +'Liberalen_' +'Leu' +'Letzte' +'Leonardo_' +'Leistungsbilanz' +'Leica_' +'Leh' +'Lega' +'Lebensst' +'Lebensqualität_' +'Learn_' +'Las' +'Lar' +'Landes' +'Lan' +'Lama' +'Lai' +'Lager_' +'Laden_' +'Kürz' +'Königs' +'Kämpfe_' +'Kurse' +'Kub' +'Kräften_' +'Kroatien_' +'Kro' +'Kreditgeber_' +'Krankenh' +'Kraftstoff' +'Korr' +'Kopie' +'Konservativen_' +'Kompon' +'Komplettpreise_' +'Kollege_' +'Kohäsions' +'Kohlenstoff' +'Kohle_' +'Knoten_' +'Kn' +'Klon' +'Kleid' +'Kis' +'Kinders' +'Ket' +'Kenya_' +'Kennzeichnung' +'Kathedrale_' +'Kaffee_' +'Jose' +'Jon' +'Jobs_' +'Jin' +'Jh' +'Jas' +'Jam' +'Jagd' +'Jacuzzi_' +'JA' +'Italia' +'Inzwischen_' +'Investi' +'Inve' +'Intelligen' +'Intellektuelle' +'Integrität_' +'Inte' +'Instabilität_' +'Insp' +'Insofern_' +'Innere' +'Inland' +'Impfung' +'Imperial' +'Imm' +'Ice' +'INI_' +'ICA' +'Höhepunkt_' +'Händen_' +'Hyper' +'Hurri' +'Hungers' +'How' +'Hos' +'Homo' +'Holocaust_' +'Holl' +'Hold' +'Hoffnungen_' +'Hof' +'Hindernisse_' +'Hilton_' +'Hillary_' +'Hierzu_' +'Hier' +'Hes' +'Heran' +'Hence_' +'Help_' +'Helm' +'Heimatl' +'Heb' +'Heads_' +'Haushalts_' +'Hauptb' +'Hast' +'Hans_' +'Hall' +'HP_' +'Gü' +'Göteborg_' +'Gute_' +'Gui' +'Greenspan_' +'Greater_' +'Grad' +'Gr' +'Gordon_' +'God' +'Goals_' +'Gn' +'Glaubens' +'Gipfels_' +'Gewährleistung_' +'Gesellschafts' +'Gesan' +'Gerät_' +'Gere' +'Gerade_' +'Gepäck' +'Geno' +'Genf' +'Gelegenheiten_' +'Gelds' +'Gegen_' +'Gefähr' +'Gebühr_' +'Game' +'Gam' +'Gad' +'Gabriel' +'Gab' +'GR' +'Fünfte' +'Fünf' +'Further_' +'Fuji' +'Fris' +'Friedensprozess_' +'Freib' +'Freedom_' +'Four_' +'Fotokopiereinrichtungen_' +'Fond' +'Fokus_' +'Flughafen' +'Fluggesellschaften_' +'Flexi' +'Fleisch' +'Fitness_' +'Fischereipolitik_' +'Finanzsystems_' +'Finanzminister_' +'Finanzau' +'Filter' +'Fertig' +'Fernseher_' +'Fehlen_' +'Features_' +'Fabriken_' +'FR' +'FDI_' +'FA_' +'FARC_' +'Extrem' +'Expo' +'Exi' +'Excel_' +'Evolution_' +'Events_' +'Event_' +'Europäisches_' +'Eth' +'Ess' +'Erö' +'Erweiterungs' +'Ersch' +'Erre' +'Eropa_' +'Ern' +'Ermittlung' +'Erkrankung' +'Ericsson_' +'Erfüllung_' +'Erfolgs' +'Erdöl' +'Episode' +'Entwicklungsst' +'Entwicklungsa' +'Enth' +'Enron_' +'Enl' +'Enk' +'Eng' +'Elektronik' +'Elektro' +'Eisen' +'Einzelh' +'Einkaufss' +'Einhei' +'Einh' +'Eing' +'Einfuhr' +'Einf' +'Einerseits_' +'Eine' +'Eigenschaft_' +'Eigenheim' +'Edin' +'Eden_' +'Eck' +'Easy_' +'ESS' +'ENT' +'Dörfer' +'Dutzend_' +'Drittstaaten_' +'Drei' +'Draghi_' +'Dow_' +'Dou' +'Dominion_' +'Dominica' +'Doc' +'Diktator' +'Digi' +'Dienstes_' +'Dienste' +'Did' +'Dick' +'Dez' +'Dev' +'Dest' +'Denk' +'Demand_' +'Defi' +'Days_' +'Damals_' +'Dach' +'DR_' +'DR' +'DPJ_' +'DB' +'DAM' +'Crystal_' +'Croatia_' +'Cri' +'Court' +'Could_' +'Corr' +'Conver' +'Contact_' +'Consult' +'Consequently_' +'Conc' +'Comple' +'Commerce_' +'Commander_' +'Coll' +'Coffee_' +'Chirac_' +'Chip' +'Chechnya_' +'Chat_' +'Charlie_' +'Champs_' +'Chai' +'Centr' +'Cell' +'Cath' +'Carr' +'Carmen_' +'Cari' +'CL' +'Büro_' +'Büchern_' +'Bäume_' +'By' +'Business' +'Bundesrepublik_' 
+'Bundeskanzlerin_' +'Building_' +'Buck' +'Brüder_' +'Bruttoinlandsprodukt' +'Brown_' +'Bridge_' +'Brandenburg' +'Bour' +'Borde' +'Bonus_' +'Bonn_' +'Bomb' +'Bog' +'Bob' +'Blut_' +'Blockade_' +'Bilde' +'Bibliotheken_' +'Bezeichnung_' +'Bewe' +'Bevor_' +'Between_' +'Betriebssystem_' +'Betrachtung_' +'Besuche' +'Bestätigung_' +'Bestände_' +'Bestellung_' +'Bestand' +'Besser' +'Beruf' +'Berechnung_' +'Bere' +'Benutzern' +'Benach' +'Bem' +'Bell_' +'Beli' +'Belgian_' +'Bekenntnis_' +'Bekannt' +'Beg' +'Befürchtungen_' +'Befür' +'Beau' +'Bearbeit' +'Bath_' +'Baj' +'Bahrain_' +'Bac' +'BSE_' +'BERLIN_' +'BD' +'B6_' +'Award' +'Automobilindustrie_' +'Auswahl' +'Austritt_' +'Australian_' +'Austin_' +'Ausstellung_' +'Aussichten_' +'Ausschluss_' +'Aussagen_' +'Aussage_' +'Ausrüstung_' +'Ausbr' +'Ausblick_' +'Ausbildungs' +'Ausarbeitung_' +'Aufwertung_' +'Aufl' +'Aufg' +'Aufb' +'Atlantik' +'Ate' +'Astro' +'Arzneimittel_' +'Arth' +'Arr' +'Arms' +'Armenia_' +'Armeni' +'Arme' +'Archiv' +'Arbeitsplatz_' +'Arbeitnehmern_' +'Arbeitnehmer' +'Applikation' +'Appl' +'Appart' +'Appar' +'Apart_' +'Apa' +'Any' +'Anschlag_' +'Ansch' +'Anruf' +'Anreiz_' +'Anr' +'Anpassungs' +'Annäherung_' +'Anne' +'Anmeld' +'Angabe_' +'Anf' +'Anderson_' +'Anbieter_' +'Analysen_' +'Anal' +'Amerikanern_' +'Alon' +'Algeria_' +'Alge' +'Alf' +'Alc' +'Albu' +'Aktienm' +'Aero' +'Admi' +'Acro' +'Acqu' +'Acht_' +'Access' +'Abänderung' +'Abw' +'Absicherung_' +'Abschwung' +'Absa' +'Abkommens_' +'Abbas_' +'ASE' +'AL_' +'ALDE_' +'AKVIS_' +'89_' +'82_' +'240_' +'215' +'1982_' +'1973_' +'1948_' +'1900_' +'18th_' +'177' +'175' +'1701_' +'160' +'128_' +'/-_' +'. ({{_' +'*_' +'**' +')]' +''']]' +'%) _' +'">' +' �_' +' – _' +' ==' +' "._' +' ", _' +'€ _' +'’ – _' +'– ' +'ע' +'өз' +'і_' +'ю' +'х_' +'то' +'т_' +'ри' +'про' +'ор' +'на' +'мо' +'ль' +'лы' +'к_' +'ин' +'ем' +'ве' +'ο_' +'μ' +'ž_' +'ši' +'Č' +'ützen' +'üte' +'ügen_' +'üchter' +'üch' +'übrig_' +'üblicherweise_' +'überzeugend_' +'überwältigende' +'übertrag' +'überraschen' +'übermitteln_' +'überla' +'übergeben_' +'überarbeitet' +'ør' +'öt' +'ört_' +'örper' +'örigkeit_' +'ören_' +'örder' +'öpf' +'ökonomisch' +'ökologischen_' +'ökologische_' +'ök' +'ño' +'és' +'éo' +'ées_' +'ça' +'äßig_' +'änk' +'ängt_' +'änen_' +'ändig' +'äm' +'ältig' +'älle_' +'ährung_' +'ähnliches_' +'ächtnis' +'ál' +'Übertr' +'Übel' +'Äthiopien_' +'Ägypte' +'» | _' +'»' +'}: _' +'|''_' +'zügig_' +'zähl' +'zynisch' +'zyklus_' +'zwing' +'zweckmäßig' +'zwanzig_' +'zw' +'zuzus' +'zuzug' +'zuwe' +'zutage_' +'zunichte_' +'zukünftigen_' +'zuha' +'zugeg' +'zugeben_' +'zona_' +'zo_' +'zn' +'zle' +'zk' +'zivile_' +'zim' +'zierte' +'zid' +'zessi' +'zens' +'zeitlich' +'zahlreicher_' +'ystemen_' +'yss' +'yle' +'yh' +'yen_' +'yed_' +'ycl' +'xio' +'xen' +'würdig' +'wür' +'wählt' +'wä' +'wunderbare' +'wun' +'worm' +'womöglich_' +'wol' +'wiss' +'wished_' +'wische' +'wirkungen_' +'wirksamer_' +'winzige' +'winds_' +'winding_' +'willig_' +'wiederholte' +'widerspiegel' +'widen' +'wid' +'wichtig' +'wich' +'wheel' +'wesentlicher_' +'werte' +'werk' +'wenigstens_' +'wen_' +'weithin_' +'weil' +'weigert' +'wechselt' +'wechsel_' +'websites_' +'wear_' +'wear' +'watched_' +'warrant_' +'warnen_' +'ware' +'wandeln_' +'wandel' +'waktu_' +'wak' +'wahrgenommen_' +'wag' +'vu' +'vre' +'vot' +'vorzus' +'vorteil' +'vorsichtig' +'vorsehen_' +'vorläufige' +'vorliegende_' +'vorgez' +'vorgestellt_' +'vorgehen_' +'voraussichtlich_' +'volunteer_' +'volle' +'vo_' +'vital' +'visual_' +'visiting_' +'vice_' +'veterinar' +'vessel_' +'verzeichnet' +'verzeichnen_' +'verwunde' 
+'verwi' +'verw' +'verursachten_' +'verursachte_' +'vertrieb' +'vertrau' +'verteidig' +'verstärkte_' +'versich' +'versehen' +'verschw' +'verschli' +'verschlechtert' +'verschl' +'verpflichtend' +'vermute' +'vermis' +'verlängert_' +'verlorene' +'verle' +'verlangsamt_' +'verkündet_' +'verknüpft_' +'verhäng' +'verhältnis_' +'vergangen' +'verfüg' +'verbundene_' +'verbringen_' +'verbreitete_' +'verbot' +'verbi' +'verbe' +'verbal' +'verb' +'verantwortlichen_' +'verans' +'verab' +'vede' +'vate' +'varied_' +'valued_' +'vall' +'validate' +'vacuum_' +'vaccination_' +'uver' +'uu' +'utz' +'uts_' +'utm' +'usted_' +'usst' +'ussi' +'usr' +'usion' +'user' +'ursprüngliche_' +'uropäischen_' +'urh' +'urges_' +'urge' +'upload_' +'upholding_' +'uo' +'unzulä' +'unwilling_' +'unwi' +'unverzüglich_' +'unus' +'untr' +'unterz' +'unterstreicht_' +'unterschr' +'unterscheidet_' +'unterrichte' +'unternehme' +'unterliegen_' +'unterhalt' +'untereinander_' +'unterbrochen' +'unterb' +'unsu' +'unsicheren_' +'unschuldige' +'unsa' +'unres' +'unr' +'unnecessary_' +'unle' +'unjust' +'universali' +'uniti' +'union' +'unified_' +'ungst' +'ungspro' +'ungsge' +'ungsfrei' +'ungl' +'unequ' +'underw' +'underli' +'unbest' +'umgehend_' +'umgeb' +'ulation' +'ulan' +'uhig' +'uhe' +'ugh' +'ufung_' +'ufte' +'udi' +'uche' +'uc_' +'tück' +'töten_' +'tö' +'té_' +'tätige_' +'tägliche_' +'tze' +'typischer' +'typischen_' +'typische_' +'tures_' +'tungs' +'tts' +'ttl' +'tse' +'trü' +'träum' +'truktur' +'troubled_' +'troll' +'tril' +'tribunal' +'trenn' +'trauma' +'trate' +'trap_' +'transpos' +'transmitted_' +'trail' +'trade' +'toxic_' +'tower_' +'towels_' +'tot_' +'tonn' +'toffe' +'tnis' +'tliche_' +'tles' +'tiv_' +'tisa' +'tionary_' +'tionali' +'tiny_' +'timely_' +'tile' +'tide_' +'tics_' +'tically_' +'tical' +'tial_' +'tial' +'thro' +'thou' +'thirty_' +'thinks_' +'therapy_' +'thee_' +'tern_' +'termasuk_' +'terb' +'tep' +'tension_' +'tener' +'tena' +'tempora' +'tempo_' +'tem' +'telling_' +'telecommunications_' +'teilig' +'technologi' +'td' +'tche' +'tch_' +'taucht' +'tati' +'tapi_' +'tang' +'tane' +'tain_' +'tain' +'tailored_' +'tail' +'tags' +'tac' +'tab_' +'szi' +'systematische_' +'system' +'sys_' +'synthesi' +'synchroniz' +'symboli' +'symbol' +'swor' +'sweise' +'svoll' +'sverfahren_' +'sv' +'suspect' +'survey_' +'surprisingly_' +'surge_' +'suprem' +'suppressi' +'supplier_' +'supplied_' +'supermarkets_' +'suns' +'sunnitische' +'sumber_' +'suche_' +'successor_' +'substanzielle' +'subst' +'subsidiz' +'subsidiar' +'subjects_' +'stürzen_' +'stärkeren_' +'stärke' +'stupid_' +'struggle' +'strophen' +'strive_' +'strik' +'stric' +'strengere' +'streng' +'strebt_' +'stream' +'strategisch' +'strat' +'strand' +'strahl' +'strafrechtlich' +'ston' +'stolz_' +'stoff_' +'stin' +'stim' +'steuern_' +'sten' +'stellend_' +'steile' +'steckt_' +'stecken_' +'stec' +'stays_' +'statute_' +'starv' +'starters_' +'starkes_' +'standen_' +'stande' +'standardis' +'stakeholder' +'stabili' +'staats' +'staatlich_' +'staate' +'ssta' +'ssin' +'sses_' +'sserung' +'ssene_' +'ssend' +'ssed_' +'ssal' +'srecht' +'squeeze_' +'sque' +'sprin' +'spreche_' +'spreading_' +'spoil' +'spirit' +'sphere_' +'spends_' +'spell_' +'specialist_' +'special' +'speaks_' +'spatial' +'spann' +'sos' +'sorti' +'sorry_' +'sore' +'sooner_' +'sont_' +'sonder' +'sollt' +'solide_' +'solid' +'sold' +'solar_' +'socially_' +'socialism_' +'smokers_' +'smit' +'slos' +'slim' +'slide_' +'slee' +'slan' +'skri' +'skr' +'skiing_' +'skie' +'sket' +'skepticism_' +'skat' +'sir_' +'sir' +'sip' +'sins' +'simul' +'silver_' +'sika' 
+'sik' +'signals_' +'sights_' +'sig' +'siert_' +'sier' +'sichtbar_' +'sible_' +'sian_' +'sia_' +'sia' +'shutt' +'show' +'shortc' +'shores_' +'shaping_' +'shaped_' +'shal' +'sgr' +'seu' +'setzung_' +'set' +'separation_' +'sentiment_' +'sensi' +'sensation' +'sends_' +'sement' +'seltene' +'seller' +'sell' +'seiner' +'sebuah_' +'sebe' +'sco_' +'sco' +'scienti' +'schüre' +'schö' +'schwerwiegende' +'schweigen_' +'schwank' +'schung_' +'schulden' +'schuh' +'schreib' +'schottische' +'schnitt' +'schnellere' +'schneide' +'schmerzhaft' +'schlechteste' +'schlechter_' +'schlag_' +'schickt' +'schicken_' +'schenken_' +'scheinlich' +'scheinbar_' +'schafts' +'scarc' +'scan' +'sberei' +'satu_' +'satisfactor' +'sani' +'sanct' +'sample_' +'sal_' +'saja_' +'sacrifice_' +'rät' +'ränk' +'rzt_' +'rungen_' +'ruled_' +'ruhige_' +'ruct' +'rth' +'rro' +'royal' +'ron_' +'roi' +'rodukt' +'rocket' +'roblem' +'rla' +'rk_' +'rist' +'risik' +'risen_' +'rine' +'rily_' +'rigoro' +'rigid' +'ried' +'riding_' +'rid_' +'rice_' +'rgen_' +'rf_' +'revolutionäre' +'reviv' +'revis' +'rever' +'resur' +'resultierende' +'restrictive_' +'restr' +'respekt' +'resorts_' +'resolving_' +'reserven_' +'reserv' +'republik' +'repressive_' +'reporte' +'reor' +'renz' +'rende' +'relies_' +'relaxed_' +'relaxation_' +'rej' +'reitung' +'reiterate_' +'reiches_' +'reichende' +'reibung' +'regulierung' +'registrierte' +'registriert_' +'regiert_' +'regelmäßige' +'refusing_' +'recurr' +'recover' +'recording' +'recipe_' +'reci' +'rechtliche' +'rechtfertigen_' +'rechnung' +'rech' +'recap' +'reasoning_' +'realized_' +'reaches_' +'rdl' +'rdin' +'rche' +'raus' +'rats_' +'ratified_' +'rase' +'rasant' +'rarely_' +'rapi' +'ramp' +'ramm' +'rale' +'rak' +'rail' +'raft_' +'raff' +'radiation_' +'rad_' +'query_' +'quellen_' +'quarte' +'quantitativen_' +'purely_' +'puls' +'pull_' +'pulat' +'publish_' +'pts_' +'pta' +'pré' +'proxy_' +'provinc' +'proto' +'protests_' +'protecti' +'protagonist' +'propriet' +'proportional_' +'propo' +'prope' +'pron' +'promptly_' +'proliferation_' +'projected_' +'profit' +'profi' +'proclaimed_' +'problemlos_' +'prinzipie' +'prin' +'primäre' +'pricing_' +'pric' +'pretext_' +'prediction' +'precedent_' +'precautionary_' +'preca' +'prachigen_' +'ppo' +'pping_' +'potenziell_' +'potato' +'pot' +'postponed_' +'positioned_' +'portray' +'porte' +'por' +'poorly_' +'politischem_' +'polariz' +'pod' +'plum' +'plot' +'plo' +'plai' +'placing_' +'pita' +'pit_' +'piscin' +'pin_' +'pilla' +'pielen_' +'piele_' +'pie_' +'pian' +'photograph_' +'phe' +'pfer' +'pfen_' +'pets_' +'petition_' +'pers_' +'permits_' +'peripheral_' +'penu' +'peni' +'pende' +'pek' +'pedestrian_' +'patron' +'paths_' +'patent' +'patch_' +'passport_' +'passive_' +'passiv' +'passionate' +'partnerschaftliche' +'participating_' +'parl' +'parent_' +'paraly' +'paradi' +'pap' +'pang' +'pad_' +'packaging_' +'owe' +'overha' +'outsourc' +'outlined_' +'outlet' +'outf' +'outd' +'ount' +'ounce' +'oul_' +'oui' +'oud_' +'otis' +'osten_' +'osk' +'orti' +'ork' +'origins_' +'orient' +'ori_' +'organisms_' +'orf' +'ordne' +'ordentliche' +'orat' +'orang_' +'oran_' +'optimize' +'optimism_' +'optimier' +'optical_' +'oppo' +'opoulo' +'opol' +'opo' +'operators_' +'openness_' +'oon_' +'ood_' +'ontr' +'onist' +'onic' +'ones' +'onds_' +'ond_' +'ommene' +'ologis' +'oli_' +'oka' +'oing_' +'oid' +'ogramm' +'ogn' +'ogg' +'ofs_' +'offene' +'odell' +'occup' +'observer_' +'observe_' +'oberste_' +'oberfläch' +'obere' +'oasis_' +'nütz' +'nördlichen_' +'nächstes_' +'nä' +'nwe' +'nver' +'nur' +'null' +'ntion_' +'ntie' +'nted_' 
+'nstein_' +'nsh' +'nsa' +'np' +'notorious' +'nostalgi' +'normali' +'nominat' +'noc' +'nkt_' +'nineteenth_' +'nifi' +'niederge' +'nian' +'neueste_' +'netze_' +'nest' +'nese_' +'nern_' +'nente' +'neiden' +'neglect' +'neben' +'ndu' +'nds' +'ndr' +'ndo' +'ndl' +'ndet_' +'nde' +'ncia' +'nationales_' +'nas' +'nant' +'nano' +'nami' +'nahezu_' +'nad' +'nacht_' +'nachhaltiger' +'mé' +'mäßigen_' +'mäß' +'mächte_' +'muy_' +'muti' +'muster' +'murdered_' +'murder' +'mund' +'mt_' +'mst' +'mse' +'mpi' +'moves_' +'motive' +'mortgages_' +'morph' +'morat' +'moralischen_' +'monatlich' +'molecul' +'modernization_' +'modernis' +'modernes_' +'mobile' +'mmern_' +'mium_' +'mittlere' +'mistakes_' +'misleading_' +'miracle_' +'ministeri' +'minded_' +'mili' +'mie_' +'mh' +'mexikanische' +'metr' +'method' +'metal_' +'messe' +'menya' +'menta' +'menschen' +'meme' +'melden_' +'md' +'mba_' +'maximize_' +'maximal' +'mau' +'matic_' +'mathematics_' +'materielle' +'massa' +'mask_' +'marginali' +'margin_' +'marble_' +'mann' +'manipulier' +'mangelnde' +'manc' +'managers_' +'malt' +'maker_' +'maj' +'magi' +'maga' +'mad' +'macro_' +'mache' +'läßt_' +'läufig' +'lze' +'lying_' +'lush_' +'luck' +'ltungs' +'lous_' +'loss' +'lokaler_' +'lohnt_' +'logistics_' +'llusion' +'llung_' +'llschaft' +'lli' +'ller' +'llel' +'live' +'listened_' +'lism' +'lisi' +'lische' +'lio' +'link' +'lings_' +'liness_' +'lighting_' +'lifetime_' +'ließ' +'lien' +'licensing_' +'liberat' +'liberalisi' +'liberali' +'liberalen_' +'liberale' +'liat' +'lian' +'liabilities_' +'letztendlich_' +'lens_' +'lengthy_' +'lender' +'leistungs' +'leh' +'left' +'lebte' +'leas' +'lding_' +'ldet_' +'lba' +'lb' +'laureate' +'laundry_' +'laufende' +'lauf_' +'latz' +'laser_' +'laptop_' +'landwirtschaftlichen_' +'landschaft_' +'lana' +'lakes_' +'lah_' +'lage' +'ladi' +'lacking_' +'könnt_' +'käme_' +'kw' +'kurzfristigen_' +'kurzfristige_' +'kurzen_' +'kurse_' +'kup' +'kunden' +'kultur_' +'kula' +'kua' +'ktivi' +'kten_' +'kreuz' +'kret' +'kreise' +'krei' +'kratisch' +'krank_' +'kow' +'kostenfreien_' +'kostenfreiem_' +'korre' +'koreanische' +'kontinentale' +'kontaktieren_' +'konstitutionelle' +'konservative' +'komplizierten_' +'komplexen_' +'kompet' +'kompakt' +'komfortabel_' +'kolonial' +'koll' +'kok_' +'knappe' +'kluge' +'klinische' +'klich' +'kleines_' +'klassische' +'klassi' +'klapp' +'kl' +'kka' +'kische' +'kinder' +'kilometre' +'kid' +'kf' +'kett' +'kerja_' +'kehr_' +'kehr' +'kamera' +'kal' +'jö' +'justification_' +'juristische' +'jung' +'jun' +'journalist_' +'journal' +'jou' +'join' +'jeu' +'jeni' +'jeglicher_' +'jan' +'izi' +'iza' +'ix' +'ivat' +'iva' +'iv_' +'ium' +'ité_' +'itz' +'itts' +'itis' +'ita_' +'istic_' +'isti' +'iste' +'issuing_' +'issi' +'ison_' +'isolated_' +'isolat' +'islamistische' +'islamische_' +'isi_' +'irrev' +'irr' +'irgendeiner_' +'ires_' +'irc' +'iplin' +'ios_' +'ioni' +'iona' +'inviting_' +'investasi_' +'invasion_' +'intuitive' +'intra' +'intolera' +'interview_' +'interventions_' +'interv' +'interrupt' +'internationally_' +'interior_' +'interg' +'intensive' +'intensiv_' +'intelligente_' +'inte' +'intak' +'inta' +'institutionalis' +'instances_' +'inst' +'inspiri' +'inspectors_' +'insofern_' +'insist' +'inse' +'inquir' +'innehat' +'inne' +'injection' +'iniert' +'inhibit' +'inhaltlich' +'infra' +'inflict' +'infi' +'infectious_' +'infe' +'inex' +'inevitably_' +'iness_' +'ineffiziente' +'industriellen_' +'industrialis' +'indu' +'indische_' +'indirect_' +'indication_' +'incur' +'incorrect' +'incorporate_' +'inclusive_' +'incl' +'incapable_' +'inca' +'inat' 
+'inander_' +'inability_' +'impl' +'imperial' +'impedi' +'impede' +'impair' +'immigrant_' +'imba' +'ilte' +'illnesses_' +'ilde' +'iken_' +'ignor' +'igli' +'ight' +'ifor' +'ifel' +'ieß' +'iev' +'iess' +'ient' +'ieg_' +'ieden' +'ied' +'ido_' +'idio' +'identification_' +'identifi' +'ideale' +'ick' +'icial' +'ichtig' +'icht' +'icate' +'ibility_' +'ibi' +'iba' +'hübsch' +'höf' +'höchste' +'häus' +'häufiger_' +'hängen_' +'hw' +'hus' +'hurr' +'hunt' +'hts_' +'htm' +'hten_' +'hrung' +'hospitals_' +'hos' +'hors' +'hop' +'homo' +'homeland_' +'holt' +'hohes_' +'hochrangige' +'hnte' +'hmig' +'hmen_' +'hija' +'highway_' +'hierfür_' +'herz' +'hervorrufen_' +'hervorragende' +'herkömmlichen_' +'herausf' +'hende_' +'hemm' +'heb' +'heav' +'heated_' +'headlines_' +'header' +'headed_' +'hb' +'hav' +'hauses_' +'hause' +'haus' +'hate_' +'hast' +'harmonise_' +'harm' +'harde' +'harass' +'hanya_' +'handlungen_' +'halb_' +'haft' +'haf' +'habt_' +'günstige' +'gängig' +'gym' +'gura' +'gul' +'guitar' +'guide' +'guidance_' +'guest' +'guardian_' +'gste' +'grüne' +'gründet_' +'größerem_' +'größe' +'grösste' +'großartige_' +'grou' +'grie' +'gravierende' +'grausam' +'gog' +'god' +'gnet' +'gma' +'glücklich_' +'globalis' +'glichen_' +'girls_' +'gion' +'gin_' +'gin' +'gie_' +'giant_' +'geäußerten_' +'gewöhnlich' +'gewi' +'gewec' +'gewe' +'gewalt' +'getau' +'gesti' +'gesteckt_' +'gestaltete' +'geson' +'gesenkt_' +'gesellschaftlichen_' +'geschäft_' +'geschwindigkeit_' +'geschm' +'gerufen_' +'geriet' +'geri' +'gerechtfertigt_' +'gerechte_' +'geprägten_' +'geopolitische' +'geopolitical_' +'genom' +'gener' +'genen_' +'gende' +'gence_' +'gemessen_' +'gemeins' +'geldpolitische' +'geld' +'gehe_' +'geha' +'gegr' +'gegner' +'gegenseitige_' +'gefü' +'gefo' +'gefangen_' +'gef' +'geeignete_' +'geehrt' +'gedenkt_' +'gebiete_' +'geber_' +'gebe' +'gea' +'gather_' +'gastro' +'gasse_' +'gaps_' +'gall' +'gage' +'füllen_' +'förderung_' +'fähigen_' +'fungier' +'fung' +'fundamentally_' +'func' +'ftig' +'fter_' +'fst' +'früh' +'fruit' +'fronts_' +'frontier' +'fristig' +'frisch_' +'freut_' +'freundlichen_' +'frequent' +'fragile_' +'founder_' +'foun' +'fortzu' +'fortune_' +'fortgeschrittene' +'formulierte_' +'formali' +'formale' +'forg' +'foresee' +'forecast_' +'forder' +'forci' +'font' +'folgte' +'fold_' +'fn' +'flü' +'flugzeuge' +'flowing_' +'flower' +'flexibler' +'flexible' +'fled' +'flavo' +'flag' +'fizi' +'fix' +'fisch' +'finger' +'fiction_' +'fica' +'ffl' +'ffene' +'ffekt' +'feststellt' +'feste_' +'feiern_' +'fehlenden_' +'fec' +'featured_' +'feasible_' +'fea' +'favorite_' +'favored_' +'fau' +'fasc' +'farbe' +'fantastische' +'fangen_' +'faire' +'fahrts' +'fahrer' +'fahren' +'facilitating_' +'facilitat' +'fache_' +'fache' +'fabric_' +'fab' +'exzellenten_' +'extensi' +'exquisite_' +'exposure_' +'exploration_' +'exploiting_' +'experiencing_' +'exklusiv' +'existierende' +'exert_' +'exercise' +'exempl' +'exclu' +'exchange' +'excellence_' +'even' +'euro' +'euer' +'ette_' +'eting' +'eth_' +'eternal_' +'etabliert' +'etablieren_' +'esu' +'esto' +'estig' +'esta_' +'esses_' +'eso' +'erwecken_' +'erupt' +'erungen_' +'erta' +'erstens_' +'erstatte' +'erschl' +'errors_' +'errichtet_' +'erreich' +'erpr' +'ernen' +'ermä' +'erma' +'erläutern_' +'erleichter' +'erklär' +'erit' +'eris' +'eries_' +'erholsame' +'ergi' +'erge_' +'ergab' +'erfe' +'ereich_' +'erbe_' +'erbaut_' +'erbar' +'eran' +'equip' +'equat' +'epidemic_' +'epidemi' +'envi' +'entziehen_' +'entworfen_' +'enttäuscht' +'entspr' +'entschied_' +'entscheidung' +'entity_' +'entgegens' +'entgegenge' 
+'entfernten_' +'entfallen_' +'entail' +'ensured_' +'enorm_' +'enli' +'enh' +'engineer' +'energisch' +'ener' +'endanger' +'enact' +'employee_' +'empfäng' +'empfunden' +'empf' +'emotions_' +'emble' +'ellung_' +'elles_' +'electr' +'ekte' +'ej' +'eitig_' +'eir' +'einzust' +'einzuräumen_' +'einzurichten_' +'einzul' +'einzuf' +'einseitig' +'einräumen_' +'eingerichteten_' +'eingeh' +'einführen_' +'einfü' +'einbezieh' +'einbar' +'einb' +'eilt_' +'eilen_' +'eigne' +'eigenständige' +'eigens' +'eigenem_' +'eid' +'eichn' +'eho' +'ehl' +'eful_' +'efi' +'effizientere' +'effiziente_' +'effizient_' +'effizien' +'effekt_' +'editor' +'ede_' +'ecu' +'eck' +'echo' +'ebung_' +'ebnen_' +'dü' +'durchsetzen_' +'durchschnittliche_' +'durchschnittlich_' +'durchgesetzt_' +'duration_' +'duk' +'dst' +'dry' +'drucken' +'dron' +'drinking_' +'drift' +'dreh' +'drastisch_' +'drastic_' +'drasti' +'drag' +'dou' +'dorf_' +'dop' +'dog_' +'document' +'divisions_' +'dividing_' +'divide_' +'divi' +'diversen_' +'div' +'dition' +'distress_' +'distort' +'dist' +'dispose' +'dismissed_' +'disg' +'discre' +'discourse_' +'discourage' +'dische' +'disappointed_' +'disabilit' +'diper' +'dioxide_' +'dik' +'dih' +'digung' +'digkeit' +'diesbezügliche_' +'dib' +'diak' +'dg' +'devote_' +'devi' +'deutlichen_' +'detriment_' +'deten' +'det_' +'destru' +'despair_' +'designation_' +'desde_' +'derived_' +'dere' +'derartiger_' +'depressed_' +'deposits_' +'deploy' +'dense' +'denounce' +'demonstrieren_' +'demokratisch_' +'demograph' +'democrat' +'dementsprechend_' +'dell_' +'delightful_' +'delete_' +'degrees_' +'ded' +'decor' +'declines_' +'debe' +'debated_' +'dde' +'dba' +'dauerhafte' +'dare' +'dad' +'dacht' +'cycles_' +'curtail' +'cultivat' +'culminat' +'cue' +'ctur' +'ction' +'crush' +'crude_' +'critici' +'cript' +'crash_' +'cras' +'craft_' +'craft' +'cr' +'cozy_' +'couple' +'coup_' +'cotton_' +'cosmetic' +'correspond_' +'corps' +'copie' +'convince_' +'convicted_' +'convict' +'conversation_' +'controversial_' +'contagion_' +'contacts_' +'consult_' +'constitutes_' +'constituenc' +'constitu' +'consist' +'conquer_' +'connecting_' +'coni' +'confo' +'confined_' +'configure_' +'confe' +'conf' +'conciliation_' +'concentrated_' +'compre' +'compr' +'compliment_' +'complaint_' +'complain_' +'comparative_' +'common' +'commodities_' +'commission' +'commercial' +'comer' +'come' +'colonial_' +'collectively_' +'collections_' +'cola' +'cock' +'coat' +'coastal_' +'clu' +'closure_' +'clinical_' +'clin' +'cli' +'ckig' +'cket_' +'cke' +'cis' +'cinema' +'chtigen_' +'chsel' +'child' +'checke' +'chas' +'charisma' +'charakter_' +'characteristics_' +'characteristic_' +'change' +'chair' +'cere' +'censorship_' +'ced' +'cce' +'cca' +'cave' +'cautious_' +'cau' +'casual' +'casino_' +'carries_' +'capture_' +'captur' +'capability_' +'cap_' +'cant' +'cans_' +'cana' +'came' +'calculat' +'cafe' +'caci' +'bürokratische' +'bösartige' +'byl' +'but' +'burning_' +'burn' +'bureaucracy_' +'bum' +'bullet' +'builds_' +'buffer' +'brü' +'browsing_' +'brothers_' +'brake' +'boxe' +'bottle' +'borrowers_' +'borne_' +'bora' +'bookings_' +'bombard' +'boli' +'boil' +'bn' +'bloße_' +'bloß' +'blocking_' +'blockiert_' +'bloc' +'bliche' +'blase_' +'blam' +'birds_' +'billig_' +'bilität' +'bid_' +'bias' +'bia' +'bezi' +'bezeichnen_' +'bewert' +'beverages_' +'beunruhig' +'bett_' +'bett' +'betrü' +'beträchtliche' +'betrieb' +'betreiber' +'bete' +'bestrafen_' +'bestehender_' +'bestanden_' +'besitze' +'beside' +'besetzte' +'beset' +'beschw' +'beschreiben_' +'beschrei' +'berühmte_' +'beru' +'bereitstellen_' 
+'bereitet_' +'benötigten_' +'benötigte_' +'benen' +'benchmark' +'benachteiligt' +'benachbarten_' +'bemerken_' +'belong' +'beliefs_' +'beke' +'bekannteste' +'beit' +'beides_' +'behinder' +'begünstig' +'begriffen_' +'begrenzten_' +'begleitet_' +'begeben_' +'bege' +'begangen_' +'beförder' +'befriedigen_' +'befri' +'befrei' +'befa' +'bedeutete_' +'bedding_' +'bedauerlich_' +'bearbeiten_' +'bb' +'bat_' +'bases_' +'base' +'barri' +'banner' +'banned_' +'bang_' +'bailout_' +'bai_' +'ays_' +'aya' +'aw_' +'avoiding_' +'aviation_' +'avel' +'außergewöhnlichen_' +'automati' +'author' +'auszuw' +'auszul' +'auszuarbeiten_' +'ausstatt' +'aussi' +'ausser' +'auss' +'ausreichende' +'ausr' +'ausp' +'ausn' +'ausgest' +'ausges' +'ausgehend_' +'ausgedehnt' +'ausfallen_' +'ausbr' +'aum_' +'ault_' +'auli' +'aufzuh' +'aufrufen_' +'aufri' +'aufregende' +'aufgez' +'aufgel' +'aufgegriffen_' +'aufgef' +'auferleg' +'aufbe' +'auer_' +'audit_' +'aucht' +'ature_' +'aturan_' +'attr' +'atta' +'atra' +'atori' +'atm' +'atla' +'ativ' +'ata_' +'asy' +'astu' +'ast_' +'assumption_' +'assum' +'assessing_' +'assess_' +'asserti' +'assembly_' +'assembl' +'asse_' +'aspir' +'asks_' +'asa' +'artige' +'artif' +'arsen' +'arrangement_' +'arranged_' +'arm_' +'arität' +'arist' +'arising_' +'arde' +'archives_' +'arbeitende' +'arabische_' +'aqua' +'apt' +'approv' +'appoint' +'apart' +'anzust' +'anzukurbeln_' +'anzugehen_' +'anzuerkennen_' +'anzi' +'antis' +'antiqu' +'antic' +'anta' +'anstieg' +'anstehende' +'anspruchsvolle' +'ansi' +'anny' +'announcement' +'anni' +'anna' +'ann' +'ankomm' +'anische_' +'angry_' +'angre' +'angestrebte' +'angesprochene' +'anges' +'angene' +'angelegte' +'angekündigte' +'angekündigt_' +'angehör' +'angehen_' +'angegeben_' +'angebot' +'angeblich_' +'anfä' +'anen_' +'ands' +'anat' +'analysier' +'analyses_' +'amin' +'amer' +'ambiance_' +'aman' +'alu' +'altogether_' +'alternat' +'allu' +'alls' +'algo' +'aler_' +'alem' +'akzeptabel_' +'aktivist' +'aktiviert_' +'aktiven_' +'aktiv' +'aktion' +'ais' +'aim' +'ahlung_' +'ahlen_' +'ahe' +'ags_' +'agon' +'aggressive' +'aggre' +'after' +'afraid_' +'afi' +'afford' +'afflict' +'advocating_' +'advocates_' +'adventure_' +'adu' +'ads_' +'admitted_' +'administrat' +'adjust' +'adj' +'add' +'acute_' +'actress_' +'acquisition_' +'acle' +'acke' +'aches_' +'ache_' +'ace_' +'accumulati' +'accounted_' +'accessories_' +'accesse' +'abzust' +'abzule' +'abuses_' +'abstain_' +'abstain' +'absor' +'abschl' +'aboard_' +'ablen' +'ablauf' +'abkommens_' +'abgez' +'abgegeben_' +'abandoning_' +'Zwischen' +'Zwi' +'Zweig' +'Zut' +'Zuschauer' +'Zuf' +'Zucker_' +'Zit' +'Zell' +'Zeitschrift_' +'Zeitr' +'Zeilen' +'Zehn_' +'Zar' +'Yuk' +'Yen_' +'Yemen_' +'Yam' +'Xa' +'XVI' +'XLS' +'Wüsten' +'Wür' +'Wälder_' +'Wy' +'Worf_' +'Word' +'Woods_' +'Wissens_' +'Wissens' +'Wirtschaftswachstums_' +'Winters' +'Winds' +'Will' +'Wiener_' +'Widersprüche_' +'Wide' +'Whenever_' +'Wettbewerbe' +'Wertpapiere_' +'Wertpapier' +'Werkzeug_' +'Werkst' +'Werde_' +'Wenige' +'Weltwirtschafts' +'Wellnessbereich_' +'Weiterentwicklung_' +'Weihnachten_' +'Weich' +'Wed' +'Weber_' +'Wave' +'Watt' +'Wasch' +'Warnung' +'Wandels_' +'Wan' +'Wahrnehmung_' +'Wahlkampf' +'Wag' +'Wachstumss' +'WP' +'Völkerrecht' +'Vs_' +'Vorredner' +'Vorre' +'Vorrang_' +'Vorherrschaft_' +'Voraus_' +'Voraus' +'Volume' +'Vitorino_' +'Visu' +'Visa_' +'Vis' +'Vinc' +'Victoria_' +'Via_' +'Verwirklichung_' +'Vertrieb' +'Vertreter' +'Vertrags_' +'Vertrages_' +'Verteidigungsminister' +'Vermögen_' +'Verletz' +'Verlagerung_' +'Verkehrsnetz' +'Verkehrsa' +'Verkaufs' 
+'Verhältnis' +'Vereinig' +'Verbraucherschutz' +'Verbrauchern_' +'Verantwortlichkeit' +'Vario' +'VIC' +'VE' +'VD' +'Ura' +'Updates_' +'Unterscheidung_' +'Unters' +'Unternehmer' +'Unterfangen_' +'Unst' +'Universal_' +'Unionsbürger' +'Unfälle' +'Underground_' +'Unde' +'Umst' +'Umsatz_' +'Umbr' +'Ultimate' +'Ul' +'Uf' +'USE_' +'UP' +'UNM' +'Türen_' +'Tät' +'Twe' +'Turni' +'Turm' +'Turi' +'Tunnel' +'Tud' +'Tsi' +'Tschech' +'Truppe' +'Troi' +'Tric' +'Tradi' +'Tr' +'Toyota_' +'Ton_' +'Tomo' +'Tom_' +'Toleranz_' +'Tode_' +'Tod' +'Thor' +'Thom' +'Thirdly_' +'Thinking_' +'Theor' +'Theatre_' +'Thal' +'Th' +'Terrace_' +'Terra' +'Tenn' +'Tendenz_' +'Ten_' +'Temp' +'Tell' +'Tehran_' +'Technologie' +'Tay' +'Tausend' +'Tatsachen_' +'Task_' +'Take' +'Table' +'Tabak' +'TP' +'TOS_' +'TION' +'Süde' +'Südamerika' +'Säule' +'Sydney_' +'Superma' +'Sum' +'Sud' +'Subve' +'Substanz' +'Subsidiarität_' +'Stück_' +'Stuttgart_' +'Stufe_' +'Studierende' +'Student' +'Stress_' +'Stock_' +'Sto' +'Stil' +'Stig' +'Stift' +'Sti' +'Steuererhöhungen_' +'Stereo' +'Steigen' +'Stay' +'Statut' +'Statistiken_' +'Station' +'Starts' +'Standort' +'Stamm' +'Stal' +'Stabilitäts' +'Staatsb' +'Staatsa' +'Sri_' +'Sponsor' +'Spenden' +'Spekulation' +'Speed_' +'Spaziergang_' +'Sozials' +'Sozialpartner' +'Souvenir' +'Sonic_' +'Songs' +'Somit_' +'Solutions_' +'Sobald_' +'Slowakei_' +'Slideshows_' +'Sk' +'Sina' +'Simpl' +'Silver_' +'Silv' +'Sil' +'Siedlungen_' +'Sichtweise_' +'Sich' +'Shopping_' +'Sharon_' +'Sex_' +'Seuche' +'Session_' +'Serikat_' +'Seri' +'Sensor' +'Selbstver' +'Selbstbe' +'Sekunden_' +'Sekt' +'Seitens' +'Segel' +'Seg' +'Schü' +'Schönheit_' +'Schä' +'Schwerpunkt' +'Schulb' +'Schra' +'Schmidt_' +'Schlacht_' +'Schiffe_' +'Schichten_' +'Schengen_' +'Schauspieler_' +'Scandinavia' +'Save_' +'Sav' +'Sat_' +'Sanierung_' +'Samu' +'Samstag_' +'Same' +'Saharan_' +'Sah' +'Sag' +'Safe_' +'Sac' +'Sab' +'Saatgut_' +'SOL' +'SC_' +'Rückzug_' +'Rücken_' +'Roth' +'Rollen' +'Ring' +'Rig' +'Ries' +'Richtig' +'Rice_' +'Ria' +'Review_' +'Reu' +'Result' +'Ressourcen' +'Residenz_' +'Residence_' +'Reparatur' +'Rennen_' +'Renditen_' +'Rek' +'Reit' +'Reinh' +'Reihenfolge_' +'Reife' +'Reichtum_' +'Reichs' +'Reich' +'Regulierungsbehörden_' +'Regen' +'Reformp' +'Refle' +'Referen' +'Redebeitr' +'Recovery_' +'Rechtsvorschrift_' +'Rechtsgrundlage_' +'Rechnungshof' +'Rechner_' +'Rechn' +'Recently_' +'Read_' +'Read' +'Raumfahrt' +'Rauch' +'Ras' +'Rang' +'Radisson_' +'RS' +'RP' +'REACH_' +'RC' +'Quin' +'Quart' +'Qi' +'Pä' +'Pyr' +'Putsch' +'Ps' +'Präsidentschafts' +'Provid' +'Protokolls_' +'Prost' +'Promi' +'Produktp' +'Produktivitäts' +'Prinz' +'Print' +'Primär' +'Prima' +'Price_' +'Pres' +'Prag_' +'Posten_' +'Portfolio' +'Populis' +'Polizist' +'Polizeia' +'Poettering_' +'Poe' +'Plugin' +'PlayStation_' +'Plattform_' +'Pir' +'Pipe' +'Philippines_' +'Phil' +'Pfe' +'Persönlichkeiten_' +'Persian_' +'Pec' +'Pazifik' +'Passag' +'Partition' +'Part_' +'Part' +'Parlamente_' +'Parking_' +'Palästinensern_' +'Paketen_' +'Paa' +'PNR_' +'PCs_' +'PA_' +'PAR' +'Otto' +'Osteuropa_' +'Ostasien_' +'Oscar_' +'Ort' +'Oro' +'Orange_' +'Oppositions' +'Operationen_' +'Olympischen_' +'Olympi' +'Office' +'Ocean_' +'Obs' +'Obl' +'Oberflächen' +'OSZE_' +'OM' +'Nähr' +'Nous_' +'Nixon_' +'Nicaragua_' +'Nic' +'Neus' +'Netz' +'Netanyahu_' +'Nes' +'Nenn' +'Navigation_' +'Nau' +'Natural_' +'Nationalen_' +'Namens' +'Nahrung_' +'Nag' +'Nad' +'Nachweis_' +'Nachk' +'NU' +'NPT_' +'NN_' +'NL_' +'NL' +'NET_' +'NAFTA_' +'Mühl' +'Mächte' +'Must' +'Motors' +'Motor_' +'Moro' +'Morgan_' +'Morg' +'Monti_' 
+'Mont_' +'Mone' +'Monday_' +'Moderni' +'Mittelmeerraum_' +'Mittela' +'Mitgliedsländern_' +'Mis' +'Minister' +'Mind' +'Migu' +'Mexican_' +'Meth' +'Mercosur_' +'Menschenrechten_' +'Meldung' +'Mehrheit' +'Meeting_' +'Medikament' +'Mayo' +'Maximum_' +'Materi' +'Masse_' +'Maschine_' +'Marktk' +'Marken_' +'Marine' +'Marin' +'Mandrake' +'Mandela_' +'Mandel' +'Mand' +'Manchester_' +'Main' +'Maes' +'MU' +'MT' +'MIT_' +'MIL' +'Lyon_' +'Lun' +'Luftverkehr_' +'Los' +'Londoner_' +'Liv' +'Little_' +'Lithuania_' +'Liquiditäts' +'Linz_' +'Linken_' +'Line' +'Limit_' +'Lig' +'Licht' +'Libert' +'Liberia_' +'Liberal_' +'Level_' +'Lev' +'Les' +'Leiter_' +'Leib' +'Legislat' +'Legi' +'Lebensmittelsicherheit_' +'Lebensmitteln_' +'Leb' +'Lay' +'Lauf_' +'Large_' +'Lanzarote_' +'Lane_' +'Landschaft_' +'Lad' +'Labora' +'Labor' +'LL_' +'LCD_' +'LAN_' +'Kurs' +'Kura' +'Kuch' +'Kreis_' +'Kredit_' +'Kosm' +'Kopf' +'Kooperations' +'Konzert' +'Konzentration_' +'Kontakte_' +'Konsultation_' +'Konsolidierung' +'Konse' +'Kongress' +'Konflikt' +'Konfiguration' +'Kompl' +'Kommen' +'Kommando_' +'Kommando' +'Komit' +'Kohä' +'Kofi_' +'Koch' +'Kob' +'Knopf' +'Klä' +'Klick' +'Klarheit_' +'Klang' +'Kirchen' +'Kirch' +'Ki_' +'Khamenei_' +'Kernkraft' +'Kennedy_' +'Kay' +'Kasse' +'Kartell' +'Karibik_' +'Kapitel_' +'Kapitalm' +'Kapazität_' +'Kanten_' +'Kampa' +'Kaiser_' +'KT_' +'KT' +'KP' +'KOM_' +'Jungen_' +'Jugoslawien_' +'Jugendliche_' +'Jugend_' +'Journal_' +'José_' +'Joint_' +'Joe' +'Jia' +'Jeff' +'Jedoch_' +'Jede' +'Jardin_' +'Jane' +'Jahrzehnts_' +'Jahr' +'Isle' +'Islamist' +'Investmentbank' +'Investitionsbank_' +'Interpret' +'Internetseite' +'Interinstitution' +'Integrationsp' +'Instrument' +'Instituts_' +'Institution' +'Insi' +'Innovat' +'Innenhof_' +'Innen_' +'Ingenieure_' +'Informationsschalter_' +'Infolgedessen_' +'Indians_' +'Index' +'Independen' +'Impulse_' +'Importe' +'Immigration_' +'Ig' +'Ideal_' +'Ib' +'IST' +'ION' +'IDE' +'ICC_' +'Händler_' +'Hyde_' +'Hung' +'Hotelsafe_' +'Horn' +'Horde_' +'Hochschule' +'Hochschul' +'History_' +'Hintergr' +'Him' +'Heu' +'Herkunft_' +'Herausgeber_' +'Heim' +'Heat' +'Haushaltsp' +'Haushaltsl' +'Hat' +'Harbor_' +'Handt' +'Handlungen_' +'Handelspartner_' +'Handelsb' +'Handb' +'Halle_' +'Halle' +'Halbinsel_' +'Haiti_' +'Hag' +'Hafen' +'Hab' +'HN' +'HE' +'Gy' +'Gul_' +'Grünen_' +'Gründer_' +'Grundst' +'Grill' +'Graz' +'Gras' +'Gran' +'Grafik' +'Governor_' +'Gouverneur' +'Go_' +'Gleichw' +'Gleichheit_' +'Gleiche' +'Gift_' +'Gewissen_' +'Gett' +'Get' +'Gesundheitsp' +'Gesprächen_' +'Gespräch' +'Gesetzgeb' +'Geschäftsle' +'Geschäftsb' +'Geschäfts_' +'Gerhard_' +'Gent' +'Geni' +'Geneva_' +'Genehmigung_' +'Gene_' +'Gemüse' +'Gemeinschaftsm' +'Gemeinde_' +'Geltung' +'Geis' +'Geheimdienst' +'Gegenzug_' +'Gefühle_' +'Gefä' +'Gefangenen_' +'Geduld_' +'Gebäuden_' +'Gebäude' +'Geburt_' +'Gazastreifen_' +'Garten' +'Garni' +'Gare_' +'GNOME_' +'GM_' +'GEN' +'GC' +'GAP_' +'G8_' +'Führungskräfte' +'Födera' +'Future_' +'Futtermittel' +'Fusion' +'Furcht_' +'Funktion' +'Fuku' +'Fuerte' +'Frühling_' +'Frühjahr_' +'Friedrich' +'Friedman_' +'Friedensnobelpreis' +'Fremden' +'Freie_' +'Fran' +'Frageb' +'Fracht' +'Former_' +'Forge' +'Foot' +'Fon' +'Following_' +'Flüchtlingen_' +'Flächen_' +'Fläche' +'Flugzeug_' +'Fluggäste_' +'Flugg' +'Flotte' +'Florenz_' +'Fli' +'Fisher' +'Fine_' +'Finanzr' +'Finanzinstitute_' +'Finanzielle_' +'Finanz_' +'Files_' +'Fift' +'Few_' +'Fests' +'Festplatten_' +'Festland' +'Ferien_' +'Fels' +'Felder' +'Feind' +'Fei' +'Fea' +'Fav' +'Fasc' +'Fantas' +'Fall' +'Fahrzeug_' +'Fachw' +'FU' 
+'FPGA_' +'FP' +'FOR_' +'FF_' +'Extremisten_' +'External_' +'Ex_' +'Evo' +'Ev' +'Euros' +'Euch_' +'Eti' +'Etage_' +'Erwerb_' +'Erw' +'Ersten_' +'Ersatz_' +'Ero' +'Erneuerung_' +'Eri' +'Erforder' +'Erdoğan_' +'Erdbeben' +'Erbe' +'Entwicklungsb' +'Entschädigung' +'Entschuldigung' +'Entschließungen_' +'Enhance' +'Engl' +'Energieb' +'Energiea' +'Endes_' +'Employment_' +'Empfang_' +'Electric' +'Eisb' +'Eis' +'Einzig' +'Einver' +'Eintritt' +'Einsparungen_' +'Einschränkungen_' +'Eins' +'Einkaufs' +'Einheiten_' +'Eingreif' +'Einfa' +'Eigentumsrechte' +'Eigenkapital_' +'Eiffel_' +'Eif' +'Ef' +'Economi' +'Ecke_' +'Ec' +'Dänemark_' +'Dut' +'Durban_' +'Drug' +'Drohungen_' +'Dritt' +'Dringlichkeit_' +'Dresden_' +'Drama' +'Download' +'Double_' +'Doll' +'Dokument' +'Document' +'Division_' +'Dist' +'Diskurs' +'Disco' +'Direktinvestitionen_' +'Diplom' +'Dies' +'Dialog' +'Dha' +'Devisen' +'Denkens_' +'Deng' +'Demokratischen_' +'Deli' +'Delhi_' +'Deg' +'Defizite_' +'Decision_' +'Datum_' +'Datenschutz' +'Dat' +'Danke_' +'Dan_' +'Dali' +'DT' +'DNA_' +'Cz' +'Currently_' +'Curren' +'Cubase_' +'Cs_' +'Crown' +'Cross_' +'Crisis_' +'Criminal_' +'Cove' +'Cost' +'Corporate_' +'Corn' +'Cori' +'Copyright_' +'Convenient' +'Contra' +'Continu' +'Connect' +'Competiti' +'Columbia_' +'Color_' +'Colla' +'Cocktail' +'Client' +'Clear' +'Claudi' +'Clar' +'Civi' +'Choose_' +'Chemie' +'Chef' +'Check' +'Charakter' +'Channel_' +'Chame' +'Certain' +'Catholic_' +'Cathedral_' +'Castel' +'Cash_' +'Case_' +'Casa_' +'Casa' +'Carolyn_' +'Carne' +'Cara' +'Capital_' +'Cance' +'Cala' +'Cafés_' +'CS_' +'CSS_' +'COM' +'COD_' +'CNS_' +'CN' +'CHI' +'CAS' +'Burk' +'Bundesregierung_' +'Bui' +'Buche' +'Brutto' +'Brun' +'Bruch' +'Brothers_' +'Brot' +'Broadway_' +'Bring' +'Brid' +'Brea' +'Brazilian_' +'Bou' +'Boris_' +'Bombe' +'Bolivien_' +'Blume' +'Blizzard_' +'Blitz' +'Bisc' +'Bir' +'Biokraftstoffe' +'Bildungss' +'Bib' +'Bh' +'Bezug' +'Bezirk' +'Bevölkerungen_' +'Betriebe' +'Betre' +'Betrag_' +'Bestrebungen_' +'Beste_' +'Besonders_' +'Beseitigung_' +'Beschränkungen_' +'Bergen_' +'Berechtigung' +'Berater_' +'Berat' +'Benzin' +'Benutzer' +'Benutz' +'Bemühen_' +'Belle' +'Bell' +'Beleidigung' +'Beitrittsverhandlungen_' +'Behinderte' +'Behauptung_' +'Begriffe_' +'Begriff' +'Begleiter' +'Begin' +'Befehl' +'Bedauerlicherweise_' +'Bed_' +'Beam' +'Bavaria_' +'Baust' +'Battle' +'Basi' +'Bashir_' +'Bart' +'Barrier' +'Barnier_' +'Barcode_' +'Barcelon' +'Barc' +'Barbara_' +'Banglades' +'Bang' +'Ban_' +'Balkan' +'Baker' +'Bahnh' +'BT' +'BES' +'BA_' +'Außenministeri' +'Autorit' +'Autonom' +'Ausw' +'Ausse' +'Ausmaße' +'Auslös' +'August' +'Augenblick_' +'Auftritt' +'Auftrags' +'Aufsichtsrat' +'Aufsichts' +'Aufschwung_' +'Aufruf' +'Aufpreis_' +'Auflösung_' +'Audio' +'Ath' +'Astrium_' +'Asp' +'Argumentation_' +'Arbeitsweise_' +'Arbeitsk' +'Arbeitsbedingungen_' +'Arbeits_' +'App' +'Anwe' +'Antonio_' +'Anton' +'Anth' +'Anreisedatum_' +'Ano' +'Anhang_' +'Angriffs' +'Angestellte' +'Angehörige' +'Ang' +'Andrew_' +'Andreas' +'Anb' +'Alten' +'Alt_' +'Alpha_' +'Alltag_' +'Allgemein' +'Allen_' +'Alco' +'Alan_' +'Akteur_' +'Akku' +'Aix_' +'Ahn' +'Ahmadinejad_' +'Agr' +'Again_' +'Afrikan' +'Affi' +'Aff' +'Adv' +'Admiral_' +'Adi' +'Add_' +'Add' +'Activ' +'Achse_' +'Academy_' +'Abstand_' +'Abend' +'Abbildung' +'ATT' +'ASPs_' +'API' +'AN_' +'AKP_' +'A4' +'=' +'94' +'91_' +'89' +'86' +'77' +'75' +'74_' +'73' +'61' +'58' +'55' +'500' +'48' +'400' +'34' +'225' +'20th_' +'1976_' +'1970s_' +'197' +'1962_' +'1933_' +'1929_' +'1914_' +'18' +'169' +'168' +'163' +'154' +'152' +'111_' +'108' 
+'0s_' +'05' +'006' +'.. _' +'.-_' +', [_' +'*' +'), "_' +'))_' +') , _' +'''' ' +'&#_' +'"-_' +'")' +' :' +' // _' +' ...' +' ,' +' *' +' (* _' +' ''[[_' +' #' +' _' +'„' +'“ ' +'— _' +'ь_' +'ше' +'ци' +'х' +'ф' +'тт' +'сти' +'от' +'он' +'ол' +'о_' +'за' +'же' +'ді' +'ды' +'ан' +'С' +'И' +'τ' +'ša' +'Š' +'ław_' +'če' +'ć_' +'ütz' +'üt' +'üstung' +'üste_' +'üste' +'ürzung' +'ürzt' +'ünst' +'ünder' +'ünde' +'üllen_' +'ührt_' +'ühre' +'ügliche' +'üchte_' +'ücht' +'übl' +'überwunden_' +'überwiegen' +'übertrieben_' +'übersteig' +'überraschend_' +'übern' +'übermäßig_' +'übermittelt_' +'überlegt_' +'übergreifende' +'überga' +'überein_' +'üben_' +'ún_' +'úl' +'ösung_' +'östlich' +'österreichische' +'öpfen_' +'öne' +'öhne' +'öhn' +'ögen' +'öfe_' +'öder' +'ño_' +'ère_' +'çais' +'äuter' +'äumt_' +'ätzlich' +'äte_' +'äte' +'ässer' +'äse' +'ärts' +'ärs' +'ärme' +'äni' +'ängen' +'ändigkeit_' +'ändig_' +'änderungen_' +'änderung_' +'äme' +'älteste' +'ältere_' +'äle' +'ähnlicher_' +'ähnel' +'ähigkeit_' +'ägyptische_' +'äglich' +'ägige' +'ât' +'ßte' +'ßnahmen' +'Überw' +'Übersetzungen_' +'Überschw' +'Überraschung' +'Überprüf' +'Überlegen' +'Überein' +'Üb' +'Ölpreise_' +'Ökosystem' +'Ökonom' +'Öffentliche_' +'Öff' +'Ängste_' +'Än' +'Ähnlich' +'©' +'    ' +'}}) {{_' +'}}' +'}' +'zögern_' +'zzo_' +'zze' +'zy_' +'zwischenstaatlichen_' +'zwecke' +'zwangsläufig_' +'zuwei' +'zuversichtlich_' +'zuverlässig' +'zuteil_' +'zut' +'zuständige_' +'zuste' +'zusammensetz' +'zusammengebr' +'zurückzukehren_' +'zurückl' +'zurückgreifen_' +'zurückgeh' +'zurückgeg' +'zur' +'zuläss' +'zuk' +'zugestimmt_' +'zugefü' +'zt' +'zqu' +'zoom_' +'zna' +'zitier' +'zini' +'zin_' +'zig' +'zielle' +'ziell_' +'ziehungen_' +'ziehung' +'zeugen_' +'zeuge' +'zentrums_' +'zellen' +'zeitwei' +'zeitung' +'zeichnet_' +'zehnte' +'zed' +'zahlung_' +'zahler' +'zad' +'yw' +'yto' +'ypto' +'yl' +'ybo' +'xw' +'xt' +'xit' +'xin' +'xenophob' +'xe_' +'xamp' +'würdigkeit_' +'wünschte' +'wöhnlich' +'wäsche_' +'wärtig' +'wunderschöne_' +'wound' +'worsen_' +'wors' +'workforce_' +'wora' +'woody_' +'wollend' +'woll' +'wohlhabende' +'wle' +'wl' +'wishing_' +'wirtschafts' +'wirkungs' +'wirksamere' +'wirksamen_' +'wirksam' +'wing' +'willi' +'wil' +'wig' +'wiederherzustellen_' +'widerst' +'widening_' +'wick_' +'whit' +'westliche' +'weste' +'west' +'wertvoll' +'werb' +'wendet' +'wem' +'weiße' +'weitreichende' +'weitergehen_' +'weig' +'weifel' +'wehr' +'week' +'wedding' +'weakness_' +'weaken_' +'wea' +'wast' +'washing_' +'wary_' +'warnings_' +'wandel_' +'walke' +'wald_' +'wahrnehmen_' +'waffen' +'wad' +'wab' +'völker' +'vé' +'vä' +'vy_' +'vri' +'vou' +'vorübergehend_' +'vorzubereiten_' +'vorstellung' +'vorste' +'vorsitzende' +'vorschriften_' +'vorliegen_' +'vorkommen_' +'vork' +'vorige' +'vorhers' +'vorher' +'vord' +'vorbereitet' +'vorbereiten_' +'vorbei' +'vorange' +'vons' +'volu' +'voltage_' +'vollem_' +'volcan' +'vocational_' +'voc' +'visuali' +'visor' +'vision' +'visib' +'vine' +'vigor' +'vielerlei_' +'vici' +'vibrant_' +'veränderten_' +'verzögert' +'verzweifelt' +'verzichte' +'verweist_' +'verweigern_' +'verwalten_' +'veru' +'vertreter_' +'vertreib' +'vertraue' +'vertrags_' +'vertraglich' +'verteilung_' +'verstärkten_' +'verständ' +'verstoßen_' +'verstorben' +'versprochen_' +'versprechen' +'verspr' +'versicherung' +'verschärfen_' +'verschwunden_' +'verschwende' +'verschuld' +'verscho' +'verschmutz' +'verschi' +'verringerte' +'vernünftigen_' +'vernünftige_' +'vernünftig_' +'vermeid' +'vermehrt' +'verma' +'verm' +'verleite' +'verleg' +'verlaufen_' +'verla' 
+'verkörpert_' +'verkn' +'verhandeln_' +'vergrößern_' +'vergr' +'vergleichen_' +'vergeb' +'verfolgten_' +'verfasst' +'verfallen_' +'vereinen_' +'vereinbarung' +'vereinbart_' +'vereinbar' +'verei' +'verehrte' +'verda' +'verbot_' +'verbleibenden_' +'verbindlichen_' +'verarbeitet' +'verarbeiten' +'verantwortungs' +'veranstaltungen_' +'veranlassen_' +'veraltet' +'verabschiedeten_' +'venue' +'ventu' +'vehement' +'vea' +'variables_' +'valu' +'validity_' +'valid' +'vai' +'vague_' +'vage' +'vaccine' +'vaca' +'vac' +'uzz' +'uum' +'utze' +'utung' +'utuh' +'utu' +'utter_' +'utter' +'utt' +'utilis' +'utig' +'utan' +'ustan' +'ussion' +'usschuss_' +'uso' +'usly_' +'usher' +'ush' +'usan' +'urteile' +'urte' +'urt_' +'urso' +'urlaub_' +'uris' +'urie' +'ured_' +'uras_' +'uptc' +'upper_' +'uppe' +'upl' +'uph' +'unüber' +'unwind_' +'unvorher' +'unverzichtbar_' +'unvers' +'unvergessliche' +'unterziehen_' +'unterschiedlicher_' +'unterl' +'unterhält_' +'untere' +'unterbreite' +'unsicher' +'unsi' +'unse' +'unrec' +'unrealisti' +'unpopul' +'unp' +'unnötige' +'unke' +'units_' +'unit' +'unilateral_' +'uniform_' +'unification_' +'ungswe' +'ungsv' +'ungsl' +'ungsgr' +'ungsan' +'ungewöhnliche' +'ungene_' +'ungeb' +'unga' +'unfähig_' +'unfo' +'unerlässlich_' +'undin' +'undertakings_' +'undertake_' +'underpinn' +'undermined_' +'underline_' +'unda' +'unconventional_' +'uncom' +'unchanged_' +'unberührt' +'unanimity_' +'unal' +'unabhängiges_' +'umweltfreundlich' +'umsetzen_' +'umg' +'umfeld' +'umfassender_' +'umfassend_' +'umfangreicher' +'umfang_' +'umd' +'umben' +'ultur' +'ultimative' +'uls' +'ule_' +'uldung' +'ular' +'uing_' +'ugh_' +'ufhin_' +'ufer' +'ued_' +'uder' +'uda' +'uci' +'uation_' +'tü' +'töt' +'tödliche_' +'täusch' +'tändig' +'tzte_' +'typen_' +'two' +'twist' +'twelve_' +'twar' +'turn' +'turbulen' +'tums' +'tumor' +'tue' +'tuc' +'tual_' +'ttung' +'tto' +'ttler_' +'ttet_' +'tsu' +'träglich' +'trust' +'trump' +'trugen_' +'tropical_' +'triple' +'tries_' +'trial' +'trend' +'treibende' +'treib' +'trei' +'tre_' +'travelling_' +'traveller_' +'trav' +'traurige' +'trategie_' +'transported_' +'transporte' +'transport' +'transparenter' +'transpar' +'transnationale' +'translations_' +'translat' +'transform' +'transferring_' +'trans_' +'train' +'tragi' +'trafe' +'traditione' +'traders_' +'trademark_' +'traded_' +'tracking_' +'toute' +'tout_' +'tourists_' +'touristi' +'toughe' +'touching_' +'touched_' +'totalitäre' +'torture_' +'toren_' +'tooth' +'toleran' +'toc' +'tne' +'tle_' +'tland' +'tk' +'tious' +'tionier' +'tine_' +'timetable_' +'tigt_' +'tigkeit' +'tiert' +'tiefe' +'tiat' +'thrill' +'thre' +'thr' +'therm' +'therapeuti' +'theorie_' +'theme' +'theit_' +'theatre_' +'theatr' +'thal' +'texte' +'teure' +'teu' +'tete' +'tet' +'testi' +'ters' +'terrasse_' +'terp' +'tenure_' +'tentang_' +'tenen_' +'tender' +'tendenziell_' +'temptation_' +'temples_' +'tem_' +'tellt' +'tellen_' +'telefoni' +'tein' +'teilnehmenden_' +'teilgenommen_' +'teigerung' +'teg' +'teen' +'tee' +'technologische_' +'technike' +'tched_' +'tausche' +'tariff_' +'tante' +'tanta' +'tanding' +'talked_' +'talis' +'talent' +'tad' +'tackl' +'sämtliche' +'sze' +'systemische' +'synt' +'synchronisier' +'switching_' +'swer' +'swell' +'sw' +'svor' +'survive' +'surro' +'surrender' +'supranational_' +'supposedly_' +'supervisor' +'superf' +'sung' +'summari' +'suits_' +'sue' +'sudden_' +'sud' +'successive_' +'subversi' +'subscribe' +'submi' +'sua' +'stützt_' +'stür' +'stät' +'ständige_' +'ständige' +'styles_' +'studierte_' +'stud' +'strukturellen_' +'structured_' +'strom_' 
+'stroll_' +'stricte' +'stress' +'strengen_' +'strafe_' +'stoßen_' +'stom' +'stol' +'stoffen_' +'stock' +'stl' +'stitch' +'stirbt_' +'stilvolle' +'still' +'steuerung_' +'steuerlichen_' +'stetig' +'sters_' +'steril' +'stenz' +'stehe' +'statu' +'stattgefunden_' +'stattfindenden_' +'statistische' +'statistical_' +'stationen_' +'stat' +'start' +'starker_' +'stando' +'standardiz' +'stan' +'staltung_' +'stagnieren' +'stagnat' +'stagn' +'stabiler_' +'stabil_' +'staatlich' +'ssysteme' +'ssungs' +'sstsein' +'ssing_' +'ssige' +'ssan' +'ssa_' +'sreg' +'sre' +'sq' +'späteren_' +'spyware_' +'spur_' +'spur' +'sprü' +'sprache_' +'spr' +'sporting_' +'spontane' +'sponsor' +'spiral_' +'spin' +'sphäre' +'spen' +'spektakulär' +'speedi' +'speculat' +'spars' +'sozialistische' +'souveränen_' +'sorte' +'soph' +'sonstige_' +'solving_' +'solide' +'sole' +'sola' +'sogenannte_' +'soft' +'soeben_' +'socialist_' +'soc' +'sob' +'soared_' +'snowb' +'snow_' +'snack' +'smooth' +'sly_' +'slower_' +'slot' +'slos_' +'slippe' +'slides_' +'slic' +'sl' +'sky' +'skill_' +'skill' +'skand' +'ska_' +'sk_' +'siz' +'sive' +'siv' +'sitzung' +'sition' +'sinnlos' +'sinn' +'singles_' +'sine' +'simplifi' +'simple' +'signifikant' +'sightseeing_' +'sighted_' +'sien' +'sid_' +'sicht_' +'sicht' +'sichert_' +'shoulder_' +'shou' +'shortages_' +'short' +'shooting_' +'shoot' +'shocked_' +'shock' +'shim' +'shifted_' +'shi' +'shar' +'sew' +'severely_' +'sever' +'settle' +'seti' +'servant' +'serta_' +'sers_' +'seren' +'serbische' +'sequenc' +'seperate' +'senti' +'sensors_' +'sensor_' +'senk' +'selecting_' +'sele' +'selber_' +'seja' +'seize_' +'seekers_' +'securing_' +'secured_' +'sect' +'secretar' +'seating_' +'seasons_' +'scre' +'scra' +'score' +'schöner_' +'schätzungsweise_' +'schätzt_' +'schädliche' +'schwierige_' +'schwache' +'schw' +'schrä' +'schreckliche' +'schreck' +'schnell' +'schn' +'schmutz' +'scheitern_' +'scheduled_' +'schedule_' +'scharfe' +'schaftung_' +'schaftl' +'schaff' +'sceptic' +'scenes_' +'scenario_' +'scape' +'scann' +'scandal_' +'scan_' +'sber' +'sati' +'samt_' +'samples_' +'sam_' +'salaries_' +'saison' +'sahen_' +'sade' +'saddle' +'sacrific' +'sack' +'sach' +'röme' +'räumen_' +'räum' +'räsident' +'räfte_' +'räch' +'rá' +'rust_' +'runde_' +'rulers_' +'ruk' +'ruin' +'ruhiger_' +'ruhe' +'ructi' +'rtuni' +'rtu' +'rtain' +'rows_' +'row' +'route' +'rot_' +'rosy_' +'ropriate' +'romantische' +'rom_' +'rojekt' +'rohstoff' +'roduc' +'robuste' +'rmen_' +'rlich_' +'rlich' +'rland' +'rke_' +'rivers_' +'rival_' +'rival' +'rity_' +'rism' +'riskante' +'risch' +'ringt' +'rimier' +'ril' +'rike' +'rika' +'rift_' +'ries' +'riert_' +'rieren_' +'rien_' +'rider' +'richtig' +'richtete_' +'richt_' +'riches' +'rial' +'rhythm' +'rfen_' +'rfe' +'revised_' +'review' +'reuni' +'rette' +'retre' +'retired_' +'retire' +'reti' +'restriktive' +'restricti' +'restoring_' +'restaur' +'respon' +'resid' +'resc' +'repu' +'repräsentieren_' +'reproductive_' +'reprodu' +'repri' +'replacement_' +'replace' +'repetiti' +'repeal' +'repar' +'repai' +'renovierte_' +'renn' +'reng' +'renamed_' +'removal_' +'rement' +'remember' +'reme' +'rema' +'relying_' +'reluctant_' +'rejects_' +'rejecting_' +'reise_' +'reis' +'reint' +'reinsta' +'reinforced_' +'reicht' +'reicher_' +'regulieren' +'regulators_' +'regrettabl' +'registrieren_' +'regierung' +'regieren' +'refund' +'refuge_' +'refrain' +'refor' +'redirect' +'redefin' +'recycle' +'recreational_' +'recoveri' +'reconst' +'recognizes_' +'recognis' +'rechtmäßig' +'rechtl' +'rebalancing_' +'reassure_' +'realm_' +'realize_' +'realist' 
+'realis' +'readers_' +'reactor' +'rdi' +'rde' +'rce' +'rca' +'rben_' +'rbeiten_' +'rba' +'ray_' +'raw' +'rativ' +'rategie_' +'rate' +'rar_' +'ranking' +'rane' +'random_' +'rand_' +'rand' +'rance' +'ralis' +'ral' +'raft' +'radioa' +'radikalen_' +'radikale_' +'rade' +'rada' +'quota' +'quot_' +'quet_' +'quet' +'quem' +'quarters_' +'quantity_' +'qualitativ_' +'qualit' +'qualifizierten_' +'qualifiziert' +'quaint' +'pursu' +'puede' +'publik' +'publici' +'public' +'pub_' +'ptu' +'ptions_' +'psychiatris' +'pse' +'prüfung_' +'präsent' +'provin' +'prototype' +'protectionism_' +'prospe' +'proportiona' +'prompt' +'promote' +'prom' +'prolonged_' +'projection' +'prohibiti' +'programm' +'profitiert_' +'profitable_' +'profitability_' +'professionellen_' +'profess' +'prof' +'produzierende' +'produkti' +'produ' +'problematisch_' +'problematic_' +'privileg' +'prioriti' +'print' +'pries' +'pretend_' +'presu' +'prest' +'pressing_' +'pressed_' +'preside' +'present' +'prerequisite_' +'prer' +'preparatory_' +'preparations_' +'prematurely_' +'preliminary_' +'preiswerte' +'preferences_' +'predict' +'preci' +'prech' +'precarious_' +'praxis' +'praktizieren' +'praktikable' +'prakti' +'praise_' +'pragmatische' +'practic' +'pph' +'pped_' +'ppe_' +'pp_' +'powered_' +'pour' +'postpone' +'posted_' +'possess' +'positiv' +'portugiesische' +'portrait' +'portion_' +'porn' +'populistischen_' +'pok' +'pois' +'poet' +'plötzliche' +'plate_' +'plastic' +'plas' +'planes_' +'plain_' +'plag' +'placem' +'plac' +'pixels_' +'pir' +'pio' +'pilgrim' +'pid' +'pia' +'physicians_' +'philosophi' +'phenomena_' +'pf_' +'pez' +'peu' +'petani_' +'pet' +'pest' +'perver' +'pertan' +'personnes_' +'personality_' +'permissi' +'perlu_' +'perl' +'perish' +'perf' +'perba' +'pera' +'penge' +'penetration_' +'penduduk_' +'pendidikan_' +'pendi' +'penalties_' +'pembe' +'pei' +'peg' +'pec' +'pd' +'pav' +'patents_' +'pate' +'pat' +'paste' +'passt_' +'passion_' +'partic' +'parti' +'pari' +'parameter_' +'pand' +'pal_' +'pair' +'painter' +'paint_' +'pact' +'packen_' +'pac' +'oßen' +'oxida' +'oxi' +'ox' +'ovi' +'overt' +'overlooks_' +'overlooked_' +'overlook_' +'overflow_' +'overco' +'outweigh' +'outlook_' +'outermost_' +'otic_' +'oster' +'orsch' +'ors' +'orische' +'oris' +'orin' +'organize_' +'organisierte' +'orene' +'ordnungsp' +'ordnet_' +'orderly_' +'orde' +'oration_' +'opu' +'optionen_' +'optimiert_' +'oppos' +'opia' +'oph' +'opfer' +'operier' +'operativen_' +'oot_' +'ook_' +'onsk' +'onna' +'omy_' +'omo' +'ologischen_' +'ollst' +'olla' +'olis' +'olic' +'oler_' +'olat' +'oh_' +'offs_' +'offre' +'offizieller_' +'offenbar' +'odds_' +'oda' +'och' +'ocean_' +'oce' +'obsess' +'obs' +'oblem_' +'objecti' +'obersten_' +'oberst' +'oberh' +'obat' +'oad' +'nützliche' +'nötige_' +'nördlich' +'nö' +'nähere' +'nya' +'nw' +'nuts_' +'nut' +'ntwicklung_' +'ntl' +'ntif' +'nta_' +'nswert_' +'nste' +'nsp' +'nschl' +'nsche' +'nre' +'nqu' +'npr' +'nov' +'notable_' +'north' +'norms_' +'norm_' +'nommene' +'nomen' +'nod' +'nnen' +'nme' +'nly_' +'nle' +'nkt' +'nji' +'niños_' +'niv' +'nisch_' +'nießer' +'niert' +'nico' +'nick' +'ngun' +'ngt_' +'ngg' +'nges_' +'ngeb' +'nfalls_' +'newer_' +'nevertheless_' +'neutrali' +'neutral_' +'neuartige' +'netzwerk' +'nes' +'neoliberal' +'nennt_' +'neighborhood_' +'nehmbar_' +'negotiators_' +'near' +'ndt' +'ndre' +'ndlung' +'ndere' +'ncier' +'nchi' +'navigati' +'navigate_' +'nav' +'nationals_' +'nationalists_' +'nata' +'nast' +'nas_' +'narrative' +'nament' +'nam' +'nachweis' +'nachteilig' +'nachkommen_' +'nachgewiesen' +'nachdenken_' +'nable_' 
+'mündliche_' +'mäßigkeit_' +'mäler' +'mysti' +'mutual' +'musik' +'musical_' +'multipl' +'multilaterale' +'multic' +'mption_' +'mpfe' +'moti' +'monst' +'monet' +'momentum_' +'module_' +'modu' +'modernem_' +'modell' +'mobiliz' +'mmungen_' +'mml' +'mmer' +'mmen_' +'mmel' +'mliche' +'mle' +'mittelfristig' +'mitigat' +'mitget' +'mission' +'missed_' +'misg' +'mische' +'mins' +'ministry_' +'minimize_' +'mines_' +'miner' +'mik' +'mig' +'mia_' +'mia' +'mg' +'mers_' +'mern_' +'merkwürdig' +'mercury_' +'merchan' +'merat' +'mera' +'meny' +'menti' +'menschlicher_' +'mengha' +'meng' +'memu' +'memo' +'memi' +'meldungen_' +'meister_' +'meist' +'meint' +'mehrmals_' +'meets_' +'medication' +'mechanisch' +'mean' +'mbl' +'maß_' +'matik' +'materiellen_' +'mate_' +'masih_' +'masalah_' +'marriage_' +'marri' +'marktor' +'markieren_' +'marke' +'marit' +'marina_' +'mare' +'manufactured_' +'manual_' +'mankind_' +'manipulati' +'manipulated_' +'manifest_' +'manden_' +'mammals_' +'mainst' +'magneti' +'machine' +'löschen_' +'läufe' +'längerfristige' +'längere_' +'lz_' +'lys' +'luxu' +'lun' +'luggage_' +'luft' +'ludi' +'lud' +'ltig' +'lth' +'lte' +'lovers_' +'lof' +'locken' +'lock' +'locally_' +'loca' +'loads_' +'llung' +'llin' +'llige' +'llia' +'lizi' +'litera' +'lisiert_' +'lion_' +'liner_' +'lineare' +'linder' +'lige' +'lifestyle_' +'lif' +'lieferte' +'lieben_' +'lichste_' +'licence_' +'libysche' +'liberty_' +'liaison_' +'lh' +'lg' +'letzte' +'letz' +'letters_' +'lessly_' +'lernt' +'lerat' +'lep' +'leistungsfähig' +'leistet_' +'leiden' +'leichte_' +'lehnt' +'legitim' +'legislator' +'lebend' +'lear' +'league_' +'lder_' +'lde_' +'laying_' +'laya' +'laus' +'launch' +'lauf' +'lation_' +'las' +'landm' +'landing_' +'lain' +'laime' +'labeled_' +'label' +'künft' +'köstliche' +'könig' +'ky' +'kwa' +'kungen_' +'kultur' +'ktr' +'ktivitäten_' +'ktionen_' +'kter' +'kst' +'ksch' +'kreativ' +'krati' +'krat' +'krank' +'kra' +'kostspielige' +'kostenloses_' +'kostenlose' +'kostengünstig' +'kostenfreie' +'korrekte_' +'koordinierte' +'koordinieren_' +'konzer' +'konvertier' +'konvention' +'konte' +'konstruktiv' +'konsequente_' +'konsequent_' +'kons' +'kono' +'konferenz_' +'komplizierter_' +'kompatib' +'kombinier' +'kollektive' +'kohlenstoffarme' +'kod' +'knüpf' +'know' +'klu' +'kleinste' +'klargestellt_' +'klare' +'kis' +'kirch' +'kir' +'kill' +'kidnapp' +'ketten_' +'kese' +'kepe' +'kepada' +'kens' +'kende' +'kemu' +'keln' +'kei' +'kea' +'kaya' +'kategori' +'kare' +'kapital_' +'kant' +'kanis' +'kand' +'kanadische' +'kamer_' +'kali_' +'kala' +'kah_' +'jut' +'junt' +'jud' +'jos' +'joga' +'jin' +'jeweilige_' +'jeti' +'jes' +'jeopardi' +'jenseits_' +'jenige_' +'jenem_' +'jat' +'jalan' +'jahre' +'jadi' +'jack_' +'ière' +'ivo' +'ivit' +'ius_' +'itäten_' +'ität' +'itting_' +'itter' +'itra' +'itor' +'itive' +'itier' +'itet_' +'ites_' +'italienische_' +'isung_' +'ister_' +'isten' +'ist' +'isoliert_' +'isk' +'isieren_' +'ished_' +'irrational' +'irgendeinem_' +'irgendeine' +'ipati' +'iones_' +'invoke' +'invites_' +'invited_' +'invitation_' +'investor_' +'investigat' +'intri' +'intra_' +'intl' +'intimate_' +'interpret_' +'interne' +'intermediar' +'interiors_' +'interinstitutionelle' +'interfere_' +'interessante' +'interessant_' +'interess' +'interaktive' +'inter_' +'intent_' +'intensivier' +'intelligente' +'insula' +'instrumente' +'instructi' +'institutionen_' +'institute_' +'instan' +'installations_' +'instabile' +'inspiration' +'insisting_' +'insisted_' +'insig' +'insert_' +'insect' +'inoffiziell' +'innovativen_' +'inneren_' +'innere_' +'inner' 
+'inklusive_' +'injured_' +'inige' +'inhuman' +'inherent_' +'inhaber_' +'inh' +'ings' +'ingredient' +'ingl' +'inger_' +'inga' +'infrastruktur' +'informellen_' +'influences_' +'infla' +'infer' +'ined' +'industrializ' +'indlich' +'individuell_' +'individually_' +'indis' +'indirectly_' +'indigenous_' +'indifferen' +'indicating_' +'indicat' +'inconsisten' +'incompeten' +'incom' +'inal' +'inakzeptabel_' +'importiert' +'impo' +'implement' +'impetus_' +'immunity_' +'imen_' +'imate' +'imaging_' +'ima_' +'ima' +'ilung' +'ilm' +'illustr' +'illiberal' +'illegale' +'illegal' +'ilität' +'ilita' +'ilie' +'ili_' +'ilen_' +'ileg' +'ilat' +'ilan' +'ikation' +'ige' +'iga' +'ifft_' +'iffen_' +'ifen_' +'ießen_' +'ieß_' +'ierungsa' +'iens_' +'ielt_' +'iele' +'iegs' +'iede' +'iebene' +'ieb_' +'idle' +'idier' +'ider_' +'ider' +'identit' +'identisch' +'identifiziert_' +'ideally_' +'idad_' +'icon' +'icken_' +'ick_' +'ichtigt' +'iche' +'ibut' +'ibu' +'iat' +'iali' +'höhe' +'höh' +'höchstens_' +'hö' +'hän' +'häl' +'hypocrisy_' +'hydrocarbon' +'hungry_' +'humo' +'humanitären_' +'hts' +'hrs' +'house' +'hostile_' +'hostage_' +'horse_' +'horn' +'hore_' +'hopeful' +'honor_' +'homosexual' +'hofft' +'hof' +'hoc_' +'hnen' +'hman' +'hkan_' +'historically_' +'hip' +'hing' +'hinein' +'hindern_' +'hinder_' +'hinausgeh' +'hilfsbereit_' +'hilfs' +'hilfen_' +'hila' +'highlight' +'hierarch' +'hful' +'heti' +'hetero' +'hervorr' +'hervor' +'hers' +'herrschaft_' +'herein_' +'herd' +'herbeizuführen_' +'herausstellen_' +'heran_' +'herab' +'hent' +'hens' +'hend' +'hemi' +'heme' +'heirate' +'hedge' +'hed' +'hebt_' +'healing_' +'hea' +'hazard' +'haushaltspolitische' +'haupt' +'haul_' +'hath_' +'hase_' +'hars' +'harmonisiert' +'harmonis' +'hari_' +'harbour_' +'har_' +'hang' +'handl' +'handicap' +'hag' +'hace' +'haber_' +'habe' +'güter_' +'gänge' +'gw' +'guy' +'gur' +'gungs' +'gsa' +'gründ' +'grundsätzliche' +'grundlegend' +'großzügige_' +'groundwater_' +'grossen_' +'grosse' +'griffen_' +'griff_' +'grenzübergreifende' +'grenz' +'green' +'greed' +'grave_' +'grat' +'grass_' +'graphic_' +'graph' +'gram_' +'grafische' +'graduate' +'grade' +'glä' +'glich' +'gleichberechtigt' +'glaubten_' +'glas_' +'gische_' +'girl' +'giga' +'gifts_' +'gian' +'ghan' +'ggen' +'geza' +'gewor' +'gewohn' +'gewissem_' +'gewinnt_' +'getä' +'getro' +'getre' +'getestet_' +'geta' +'gesunken_' +'gesund' +'gestützt_' +'gestellten_' +'geste' +'gespräche_' +'gesp' +'geschätzte' +'geschäfte_' +'geschä' +'geschenkt_' +'geschah_' +'gescha' +'gesamte' +'geräten_' +'gerisch' +'gerichtete_' +'gerech' +'gere' +'geplanten_' +'gepa' +'geordnet' +'geographische' +'gent_' +'genocide_' +'genk' +'genia' +'generieren_' +'generator' +'genehmigt' +'gene_' +'gende_' +'genaue_' +'gemütliche' +'gemeldet' +'gemeinsames_' +'gemachte' +'geltend_' +'gelenk' +'geleitete' +'gelegene_' +'geleg' +'gelb' +'gekü' +'gekämpft_' +'geiz' +'gei' +'geho' +'geheim' +'gefällt_' +'gefährlich' +'gefä' +'gefasst_' +'geeignet' +'geehrte_' +'gedie' +'gede' +'ged' +'gebraucht_' +'gebot' +'gebor' +'gebildete' +'gebene' +'gb' +'gau_' +'garr' +'garette' +'garantierte' +'gant' +'gang' +'gaming_' +'galt_' +'galax' +'gag' +'fürchten_' +'fünfte' +'fühlte' +'fähige' +'fähig' +'furthermore_' +'furt' +'furnishings_' +'funktionierende' +'funktionen_' +'fund' +'fun' +'fulfilling_' +'fuer' +'ftl' +'frustrated_' +'fru' +'frischen_' +'friend' +'friedlich_' +'freundschaft' +'freundlich' +'fres' +'freight_' +'freies_' +'französischer_' +'frankly_' +'fotografi' +'foster' +'fortgeführt_' +'formulier' +'formulate' +'formul' +'formel' 
+'foreg' +'forecasts_' +'foo' +'folk' +'foc' +'fläch' +'fluss_' +'flourish_' +'flour' +'flotte' +'flos' +'florier' +'flora_' +'flop' +'flood' +'fliegen_' +'fleisch' +'flas' +'fishermen_' +'fischen_' +'fis' +'firma' +'firewall' +'fires_' +'finest_' +'find' +'financially_' +'filter_' +'filter' +'fika' +'fighters_' +'fifteen_' +'fierce' +'fielen_' +'ffnung' +'ffende' +'ffel' +'ffe_' +'festlegen_' +'festivals_' +'festi' +'festges' +'festgehalten_' +'fes' +'fertigte' +'fertige' +'fere' +'fens' +'feine' +'feindlichen_' +'fehlge' +'fed' +'fast' +'fassung' +'fass' +'fashion' +'fascis' +'fare' +'farb' +'faktisch' +'fairer_' +'fad' +'factories_' +'ezieh' +'ez' +'exzellente' +'extremist' +'extraordinarily_' +'extracti' +'extern' +'extends_' +'exposed_' +'exporter_' +'exploring_' +'explains_' +'expire' +'experiments_' +'expedi' +'expectation_' +'existent' +'execute_' +'exe_' +'excite' +'excessively_' +'excel' +'examined_' +'evil_' +'eventual_' +'eve_' +'evade' +'europaweite' +'europaweit_' +'europa' +'europ' +'eur' +'etzungs' +'ette' +'ett_' +'etr' +'etisch' +'eties_' +'etie' +'etho' +'ethnische_' +'ethischen_' +'etan' +'estimat' +'ester_' +'espo' +'esh' +'esco' +'escalati' +'erörtert_' +'erörtern_' +'eröffnete' +'erzähl' +'erzeugte' +'erwähnte_' +'erwies_' +'erweiterte_' +'erweise_' +'erwartete' +'erw' +'erve' +'erto' +'ertig' +'erstes_' +'erstaunliche' +'erstaun' +'erstattung' +'erspar' +'erschien_' +'erschaffen_' +'eros' +'erobe' +'ernsthaften_' +'erni' +'erneute_' +'erneuerbaren_' +'erneuerbare_' +'ermu' +'erledigen_' +'erlan' +'erj' +'erinner' +'erier' +'erhöhungen_' +'erhobene' +'erhalte' +'ergänzende' +'ergen' +'ergebnisse' +'ergeb' +'erfreu' +'erfolgreicher_' +'erfahrene' +'erfahr' +'ereigne' +'erect' +'erbr' +'eras' +'erarbeitete' +'equal' +'episodes_' +'env' +'entspannten_' +'entsende' +'entschuldig' +'entries_' +'entrant' +'entrance_' +'entm' +'entlich_' +'entities_' +'entirety_' +'enthusiasm_' +'entha' +'entgehen_' +'entgegen' +'enswerte_' +'ension' +'ensh' +'enriche' +'enp' +'enorm' +'enl' +'englis' +'engineers_' +'engaging_' +'engagieren_' +'enforced_' +'energi_' +'energi' +'enduring_' +'endung' +'endorsed_' +'endi' +'endete' +'endemi' +'ency' +'encryption_' +'encouragement_' +'encountered_' +'encompassing_' +'encompass' +'ena_' +'empower' +'empl' +'empiri' +'empha' +'empfind' +'empfiehlt_' +'empfehle' +'empe' +'emotional' +'emer' +'embryon' +'embass' +'embarrass' +'eman' +'eln' +'ellungs' +'ellte' +'elle' +'elin' +'elig' +'elevated_' +'elevat' +'elemente' +'elem' +'eleganten_' +'elegan' +'elan' +'ektiv' +'ektion' +'eks' +'eke' +'eitige' +'eitet_' +'eisung' +'eisen' +'eis_' +'einzutreten_' +'einzuh' +'einzugehen_' +'einzub' +'einzigartige_' +'einzigartig' +'einz' +'einw' +'eintritt_' +'einstimmig_' +'einsti' +'einstellungen_' +'einste' +'einsa' +'einrichten_' +'einkommens' +'einhergehen' +'einher' +'einheimischen_' +'eingest' +'eingesp' +'eingeschl' +'eingeräumt_' +'eingeleitet_' +'eingeladen_' +'eingehe' +'einfl' +'eindruck' +'eindeutige' +'einander' +'eih' +'eigen_' +'eigen' +'eift' +'eien_' +'eichnet_' +'ehrung' +'ehrgeizige_' +'ehrgeizige' +'ehn' +'ehmen_' +'ehens' +'ehe_' +'egg_' +'efor' +'eff' +'ef_' +'editing_' +'edit_' +'eding' +'eder' +'ection_' +'eci' +'echse' +'ebenen_' +'dämm' +'dynamische' +'dyna' +'dwindl' +'dust_' +'durchschnitt' +'dumping_' +'dumm' +'dum' +'dua_' +'drohende' +'droh' +'dritter_' +'dring' +'dres' +'dramatische_' +'dox' +'downs_' +'downloading_' +'downloade' +'dose_' +'dore' +'doppelt_' +'doppel' +'dong' +'dominiert_' +'dominated_' +'domains_' +'dom' 
+'dogs_' +'dog' +'documentation_' +'dn' +'dk' +'diving_' +'diversi' +'divergen' +'disziplin' +'distributions_' +'distract' +'distinct_' +'disso' +'disre' +'dispo' +'displa' +'dispe' +'dispa' +'diskriminierung' +'discriminat' +'discovering_' +'discount_' +'disclose_' +'disciplin' +'disappointing_' +'disagreement' +'disadvantage' +'dire_' +'diplomatische' +'dip_' +'dioxid' +'dio' +'diminish' +'dimensions_' +'dili' +'dilemma_' +'dikti' +'digt_' +'dif' +'diet_' +'dies' +'dienste' +'dick' +'dichte' +'diagnosti' +'dez' +'devo' +'devis' +'developer' +'devaluation' +'deutscher_' +'deutsch' +'deut' +'determining_' +'determin' +'deter_' +'deter' +'detection_' +'detai' +'destinations_' +'designated_' +'deserv' +'ders' +'derogations_' +'derartig' +'depression_' +'depreciation_' +'depreciat' +'depre' +'depos' +'dependency_' +'depan_' +'denomination' +'dende' +'demonstration_' +'demonstr' +'delte' +'deliberate' +'delegati' +'delays_' +'deko' +'dek' +'deinem_' +'deflation' +'definitive' +'definitions_' +'definierte' +'definieren_' +'defer' +'defensive_' +'defa' +'deeper_' +'deepen' +'decreas' +'decoupling_' +'deciding_' +'decides_' +'debattiert' +'deal' +'dauernd' +'date' +'dargelegt' +'dankbar_' +'dana_' +'damaligen_' +'damalige_' +'damaging_' +'dahin' +'cynic' +'customize' +'custom_' +'curr' +'cure' +'cula' +'ctable_' +'crown' +'crossing' +'cron' +'criticise' +'criterion_' +'criminali' +'cove' +'coupon' +'coun' +'couldn_' +'cott' +'corresponds_' +'correlation' +'corrections_' +'correction_' +'correct' +'corr' +'corpora' +'cornerstone' +'corn' +'cooperative_' +'cooked_' +'coo' +'convers' +'contribu' +'contradiction' +'contra' +'continuously_' +'continuous_' +'continents_' +'contest_' +'contemp' +'cont' +'consume' +'consultations_' +'constructi' +'constitution' +'conso' +'consent_' +'connectivity_' +'conjunction_' +'congress_' +'congratulations_' +'confort_' +'conform' +'conflict' +'configured_' +'confess' +'conductors_' +'conditione' +'concessions_' +'concert_' +'concert' +'composit' +'composer' +'complied_' +'complementary_' +'complement_' +'comple' +'complacen' +'compensat' +'compelling_' +'compe' +'comparable_' +'communa' +'commu' +'commend' +'comf' +'comb' +'colorful_' +'color' +'colony_' +'collecti' +'collect' +'collapsed_' +'coi' +'coe' +'cob' +'coastline' +'coach_' +'cluster_' +'clus' +'cloth' +'clos' +'climb_' +'clearer_' +'clean' +'clamp' +'cking_' +'cited_' +'cisi' +'circles_' +'circ' +'cipa' +'cion_' +'cil_' +'cic' +'chw' +'chuld' +'chu' +'chtung' +'chtigt' +'chter_' +'chtbar' +'chste' +'chst' +'chor' +'choi' +'chk' +'chips_' +'ching' +'chi_' +'chei' +'cheat' +'chat_' +'chat' +'chase' +'charit' +'charakteristisch' +'chapter' +'challenge' +'chairs_' +'cerca' +'centrali' +'center' +'cens' +'celebration_' +'cease' +'cci' +'categori' +'cated_' +'cate_' +'catalyst_' +'carri' +'card' +'capa' +'cancell' +'camps_' +'cals_' +'call' +'calculation' +'calculated_' +'caf' +'cabl' +'bürge' +'busi' +'bureaucratic_' +'bureaucra' +'burdens_' +'bundle' +'bukan' +'bud' +'buck' +'buchs' +'brücke' +'bräu' +'broaden' +'broad' +'bridge' +'brid' +'breites_' +'breakthrough_' +'breaking_' +'breach_' +'brat' +'brand' +'branche' +'brai' +'brac' +'boutique_' +'bout' +'borrow_' +'bone_' +'bond' +'bombs_' +'bold' +'boh' +'board' +'blä' +'blutigen_' +'blutig' +'blue' +'bloods' +'blo_' +'blier' +'bliebe' +'blicken' +'blick' +'blen_' +'bkommen_' +'bj' +'bits_' +'biss' +'bish' +'bip' +'biologischen_' +'biologis' +'biolog' +'bination' +'billigen_' +'bilie' +'bile' +'bilaterale' +'bila' +'bicycle' +'bezogene' +'beziehungsweise_' 
+'bezeichnete' +'bewä' +'bewussten_' +'bewirkt_' +'bewerten_' +'beweg' +'bew' +'bevorstehende' +'bev' +'beurteilen_' +'beträchtliche_' +'beträ' +'betrug_' +'betreffend_' +'betray' +'besuchten_' +'bestätig' +'bestraft_' +'bestmögliche' +'bestimmungen_' +'bester_' +'bestehe' +'besseres_' +'besonderem_' +'besitz_' +'besiege' +'besichtig' +'beseitig' +'bese' +'beschädigt_' +'beschränkung' +'beschränkte' +'beschl' +'berühr' +'berühmteste' +'berüc' +'beruh' +'berufliche_' +'berufliche' +'bern_' +'berkembang_' +'berk' +'berichterstatt' +'berichten_' +'bericht' +'bereitzustellen_' +'bereite' +'berda' +'berb' +'bequeme' +'beobacht' +'bent' +'bemerkenswerte_' +'beme' +'bem' +'belonging' +'believ' +'beliebige_' +'belgischen_' +'beleb' +'belastet_' +'belast' +'bekämpft' +'bekräftigt_' +'bekräftig' +'bekanntlich_' +'beiträgt_' +'beitreten_' +'beiten_' +'beit_' +'beispiellose' +'beispielhaft' +'bein' +'behält_' +'behindertenfreundliche_' +'behi' +'beherrsch' +'beher' +'behandlung' +'begrenzte_' +'begreifen_' +'begeistert' +'befolgt_' +'befand' +'beer_' +'beeindruckende' +'beein' +'bedien' +'bedenke' +'bedenk' +'beautifully_' +'beating' +'beat_' +'beamte' +'beam_' +'beam' +'beachtet_' +'beabsichtigt_' +'bd' +'baute' +'baue' +'basket' +'basierende' +'barometer' +'bares_' +'barem_' +'bao_' +'bankruptcy_' +'banken_' +'bands_' +'ballot_' +'balances_' +'baik_' +'backward_' +'backd' +'awful_' +'awaken' +'await_' +'aven' +'avant' +'autoritären_' +'autor' +'automated_' +'autob' +'authorize' +'authentic' +'ausüben_' +'auszugleichen_' +'auszud' +'auszubauen_' +'ausspr' +'ausse' +'ausschließ' +'ausmachen_' +'auslös' +'ausgezeichnet_' +'ausgez' +'ausgewählten_' +'ausgewogenen_' +'ausgestellt' +'ausgeprägte' +'ausgelegt_' +'ausgehende' +'ausgeh' +'ausgebildete' +'ausg' +'ausführlich_' +'ausf' +'ausd' +'ausbe' +'augment' +'aufzuz' +'aufwa' +'auftragten_' +'aufstrebende' +'aufle' +'aufhören_' +'aufgestellt_' +'aufen_' +'aufeinander' +'aue' +'audiovisual_' +'auchen_' +'atur' +'attraktiv_' +'attitudes_' +'attent' +'attacked_' +'ato' +'atively_' +'ations' +'ational_' +'ation' +'atie' +'atemberaubende' +'asur' +'astronom' +'astonish' +'asti' +'aster_' +'assung_' +'assiste' +'assist' +'asses' +'ass' +'asil' +'ash_' +'asch' +'asc' +'aru' +'artner' +'articulate' +'arte_' +'arrives_' +'arri' +'arkt_' +'ark_' +'aring_' +'ardo' +'architectural_' +'arbe' +'approximat' +'appreciation_' +'applicant_' +'appease' +'appear' +'appeal' +'appalling_' +'app_' +'aper' +'anzusehen_' +'anze' +'anza' +'antwort' +'antra' +'antly_' +'antin' +'antik' +'antibiotics_' +'antara_' +'answered_' +'anstr' +'anstelle_' +'ansta' +'anspruch' +'anspr' +'anor' +'announc' +'anniversary_' +'annex' +'ankurbel' +'ankung' +'animation' +'anima' +'ania' +'anhe' +'anhaltende_' +'anh' +'angu' +'angle_' +'angez' +'angewendet_' +'angewa' +'angeschlossen_' +'angeh' +'angeführt' +'angebotenen_' +'anfällig_' +'aneinander_' +'anderung' +'andern_' +'andards_' +'anbieter' +'anbet' +'analog' +'ams' +'amount' +'amerikanische' +'aly' +'aluminum_' +'altige' +'alti' +'alor' +'allocation_' +'allgemeiner' +'allge' +'aller' +'allen' +'alkohol' +'aliz' +'alische' +'algorithms_' +'ald_' +'alas' +'alarming' +'alarmier' +'aktuelle' +'akibat' +'akh' +'aket' +'aken_' +'ais_' +'ains' +'aining_' +'ahrt_' +'agu' +'ags' +'agree' +'agne' +'agn' +'aggressi' +'aggravat' +'agentur' +'afts' +'aften_' +'affiliate' +'affe' +'advoca' +'advisor' +'advisers_' +'advanc' +'adr' +'adore' +'ador' +'admi' +'adjusted_' +'additi' +'adding_' +'acu' +'activate_' +'acquire_' +'acion' +'acies_' +'achse' 
+'achievable_' +'achi' +'accumulate' +'accordingly_' +'accord' +'accomplish_' +'accompany_' +'accom' +'accidental' +'access' +'abzuwe' +'abz' +'abweichen' +'abus' +'abstract' +'absti' +'abschließend_' +'abra' +'abolish' +'ables_' +'ablehn' +'abit' +'abgeschnitten_' +'abger' +'abgeb' +'aber' +']]), ' +'].' +'Zö' +'Zweit' +'Zwangsv' +'Zuwanderer' +'Zusätzlich' +'Zurich_' +'Zun' +'Zuerst_' +'Zube' +'Zor' +'Zoom' +'Zoo_' +'Zionis' +'Zielsetzung' +'Ziels' +'Zentrali' +'Zel' +'Zehn' +'Yugoslav_' +'YouTube_' +'Yi' +'Yel' +'Yacht' +'Xinjiang_' +'Xin' +'Xe' +'Wörter_' +'Wört' +'Wählern_' +'Worse_' +'Working_' +'Worker' +'Wonder' +'Wollen_' +'Woll' +'Wochenende_' +'Wirtschaftsw' +'Wirtschaftse' +'Wirkung' +'Willkommen_' +'Williams_' +'Wild_' +'Wil' +'Wiederbelebung_' +'Whatever_' +'Wette_' +'Westjordanland_' +'Werke' +'Wen_' +'Weltmarkt' +'Weltkriegs_' +'Weltgesundheitsorganisation_' +'Wellen' +'Welle_' +'Weißbuch_' +'Weiterver' +'Wehr' +'Week' +'Wasserstoff' +'Wassers' +'Want_' +'Wales_' +'Wahrscheinlich' +'Waf' +'WWII_' +'WE_' +'Vulkan' +'Vr' +'Vorwand_' +'Vortr' +'Vorsicht_' +'Vormittag_' +'Vorm' +'Vorhan' +'Vorg' +'Vorb' +'Vollst' +'Voice_' +'Vita' +'Visa' +'Vila' +'Vig' +'Vietnam' +'Vie' +'Victor' +'Veto' +'Verwundbarkeit' +'Verwend' +'Verv' +'Verurteilung_' +'Vertriebs' +'Vertreibung' +'Vertiefung_' +'Versuchen_' +'Versu' +'Verstöße' +'Verstärkung_' +'Verständ' +'Verschärf' +'Verschuldung_' +'Verschmutzung_' +'Versch' +'Versagen_' +'Vermögenswerten_' +'Vermittlung_' +'Verlierern_' +'Verknüpfung' +'Verkehrsanbindung_' +'Veridian_' +'Verhältnisse_' +'Verhinderung_' +'Verhaftung_' +'Verfechter_' +'Verfassungsvertrag_' +'Verdienst' +'Verbrennung' +'Verbreche' +'Verbindungs' +'Verarbeitung' +'Verabschiedung_' +'Vend' +'Vat' +'Vas' +'Vall' +'Vali' +'Valent' +'VIP_' +'VII' +'VC_' +'Uruguay_' +'Ursprungs' +'Ursache_' +'Urb' +'Unzufriedenheit_' +'Unz' +'Unw' +'Unver' +'Untu' +'Unternehmenss' +'Untergang' +'Unterg' +'Unterb' +'Unless_' +'Ungleichgewichte_' +'Ungeachtet_' +'Unemployment_' +'Unein' +'Unabhängig' +'Umweltbe' +'Umweltausschuss_' +'Umwelta' +'Umsch' +'Ultra_' +'Ult' +'Ukrain' +'Uhr' +'Ufer_' +'Ud' +'UNI' +'UNHCR_' +'UND_' +'UA' +'Tür' +'Täuschung' +'Ty' +'Tube' +'Tschechische' +'Tsa' +'Trü' +'Trump' +'Truc' +'Trop' +'Trock' +'Trip' +'Trinkwasser' +'Trin' +'Tribu' +'Trial' +'Trennung_' +'Traum' +'Transparen' +'Tran' +'Tram' +'Trainings' +'Training_' +'Train_' +'Trail' +'Touristen' +'Toulouse_' +'Touch' +'Toten_' +'Tol' +'Titel' +'Titan' +'Tit' +'Tip' +'Tin' +'Tierschutz' +'Tiefe' +'Tie' +'Tibetan' +'Tia' +'Thyssen' +'Threa' +'Thr' +'Tho' +'Therma' +'Thema' +'Textes_' +'Termine_' +'Tennis_' +'Templates_' +'Telekommunikations' +'Techniken_' +'Tauche' +'Tastatur' +'Tank' +'Tang' +'Tamp' +'Tam' +'Tai' +'Tahrir_' +'Tabellen_' +'TTIP_' +'TS_' +'TRA' +'TG' +'TFT' +'Sünden_' +'Südwest' +'Sá' +'Synth' +'Symptom' +'Symp' +'Symbol' +'Swe' +'Sustainable_' +'Surf' +'Supp' +'Superior_' +'Super_' +'Sunni_' +'Subsidiarit' +'Stö' +'Sty' +'Student_' +'Stro' +'Stress' +'Strecke_' +'Strategi' +'Strat' +'Strassen_' +'Stran' +'Strafgericht' +'Strafe' +'Stol' +'Stoffen_' +'Stipendi' +'Still' +'Stick' +'Stich' +'Steve_' +'Steuersenkungen_' +'Stern_' +'Sterb' +'Steph' +'Step' +'Stellvertreter_' +'Stellungnahmen_' +'Stellenwert_' +'Steinberg_' +'Steel' +'Statisti' +'Stadtteil_' +'Stadium_' +'Stable_' +'Stabilisierung' +'Staatsanw' +'Staat' +'Später_' +'Spyware_' +'Spy' +'Spr' +'Sporta' +'Spion' +'Speak' +'Spannung' +'Spanische' +'Spaniens_' +'South' +'Sonntag_' +'Song' +'Sonders' +'Sonderbe' +'Somet' +'Solution_' 
+'Solidarity_' +'Sofi' +'Socialists_' +'Smart' +'Slovakia' +'Sli' +'Skript' +'Ski_' +'Skepti' +'Ske' +'Sixt' +'Sitzung' +'Sis' +'Singh' +'Simply_' +'Silva_' +'Silicon' +'Signal' +'Sig' +'Sicherheitsst' +'Sicherheitsrates_' +'Sicherheitskräfte_' +'Sich_' +'Show' +'Short_' +'Sheraton_' +'Sham' +'Shaf' +'Shadow' +'Sev' +'Setz' +'Sensibili' +'Sender_' +'Seminar_' +'Select' +'Segment' +'Securit' +'Screen' +'Scottish_' +'Schätz' +'Schäd' +'Schwung_' +'Schwimm' +'Schwelle' +'Schwed' +'Schwarzen_' +'Schutzmaßnahmen_' +'Schutze' +'Schur' +'Schuldner_' +'Schuldenerlass_' +'Schottland_' +'Schnittstelle_' +'Schn' +'Schmu' +'Schmit' +'Schmi' +'Schlussfolgerung_' +'Schließung_' +'Schim' +'Schiffen_' +'Schar' +'Schan' +'Schall' +'Scar' +'Scanne' +'Scal' +'Saudis_' +'Satelliten_' +'Sant_' +'Sample' +'Sammel' +'Sal' +'Saison' +'Saf' +'Sacr' +'Sachver' +'Sachs_' +'SY' +'SV' +'SSL' +'SSE' +'SR' +'SN' +'SK' +'SITE_' +'SIM' +'SDR' +'SARS_' +'SAP_' +'Rückz' +'Rückschlag_' +'Rückg' +'Röm' +'Räume' +'Russische' +'Rus' +'Rumsfeld_' +'Rov' +'Route_' +'Rotarian' +'Rota' +'Romulan' +'Romans_' +'Romano' +'Rol' +'Rog' +'Roc' +'Road' +'Rival' +'Risk' +'Rindfleisch' +'Rin' +'Rif' +'Ride' +'Richtungen_' +'Rh' +'Revolutionary_' +'Revol' +'Resultate_' +'Restaura' +'Resources_' +'Resol' +'Reso' +'Repu' +'Representative' +'Rentner' +'Renov' +'Reli' +'Release' +'Relati' +'Rein' +'Reichweite_' +'Reich_' +'Regulierungen_' +'Regulat' +'Regionalpolitik_' +'Regierungsvertreter' +'Regierungsführung_' +'Regierungse' +'Regent' +'Regens' +'Regel' +'Rega' +'Referenz' +'Redner' +'Recon' +'Rechtsp' +'Rechtsakt' +'Rechtfertigung_' +'Recht' +'Rechenschaft_' +'Rebellen' +'Realis' +'Reakt' +'Rav' +'Raus' +'Ratspräsident' +'Rassismus_' +'Rasse' +'Range' +'Ramada' +'Rak' +'Rai' +'Rag' +'Radi' +'Rabatt' +'RU' +'ROM' +'RL' +'Quit' +'Quartet' +'Quartal' +'Quality_' +'Qualifi' +'Quadra' +'QU' +'Py' +'Purvis_' +'Pur' +'Puffer' +'Präventi' +'Prämi' +'Prozessor_' +'Provokation' +'Provi' +'Protest_' +'Protektionismus_' +'Prophet' +'Prope' +'Prog' +'Profite_' +'Production' +'Produ' +'Probe' +'Privatsphäre_' +'Privathaus' +'Price' +'Preventi' +'Prestige_' +'Prese' +'Preiss' +'Portug' +'Porti' +'Popular' +'Poor_' +'Poly' +'Politis' +'Polic' +'Points_' +'Plug_' +'Ple' +'Platte' +'Platform_' +'Plasma' +'Pix' +'Pitt' +'Pira' +'Picture' +'PiS_' +'Photo_' +'Pflege' +'Pferde' +'Pfei' +'Petitions_' +'Petitions' +'Petition' +'Pesti' +'Peru' +'Persönlichkeit' +'Peripherie_' +'Period' +'Pere' +'Pentax_' +'Pensionen_' +'Penis' +'Peninsula_' +'Pear' +'Pav' +'Pauls' +'Patente_' +'Passi' +'Passagiere_' +'Partition_' +'Partie' +'Parlamentsabgeordnete' +'Pare' +'Panzer' +'Panasonic_' +'Palästin' +'Palais_' +'Paket' +'Pai' +'Pack' +'PV_' +'PU' +'PT' +'POS' +'PM_' +'PE_' +'Oz' +'Outdoor_' +'Organismen_' +'Optimierung' +'Omniture_' +'Om' +'Okt' +'Ohren_' +'Oft' +'Offs' +'Official_' +'Occ' +'Obst' +'Obr' +'Objekte_' +'Objekt_' +'Objekt' +'Obers' +'OR' +'ONE_' +'OE' +'Nächte' +'Ny' +'Nuclear_' +'Novo' +'Nov' +'Nots' +'Noti' +'Noten' +'Norwege' +'Norway_' +'Norw' +'Normalerweise_' +'Norde' +'Nordamerika' +'Nikotin' +'Nieders' +'Nicholas_' +'Ng' +'Neuigkeiten_' +'Neug' +'Neue' +'Neube' +'Neub' +'Neuan' +'Netze' +'Network' +'Nephi_' +'Need_' +'Need' +'Nazis_' +'Navy_' +'Navig' +'Natursch' +'Native_' +'Nationalstaaten_' +'Nationalist' +'Nationale_' +'Namun_' +'Nai' +'Nahverkehr' +'Nachteil_' +'Nachbarschafts' +'Münzen_' +'Möchte' +'Mé' +'Mär' +'Mängel_' +'Muss' +'Musk' +'Musiker_' +'Musi' +'Muni' +'Mun' +'Mum' +'Movi' +'Movement_' +'Mosc' +'Montp' +'Montenegro_' +'Montage' 
+'Monaco_' +'Modells_' +'Mobili' +'Mits' +'Mitgliedstaat' +'Mitgefühl_' +'Mischung_' +'Ministr' +'Minimum_' +'Minderheiten' +'Million' +'Millennium' +'Militär_' +'Miles_' +'Mig' +'Messung_' +'Merkmale' +'Mercur' +'Menschenrechtsverletzungen_' +'Meinungsverschiedenheiten_' +'Mehrwert_' +'Mehr' +'Megapixel' +'Meg' +'Meer' +'Medit' +'Medikamenten_' +'Medikamente_' +'Medicine' +'Maur' +'Matth' +'Matte' +'Mathematik_' +'Mathe' +'Massenvernichtungswaffen_' +'Massen_' +'Massagen_' +'Massage' +'Massachusetts_' +'Maschinen' +'Mary' +'Marshall_' +'Marra' +'Maritime_' +'Marie' +'Margaret_' +'Marco' +'Marbella_' +'Mao' +'Mant' +'Mano' +'Mannschaft_' +'Mann' +'Mandat' +'Malay' +'Mahm' +'Magne' +'Magn' +'Maf' +'Madr' +'Made' +'Maci' +'Machthaber_' +'Machi' +'Macedonia_' +'Maca' +'MID' +'MDGs_' +'MAR' +'MADRID_' +'Luftfahrt' +'Luca' +'Lub' +'Lore' +'Looking_' +'Loo' +'Lond' +'Lodge_' +'Lob' +'Ll' +'Lith' +'Lita' +'List' +'Liquidität_' +'Lip' +'Lion' +'Linu' +'Limited_' +'Lieferungen_' +'Lieferant' +'Lieblings' +'Liebe' +'Lichte_' +'License' +'Letztere_' +'Letter' +'Lek' +'Leistungsfähigkeit_' +'Leipzig_' +'Legitimation_' +'Legislative_' +'Lef' +'Led' +'Lebensbedingungen_' +'Learning_' +'Laur' +'Laufwe' +'Laser_' +'Laos_' +'Lanka_' +'Langstrecken' +'Landschaft' +'Lande_' +'Lance' +'Lamanites_' +'Lack' +'LS' +'LP' +'LOS_' +'LICH' +'LES' +'LDP_' +'Kürzungen_' +'Künstler' +'Könnte_' +'Kurzu' +'Kurve' +'Kun' +'Kumari_' +'Krugman_' +'Krone' +'Krist' +'Krim' +'Kriegsverbreche' +'Kreditk' +'Kreativität_' +'Krat' +'Krank' +'Kraftfahr' +'Korrekt' +'Koordination_' +'Kooperationsabkommen' +'Konzern' +'Konzepte_' +'Konve' +'Kontroverse' +'Kontrast_' +'Konstruktion' +'Konkur' +'Kondition' +'Kompromisse_' +'Kompe' +'Kommunistische' +'Kommissare_' +'Kommentare_' +'Kode' +'Kne' +'Klimasch' +'Klassen_' +'Klassen' +'Klagen' +'Kissinger' +'Kind' +'Kha' +'Kenne' +'Keep_' +'Kaz' +'Katzen' +'Kategorien_' +'Kaste' +'Kasach' +'Karten' +'Karr' +'Karl_' +'Kare' +'Kapitalst' +'Kapell' +'Kanzle' +'Kandidatenländer' +'Kamera' +'Kambodscha_' +'Kalt' +'Kalifornien' +'Kalif' +'Kad' +'Kabine' +'KB_' +'Jörg_' +'Jup' +'Juncker_' +'Julian_' +'Jugendherberge_' +'Jos' +'Johannes_' +'Jiang_' +'Jel' +'Jazz' +'Jarzembowski_' +'Jame' +'JPEG_' +'JP' +'Iss' +'Isolation_' +'Islamischen_' +'Iron' +'Iri' +'Ira' +'Ion' +'Inva' +'Intr' +'Interview' +'Interv' +'Intern' +'Interi' +'Interess' +'Inten' +'Integrat' +'Inspektor' +'Insel' +'Inn' +'Inkrafttreten_' +'Ink' +'Ini' +'Informations_' +'Industrien' +'Indikatoren_' +'Indikat' +'Indian' +'Inde' +'Inc' +'Implementi' +'Imperi' +'Ih' +'If' +'Identitäten_' +'Iber' +'IX' +'ISS' +'INS' +'IND' +'INC' +'Hürde' +'Höh' +'Häusern_' +'Häuser_' +'Hut_' +'Hut' +'Hus' +'Humanit' +'Hug' +'Hu_' +'Hoste' +'Hospi' +'Honor' +'Home' +'Hoheit' +'Hohe' +'Hochzeit' +'Hob' +'Hirsch' +'Hip' +'Hinterl' +'Hinsichtlich_' +'Himm' +'Highlight' +'Hierbei_' +'Herzego' +'Herunter' +'Herstell' +'Herkunftsl' +'Herberge_' +'Hell' +'Held' +'Hektar_' +'Heimat_' +'Heid' +'Hegemonie_' +'Heer' +'Heating_' +'Head_' +'Head' +'Haza' +'Haw' +'Havel_' +'Haushaltsk' +'Haushalten_' +'Hauptziel' +'Hauptg' +'Harry_' +'Hardliner' +'Handy_' +'Hands' +'Handelsd' +'Halbjahr_' +'Haft_' +'Had_' +'Habr' +'Gut_' +'Gus' +'Gul' +'Guid' +'Guer' +'Guantánamo_' +'Gründungs' +'Grö' +'Grä' +'Grundwerte_' +'Grundv' +'Grundge' +'Gru' +'Green' +'Grap' +'Grant_' +'Grand' +'Granada_' +'Gran_' +'Gou' +'Got' +'Goe' +'Glückw' +'Globalis' +'Globale_' +'Glaube_' +'Glacier_' +'Gl' +'Giu' +'Gitarren' +'Gir' +'Ghana_' +'Gewässer' +'Gewi' +'Gewebe' +'Getreide' +'Gesundheitswesen_' 
+'Gesundheitsschutz' +'Gestern_' +'Gestalt_' +'Gesta' +'Gesichtspunkt' +'Gesetzen' +'Geschäftsreise' +'Geschäftsführ' +'Geschmack' +'Geschichts' +'Geschenk_' +'Gescheh' +'Geräten_' +'Georgian_' +'Gemeinschaftsrecht' +'Gemeinsame_' +'Gemeinsam_' +'Gegebenheiten_' +'Gefangene' +'Gefa' +'Gedanke' +'Ged' +'Gebühren_' +'Gebietskörperschaften_' +'Gaul' +'Gau' +'Gastl' +'Garc' +'Ganzes_' +'Gandhi_' +'Galic' +'Galax' +'Gai' +'GT' +'GMO_' +'GM' +'GIMP_' +'GF' +'GBP_' +'Fürsten' +'Fürs' +'Führungsp' +'Führungen_' +'Führer' +'Fäl' +'Fäh' +'Fuss_' +'Funktionieren_' +'Funktionalität_' +'Full_' +'Fue' +'Früher' +'Frucht' +'Fronte' +'Frist_' +'Friedh' +'Freilassung_' +'FreeBSD_' +'Frattini_' +'Frassoni_' +'Franz_' +'Franz' +'Fox_' +'Fourth_' +'Fotograf' +'Fortschritts' +'Forst' +'Formulierung_' +'Forme' +'Forex_' +'Folter_' +'Folgende' +'Flüge_' +'Flü' +'Flugver' +'Flucht' +'Flach' +'Fl' +'Fitnessraum_' +'Fitnesscenter_' +'Fitness' +'Fiskalp' +'Fischf' +'Finn' +'Finanzt' +'Finanzministeri' +'Finanzma' +'Finanzinstitut' +'Finanziellen_' +'Finanzi' +'Finanzhilfe' +'Finanzdienstleistungen_' +'Filip' +'Figu' +'Field' +'Fie' +'Feu' +'Ferrer' +'Ferr' +'Fernsehs' +'Fen' +'Feli' +'Feinds' +'Fehlern_' +'Fehle' +'Faz' +'Fax' +'Fanati' +'Fan' +'Fail' +'Fahnen' +'Fah' +'FOR' +'FI_' +'FC' +'FBI_' +'Extremismus_' +'Exporte' +'Expansion' +'Exce' +'Exa' +'Everyone_' +'Eurosta' +'Europäer' +'Europe' +'Europarat' +'Eure' +'Eto' +'Ethiopia_' +'Es' +'Erzeug' +'Erwägung_' +'Erwe' +'Ersparnisse_' +'Ernährungs' +'Erlös' +'Erlebnis_' +'Erla' +'Erkenntnisse_' +'Erkenn' +'Erinnerungen_' +'Erika_' +'Eric' +'Erhalt_' +'Erfordernissen_' +'Erfa' +'Erf' +'Ereignissen_' +'Ereignis_' +'Erb' +'Era' +'Equally_' +'Equal' +'Epidemi' +'Entwicklungszusammenarbeit_' +'Entwicklungshilfe_' +'Entspannen_' +'Entschl' +'Entscheidungsfindung_' +'Entscheide' +'Entlassung' +'Entfernung_' +'Entdeckung_' +'Entdecke' +'Engine' +'Engel' +'Energieversorgung_' +'Energieverbrauch' +'Energietr' +'Empfehlung_' +'Emm' +'Emb' +'Eman' +'Elys' +'Eliten_' +'Eli' +'Element' +'Elektrizität' +'Elect' +'Elb' +'Einwohnern_' +'Eintrag_' +'Einstei' +'Einnahme' +'Einla' +'Einig' +'Einheitswährung_' +'Eingriff' +'Eingang_' +'Eindr' +'Eight' +'Eigentums' +'Ehren' +'Effe' +'Edward' +'Economists_' +'Ecol' +'Echt' +'Eben' +'East' +'Earl' +'EV' +'EQ' +'EOS_' +'ENE' +'EME' +'EFSF_' +'EF' +'EEC_' +'EE' +'EA_' +'Durchschnitt_' +'Durchs' +'Duke' +'Duc' +'Dua' +'Dry_' +'Drogenh' +'Drac' +'Dosi' +'Domain' +'Dollars_' +'Dokumenten_' +'Dokt' +'Dod' +'Doctor_' +'Divi' +'Disziplin' +'Distri' +'Disney_' +'Discover' +'Direktor_' +'Direkt_' +'Diamant' +'Devisenwechsel_' +'Device' +'Develope' +'Dess' +'Demonstrationen_' +'Demonstration_' +'Dei' +'Defizit_' +'Definitionen_' +'Deep' +'Deco' +'Deborah_' +'Deal_' +'Davon_' +'Dav' +'Datenbank' +'Darstell' +'Dark_' +'Danube_' +'Dalai_' +'Daily' +'Dafür_' +'Dach_' +'DU' +'DSLR_' +'DF' +'DEL' +'Cyp' +'Cus' +'Cub' +'Crow' +'Cristi' +'Cris' +'Cr' +'Coven' +'Course' +'Coup' +'Cort' +'Corp' +'Cookies_' +'Cookie_' +'Coo' +'Control' +'Conte' +'Contain' +'Const' +'Conservative_' +'Configur' +'Cond' +'Compo' +'Compli' +'Communities_' +'Communis' +'Commonwealth_' +'Commons_' +'Commerc' +'Collection_' +'Co_' +'Cluster' +'Cli' +'Claude_' +'Citi' +'Christi' +'Chief_' +'Chic' +'Chian' +'Chest' +'Chelsea_' +'Charme_' +'Charlotte' +'Champions' +'Catherine_' +'Catalunya_' +'Cart' +'Carol_' +'Cap_' +'Cana' +'Campi' +'Camp_' +'Came' +'Cale' +'Cadiz_' +'CV' +'CK' +'CF_' +'CER' +'C6_' +'Bürgerrecht' +'Bügelservice_' +'Byrne' +'Button_' +'Burning_' +'Bureau_' +'Bul' 
+'Built_' +'Buffe' +'Buen' +'Budget' +'Buddhist_' +'Bucharest_' +'Brüder' +'Brücken' +'Brücke_' +'Brook' +'Broad' +'Bristol_' +'Brief' +'Brasiliens_' +'Brand_' +'Bow' +'Boutique' +'Borr' +'Born' +'Boote' +'Boom_' +'Bolivia_' +'Boliv' +'Bod' +'Blase' +'Blanch' +'Blan' +'Bit' +'Binnenmarkts_' +'Bind' +'Bildschirm' +'Bic' +'Bibl' +'Bewusstsein_' +'Beton' +'Besuchen_' +'Bestec' +'Bestands' +'Besichtigung' +'Besi' +'Beschl' +'Besatzungs' +'Berna' +'Berichterstatters_' +'Berge' +'Bergbau' +'Berei' +'Beratungs' +'Benalmadena_' +'Beleg' +'Belange_' +'Bek' +'Beitrittsländer_' +'Behinderung' +'Begr' +'Befreiung' +'Befehl_' +'Beck' +'Beantwortung_' +'Bauern' +'Batterie' +'Bat' +'Bass' +'Barre' +'Barr' +'Baron' +'Barba' +'Bankr' +'Bala' +'Bak' +'Bagdad_' +'Baby_' +'Baby' +'BP' +'BEA' +'BBC_' +'Außens' +'Außenhandel' +'Autovermietung_' +'Autos_' +'Autorität_' +'Autok' +'Auth' +'Ausstellung' +'Ausser' +'Ausschüsse_' +'Auss' +'Ausrichtung_' +'Ausländer' +'Auslegung' +'Ausge' +'Ausgang_' +'Ausg' +'Ausfuhr' +'Auseinandersetzungen_' +'Auseinandersetzung_' +'Ausdruck' +'Auschecken_' +'Ausbreitung_' +'Ausbau' +'Auktion' +'Aufwand_' +'Aufw' +'Aufschub_' +'Aufrechterhaltung_' +'Auflagen_' +'Aufla' +'Auditor' +'Attrakti' +'Athens_' +'Assozi' +'Asc' +'Arts_' +'Artikels_' +'Arra' +'Aro' +'Arn' +'Argentiniens_' +'Arg' +'Archive_' +'Arbeitszeit' +'Arbeitsplatz' +'Arbeitsmärkte' +'Arbeiter' +'Aqu' +'Application' +'Applause_' +'Appartement_' +'Apollo_' +'Aparthotel' +'Apartamentos_' +'Apache_' +'Anwende' +'Anweisungen_' +'Antw' +'Antrieb' +'Ansta' +'Anschuldigungen_' +'Anschluss' +'Anrei' +'Anleitung_' +'Ankündigung_' +'Ankara_' +'Animat' +'Anhä' +'Angreifer' +'Angeli' +'Anfä' +'Anfa' +'Andy_' +'Analyst' +'Amat' +'Alvaro_' +'Alternative' +'Alte' +'Almost_' +'Allgemeine' +'Alkohol_' +'Ali_' +'Alexanderplatz_' +'Alba' +'Aktiv' +'Akt_' +'Ahmed' +'Ahmadinedschad_' +'Agriculture_' +'Agent' +'Against_' +'Afrikanischen_' +'Afghan_' +'Advi' +'Advent' +'Adress' +'Adam_' +'Ach' +'Aca' +'Abwärts' +'Abstände' +'Absp' +'Abschreckung_' +'Abschnitt' +'Abschluss' +'Abre' +'Abraham_' +'Above_' +'Abn' +'Abhol' +'AX' +'ATION' +'ANY_' +'ALL_' +'AK' +'AE' +'ABAP_' +'AA_' +'A350_' +'A1_' +'; • _' +';' +'93' +'92' +'91' +'84_' +'84' +'825' +'81_' +'78' +'73_' +'71_' +'70' +'57' +'450' +'42' +'320' +'3000_' +'2nd_' +'226' +'204' +'2018_' +'2017_' +'2016_' +'1981_' +'1974_' +'1969_' +'1958_' +'1957_' +'1955_' +'1951_' +'1930er_' +'1918_' +'179' +'158' +'140_' +'13th_' +'125' +'110_' +'10th_' +'104' +'103_' +'100' +'0ern_' +'011' +'010' +'// _' +'.  ' +'.: _' +'...) _' +'.)._' +'. – ' +'.   _' +'. .' +'. ) _' +'. " _' +', ..._' +')|_' +'): «' +'() , _' +'%), _' +'": _' +'")._' +'!”' +'!!!!' +' „ _' +' –&' +' –' +' « _' +'  ' +' ..' 
+' ($_' +' ''' +'™-_' +'€_' +'…_' +'”) _' +'“) _' +'‘' +'ا' +'י' +'ң' +'қ_' +'ін' +'ында' +'ші' +'ть_' +'сын' +'со' +'р_' +'пр' +'пар' +'ных_' +'на_' +'кономи' +'ка_' +'ит' +'ел' +'гі' +'го_' +'га' +'бе' +'ас' +'Т' +'К' +'Г' +'ρ' +'ο' +'Ž' +'ż' +'ška_' +'ý_' +'üße' +'üß' +'ütung' +'ütlich' +'ütige' +'üstet_' +'ürze' +'ürt' +'ürge' +'ürfe' +'ürdige' +'üpf' +'üng' +'ündete' +'ünde_' +'üllt_' +'ührten_' +'ührende' +'ühmt' +'üf' +'ücken_' +'üblich_' +'überzogen' +'übersetz' +'überschü' +'überschreitende' +'überra' +'übernahme' +'überleb' +'überholt' +'übergehen' +'überflü' +'übereinstimmen_' +'übereinkommen_' +'ößt_' +'ött' +'östlichen_' +'öster' +'öst_' +'öse' +'örtlichen_' +'örtliche' +'örte' +'örig' +'ör_' +'ökologisch' +'öhnlich_' +'öhe' +'öglichkeiten_' +'öge' +'öffnete' +'öffnet_' +'öffentliches_' +'öd' +'öcke' +'ôte_' +'ò' +'ï' +'î' +'être_' +'ête' +'ém' +'èr' +'ège_' +'äßig' +'äußerte_' +'äußert_' +'äuser' +'äume_' +'äum' +'ässig' +'ärz' +'ärt' +'ärmeren_' +'ärer_' +'ändler' +'ändiger_' +'änderte' +'ämte' +'ällt_' +'äle_' +'ähle' +'ähig' +'ägyptischen_' +'ägt_' +'äger' +'äg' +'ächtige' +'äche' +'ás' +'ßlich' +'ßer_' +'ßer' +'ßb' +'Überwindung_' +'Übersetzer' +'Überna' +'Überlebens' +'Übergriffe' +'Überflu' +'Übereinkommens_' +'Überarbeitung_' +'Österreich' +'Öls' +'Ölpreis' +'Öle' +'Ökolo' +'Äußer' +'Äquivalen' +'Ähnliche' +'Ã_' +'·      ' +'²_' +'®_' +'®, _' +' – ' +' %, _' +'}}) ==' +'}})' +'}{_' +'}, _' +'|' +'zünd' +'zzle' +'zykli' +'zwischenstaatliche' +'zweitgrößte_' +'zweimal_' +'zweige' +'zweier_' +'zweie' +'zwec' +'zwang' +'zuwider' +'zuvorkommende' +'zuverlässiger_' +'zuv' +'zutreffend' +'zusammenzuf' +'zusammenzuarbeiten_' +'zusammentre' +'zusammenhä' +'zusammenf' +'zurückzuh' +'zurückkehren_' +'zurückk' +'zurückbl' +'zunahm' +'zulässig_' +'zula' +'zukomm' +'zugr' +'zugewiesen' +'zugeschnitten' +'zugesa' +'zugelassen_' +'zugehen_' +'zufügen_' +'zufällig' +'zubereitet_' +'zte_' +'zst' +'zoo_' +'zonen_' +'zol' +'zitä' +'zitiere_' +'zit' +'zipation_' +'zines' +'zien' +'zheimer_' +'zerr' +'zero' +'zerbrech' +'zentri' +'zentral' +'zend' +'zeitweilige' +'zeitl' +'zeitiger_' +'zeitig' +'zeita' +'zeilen_' +'zeil' +'zeigten_' +'zeige' +'zeichnung_' +'zar_' +'yte' +'yment' +'yla' +'yie' +'yg' +'yev' +'yell' +'yea' +'ydr' +'yak' +'xts_' +'xon_' +'xn' +'xist' +'xim' +'xes_' +'würdige_' +'wört' +'wöchentlich' +'wärt' +'wäl' +'währung' +'wski_' +'wron' +'writer_' +'wrap' +'wozu_' +'wovon_' +'wounded_' +'worthwhile_' +'worries_' +'workshop' +'workplace' +'womit_' +'wofür_' +'wling' +'withstand_' +'withdrawn_' +'withdraw' +'wissenschaftliche' +'wissenschaftl' +'wirkte' +'wins_' +'winners_' +'winkel' +'willkürlich' +'willen_' +'wilde' +'wiese' +'wiederher' +'wiederge' +'widmet' +'widersetz' +'wick' +'whatsoever_' +'wettbewerbsfähig_' +'wettbewerb_' +'wertig' +'werkzeug' +'wende_' +'wend' +'wen' +'welding_' +'welders_' +'weites' +'weiterf' +'weiterentwickel' +'weilen_' +'weiche_' +'wege_' +'weekend' +'weck' +'wechsels' +'wechs' +'weakening_' +'wc' +'way' +'wav' +'watt' +'watershed_' +'waterfalls_' +'wasted_' +'warrior' +'ward' +'wanting_' +'wang' +'wandte' +'wahrha' +'wach' +'völligen_' +'völ' +'vá' +'vulnerabilities_' +'vul' +'votre_' +'vorzugehen_' +'vorzei' +'vorz' +'vorsorge_' +'vorrangige' +'vorne_' +'vorle' +'vorlag' +'vorhin_' +'vorhersehbar' +'vorherrschende' +'vorherigen_' +'vorherge' +'vorhandene_' +'vorhaben' +'vorgetragen_' +'vorgesehene_' +'vorgeleg' +'vorgebracht_' +'vorb' +'voraus' +'vorantreib' +'vorangehen' +'vorangegangenen_' +'voranbringen_' +'voor_' +'von' +'volunt' 
+'volumes_' +'vollziehen_' +'vollendet' +'voli' +'vole_' +'voic' +'vivid' +'vität_' +'vita' +'visit' +'viru' +'viou' +'violate_' +'violate' +'viol' +'vio' +'villa' +'vigorously_' +'vigilant' +'viewing_' +'viewer_' +'viet' +'vierz' +'vielversprechend' +'viels' +'vielfach' +'vidue' +'vide' +'vib' +'viat' +'vete' +'vet_' +'verzögern_' +'verzerr' +'verzeichnis_' +'verzauber' +'verwöhn' +'verwirr' +'verwir' +'verwendete_' +'verweigert' +'verweh' +'verwand' +'verwaltung' +'verwaltet_' +'verurteilen_' +'verursachen_' +'vertritt_' +'vertretene' +'vertretende' +'vertiefen_' +'vertie' +'verteil' +'verteidigt_' +'versäume' +'versuchten_' +'versu' +'verstümmel' +'verstehe_' +'versteckt' +'verstecken_' +'versta' +'versp' +'versorgen_' +'versorg' +'versicherung_' +'verschwenderische' +'verschre' +'verschr' +'versatil' +'versammel' +'versagt_' +'vers_' +'verpflichtung' +'verordn' +'vernünftige' +'vernetz' +'vernehm' +'vernachlässigt' +'vernachlässigen_' +'vermindert' +'verlässliche' +'verließ' +'verlie' +'verletzen' +'verleih_' +'verlangsam' +'verlang' +'verlagerung' +'verkünde' +'verkl' +'verkehr' +'verkaufte' +'verhältnisse' +'verhält' +'verheirat' +'verhandl' +'verhandelt' +'verhaftet_' +'vergift' +'verfügbare' +'verfolgte_' +'verfe' +'verfa' +'vereinte' +'vereinig' +'vereinfachen_' +'vereinbarten_' +'vereinbarte' +'verein' +'verdreifach' +'verdoppelt_' +'verdoppel' +'verdan' +'verbu' +'verbrenn' +'verbrachte' +'verborgen' +'verbleiben' +'verban' +'veranstaltet' +'veranlasst_' +'verankert_' +'verabscheu' +'venture' +'vent_' +'vendor' +'veil' +'vec' +'vd' +'vastly_' +'variier' +'variat' +'var_' +'valve' +'vale' +'vald' +'ußen' +'ux' +'uv' +'utsbe' +'utr' +'utin' +'uth' +'utenant' +'ustausch_' +'usgaben_' +'urz' +'uru' +'urt' +'ursa' +'urging_' +'urg' +'urch' +'urbaniz' +'urban' +'urate' +'ura_' +'upte' +'uphold' +'upheaval' +'upa' +'uon' +'unzähligen_' +'unzwe' +'unzureichende' +'unz' +'unwillingness_' +'unweigerlich' +'unwanted_' +'unverä' +'unu' +'unthink' +'unterzeichnete' +'unterzeichn' +'unterworfen_' +'unterteilt_' +'untersuch' +'unterstrichen_' +'unterstreiche' +'unterschätzt' +'unterschiedlich' +'unterschied' +'untersch' +'unterminieren_' +'untergräbt_' +'unteren_' +'unterbrechen_' +'unsc' +'unresolved_' +'unrea' +'unqu' +'unpredictab' +'unmittelbarer_' +'unmen' +'unlängst_' +'unkomp' +'unko' +'unite_' +'unis' +'unilateralism_' +'unica' +'uni_' +'unha' +'ungü' +'ungsze' +'ungsverfahren' +'ungso' +'ungser' +'unglück' +'unglaublich' +'ungerecht' +'ungenügend' +'ungehe' +'unfr' +'unfortunate_' +'unforgettable_' +'unfolding_' +'unfair' +'unexp' +'uneven' +'unes_' +'unerwünscht' +'unerwartete' +'undurch' +'undung' +'undi_' +'underestimate' +'underc' +'undeniabl' +'unden' +'unconditional' +'uncomfortable_' +'uncle' +'unbegrenzt' +'unaufhaltsam' +'unat' +'unam' +'umstr' +'umst' +'umsetz' +'umm' +'umin' +'umh' +'umgest' +'umgeh' +'umgebung' +'umfassendere_' +'umfangreichen_' +'umfangreich' +'umfa' +'umf' +'ume_' +'umbrella' +'uma_' +'ultra_' +'uls_' +'ulos' +'ulierung' +'uldn_' +'ulde' +'ularly_' +'ukr' +'uko' +'uit_' +'ugu' +'ugi' +'ugge' +'ufs' +'ufl' +'uff' +'uerung_' +'uern_' +'uellen_' +'uelle_' +'udo' +'udia' +'ubt_' +'uba' +'ub_' +'uay' +'uate' +'uas' +'tüt' +'tümer' +'tödlich' +'täts' +'tätigkeit' +'täter' +'tär' +'tzl' +'tzende' +'tze_' +'tyrant' +'typis' +'twin' +'tutt' +'tutor' +'tus_' +'turm' +'tur_' +'tuous' +'tunnel' +'tuna_' +'tuition' +'tuber' +'tti_' +'ttert' +'tters' +'tsp' +'tsi' +'tscheni' +'tsa' +'trö' +'trä' +'trusted_' +'truppen_' +'trupp' +'trug_' +'truc' +'trouble' +'trou' 
+'trot' +'tropis' +'trophi' +'trivial' +'trink' +'trim' +'trigger' +'trieben_' +'trieb' +'tribute_' +'treu_' +'tres_' +'tres' +'tree_' +'treasure_' +'treas' +'traße_' +'travelers_' +'travail_' +'traue' +'trau' +'trat_' +'trat' +'trapp' +'transparente_' +'transp' +'transmitt' +'translator' +'transformer_' +'transformati' +'transatlantic_' +'tran' +'tram_' +'trali' +'trail_' +'trai' +'tragbar' +'traditioneller_' +'trades_' +'trac' +'tournament_' +'tourismus' +'touri' +'totalit' +'tos' +'tops_' +'topbonus_' +'too' +'toni' +'tone' +'toma' +'tolerate' +'token_' +'toilet_' +'toilet' +'tment' +'tma' +'tkan_' +'titles_' +'titi' +'tist' +'tisch' +'tis_' +'tings_' +'timme' +'timing_' +'timi' +'tilt' +'till' +'tiker' +'tightening_' +'tig_' +'tiere' +'tier_' +'tiefen_' +'tief' +'tied_' +'tid' +'tick' +'tici' +'tiate' +'tian_' +'thwart' +'thun' +'throne_' +'thresholds_' +'threatens_' +'thoroughly_' +'thon_' +'theori' +'theoretical_' +'tgut' +'tformen_' +'textiles_' +'textile_' +'text' +'teurer' +'test' +'terte_' +'tert' +'terroristische_' +'terroris' +'territoriale_' +'territoriale' +'terminology_' +'terminat' +'term' +'terli' +'teren' +'terd' +'terblichkeit' +'terba' +'tent_' +'tendi' +'tempt' +'template' +'temperature' +'temperatur' +'tels' +'teles' +'telephones_' +'tekn' +'teilung_' +'teilnehmer' +'technologie_' +'technologie' +'technically_' +'tching_' +'tbe' +'tbar' +'tav' +'tausende' +'taught_' +'tatte' +'tator' +'tation' +'tate_' +'tasa' +'tariffs_' +'targeting_' +'tape_' +'tandard' +'tand' +'tamb' +'tally_' +'tali' +'takti' +'takeover_' +'tains_' +'tail_' +'tahu' +'taha' +'tactic' +'taat' +'taa' +'südlich_' +'sü' +'säum' +'säu' +'sätz' +'säkulare' +'säch' +'szei' +'systemi' +'systematische' +'syrischen_' +'syrische_' +'synd' +'symbolische' +'swing_' +'swin' +'sweet_' +'sweeping_' +'sust' +'suspended_' +'suspend' +'survivors_' +'surviving_' +'surv' +'surgery_' +'surfing_' +'supr' +'suppress_' +'support' +'superpower_' +'superiority_' +'sunny_' +'sung_' +'sums_' +'sug' +'suffice' +'suchte' +'succeed' +'subtr' +'subti' +'substitute' +'subsistence_' +'subsidiaries_' +'subscription' +'subo' +'subjecti' +'stücke' +'ständnis' +'ständi' +'stände_' +'städtischen_' +'styl' +'sty_' +'stumbl' +'stuhl_' +'studying_' +'studios_' +'studiere' +'stub' +'ströme_' +'strukturierte' +'stroke' +'stritt' +'strip' +'stringen' +'striking_' +'strenge_' +'streiche' +'streaming_' +'straße_' +'strain_' +'strain' +'strafrechtliche_' +'storm_' +'stopped_' +'stones_' +'stole' +'stm' +'stliche' +'stische_' +'stipulate' +'stimm' +'stills' +'stig' +'stian' +'stere' +'stems_' +'stelle' +'stee' +'stattfand_' +'stattdessen_' +'statis' +'stating_' +'starte' +'starship' +'starring_' +'starr' +'standar' +'stam' +'stall_' +'staged_' +'staff' +'stabilisi' +'ssystem_' +'ssung_' +'ssten_' +'sslich' +'ssige_' +'ssert' +'ssenen_' +'ssad' +'srat' +'square' +'spürbar' +'spü' +'spy' +'spu' +'sprung' +'sprozess' +'sprogramm' +'spro' +'spritz' +'sprech' +'spreads' +'sprachig' +'spots_' +'spotlight_' +'spora_' +'spoo' +'sponsor_' +'spolitik_' +'spokes' +'splendid_' +'spitz' +'spiritual' +'spin_' +'spill' +'spielte' +'spiele_' +'spher' +'sph' +'spezialisierte' +'spezialisiert_' +'sperr' +'sper' +'spektrum_' +'spekt' +'speicher' +'speculati' +'spectac' +'specif' +'specially_' +'specialist' +'specialis' +'spark_' +'spannende' +'spanische' +'spalte' +'spac' +'sozioökonomische' +'soz' +'sovi' +'souverän' +'souls_' +'sorgte_' +'sorgan' +'sonstigen_' +'song_' +'sonabl' +'solusi_' +'solo_' +'soll' +'soi' +'sofortige' +'soci' +'soaring_' +'soap' +'sness_' 
+'sneak' +'smug' +'smoke' +'smitte' +'smell_' +'smallest_' +'small' +'slowenisch' +'slope_' +'slin' +'slide' +'slau' +'sland_' +'skontroll' +'skew' +'skeptisch' +'siu' +'sitzt_' +'sistem_' +'sinkt_' +'sincere_' +'simplify_' +'simo' +'similari' +'simila' +'silent' +'signifikante_' +'signi' +'signalisier' +'signal' +'sige' +'sies_' +'sierende' +'sieb' +'sider' +'sidelin' +'sid' +'sichtlich_' +'sicherge' +'sicherere' +'sica_' +'sibl' +'sibi' +'shri' +'showers_' +'shot' +'shortfall_' +'shorter_' +'shore_' +'shof' +'shir' +'shifting_' +'shie' +'shi_' +'she' +'shatter' +'sharpe' +'sham' +'shadow' +'shade' +'sges' +'sger' +'sgebiet' +'sfähig' +'sfr' +'sfe' +'sfa' +'sexual' +'setzten_' +'setze_' +'setup_' +'settle_' +'servici' +'serv' +'seria' +'sequence_' +'sequen' +'sepe' +'separated_' +'sensitivity_' +'sensibl' +'senh' +'sender_' +'sena' +'seminar' +'semi_' +'selu' +'selige' +'self' +'selecti' +'sela' +'sekitar' +'seitig' +'sein' +'sehingga_' +'sehbare' +'seeds_' +'secu' +'sectarian_' +'secrets_' +'secrecy_' +'sechs' +'sece' +'season' +'searche' +'sdi' +'sdat' +'sda' +'scrib' +'scourge_' +'schönes_' +'schädlich_' +'schwing' +'schwierig' +'schwerwiegende_' +'schweizer' +'schwarzen_' +'schwache_' +'schule' +'schuldig_' +'schte_' +'schs' +'schrittweise' +'schriftliche_' +'schriftliche' +'schon' +'scholarship' +'schnellstmöglich' +'schnellstens_' +'schlä' +'schlosse' +'schlichte' +'schlechte' +'schlage' +'schizophreni' +'schirm' +'schienen_' +'schenk' +'scheine' +'scheiden' +'schadet' +'scenery_' +'scenarios_' +'scen' +'scatter' +'scar' +'scal' +'sburg_' +'saver' +'saudi' +'satisfy' +'satisfaction_' +'sar_' +'sangat_' +'sanf' +'sandy_' +'sammlung' +'sames_' +'salv' +'salon' +'salmon' +'sall' +'sail' +'sah' +'sacred_' +'sabotage' +'saa' +'rüst' +'rüh' +'rüf' +'rückt_' +'rücke_' +'rüch' +'römische' +'rés' +'ränken_' +'räglich' +'räder' +'rwart' +'rva' +'rust' +'russi' +'rush' +'rupulous' +'rupt' +'rumo' +'ruft_' +'rufe' +'rue' +'ruch' +'ru_' +'rting_' +'rtig' +'rthe' +'rter_' +'rsion' +'rse_' +'rren_' +'rozess_' +'rox' +'routine' +'rous_' +'rounde' +'rote_' +'rotat' +'rose' +'ror_' +'rooftop' +'rome_' +'roman_' +'roller' +'roh' +'rogue' +'rogramm_' +'rocks_' +'robie' +'rnt_' +'rmt' +'rmina' +'rmat' +'rman_' +'rlin' +'rkste_' +'riös' +'riu' +'ritte' +'ritt_' +'ritis' +'riter' +'risi' +'rises_' +'rische_' +'ringung_' +'ringe' +'rima' +'rigkeit_' +'rigen_' +'rifft_' +'riffen' +'riff' +'riesiger_' +'riesige_' +'rieben' +'ridge' +'richtungen_' +'richtung' +'richer_' +'ribut' +'riad' +'rgen' +'rfs' +'rfer' +'rey_' +'reward' +'revolution' +'revolt' +'revo' +'revision_' +'revision' +'reva' +'rev' +'rett' +'retreat_' +'retic' +'reth' +'reten_' +'rete' +'retains_' +'retained_' +'ret_' +'result' +'rests_' +'restru' +'reste' +'responds_' +'responding_' +'respi' +'respecting_' +'resolu' +'resistant_' +'resident_' +'resi' +'reservier' +'resentment' +'resemble' +'repudiat' +'republikanischen_' +'repräsentati' +'repro' +'representa' +'repositor' +'replacing_' +'repercussions_' +'repea' +'repay_' +'rente' +'renewal_' +'renc' +'remedy_' +'religiös' +'relian' +'relevan' +'releases_' +'rela' +'reitungs' +'reist' +'reise' +'reinve' +'reinforces_' +'reif_' +'reie' +'reicher' +'reichend_' +'reibungslosen_' +'reh' +'reguläre' +'regulier' +'registr' +'regio' +'regi' +'regel' +'regain_' +'refuses_' +'refurbish' +'refu' +'refrigerat' +'refresh' +'reformiert' +'refine' +'reduzierte' +'reduce' +'redo' +'redis' +'redet' +'recyc' +'rect' +'recreat' +'recommend' +'recom' +'reckt_' +'reciproca' +'rechnet' +'rechen_' +'recepti' 
+'receiver' +'receipt' +'rebuilt_' +'rebuild' +'rebell' +'reba' +'realm' +'reakti' +'reagierte' +'reaffirm' +'readin' +'reacted_' +'react' +'rds_' +'rdnung' +'rdinate_' +'rde_' +'rda' +'rbu' +'rbi' +'rbeit' +'raums_' +'rations_' +'rationalis' +'ratings_' +'ratify_' +'ratifizieren_' +'rassis' +'raschen_' +'rape_' +'rangig' +'rally_' +'rakete' +'raid_' +'raf' +'rado_' +'radikal' +'rad' +'raci' +'quote' +'quicker_' +'quero' +'quenz' +'quee' +'quasi_' +'quart' +'quar' +'qualities_' +'qualify_' +'pé' +'puri' +'purchased_' +'punishment_' +'punishe' +'pundits_' +'punct' +'puff' +'publishe' +'publications_' +'publication_' +'publi' +'pub' +'ption' +'ptic' +'pti' +'pter_' +'psychologi' +'psychi' +'pson_' +'prüfe' +'präzise_' +'präventive' +'pruden' +'prozessor' +'prozess' +'proz' +'proyecto' +'proximité_' +'provoke' +'provisional_' +'protocol' +'protestier' +'protectionist_' +'prostitution_' +'prosper_' +'prosecutor' +'prosecute' +'propag' +'proofed_' +'prone_' +'prompt_' +'promo' +'promi' +'projekten_' +'proj' +'progress' +'programmier' +'programme' +'prognos' +'profile' +'profession_' +'produz' +'produktiver' +'produktiven_' +'prochen' +'processor_' +'processed_' +'problematische' +'probe' +'probability_' +'prizes_' +'privatization' +'privatiz' +'privat' +'principal' +'preview_' +'presumably_' +'pressu' +'presse' +'presenta' +'prep' +'premises_' +'premier_' +'prejud' +'preisen_' +'predecessor_' +'predator' +'precon' +'precis' +'preceding_' +'preceded_' +'praktischer_' +'practise' +'prachige' +'prach' +'ppt_' +'ppne' +'pper_' +'pparat' +'potenzial_' +'potenti' +'posting_' +'poster' +'possesse' +'positiver_' +'positively_' +'positionier' +'posit' +'portugiesischen_' +'portfolio_' +'pornographi' +'porat' +'populist' +'popu' +'pop_' +'pons' +'poni' +'pone' +'polizeilichen_' +'polio_' +'pointer' +'poet_' +'pneum' +'pluralist_' +'plug' +'pling_' +'plikat' +'plica' +'ples' +'pleas' +'playground_' +'plausible_' +'plaus' +'platzier' +'platz' +'platte' +'plates_' +'planet' +'pl' +'pix' +'piso' +'pirate' +'pira' +'pieler' +'phä' +'physische' +'physics_' +'photographer' +'philosophers_' +'philo' +'pher' +'phen' +'phases_' +'pharma' +'pg' +'pflicht_' +'pfer_' +'pfei' +'pfa' +'pezifische' +'petro' +'pest_' +'pessimist' +'pes' +'perusahaan_' +'perta' +'pert_' +'persuaded_' +'perspectiv' +'personal' +'persona' +'perso' +'persiste' +'persecut' +'perse' +'permanente' +'perja' +'periodi' +'performa' +'perform' +'perfekten_' +'perfekte_' +'perceptions_' +'peny' +'pensioner' +'pengu' +'peng' +'penal' +'pektive' +'pedi' +'peculiar' +'pect' +'pc' +'paßt_' +'payer_' +'paya' +'pay' +'pause_' +'patron_' +'patriot' +'patriarch' +'patience_' +'pati' +'patenti' +'patch' +'patan_' +'passp' +'passenden_' +'passende' +'passen_' +'passage_' +'passa' +'pasa' +'partition' +'partisan' +'particul' +'participa' +'partially_' +'partial_' +'partei' +'parte' +'parque' +'parlam' +'paris' +'parc' +'parasit' +'paran' +'parag' +'paradoxical' +'parado' +'paradise_' +'panne' +'panels_' +'panc' +'palästinensische' +'palm' +'pale' +'palace_' +'pai' +'pack_' +'pable_' +'overwhelmingly_' +'overthrow' +'overs_' +'overlap' +'overl' +'overd' +'ova_' +'ova' +'outright_' +'outrageous_' +'outr' +'outer_' +'outbreaks_' +'oup' +'ough' +'otten_' +'osten' +'ostasiatische' +'osphär' +'osph' +'oso' +'osh' +'orz' +'orum_' +'orten' +'orre' +'ormi' +'origine' +'originated_' +'original' +'orientieren_' +'orientation_' +'orie' +'oria' +'organisierten_' +'organise_' +'organisch' +'ordinat' +'orden' +'orch' +'orb' +'orate' +'oral' +'optimistic_' +'optimist' 
+'optimis' +'optimalen_' +'optim' +'opfern' +'operative_' +'operationen_' +'open' +'ope_' +'oor' +'oo_' +'onwards_' +'onste' +'onomi' +'onment' +'oniert' +'oner' +'onc' +'onale' +'onal' +'ommt_' +'ommen' +'oming_' +'omen_' +'oman' +'olv' +'olungs' +'ols' +'olli' +'olle' +'oll_' +'oliz' +'olive_' +'oliga' +'okan_' +'oire' +'oint_' +'ohnehin_' +'ohl' +'ogic' +'ogenheit_' +'ogati' +'offsho' +'offenkundig_' +'offe_' +'offe' +'ocken_' +'ocht' +'oceans_' +'occurring_' +'occurrence_' +'occupying_' +'occupies_' +'occasion' +'obsole' +'observ' +'obscure' +'oblige' +'obligator' +'objektive' +'objektiv' +'objection_' +'obgleich_' +'obesity_' +'obacht' +'oba' +'oard' +'nützig' +'nöte' +'nössische' +'näher' +'nzende' +'nym' +'nvi' +'nven' +'nutzung' +'nutri' +'nurse' +'nungsl' +'nungs' +'nukl' +'nuan' +'nty_' +'ntrat' +'ntische' +'ntal' +'nstig' +'nst_' +'nsic_' +'npo' +'nour' +'notwendiger' +'notw' +'notified_' +'notebook' +'normen_' +'normale' +'nop' +'nons' +'nonetheless_' +'nominal' +'noble_' +'nob' +'nnial' +'nlich' +'nli' +'nland' +'nl' +'nitt_' +'nistr' +'niss' +'nischer_' +'nik' +'night' +'nieren' +'niedrigste' +'niederzu' +'nichts' +'nia_' +'nho' +'nheit' +'nhaft' +'ngliche' +'ngle' +'ngl' +'ngkin' +'ngh' +'ngens' +'ngen' +'ngeh' +'nfr' +'neutr' +'neuerliche' +'neueren_' +'neuem_' +'nette_' +'nets_' +'nerv' +'nent' +'nenn' +'nel' +'neighbours_' +'neighbor_' +'neid' +'nei_' +'negeri_' +'negativ' +'need' +'ndten_' +'ndliche_' +'ndli' +'nding_' +'nderung_' +'ndern' +'ndene' +'ndelt' +'ndbar_' +'nbild' +'nberg_' +'nbe' +'nationality_' +'nationalistische' +'nationalis' +'nannten_' +'name' +'naive_' +'nahegeleg' +'nage' +'nachzudenken_' +'nachlassen_' +'nachl' +'nachfo' +'münd' +'möglichkeit_' +'möglich' +'même_' +'mé_' +'männliche' +'mw' +'mußt' +'mutmaßliche' +'mutati' +'muslimische_' +'muslimisch' +'municipality_' +'muni' +'multiplie' +'multinationalen_' +'multilingual' +'mulat' +'mud' +'muc' +'mps' +'mple' +'mpho' +'mp3' +'mour' +'motorways_' +'motivierte' +'motivieren_' +'motivated_' +'mother' +'mosqu' +'mosa' +'mort' +'moreover_' +'moralisch_' +'moralis' +'moral' +'monsters_' +'monopolies_' +'mond_' +'monate' +'mois' +'mog' +'modifications_' +'modif' +'moderati' +'modelle_' +'modele' +'modalit' +'modal' +'mobilisiert_' +'mobil' +'mmu' +'mmission' +'mming_' +'mitzuteilen_' +'mittlere_' +'mittl' +'mitt' +'mithilfe_' +'mitgliede' +'mitb' +'mismanage' +'miser' +'mino' +'minimalis' +'minati' +'minat' +'mimic_' +'millenni' +'militia' +'militarily_' +'militari' +'milia' +'milestone' +'mildern_' +'migrator' +'miete' +'mfa' +'metry_' +'metropolitan' +'metropolis' +'meti' +'metaphor' +'metal' +'merupakan_' +'mering' +'merikas_' +'merika' +'merger_' +'merge_' +'menun' +'mention' +'mentally_' +'mentali' +'menschlich' +'meno' +'meni' +'mengg' +'mendo' +'mendapat' +'memori' +'memiliki_' +'membe' +'melo' +'meld' +'melak' +'meinte_' +'mehrheitlich' +'mediterran' +'medien' +'mbr' +'mbo' +'maßgebliche' +'maßgeblich_' +'mayors_' +'may' +'maturit' +'matt' +'matri' +'matisch' +'matis' +'mathematical_' +'mathemati' +'material' +'matching_' +'masyarakat_' +'massacre' +'mass' +'marvel' +'mars' +'markiert_' +'marki' +'mari' +'manufa' +'mans' +'manifestation' +'mane' +'mandate' +'mancher_' +'managements' +'macht' +'lüsse' +'lösungen_' +'lärung' +'lärm' +'längeren_' +'läh' +'lädt_' +'läche' +'lve_' +'luss' +'lui' +'ltungen_' +'lts' +'lton' +'ltet' +'ltes' +'ltere' +'ltene' +'lpin' +'love' +'lov' +'loser_' +'lop' +'loop' +'looming_' +'longe' +'lohnen' +'logisch_' +'login' +'logen_' +'lod' +'locals_' +'loading_' +'load' 
+'lls_' +'lligte' +'lles_' +'llect' +'llb' +'lj' +'liziert_' +'livel' +'litä' +'litt' +'lita' +'lit_' +'listing_' +'lish' +'lio_' +'linux' +'linking_' +'linke' +'lining_' +'linien_' +'lini' +'linguist' +'lingeri' +'linge_' +'linen_' +'limitations_' +'limit' +'likes_' +'liga' +'lifts_' +'lifting_' +'liest_' +'liegende_' +'lieferungen_' +'lief' +'lied' +'liebe' +'licht' +'lichem_' +'licenses_' +'licens' +'lic_' +'libert' +'liberalism' +'liable_' +'lia_' +'lia' +'lge_' +'lga' +'lfi' +'leverag' +'leva' +'leut' +'letztere' +'letzt' +'lete' +'lest_' +'lesse' +'lers_' +'lern_' +'leo' +'lent_' +'lende' +'lend' +'lement' +'lek' +'leite' +'leistungsstarke' +'leiste' +'leidenden_' +'leid' +'leichten_' +'lehnte_' +'legung_' +'legu' +'legislative' +'legale' +'lega' +'lecht' +'lebendige' +'lebenden_' +'lean' +'lden' +'lc' +'layo' +'layers_' +'laut' +'launder' +'laugh_' +'laub' +'latte' +'lations' +'lateinamerikanischen_' +'lated_' +'lares_' +'lapse' +'langwierige' +'langsamer' +'langsame' +'landwirtschaftliche' +'landscape' +'landmark_' +'landes_' +'landes' +'landed_' +'lamat' +'lain_' +'laim_' +'lager' +'laboratory_' +'küste_' +'künften_' +'kündig' +'kümmert_' +'kühle' +'körperlich' +'käufe' +'kämpfung_' +'kämpft' +'kus_' +'kurzlebig' +'kurdische' +'kunst_' +'kung_' +'kundig' +'kulturelle' +'kul' +'ktive_' +'ktie' +'ksh' +'kse' +'kräfte_' +'kritisieren_' +'kriti' +'kriminelle' +'krie' +'kreis_' +'krebs_' +'kreative_' +'krankheit' +'kraftwerke_' +'kota' +'kosm' +'korrigiert_' +'korr' +'kopp' +'koordin' +'kooperieren' +'konzipier' +'konzentrierte_' +'konzentr' +'konv' +'kontinu' +'kontakt' +'konsum_' +'konsultier' +'konfigur' +'kompromi' +'komponente' +'komplexer' +'komple' +'kommiss' +'kommene_' +'kommende_' +'kommand' +'komitmen' +'komfort' +'kolo' +'kohärent' +'kna' +'klüg' +'klini' +'kling' +'klick_' +'klein' +'klause' +'kland_' +'kk' +'kipun_' +'king' +'kids_' +'kick' +'keyboards_' +'keyboard_' +'keti' +'kep' +'kennzeichne' +'kenntnis' +'kenne' +'kelo' +'kell' +'kela' +'keitsp' +'kehrten_' +'kehrte_' +'keh' +'keeper' +'ked' +'kd' +'kauft' +'kati' +'katastrophen' +'katastrophalen_' +'katastroph' +'kass' +'kart' +'kapitalistischen_' +'kapitalistisch' +'kapitali' +'kapit' +'kanäle_' +'kanzler_' +'kannte' +'kanische_' +'kane' +'kampagnen_' +'kammer' +'kad' +'kabel' +'kab' +'kW_' +'jüngst_' +'jüdisch' +'jähriger_' +'jähr' +'juta_' +'justizielle' +'justifiable_' +'junior' +'jump_' +'juice' +'judgments_' +'juan' +'jp' +'joy_' +'joy' +'jours_' +'journe' +'journal_' +'jour_' +'jor_' +'jon' +'jm' +'jk_' +'jk' +'jen' +'jemandem_' +'jected_' +'jaz' +'jang' +'jail_' +'jahrzehntelang' +'jahrelange' +'jad' +'jacuzzi_' +'iß' +'izer_' +'iw' +'iver' +'itäre' +'itä' +'itz_' +'ituation' +'itted_' +'itiv' +'itischen_' +'itions' +'itin' +'ithm' +'itglied' +'ited_' +'itas_' +'istische_' +'istans_' +'istan_' +'issen' +'isse_' +'isse' +'issa' +'israel' +'ism' +'ision' +'isier' +'irs' +'irrig' +'irresponsibl' +'irresp' +'irrel' +'irregular' +'iro' +'irku' +'irische_' +'irgendwo_' +'irgendwie_' +'irgendwelche' +'ipt' +'ipp' +'ipl' +'iose' +'ios' +'ionss' +'ionist' +'iologi' +'inz' +'inward_' +'invoice' +'investition' +'investigations_' +'invest' +'inventor' +'invented_' +'invent' +'introduces_' +'inti' +'inters' +'interoperability_' +'interne_' +'internally_' +'interfere' +'interessierte' +'interdi' +'interconnect' +'interactive_' +'intensiver_' +'intensify' +'intellectuals_' +'intell' +'integrative' +'integrati' +'integr' +'int_' +'int' +'insurgen' +'insul' +'instruct' +'institutionelle' +'inste' +'inspirierende' +'inspire' 
+'inspe' +'insolvenc' +'insight_' +'inser' +'insel' +'inputs_' +'innu' +'innovativer' +'inners' +'innenpolitische_' +'innenpolitisch' +'inne_' +'inm' +'inland_' +'inko' +'injustice_' +'initi' +'iniste' +'inherit' +'inherently_' +'ingt_' +'informi' +'informelle' +'informationen_' +'informa' +'influen' +'infl' +'infizier' +'inferior_' +'infection_' +'infecti' +'inexpe' +'inertia' +'inequalities_' +'inen' +'ineff' +'indung' +'indices_' +'indications_' +'indexe' +'indefinite' +'inda' +'incremental' +'incorporati' +'incor' +'incomplete_' +'incompa' +'incline' +'inciden' +'inbe' +'inau' +'ination_' +'inapp' +'inanzierung' +'inan_' +'inali' +'inacc' +'imstande_' +'impul' +'impu' +'imprisoned_' +'impressi' +'impressed_' +'importi' +'importe' +'implying_' +'implizi' +'impli' +'implant' +'impf' +'imperialist' +'imperial_' +'imperfect_' +'impe' +'immun' +'imminent_' +'immigkeit' +'immerhin_' +'imma' +'imeter' +'imer_' +'iment' +'iman' +'imagination' +'image' +'illustrate' +'illig' +'ilis' +'ilian' +'ilet' +'iles' +'iler' +'ildet_' +'ila_' +'iki_' +'ikan' +'ihood' +'igte_' +'igte' +'igste' +'igert_' +'igenen_' +'igende_' +'igend' +'igat' +'igan' +'ify_' +'ifica' +'ific' +'iffe' +'ifer' +'ießungs' +'ieu' +'iertes_' +'ierte' +'ierli' +'ierenden_' +'ientierte' +'ient_' +'iell' +'iehungs' +'iegende' +'iegen' +'ieferung_' +'iefe' +'idylli' +'idos' +'ido' +'ideologische' +'identifizieren_' +'identi' +'iden_' +'iden' +'icu' +'ickt_' +'ickel' +'ichtliche' +'ichteten_' +'ichtete' +'icherte_' +'icherheit_' +'icherheit' +'ibus' +'ibly_' +'iben' +'ibel' +'ibe' +'iate' +'iar' +'iani' +'iana_' +'iam' +'ially_' +'iale' +'iad' +'höheres_' +'höchstwahrscheinlich_' +'häuser_' +'häufigste' +'härter' +'hysteri' +'hypothes' +'husband_' +'hurt_' +'hunde' +'humiliati' +'humanis' +'humane' +'huk' +'hub_' +'hter' +'hrte' +'hrop' +'hp' +'hov' +'hotele' +'hostilit' +'hostages_' +'hospi' +'horses_' +'horri' +'hormon' +'horizontal' +'horizon' +'honour' +'hones' +'holis' +'holder_' +'hochgradig_' +'hochentwickelte' +'hnya_' +'hns' +'hma' +'hlung_' +'hls' +'hlen' +'hitt' +'historischer_' +'hist' +'hion' +'hinweg' +'hinterla' +'hina' +'hilfe' +'highlights_' +'high' +'hig' +'hieß' +'hie' +'hid' +'hibit' +'hib' +'het_' +'hesitate_' +'hesita' +'herzlichen_' +'hervorgeh' +'hervorge' +'herstellen_' +'herrschende_' +'herrschen_' +'herrsche' +'herrlichen_' +'heroic_' +'hergestell' +'hered' +'heranzu' +'hene_' +'helpful' +'helme' +'hellen' +'helle_' +'helle' +'helicopter_' +'heitlich' +'heilige' +'heikle' +'hegemon' +'heftig' +'heel' +'hectare' +'heblich' +'heat' +'hear' +'headwa' +'haven_' +'hasn' +'harvest' +'harn' +'harmonisch' +'hap' +'hanging_' +'handlung' +'handelte_' +'handelbar' +'haltung_' +'haltig' +'haltestelle' +'halle_' +'half' +'hake_' +'hairdryer' +'hafter_' +'hafte' +'hadi' +'habitacion' +'habita' +'habet' +'güt' +'günstigste' +'günstigen_' +'gültig' +'gé' +'gäste' +'gänzlich_' +'gut' +'gust' +'guise' +'guilty_' +'guiding_' +'gue_' +'guaranteeing_' +'größtmögliche' +'grundlage_' +'großzügige' +'großartigen_' +'grow' +'groundwork_' +'grin' +'grim' +'grid_' +'gres' +'greife' +'greet' +'gray_' +'grave' +'grau' +'gratul' +'grass' +'grants_' +'grande' +'gram' +'grain_' +'graduated_' +'gradual_' +'grac' +'governor_' +'gove' +'gott' +'goldene' +'goa' +'gno' +'gnizing_' +'gnisse' +'gne' +'gnant_' +'gmatis' +'glück' +'glori' +'globe_' +'globalisierte' +'global' +'glied' +'gler' +'gleichg' +'gle_' +'glaubwürdige' +'glanc' +'gl' +'gische' +'gins' +'ginat' +'giganti' +'gift_' +'gift' +'ghts_' +'gha' +'ggf_' +'gger' +'gged' +'gf' 
+'geänderte' +'gezielt_' +'geze' +'gez' +'gewünschte_' +'gewü' +'gewöhnt_' +'gewährte' +'gewä' +'gewohnt_' +'gewisses_' +'gewerbliche' +'geweiht_' +'gewarnt_' +'gewann_' +'gewandt' +'gewalttätig' +'gewaltsam' +'gewaltigen_' +'gewaltige' +'getreten_' +'getrennt_' +'geteilte' +'geteilt_' +'gesunde_' +'gesucht_' +'gestric' +'gesteuert' +'gestell' +'gestalt' +'gesichert_' +'gesetzlichen_' +'gesetzliche_' +'gesetzlich_' +'geschütz' +'geschu' +'geschränkt' +'geschri' +'geschoss' +'geschn' +'geschmackvoll_' +'geschla' +'geschickt' +'geschichte' +'geschic' +'geschi' +'gescheiterten_' +'gescheitert' +'gerä' +'gert' +'germ' +'geringst' +'geringfügig_' +'geringe' +'gerichteten_' +'gerichte' +'gericht' +'geplante' +'geometri' +'geologi' +'geographic' +'geografisch' +'genügt_' +'gentur' +'generosity_' +'generali' +'genauen_' +'gemischte' +'gemeinschaftliche_' +'geme' +'gelöscht_' +'gelä' +'gelobt_' +'gelingen_' +'gelin' +'geliebt' +'geleistete' +'gelei' +'gelegenen_' +'gelangte' +'geladen_' +'gela' +'gekauft_' +'geka' +'gehörte' +'gehöre' +'gehend' +'geheimnis' +'gehe' +'geh' +'gegl' +'gegenübersteht_' +'gegebenenfalls_' +'gegebene' +'geführte' +'gefühl_' +'gefüg' +'gefälscht' +'gefährdete' +'gefundenen_' +'gefl' +'gefahren_' +'geeinigt' +'geehrter_' +'geda' +'gebun' +'gebro' +'gebnis' +'gebilligt_' +'gebieten_' +'geben' +'gay_' +'gathering' +'gathered_' +'gastronomy_' +'garten_' +'garde_' +'gant_' +'game' +'gamb' +'galow' +'galleries_' +'gale' +'gab' +'fürs' +'fürchtet' +'fünfzehn' +'führend_' +'führ' +'fühle' +'fügt_' +'fügen_' +'förm' +'fälsch' +'fusion_' +'funktions' +'funktional' +'funktion' +'fundamentalism_' +'fulfilled_' +'fulfill_' +'fulfil_' +'fug' +'fueling_' +'fting_' +'ftige' +'frühe' +'frustration' +'fruitful_' +'fruchtbare' +'front' +'froh_' +'frighten' +'frequen' +'fremden' +'freiz' +'freiwillige' +'frameworks_' +'frames_' +'fram' +'fraglich' +'foto' +'fosil_' +'fortunately_' +'fortunate_' +'fortschrittliche' +'fortschritt' +'fortgesetzte' +'fortgesetzt_' +'forsche' +'fors' +'formulation' +'formier' +'formelle_' +'forme' +'forgive' +'foreseeable_' +'foremost_' +'forecast' +'forbidden_' +'foodstuffs_' +'folgender' +'folgend' +'folde' +'flüsse_' +'flüge' +'flächen_' +'flus' +'fluctuations_' +'fluc' +'flow' +'fließ' +'fliehen_' +'flich' +'flex' +'flat' +'flam' +'fk' +'fizierten_' +'fixing_' +'fist' +'fishe' +'fische' +'firmen_' +'finishing_' +'finishes_' +'fing' +'finanzierten_' +'finanzier' +'finanziell_' +'filme' +'fill' +'fies_' +'fid' +'fici' +'fib' +'feuer' +'fett' +'festh' +'festge' +'fester_' +'fertiliz' +'fert' +'fers' +'fernseh' +'ferenz' +'fera' +'feminist' +'felde' +'feindliche_' +'fehlerhafte' +'fehlende_' +'fehl' +'feed' +'fear' +'faz' +'favori' +'favorable_' +'faun' +'fathers_' +'fastest_' +'fashioned_' +'farm' +'fang_' +'fana' +'familien' +'falt' +'fallend' +'fair' +'fahrten_' +'facult' +'faction_' +'faction' +'facet' +'fabricat' +'fabri' +'fa_' +'ezi' +'ezei' +'eze' +'extract_' +'extinction_' +'exte' +'expressions_' +'exportiert' +'exportieren' +'exporters_' +'exported_' +'exponenti' +'explosion' +'exploited_' +'explod' +'explizit' +'explanations_' +'expecting_' +'expectancy_' +'expansive' +'expans' +'expandier' +'exot' +'exklusive_' +'existen' +'exert' +'exer' +'excuse' +'exceeds_' +'exceeded_' +'exceed_' +'examining_' +'examination' +'ewicht' +'evolved_' +'evolve_' +'eviden' +'evaluated_' +'eva_' +'ev_' +'eut_' +'eus' +'europä' +'etung_' +'ets' +'etri' +'etliche' +'ethni' +'ethisch' +'ethics_' +'etet' +'estan' +'establishes_' +'establ' +'esst' +'essourcen_' +'essors_' 
+'espe' +'esp' +'esisch' +'esie' +'esh_' +'esar' +'esa_' +'esa' +'erzählt_' +'erzwingen_' +'erzw' +'erzig' +'erzielten_' +'erzieh' +'erzi' +'erzeugung_' +'erworben' +'erwirt' +'erweckt_' +'erwach' +'eruf' +'eru' +'ertr' +'erti' +'erteilt_' +'erteilen_' +'ersönlichkeit' +'ersto' +'erstmalig' +'ersorgung_' +'erschütter' +'erschweren_' +'erri' +'erreg' +'ero_' +'ernähr' +'ernsten_' +'erneuten_' +'ermöglichte' +'ermutigend' +'ermordet' +'ermittelt' +'ermitt' +'ermaßen_' +'erläutert' +'erlich' +'erkrank' +'erklärten_' +'erische' +'erin_' +'erhol' +'erho' +'erhebt_' +'erhaltene' +'ergreif' +'erfuhr' +'erforsch' +'erfasst_' +'erfassen_' +'eres_' +'erer' +'erenz' +'erend_' +'eren' +'ereit' +'ereignis' +'erbracht_' +'erate' +'erals' +'eradicati' +'erac' +'eption' +'epl' +'eou' +'envisage' +'environments_' +'entzi' +'entwickelnde' +'entwick' +'entum' +'entsp' +'entschä' +'entschl' +'ents' +'entrepreneurs_' +'entrepreneurial_' +'entrepr' +'entra' +'ento' +'entn' +'entla' +'entko' +'entitlement' +'ention' +'entia' +'enthusiastic' +'enthusiast' +'enthielt_' +'entg' +'entfern' +'entfalten_' +'enter' +'enteil' +'entdeckte' +'entailed_' +'entag' +'ensw' +'enrichment_' +'enpr' +'enormer_' +'ennen_' +'enne' +'enna_' +'enlarged_' +'enke' +'enische' +'enie' +'enhancing_' +'enhancement' +'engst' +'engines_' +'engere_' +'engagiert_' +'engag' +'energies' +'enerati' +'endors' +'endlos' +'endig' +'endg' +'encr' +'encounter' +'enchant' +'enberg_' +'enau' +'enan' +'enade' +'emulat' +'empör' +'empt' +'emphasized_' +'emphasises_' +'empfohlen_' +'empfinden' +'empfan' +'emitte' +'emit_' +'emis' +'eminent' +'emic' +'emergen' +'ementa' +'embraced_' +'embodie' +'emancipation_' +'elz' +'elu' +'elte_' +'elpr' +'elnde' +'elm' +'ellit' +'ellig' +'elimin' +'elektronische_' +'elektroni' +'eleg' +'electorate_' +'elderly_' +'ela_' +'ektors_' +'eiz' +'eiv' +'eise' +'eis' +'einzubringen_' +'einziger_' +'einzigartiger_' +'einzelstaatliche_' +'einzelstaatliche' +'einsetzt_' +'einschließ' +'einnimmt_' +'einn' +'einm' +'einleite' +'einlage' +'einka' +'einig_' +'einheitlich' +'einheit_' +'einheimische_' +'einhalten_' +'eingreif' +'eingesetzte' +'eingereichten_' +'einger' +'eingehende' +'eingefü' +'eingebrachte' +'einführt' +'einfließen' +'einfache' +'eindr' +'eilnehmer' +'eilig' +'eigt' +'eifen_' +'eichnungen_' +'eichnete' +'eichheit' +'eichen_' +'eiche' +'eibungen_' +'ehrliche' +'ehren' +'ehmend' +'ehm' +'ehemaliger_' +'egu' +'egr' +'egio' +'efficiently_' +'effi' +'effektive' +'eein' +'eed' +'educat' +'edoni' +'edly_' +'editorial_' +'editi' +'edingungen_' +'ediate_' +'ectiv' +'ect_' +'ecosystem_' +'economic' +'ecommerce_' +'eckung' +'echter_' +'echnologi' +'echni' +'ece' +'ec_' +'ebi' +'earthquake_' +'eare' +'eagle' +'ead' +'eab' +'eB' +'düstere_' +'dürft' +'dünne' +'dün' +'dó' +'dí' +'dè' +'dänische' +'dämpf' +'dynamisch_' +'dyn' +'dying_' +'durchgeführten_' +'durchführ' +'durable_' +'dura' +'duplicate' +'dule' +'dubio' +'dual_' +'dsch' +'drückt' +'drücke' +'drängt_' +'drink' +'dringender_' +'drige' +'dress_' +'dreim' +'dreie' +'drea' +'dran_' +'dramatische' +'drama_' +'drai' +'drafts' +'dozens_' +'dozen_' +'downs' +'downfall_' +'douche' +'doubl' +'dort' +'doppelten_' +'door' +'doom' +'dominieren_' +'dominance_' +'dom_' +'dokument_' +'doct' +'dock' +'doc' +'dne' +'dlin' +'divide' +'divert' +'diversification_' +'diversif' +'diver' +'dity' +'distributi' +'distinguish' +'distinctive_' +'distin' +'distanzier' +'distan' +'dissi' +'disqualif' +'disproportionate' +'disposition' +'displaced_' +'dispens' +'disparities_' +'disorders_' +'disor' 
+'diskriminier' +'diskreditier' +'disintegrati' +'discover' +'discontent' +'discl' +'discard' +'disar' +'disappoint' +'disappeared_' +'disadvantage_' +'disabl' +'dirty_' +'diri' +'direkter_' +'directors_' +'dir' +'diplomati' +'diplom' +'dingung' +'dinar' +'dimensionale' +'dimension' +'dilakukan_' +'diktat' +'dika' +'digte' +'diffus' +'differentiated_' +'differ_' +'diesjährigen_' +'diesel_' +'diesbezüglich_' +'diente_' +'dienstleistung' +'dictator_' +'dictator' +'dicta' +'dich' +'dice' +'dicat' +'dibandingkan_' +'dial_' +'diagnostizier' +'diagnosis_' +'dezentral' +'devise_' +'develop' +'devastat' +'deva' +'dev_' +'deutschs' +'deutliche' +'deutig' +'deterrence_' +'determina' +'detect_' +'detaine' +'destroying_' +'destotrotz_' +'destiny_' +'desti' +'destabilisieren' +'dest' +'desse' +'desperate_' +'desk' +'designi' +'designe' +'desert_' +'desert' +'descript' +'describing_' +'desc' +'derte' +'derse' +'derin' +'deput' +'depri' +'depressive' +'deplor' +'depict' +'depe' +'dent_' +'denkbar' +'demselben_' +'demonstrati' +'demoli' +'demokratische' +'demografische' +'democratiz' +'democratically_' +'demis' +'demics_' +'demi_' +'demagogue' +'dell' +'delight' +'delic' +'deleveraging_' +'deleted_' +'delet' +'deleg' +'delay' +'dei' +'deg' +'defizite' +'definite_' +'definierten_' +'deficien' +'defenses_' +'dedicati' +'decree' +'decoration_' +'decisively_' +'decen' +'dece' +'deca' +'debu' +'debat' +'deba' +'dealers' +'deaktiviert' +'deadlock_' +'daya_' +'day' +'davon' +'daughter_' +'dauern_' +'dauerhaft_' +'dauer_' +'datei_' +'dat_' +'darl' +'dark' +'darge' +'dank' +'dangerously_' +'dane' +'dance' +'dairy_' +'dai' +'dage' +'dae' +'dachte_' +'custom' +'curi' +'cup_' +'culos' +'culi' +'culati' +'cui' +'cture_' +'ctor_' +'cto' +'cteri' +'cruel_' +'crop_' +'crop' +'critics_' +'criticise_' +'cription' +'cred' +'creativity_' +'crea' +'cram' +'crafted_' +'covert' +'coverage_' +'courte' +'court' +'courag' +'counting_' +'counterpart' +'cosmo' +'corrupt' +'correspond' +'coordinate_' +'cool' +'cooking_' +'cookies_' +'convinc' +'conviction_' +'convey_' +'controvers' +'contro' +'contributes_' +'contraction' +'continuity_' +'continual' +'continental_' +'conti' +'contempt_' +'contemplate' +'container' +'consum' +'consultant' +'consolidate' +'console' +'consig' +'conservat' +'conscien' +'congratulat' +'confirms_' +'confine' +'conducti' +'condo' +'conditi' +'conceptual' +'concepts_' +'concentrating_' +'conceive' +'conceivable_' +'compromises_' +'comprise_' +'composition_' +'compos' +'complimentary_' +'competitors_' +'compet' +'communicati' +'commission_' +'commi' +'commerce' +'commentator' +'comitology_' +'combination' +'combin' +'colleg' +'collateral_' +'collaps' +'colla' +'coins_' +'cof' +'coexist' +'codec' +'cod_' +'cock_' +'clou' +'cloning_' +'cliente' +'clear' +'classification_' +'classif' +'clash_' +'clarifi' +'ckung_' +'ckten_' +'ckr' +'ckne' +'ckier' +'civiliz' +'civilisation' +'civ' +'circulation_' +'circula' +'circuit_' +'cip' +'ciona' +'ciation' +'ciar' +'chwor' +'chwi' +'chwe' +'chut' +'chuss' +'chur' +'chunk' +'chuh' +'chtliche' +'chtet_' +'chtern' +'chslung' +'chrono' +'chronis' +'chronic_' +'christ' +'chriften_' +'chreib' +'chooses_' +'choc' +'chnik' +'chni' +'chn' +'chme' +'chläge_' +'chli' +'chke' +'chisc' +'chip_' +'chinesischer_' +'chine' +'china_' +'chier' +'chicken_' +'chess' +'cherung' +'chere' +'chemis' +'chem_' +'charta_' +'characteris' +'chara' +'chanis' +'champions_' +'chambres_' +'chamber' +'cham' +'chain' +'chaff' +'ceu' +'cet_' +'certification' +'certificates_' +'certificate_' +'cents_' 
+'centrist_' +'centraliz' +'cent' +'cen_' +'cema' +'celebrati' +'celebrat' +'cele' +'ceiling_' +'cattle_' +'cations_' +'cater' +'catching_' +'catas' +'carte_' +'caro' +'cardi' +'cape' +'capacities_' +'cane_' +'campaign' +'calme_' +'calibration' +'cache_' +'bürger_' +'bünd' +'bücher' +'bü' +'byte_' +'byp' +'buyers_' +'bustl' +'burst' +'burgh' +'burg' +'bure' +'bundes' +'build' +'bug' +'buf' +'buen' +'bten' +'bte_' +'bsc' +'brut' +'brus' +'bruch' +'browse' +'brow' +'britischer_' +'bringen' +'brig' +'brew' +'brethren_' +'breiteren_' +'brechen' +'brav' +'brasilianische_' +'bras' +'brands_' +'bourne_' +'boundary_' +'boss' +'bos' +'borrowed_' +'bonus_' +'bomb' +'bodi' +'boden_' +'bnis' +'blü' +'blur' +'blood' +'blogs_' +'blogg' +'blockier' +'blin' +'bled_' +'blaue' +'blatt_' +'blasen' +'bisc' +'birthday_' +'birth' +'bird_' +'biom' +'biological_' +'biog' +'biofuel' +'bindung_' +'binary_' +'bilitati' +'bilis' +'bildete' +'bilder_' +'bilanz_' +'bil_' +'bike' +'biete_' +'bid' +'bibliot' +'bias_' +'bia_' +'bezwe' +'bezog' +'bezieh' +'bewunder' +'bewohne' +'bewirt' +'bewi' +'bewerbe' +'bewege' +'bewe' +'bewahr' +'bewaffnet' +'beur' +'beunruhigende' +'betriebs' +'betriebene' +'betr' +'betonte' +'beteiligten_' +'beteiligen_' +'beta' +'bestände_' +'bestr' +'besto' +'bestimmtes_' +'besti' +'bestens_' +'bestellt' +'bestellen_' +'besser' +'besorgniserregende' +'besie' +'besetzt_' +'beschriebenen_' +'beschrieb_' +'beschr' +'beschlossene' +'beschloss_' +'bescheiden' +'besche' +'besa' +'berühren_' +'beruhte_' +'beruhigen' +'berke' +'berichtete_' +'berh' +'bereits' +'bereichen_' +'berater' +'berada_' +'beobachtet_' +'benign_' +'bende' +'benar_' +'bemüh' +'belt_' +'belt' +'belongs_' +'belohn' +'belo' +'beln_' +'beliebt_' +'belegen_' +'beleben' +'bekunde' +'bekomm' +'beklagen_' +'bekl' +'beinhalten_' +'beider_' +'beherrscht_' +'behave' +'behauptete_' +'beharrt' +'begrüß' +'begrenzter' +'begrenzen_' +'beginn' +'begi' +'begegnet' +'befürworten_' +'befürworte_' +'befürchten_' +'befürchte' +'befu' +'befo' +'befindlichen_' +'befindliche' +'beeindruckt' +'beeindruckenden_' +'bedingt_' +'bedeutsam' +'bedeutender' +'bedeutend_' +'bedauere_' +'bedank' +'beauf' +'bearbeitung' +'bearbeitet' +'bear' +'beant' +'beans' +'beachtliche' +'beachte' +'beabsichtig' +'bbe' +'battles_' +'baths_' +'basierte' +'basel' +'baru' +'barre' +'barr' +'baro' +'barbe' +'barbari' +'bant' +'bans_' +'banker' +'bani' +'bana' +'bakar_' +'baj' +'baggage_' +'baden_' +'bacteria' +'backup_' +'backpack' +'backlash_' +'až_' +'año' +'axis_' +'ax' +'awards_' +'await' +'awa_' +'avoid' +'außergewöhnliche_' +'außerge' +'außenst' +'autoritäre' +'automatisier' +'automatischen_' +'authorita' +'authorisation_' +'authentic_' +'ausüb' +'auszusp' +'auszugeben_' +'auswirkt_' +'ausweiten' +'ausweis' +'auswa' +'austausch' +'aust' +'aussp' +'aussichten_' +'aussa' +'ausrüstung' +'ausrichte' +'ausreicht_' +'auslöste' +'ausländischer_' +'ausle' +'ausla' +'ausgewählte_' +'ausgewogen_' +'ausgewertet_' +'ausgetragen_' +'ausgerichteten_' +'ausgenommen_' +'ausgeglichene' +'ausgegli' +'ausgefü' +'ausgebe' +'ausgearbeitet' +'ausgab' +'ausfäll' +'auseinandersetz' +'ausdrückliche' +'ausdrück' +'ausar' +'aur' +'augen' +'aufzut' +'aufzust' +'aufzub' +'aufwer' +'auftrag' +'aufsch' +'aufs_' +'aufr' +'aufnahme_' +'aufkommende' +'aufk' +'aufhalten' +'aufgreif' +'aufgeworfen' +'aufgewe' +'aufgetr' +'aufgeben_' +'aufeinander_' +'auern_' +'audi' +'auction' +'auch' +'auben' +'atu' +'attraktiver_' +'attraktiv' +'attracted_' +'attacker' +'atrocities_' +'aton' +'atom' +'atmospher' +'ative' 
+'atische' +'atisch_' +'ationsa' +'athlet' +'ater_' +'aten' +'atel' +'atastrophe' +'assured_' +'assur' +'assu' +'associate' +'assert_' +'asse' +'assault' +'assassination' +'asis' +'asin' +'asce' +'artung' +'artist' +'artis' +'artikel_' +'artig' +'arriving_' +'arrange' +'arra' +'arom' +'armies_' +'arl' +'arke' +'aris_' +'arien_' +'arians_' +'argument' +'arguabl' +'arf_' +'arena_' +'ardin' +'archäologische' +'archiv' +'architektonische' +'archipelago' +'archi' +'arbeitung_' +'arbeitslos_' +'arbeitete' +'arabisch' +'appropriati' +'appropriate' +'appreciat' +'applicants_' +'applaud' +'appelliere_' +'apli' +'ao_' +'anzub' +'anzeig' +'anybody_' +'anwendungen_' +'antwortlich' +'antrags_' +'antivirus_' +'antim' +'antiken_' +'anticipate_' +'antibioti' +'anter' +'antag' +'anstrebt' +'ansieht_' +'anschließende' +'anschaue' +'anpassen_' +'annähernd_' +'anktionen_' +'ankl' +'anken_' +'ank_' +'anj' +'angriffe' +'angig' +'angga' +'angetrieben' +'angesp' +'angese' +'angesch' +'angesammelt' +'angenomme' +'angenehme_' +'angemessener' +'angeme' +'angehoben_' +'anga' +'anfü' +'anfängt_' +'anforderung' +'anesische' +'anerkenn' +'aner_' +'andin' +'andi' +'andeu' +'anchor' +'ance' +'analyti' +'analysing_' +'analy' +'analog_' +'anak_' +'ample_' +'amid' +'amending_' +'ambiguous_' +'ambient' +'ambience_' +'amba' +'altr' +'altet_' +'alternative' +'altern' +'alm' +'allo_' +'allmählich' +'alljährlich' +'alli' +'alleine_' +'allegedly_' +'alk' +'alive_' +'alität' +'alition' +'alisieren_' +'alin' +'align_' +'align' +'alienation_' +'alien_' +'alan_' +'aktualisiert' +'aktu' +'aktivitäten_' +'aktive' +'aktion_' +'akti' +'airspace_' +'aire' +'aikan' +'aid' +'ahlung' +'agung_' +'agt_' +'agrees_' +'agierende' +'aggebe' +'agendas_' +'agein' +'agar' +'aftermath_' +'afor' +'afghanischen_' +'afghanische_' +'affen_' +'affair_' +'aerospace_' +'advertise' +'adversaries_' +'adver' +'admissi' +'admira' +'administrator_' +'administrativ' +'administer' +'admin_' +'adjusting_' +'adic' +'ader' +'adan' +'activis' +'activ' +'acquis_' +'acquiring_' +'acqui' +'achtung' +'achte' +'acht' +'ached_' +'accueil_' +'accomodati' +'accommodate_' +'acco' +'accessibility_' +'accede' +'abzuw' +'abzus' +'abzielen_' +'abwechs' +'abundan' +'abstra' +'absorbi' +'absorb_' +'absol' +'absi' +'absent_' +'abschrecken' +'abort' +'abolition_' +'abolished_' +'abnehmen' +'abilität' +'abilities_' +'abili' +'abide_' +'abhalten_' +'abgeschl' +'abgescha' +'abgesch' +'aben' +'abduction' +'aban' +'aat_' +']]. 
_' +']] ' +'[ _' +'[' +'Zürich_' +'Zü' +'Zyp' +'Zyklus_' +'Zyklen_' +'Zwe' +'Zw' +'Zuwa' +'Zuw' +'Zuverlässigkeit' +'Zutaten_' +'Zuständ' +'Zusch' +'Zusammenschluss_' +'Zusammenhänge' +'Zusammen_' +'Zurückh' +'Zula' +'Zuhöre' +'Zuhause_' +'Zugeh' +'Zufriedenheit_' +'Zuflu' +'Zivilisation_' +'Zinssatz' +'Zin' +'Zimmerman' +'Zielvorgabe' +'Zeug' +'Zentren_' +'Zensur' +'Zem' +'Zellen' +'Zeitungs' +'Zeits' +'Zeitlinie_' +'Zeitge' +'Zahlungsv' +'Zahlungsaus' +'Zah' +'Yor' +'Yale_' +'YA' +'XWB_' +'XT' +'XII' +'XF' +'X1' +'Wünsch' +'Wäldern_' +'Wähler' +'Wähl' +'Wut' +'Wunde' +'Works_' +'Wolfs' +'Wolfgang_' +'Wolfensohn_' +'Wohnungen_' +'Wohnung_' +'Wohnb' +'Wohlstands_' +'Wohlergehen' +'Wohlbefinden_' +'Wochen' +'Wirtschaftsn' +'Wirtschaftsg' +'Wirtschaftsbe' +'Wirts' +'Wirks' +'Wire' +'Wirbelst' +'Wins' +'Wing' +'Winde' +'Wind_' +'Willens' +'Wille_' +'Wilders_' +'Wiedervereinigung' +'Wiederholung_' +'Wiederh' +'Wiederaufnahme_' +'Wieder_' +'Widget' +'Wichtiger' +'Wic' +'Whirlpool_' +'Whereas_' +'Whe' +'Wetter' +'Wettbewerbsvor' +'Wettbewerbsver' +'Wettbewerbspolitik_' +'Westeuropa_' +'Wesentliche' +'Wertschöpfung' +'Werts' +'Wert' +'Weltr' +'Weltor' +'Weltme' +'Weltkultur' +'Weltkrieges_' +'Welthandels' +'Weltg' +'Weltbevölkerung_' +'Weiße_' +'Weish' +'Weine_' +'Weile' +'Weigerung_' +'Wednesday_' +'Wechsel' +'Webserver' +'Way' +'Wasserkraft' +'Washing' +'Wanderung' +'Wallström_' +'Waldbrände' +'Wald_' +'Wake' +'Wahler' +'Wahlbe' +'Wagen_' +'Waffenstillstand_' +'Wachstumsraten_' +'Wachstumsrate_' +'WT' +'WS_' +'WM_' +'WEI' +'WAV_' +'Völkern_' +'Völkermord' +'Vö' +'Vé' +'Vä' +'Vá' +'Vulcan_' +'Vul' +'Vot' +'Vos' +'Vorstands' +'Vorsta' +'Vorst' +'Vorsitzende' +'Vorliebe' +'Vorla' +'Vorhersage' +'Vorgang' +'Vorgabe' +'Vorfall_' +'Vorder' +'Volvo_' +'Vollm' +'Volkswagen_' +'Volkspartei_' +'Volksabstimmung_' +'Vladimir_' +'Vizepräsident_' +'Vize' +'Vitamin' +'Virginia' +'Viol' +'Vik' +'Viewer_' +'View' +'Viertel' +'Vielf' +'Via' +'Verzögerung_' +'Verzweiflung' +'Verzug' +'Verwer' +'Verweise' +'Verw' +'Verträgen_' +'Vertrauens_' +'Vertragsver' +'Verteil' +'Verteidiger' +'Versäum' +'Verste' +'Versp' +'Verschwörung' +'Verschwi' +'Verschwendung_' +'Verschuld' +'Verschlechterung' +'Verschiedene_' +'Verschieb' +'Vers' +'Verpa' +'Verp' +'Vero' +'Vernichtung_' +'Verne' +'Vermeidung_' +'Verlaufe_' +'Verlangsamung_' +'Verkäufe' +'Verkehrst' +'Verkehrss' +'Verhandlung_' +'Vergleichss' +'Vergew' +'Verfassungsentwurf_' +'Verfall' +'Vere' +'Verbü' +'Verbr' +'Verbe' +'Verantwortlichen_' +'Veranstaltungs' +'Veran' +'Venus_' +'Vent' +'Venezia' +'Vec' +'Vaters_' +'Variablen_' +'VS' +'VIN' +'VA_' +'Uti' +'Urteile' +'Ursprünge_' +'Urlaube' +'Uribe_' +'Urheberrechts' +'Urbanis' +'Upgrade_' +'Unterwa' +'Untert' +'Unterschrift' +'Untersch' +'Unternehmensf' +'Unterl' +'Unterhaltung' +'Unterha' +'Unterbrechung' +'Untera' +'Unsinn_' +'Unrecht_' +'Unix_' +'Universitäts' +'Universal' +'Unit_' +'Unglücklicherweise_' +'Unglück_' +'Ungleichgewicht' +'Ungere' +'Une' +'Umweltver' +'Umweltschutz' +'Umwelts' +'Umweltpr' +'Umweltpolitik_' +'Umverteilung' +'Umstellung_' +'Umfragen_' +'UV_' +'URL' +'UNICEF_' +'Türke' +'Tyrol' +'Tyran' +'Tyr' +'Type_' +'Tusk' +'Turnier_' +'Tunis' +'Tunes_' +'Tsunami_' +'Tschad_' +'Träger_' +'Truste' +'Trojan' +'Trichet_' +'Trend' +'Tren' +'Treibhausgasemissionen_' +'Treibhausgas' +'Treasury_' +'Travel' +'Trau' +'Trap' +'Transit_' +'Transform' +'Transaktionen_' +'Trans_' +'Trainer_' +'Tow' +'Tours_' +'Tourist' +'Tool' +'Too' +'Tole' +'Tisch' +'Timor' +'Timo' +'Til' +'Tiger' +'Tiere' +'Tiera' +'Tic' 
+'Think' +'Thing' +'Therme' +'Therap' +'Ther' +'Theorien_' +'Theodore_' +'Thatcher_' +'Texte' +'Terrorismusbekämpfung_' +'Terroranschl' +'Territorium_' +'Terr' +'Terms_' +'Tendenzen_' +'Temperatur_' +'Temperatur' +'Televis' +'Telekommunikation_' +'Teilung' +'Teilnehmer' +'Technologies_' +'Technis' +'Technical_' +'Tea_' +'Taylor_' +'Tax_' +'Tax' +'Tatatabot_' +'Taste_' +'Tasche' +'Target_' +'Tale' +'Tagung' +'TZ' +'TW' +'TU_' +'TRO' +'TPP_' +'TEN' +'TA_' +'T2' +'Südtirol_' +'Südoste' +'Südostasien' +'Süd_' +'Säug' +'Sän' +'São_' +'Sz' +'Syriza_' +'Syndrom' +'Swim' +'Sustain' +'Surve' +'Surely_' +'Suprem' +'Sup' +'Sunnis_' +'Sunn' +'Summers_' +'Summer_' +'Summen_' +'Summ' +'Suf' +'Suda' +'Substantiv' +'Subsi' +'Subscri' +'Subs' +'Subm' +'Subjekt' +'Stücke_' +'Störun' +'Stärken_' +'Stuf' +'Studi' +'Ström' +'Stric' +'Strau' +'Strategic_' +'Strasse' +'Strafv' +'Stornierung' +'Stop' +'Stone' +'Stock' +'Stimmen' +'Stillstand_' +'Stie' +'Steven' +'Steuerh' +'Steuerer' +'Steuereinnahmen_' +'Stern' +'Stehende_' +'Stefan_' +'Stea' +'Statute_' +'Standpunkten_' +'Standpunkte_' +'Stammzellen' +'Stahl_' +'Stadtk' +'Stadi' +'Stabilitätspakt_' +'Staatsp' +'Staatsbürgerschaft_' +'Sprung' +'Sprin' +'Split' +'Spitzenpo' +'Spit' +'Spirit' +'Spezifikation' +'Spezialist' +'Spend' +'Spen' +'Spektrum' +'Speise' +'Speicherka' +'Spaß_' +'Spazier' +'Sparpolitik_' +'Sparmaßnahmen_' +'Sparen' +'Spam' +'Spalte' +'Sozialp' +'Sozialisten_' +'Sozialismus_' +'Sozialdemokraten_' +'Sowjet' +'Sonnenunterg' +'Sonderg' +'Sommers' +'Somalia_' +'Soft' +'Society_' +'Snowboard' +'Slu' +'Slovenia_' +'Sle' +'Skl' +'Skandal' +'Sitzungsperiode_' +'Sitz' +'Simon_' +'Silber' +'Siena_' +'Siedl' +'Sieben' +'Side' +'Sicherheitsbe' +'Sicherheit' +'Shut' +'Shows' +'Shop' +'Shinzo' +'Shell_' +'Shel' +'Sharia_' +'Sex' +'Seven_' +'Ses' +'Serv' +'Senkaku_' +'Senior' +'Senegal_' +'Sendung_' +'Semit' +'Self' +'Selbstz' +'Selbstvertrauen_' +'Selbstmord' +'Seiten' +'Seit' +'Sehen' +'Seeverkehr' +'Sechste' +'Sech' +'Seattle_' +'Seas' +'Screening' +'Schüt' +'Schönheit' +'Schön' +'Schö' +'Schwächung' +'Schwächen_' +'Schwinde' +'Schwimmbad_' +'Schwie' +'Schwester_' +'Schwellenmärkte' +'Schweigen_' +'Schwe' +'Schwarze' +'Schwachstelle' +'Schutzge' +'Schuss' +'Schuman' +'Schulter' +'Schuldenlast' +'Schuhputzmaschine_' +'Schröder' +'Schritten_' +'Schrei' +'School' +'Schock_' +'Schock' +'Schnitt' +'Schloss' +'Schlo' +'Schlecht' +'Schlacht' +'Schla' +'Schil' +'Schiene_' +'Schied' +'Schein' +'Schauspiele' +'Schatten' +'Schaff' +'Schaf' +'Schadens' +'Schad' +'Saš' +'Savo' +'Savi' +'Sauberkeit_' +'Sauber' +'Sard' +'Saraj' +'Sarah_' +'Santo' +'Sandstr' +'Sand_' +'Samo' +'Sammlungen_' +'Samb' +'Salva' +'Sali' +'Sak' +'Saint' +'Sahara_' +'Sachverhalte' +'Saal_' +'SSI' +'SPE' +'SOEs_' +'SMEs_' +'SL_' +'SIS_' +'SING' +'SDL_' +'SB' +'Rücküberweisung' +'Rücktritt_' +'Rückst' +'Rückhalt_' +'Rückführung_' +'Ryan_' +'Ry' +'Rural_' +'Run_' +'Run' +'Rum' +'Ruine' +'Ruhes' +'Rue' +'Roy' +'Row' +'Rotterdam_' +'Rotarier' +'Rost' +'Ros' +'Root' +'Roo' +'Ron_' +'Ron' +'Rohstoffpreise' +'Rohstoffe_' +'Robot' +'Robin' +'Risikobe' +'Rio' +'Rim' +'Richter' +'Rib' +'Rhythm' +'Rho' +'Rhin' +'Reze' +'Revi' +'Reve' +'Respons' +'Residen' +'Reservierung' +'Reservation' +'Republikan' +'Repräsentanten_' +'Repräsentant' +'Repression' +'Renzi_' +'Renten_' +'Rente_' +'Religions' +'Relevanz_' +'Relation' +'Reisez' +'Reisever' +'Reisetipp_' +'Reis' +'Reha' +'Register' +'Regierungspo' +'Regarding_' +'Refu' +'Reden_' +'Reco' +'Rechtssysteme' +'Rechtssystem' +'Rechtsst' +'Rechtssicherheit_' 
+'Rechtsr' +'Rechtschreib' +'Rechtsbe' +'Rechtsausschuss' +'Rechtsanw' +'Recherche' +'Rechenschaftspflicht_' +'Recep' +'Rebellion_' +'Realit' +'Realisierung_' +'Reading' +'Reac' +'Raums_' +'Raume' +'Rauchen_' +'Ration' +'Rating_' +'Ratifi' +'Rathaus' +'Rapid_' +'Rangliste' +'Ral' +'Raketen_' +'Rail_' +'Raci' +'RT_' +'RK' +'RANT' +'RAM_' +'RAC' +'Quoten_' +'Quellcode_' +'Quar' +'Quanti' +'Quant' +'Qualifikationen_' +'Qual' +'Quadrat' +'Qu' +'Qatar_' +'Qaddafi_' +'Qa' +'QUI' +'Pv' +'Push' +'Pump' +'Pull' +'Pub' +'Psycho' +'Psych' +'Präzisi' +'Präsidentschaftswahlen_' +'Präsentation' +'Prämien_' +'Präf' +'Provinz' +'Provider_' +'Prototyp' +'Proto' +'Proteine_' +'Protein' +'Protect' +'Propo' +'Propaganda_' +'Promenade_' +'Programmier' +'Profit_' +'Profil_' +'Professional_' +'Produkts' +'Produktionsst' +'Produktionspr' +'Product' +'Proc' +'Problemati' +'Privileg_' +'Privats' +'Privatis' +'Prior_' +'Priester' +'Prev' +'Prepa' +'Premierminister' +'Preisstabilität_' +'Preises_' +'Prakti' +'Prag' +'PowerP' +'Potter' +'Potsdam' +'Poten' +'Postgre' +'Postdienst' +'Posse' +'Position' +'Portugies' +'Portale' +'Porta' +'Population_' +'Pon' +'Politicians_' +'Pola' +'Plo' +'Plenar' +'Play_' +'Plata_' +'Plast' +'Planet_' +'Pizza' +'Pino' +'Pilote' +'Pilot' +'Pier' +'Pick' +'Picc' +'Pic' +'Photocopying_' +'Phoeni' +'Philippi' +'Pharma' +'Pflanz' +'Pfl' +'Pfeiler_' +'Pfad_' +'Peters' +'Perspective_' +'Persi' +'Perm' +'Perfe' +'Pension_' +'Pennsylvania_' +'Pend' +'Pemb' +'Pedro' +'Peak' +'Pax_' +'Paus' +'Pati' +'Patent_' +'Passw' +'Passport_' +'Passei' +'Passe' +'Pass_' +'Partner' +'Partic' +'Parti' +'Parm' +'Parlamentarier_' +'Parkplatz_' +'Parke' +'Paris' +'Parc' +'Paramet' +'Parallelen_' +'Parallel_' +'Paradox' +'Paolo_' +'Pani' +'Pand' +'Pan_' +'Palästinensischen_' +'Pakets_' +'Paint' +'Packag' +'PROGR' +'POL' +'PN' +'PLAYER' +'PIC' +'PAS' +'Oxford_' +'Ow' +'Outlook_' +'Ostse' +'Oster' +'Osborne_' +'Ortho' +'Ortega' +'Organisat' +'Organen_' +'Ordn' +'Order' +'Orden_' +'Optimi' +'Opportuni' +'Opfern_' +'Operation' +'Omni' +'Offi' +'Oe' +'Obersten_' +'Oberste_' +'Oberh' +'Obergrenze_' +'ORT' +'OM_' +'OFI' +'Nöt' +'Nö' +'Nutzungsbedingungen_' +'Nutze' +'Num' +'Now' +'Nove' +'Notwendig' +'Notfall' +'North' +'Normali' +'Nordi' +'Nomin' +'Nomad_' +'Nobody_' +'Nis' +'Nina_' +'Niko' +'Night_' +'Night' +'Niederla' +'Niedergang_' +'Nico' +'Nick' +'Nichte' +'Newsletter_' +'Nevada_' +'Nev' +'Neustart' +'Neuschwanstein_' +'Neuf' +'Neues_' +'Neuen' +'Neuausrichtung_' +'Nerv' +'Ner' +'Neo' +'Neighbo' +'Nego' +'Neb' +'Neapel_' +'Nazi' +'Naturwissenschaft' +'Naturpark' +'Naturk' +'Natural' +'Nationalst' +'Nationalpark_' +'Nationalpar' +'Namib' +'Nam' +'Nak' +'Nahrungs' +'Nahost_' +'Nahe' +'Nachw' +'Nachmittag_' +'Nachhaltige' +'Nachbarschaftspolitik_' +'Nachbarschaft_' +'Nachbarländer' +'Nachbar' +'NY' +'NIC' +'NG_' +'NGO_' +'NEC' +'ND_' +'NDE' +'Mütter_' +'Mün' +'Mönch' +'Möglicherweise_' +'Mythos_' +'MySpace_' +'Mutter' +'Muslim' +'Musical' +'Museums_' +'Museen_' +'Muse' +'Murdoch_' +'Mull' +'Mozart' +'Moz' +'Movielearn_' +'Movie_' +'Mosle' +'Moses_' +'Mosambik_' +'Mord' +'Mora' +'Montreal_' +'Monterrey_' +'Montai' +'Montag' +'Monster_' +'Mons' +'Molda' +'Modi_' +'Moderne_' +'Moder' +'Model' +'Moda' +'Mobiltelefon_' +'Mobilität_' +'Mittleren_' +'Mittelwe' +'Mitteleurop' +'Mitteil' +'Mitleid' +'Mitbe' +'Mitarbeiter' +'Mist' +'Missverständnis' +'Misstrauen_' +'Mission' +'Missb' +'Mira' +'Mir' +'Mins' +'Ministerrat_' +'Ministerpräsidenten_' +'Min_' +'Millions' +'Millia' +'Militära' +'Milchprodukt' +'Milch' +'Mikrof' 
+'Mike_' +'Metropoli' +'Metalle' +'Metal' +'Messa' +'Mercedes_' +'Menü' +'Menschenrechtsko' +'Mena' +'Memory_' +'Memori' +'Meli' +'Meister' +'Mein' +'Mehrzahl_' +'Mehrwertsteuer' +'Meeresf' +'Meere' +'Medina' +'Mechani' +'McK' +'McG' +'Maßstäbe' +'Maxi' +'Max_' +'Mauri' +'Mauer' +'Massenm' +'Massaker' +'Marí' +'Marta_' +'Mars_' +'Marr' +'Marqu' +'Marktzug' +'Marktt' +'Market' +'Mare' +'Mara' +'Malware_' +'Mali_' +'Maje' +'Mainstream' +'Main_' +'Mailand_' +'Mahlzeiten_' +'Magst_' +'Magazin_' +'Magazin' +'Machtver' +'Machts' +'MX' +'MT_' +'MS' +'MP4_' +'MOV_' +'MOS' +'MIN' +'MIDI_' +'MG' +'MENT' +'MB' +'MAT' +'MAN_' +'MAN' +'M4' +'Lüg' +'Lücke_' +'Lä' +'Luz' +'Lup' +'Luk' +'Lufthansa_' +'Ludwig_' +'Lucas_' +'Loyali' +'Louvre' +'Lohns' +'Logi' +'Lloyd' +'Livi' +'Liverpool_' +'Liter' +'Listen_' +'Lima_' +'Liese' +'Lieferu' +'Lied' +'Libye' +'Libyan' +'Leser_' +'Lesen_' +'Leo' +'Leno' +'Leiche' +'Lehrer' +'Lehman_' +'Legend' +'Leg' +'Lebensstandard_' +'Lebense' +'Lebanese_' +'Leave_' +'Laz' +'Lava' +'Laufzeit_' +'Laufen' +'Latvia' +'Latin' +'Lass' +'Larry_' +'Lannoye_' +'Langzeit' +'Landwirtschafts' +'Landw' +'Lampe' +'Lamaniten_' +'Lager' +'Lagen' +'Lady' +'Label_' +'Lab_' +'LR' +'LC_' +'Küsten_' +'Künst' +'Kün' +'Kühlschra' +'Kü' +'Körperschaft' +'Königreichs_' +'Köln' +'Käufer' +'Käuf' +'Kyr' +'Kyi' +'Kuwait_' +'Kurze' +'Kuro' +'Kurdish_' +'Kunde_' +'Kulissen_' +'Kuli' +'Kriterium_' +'Kriminelle' +'Krieg' +'Krem' +'Kreditvergabe_' +'Kreditv' +'Kreditnehmer' +'Kreditkarten' +'Kreditb' +'Kreditaufnahme_' +'Krebs_' +'Kreaturen' +'Kreat' +'Kraftwerk' +'Kow' +'Kosov' +'Korruptions' +'Kori' +'Koreans_' +'Koran' +'Kora' +'Kopiere' +'Kopfs' +'Koordin' +'Konzerne' +'Konvertier' +'Konvers' +'Kontextmenü' +'Konte' +'Konsultationen_' +'Konserv' +'Konferenzr' +'Konferenze' +'Kompetenz_' +'Kommuni' +'Kommissionsvorschlag_' +'Kommissionsmitglied' +'Kommissar' +'Kommentar_' +'Kolumbien_' +'Kolonie' +'Kollekti' +'Koizumi_' +'Kohlendioxid_' +'Koh' +'Kofinanzierung' +'Knowledge_' +'Kno' +'Kni' +'Klu' +'Klingon' +'Kleine' +'Klein_' +'Klausel' +'Klassifi' +'Klage_' +'Kinnock_' +'King' +'Kindes' +'Kinderar' +'Kin' +'Khomeini_' +'Kho' +'Khan' +'Khal' +'Keys' +'Kernel' +'Kenn' +'Kaukasus_' +'Katastrophenschutz' +'Katalo' +'Kaschmir' +'Karzai_' +'Kapitalmärkte_' +'Kapitalflu' +'Kanäle_' +'Kana' +'Kammer' +'Kamin' +'Kama' +'Kalk' +'Kaliningrad' +'Kale' +'Kade' +'Kaczyński_' +'Kabel' +'KW' +'KU' +'KMU_' +'KLM_' +'KING' +'KEI' +'KB' +'Jury' +'Juris' +'Jugendlicher_' +'Jugendherberg' +'Juden_' +'Journalist' +'Jord' +'Jong_' +'Jonas' +'Johnson' +'Johanne' +'Jog' +'Joan' +'Jim_' +'Jet' +'Jes' +'Jem_' +'Javi' +'Jaro' +'Jar' +'Japaner_' +'Jan' +'Jahrtausend' +'Jahrhunderte' +'Jahresz' +'Jag' +'Jacob_' +'JRE_' +'JE' +'Ivan' +'Iv' +'Ite' +'Italiens_' +'Italien' +'Issu' +'Iraker_' +'Investor' +'Internetverbindung_' +'Interneta' +'International' +'Intensität' +'Int' +'Insur' +'Insti' +'Inse' +'Innovation' +'Innenpoliti' +'Initiati' +'Inhalts' +'Inhaber_' +'Infrastructure_' +'Infos_' +'Informationss' +'Informationsa' +'Informati' +'Inflationsr' +'Infineon' +'Infektions' +'Infekt' +'Infe' +'Industriesta' +'Industrielle' +'Industrial_' +'Industri' +'Indoor_' +'Indo' +'Indiz' +'Individual' +'Indic' +'Increased_' +'Inco' +'Inclu' +'Inci' +'Improvi' +'Impres' +'Impfstoffe_' +'Immerhin_' +'Immer_' +'Ign' +'Ideologie_' +'Ideolog' +'Ideally_' +'Ideale' +'Ideal' +'Icon' +'Ibn_' +'IR_' +'INE' +'INCLUD' +'ILA' +'IFA' +'IES_' +'IEN' +'ICT' +'Hügel_' +'Hör' +'Höl' +'Höf' +'Hôtel_' +'Händler' +'Hypo' +'Hyg' +'Hydr' +'Hybri' +'Hv' 
+'Hungarian_' +'Hunderttausende' +'Hunderte' +'Hund' +'Hul' +'Hubschrauber_' +'Hua' +'House' +'Hotelzimmer_' +'Hot_' +'Horizonte' +'Horizont' +'Hond' +'Hom' +'Holy_' +'Hoff' +'Hochwasser' +'Hochgeschwindigkeits' +'His' +'Hinr' +'Hindus_' +'Himmel_' +'Hilfsmittel_' +'Hilfe' +'Highway' +'Hig' +'Heut' +'Het_' +'Herzog' +'Herv' +'Herrscher_' +'Herman' +'Herangehensweise_' +'Heilige' +'Hed' +'Hebr' +'Header' +'Hay' +'Hava' +'Haustür' +'Haushaltspolitik_' +'Haushaltsplan' +'Haushaltsmittel' +'Haushaltskon' +'Haushaltsausschusses_' +'Haushaltsaus' +'Hauptt' +'Hauptstr' +'Hauptp' +'Hauptau' +'Hatoyama_' +'Hass_' +'Harris_' +'Harr' +'Harm' +'Hariri' +'Hara' +'Happ' +'Hanse' +'Handl' +'Handelsa' +'Hande' +'Hamm' +'Halte' +'Hack' +'Haben' +'Haag' +'HR' +'HOT' +'HER' +'HEN' +'HC_' +'HAVEN_' +'H1' +'Gültigkeit_' +'Gül' +'Göttin' +'Gän' +'Gutes_' +'Gunsten_' +'Gues' +'Guardi' +'Guard' +'Größen_' +'Größ' +'Grundzüge' +'Grundwasser' +'Grundsätzlich' +'Grundsätzen_' +'Grundsatze' +'Grunds' +'Grundrechte' +'Grundl' +'Großka' +'Große' +'Grou' +'Griechen' +'Grey' +'Grenzwert' +'Gremien_' +'Gregori' +'Grego' +'Gramm' +'Graci' +'Gourmet_' +'Gothic_' +'Goth' +'Gos' +'Gore_' +'Good' +'Golfplätze' +'Golds' +'Gob' +'Glück' +'Gloucester' +'Glied' +'Glen' +'Gleichg' +'Gleichbehandlung_' +'Gle' +'Girl' +'Gio' +'Gibraltar_' +'Gia' +'Gewinne' +'Gewin' +'Gewerkschaft' +'Gewerbe' +'Gewalttaten_' +'Getränke' +'Getränk_' +'Gesundheitsz' +'Gesundheitsv' +'Gesundheitssystem' +'Gesundheitsm' +'Geste' +'Gesicht' +'Gesetz' +'Geschäftsv' +'Geschäftsbereich' +'Geschäften_' +'Geschw' +'Geschlecht' +'Geschirr' +'Geschichten_' +'Gesamth' +'Gesamtbe' +'Geräte' +'Germani' +'Germ' +'Gerichtsh' +'Georgi' +'George' +'Geogra' +'Genießen_' +'Generalsekret' +'Genauigkeit' +'Genau' +'Gen_' +'Gemäß' +'Gemeinschaftsin' +'Gelände_' +'Gele' +'Geldes_' +'Gei' +'Gehälter' +'Gehirn_' +'Gehirn' +'Geheimnis' +'Gehei' +'Gehalt_' +'Gegenden_' +'Gefüh' +'Gefängnisse' +'Gefahren' +'Gedächtnis_' +'Gedicht' +'Geburtstag' +'Geburten' +'Gebot' +'Gate_' +'Gastge' +'Garden' +'Garan' +'Ganz' +'Gamm' +'Gaming_' +'Gallery_' +'Galerie_' +'Gale' +'Gala' +'GW_' +'GUI' +'GUE_' +'GP_' +'GO_' +'GMT_' +'GMOs_' +'GL' +'GE_' +'Fülle' +'Fä' +'Fut' +'Fundament' +'Frühstücks' +'Frustration_' +'Frontier' +'Frist' +'Freud' +'Frequenz' +'Fremdenverkehr_' +'Fremdenfeindlichkeit_' +'Freizeita' +'Freilich' +'Freigabe_' +'Freedoms_' +'Fred_' +'Fred' +'François_' +'Franco' +'Fragment' +'Fragestunde_' +'Frage' +'Founde' +'Fotografie_' +'Fortschr' +'Fort_' +'Forschungss' +'Forschungsergebnisse_' +'Forschungsa' +'Formular' +'Formel_' +'Forest_' +'Fore' +'Football_' +'Fonta' +'Folglich_' +'Focus' +'Flut_' +'Flusse' +'Flus' +'Flie' +'Flemi' +'Flasche' +'Flam' +'Flagg' +'Fixed_' +'Fit' +'Fischereiabkommen_' +'Fische_' +'Firmware_' +'Firm' +'Firew' +'Finger_' +'Finanzwesen_' +'Finanzst' +'Finanzp' +'Finanzmittel_' +'Finanzmi' +'Finanzinstrument' +'Finanzb' +'Fina' +'Filme_' +'Filme' +'Fig' +'Fic' +'Feuerwe' +'Feststellung' +'Festiv' +'Feste' +'Fertigkeit' +'Ferna' +'Ferienhäuser_' +'Fed' +'Fatah_' +'Fans_' +'Familienzimmer_' +'Falle' +'Faktum_' +'Fahrzeug' +'Fahrt_' +'Fahrplan_' +'Fahren_' +'Fact' +'Faci' +'Fachk' +'Fabrik' +'Fab' +'FRE' +'FAQ' +'Extreme_' +'Extras_' +'Exten' +'Exporteure' +'Experts_' +'Experte' +'Experimente' +'Exper' +'Exo' +'Exist' +'Exhibit' +'Exekutiv' +'Except' +'Ew' +'Evidence_' +'Everest_' +'Even' +'Evangeli' +'Eurojust_' +'Eurocopter_' +'Euroc' +'Eurobonds_' +'EuroM' +'Eurasi' +'Eur' +'Euph' +'Establishment_' +'Essential_' +'Esse' +'Especially_' +'Erziehung_' 
+'Erzeugung' +'Erzeugnisse_' +'Erwähnung_' +'Erwägungen_' +'Erweiterungen_' +'Erwachsenen_' +'Erwach' +'Erträge_' +'Ertr' +'Erstelle' +'Erste_' +'Erstau' +'Ersparnissen_' +'Erscheinungsbild_' +'Ersatz' +'Err' +'Ernähr' +'Ernst_' +'Erleichterung_' +'Erleb' +'Erheb' +'Erha' +'Erh' +'Ergänz' +'Erg' +'Erfolgsgeschichte' +'Erfolg' +'Erbr' +'Eras' +'Equip' +'Equ' +'Entwicklungsziele' +'Entwicklungs_' +'Entstehung' +'Entspann' +'Entscheidungsprozess' +'Entführung' +'Entf' +'Enter' +'Enr' +'Englischen_' +'Englische' +'Energietechnologie' +'Energier' +'Energiepolitik_' +'Endp' +'Endl' +'Ende' +'Employ' +'Empfang' +'Emotion' +'Emirate' +'Embr' +'Email_' +'Elysées_' +'Ell' +'Elizabeth_' +'Elis' +'Elend_' +'Elemente' +'Eleganz_' +'Elefanten' +'Elde' +'Eisenbahnver' +'Einzelpersonen_' +'Einwi' +'Einwanderungspolitik_' +'Einwanderungs' +'Eint' +'Einsp' +'Einse' +'Einrei' +'Einmischung' +'Einmarsch' +'Einm' +'Einkommens_' +'Einkaufszentr' +'Einflüsse' +'Einfach_' +'Eindämmung' +'Einblick_' +'Einbindung_' +'Eid' +'Editor_' +'Edit_' +'Echtzeit_' +'Early_' +'EUR' +'ESM' +'ENI' +'EMAS_' +'Düsseldorf_' +'Dür' +'Dü' +'Dynast' +'Dynamik' +'Dutzende_' +'Dus' +'Durchschnitts' +'Durchführ' +'Durchb' +'Duomo_' +'Duff_' +'Dub' +'Ds_' +'Drücke' +'Drum' +'Drucker' +'Droh' +'Drittl' +'Dritte' +'Dringlichkeits' +'Dri' +'Dream' +'Drago' +'Dr' +'Doyle_' +'Downloads' +'Down' +'Dornik_' +'Dorn_' +'Dorf' +'Doppelzimmer_' +'Doo' +'Dominikan' +'Dolomit' +'Dolmetsch' +'Dokumentation_' +'Divers' +'DivX_' +'Div' +'Distributoren_' +'Disku' +'Diskriminierung' +'Directory_' +'Director' +'Directi' +'Diplomaten_' +'Diktatur_' +'Different_' +'Dienststelle' +'Dienstleistungssektor' +'Dienstleist' +'Diensta' +'Dienst' +'Dictionary_' +'Dichte' +'Dich' +'Dialogs_' +'Diabetes_' +'Deutsche' +'Deut' +'Detail_' +'Designs' +'Designer_' +'Desi' +'Deregul' +'Derartige_' +'Denkweise' +'Denis' +'Demokratisierung_' +'Demogra' +'Demagog' +'Delu' +'Delta_' +'Delo' +'Delegation' +'Dele' +'Dela' +'Deine' +'Deckmantel_' +'Death_' +'Daw' +'Datenschutz_' +'Datenbl' +'Date' +'Dasselbe_' +'Daseins' +'Das' +'Darwi' +'Darlehen' +'Darauf_' +'Dara' +'Dampfb' +'Damas' +'Dalma' +'Dai' +'Dafürhalten_' +'DVDs_' +'DK' +'DJ_' +'DI_' +'DIC' +'DES' +'DEN_' +'DC' +'DAX_' +'Cycl' +'Cyber' +'Curt' +'Cul' +'Cott' +'Cord' +'Copy' +'Cop' +'Cooper' +'Cool' +'Controller_' +'Conti' +'Constant' +'Conservati' +'Congress' +'Confedera' +'Condo' +'Conditions_' +'Conci' +'Concern' +'Computern_' +'Compr' +'Compani' +'Communication' +'Commen' +'Comi' +'Comfort' +'Combi' +'Colomb' +'Collect' +'Cohe' +'Coelho_' +'Coch' +'Cob' +'Clu' +'Close_' +'Clip' +'Clif' +'Cleverl' +'Cleaning_' +'Clean_' +'Classic' +'Clas' +'Clark' +'Circle_' +'Circ' +'Cind' +'Chrom' +'Christus_' +'Christopher_' +'Christo' +'Christie_' +'Christiani' +'Christen_' +'Christdemokraten_' +'Chr_' +'Chev' +'Cherno' +'Chechen' +'Charle' +'Chapel_' +'Chap' +'Champ' +'Ces' +'Center' +'Cav' +'Caucas' +'Castil' +'Cassi' +'Casio' +'Casino' +'Cash' +'Carrie_' +'Carp' +'Carlo' +'Carl_' +'Caribbean_' +'Care' +'Cardi' +'Capt' +'Canc' +'Canari' +'Canal' +'Canadian_' +'Campingpl' +'Camera' +'Cambodia_' +'Calendar_' +'Cairo' +'Caesar' +'COS' +'CONT' +'CHF_' +'CHE' +'CGI_' +'CET_' +'Bürgerkrieg_' +'Bürgerkrieg' +'Bürgerbe' +'Bündnis_' +'Bünd' +'Bücher_' +'Böge_' +'Buy' +'Busse' +'Busc' +'Bundestag_' +'Bundesstaaten_' +'Bundesp' +'Bum' +'Buddh' +'Buchungs' +'Buchst' +'Buchführung' +'Bucher_' +'Bry' +'Brunnen_' +'Bruc' +'Brow' +'Brooklyn_' +'Bronze' +'Broc' +'Britis' +'Brigade' +'Brian_' +'Brew' +'Bretton_' +'Bret' +'Brenner' +'Bremen_' 
+'Breitband' +'Brei' +'Bree' +'Brav' +'Braun' +'Branc' +'Box' +'Boul' +'Botschafter' +'Boston' +'Bosnien_' +'Bosnia_' +'Boots' +'Boot' +'Bond' +'Bombardier' +'Boeing' +'Boe' +'Bodensch' +'Bode' +'Boar' +'Blog' +'Blingee_' +'Blick' +'Blaž_' +'Birma' +'Biot' +'Biog' +'Binnenm' +'Bindungen_' +'Bin_' +'Billig' +'Billi' +'Bildern_' +'Bhutan_' +'Bezirks' +'Bey' +'Bewä' +'Bewu' +'Bewertungs' +'Bewert' +'Bewerber' +'Better_' +'Betrü' +'Beträge' +'Betreuer' +'Betracht' +'Besuchern_' +'Bestra' +'Besten' +'Bestell' +'Besonderheiten_' +'Besetzung_' +'Beschwerde_' +'Beschlussfassung' +'Beschlusse' +'Beschleunigung' +'Beschaff' +'Berufsbildung' +'Berufe' +'Bert' +'Berl' +'Berichterstattung' +'Berb' +'Beobachtungsst' +'Benz_' +'Belohnung_' +'Beliebt' +'Belgrade_' +'Belastung_' +'Belast' +'Belarus' +'Beitrittsl' +'Beitrittskandidaten_' +'Beis' +'Being_' +'Beine' +'Bein' +'Beihilfe' +'Behau' +'Behandlungs' +'Begrenzung_' +'Bege' +'Beförderungs' +'Befu' +'Befr' +'Bedürf' +'Bedienung_' +'Bede' +'Bec' +'Beauf' +'Beatri' +'Beachten_' +'Bayer' +'Bavarian_' +'Baute' +'Baustein' +'Baum_' +'Batt' +'Bath' +'Basket' +'Basis' +'Base' +'Barry_' +'Barro' +'Baro' +'Bargeld' +'Barbe' +'Bann' +'Banker' +'Bankensystem' +'Bande' +'Banc' +'Ballo' +'Bald_' +'Bajor_' +'Bahnstation_' +'Bah' +'Baghdad' +'Baden_' +'Bachelo' +'Bach_' +'Babys' +'BOJ_' +'BN' +'BJ' +'BF' +'BERKELEY_' +'Azu' +'Ax' +'Aw' +'Avatar' +'Außenbe' +'Autoren_' +'Autonomiebehörde_' +'Autobahn_' +'Ausübung' +'Auswärtige' +'Ausweg_' +'Auswe' +'Austausch' +'Aust' +'Ausspr' +'Ausschü' +'Ausschuß_' +'Ausscheiden' +'Ausscha' +'Auskunft' +'Ausgrenzung_' +'Ausgew' +'Ausgehend_' +'Ausgabe' +'Ausflu' +'Ausd' +'Aurora_' +'Aug' +'Aufträge_' +'Auft' +'Aufstände' +'Aufstand_' +'Aufse' +'Aufge' +'Auffü' +'Auffassungen_' +'Attraktionen_' +'Attent' +'Atta' +'Atlantis' +'Atla' +'Asylsuchende' +'Asylant' +'Assist' +'Asset' +'Asians_' +'Arzt' +'Aru' +'Articles_' +'Arsenal_' +'Ars' +'Arou' +'Armenian_' +'Ark' +'Argumen' +'Argentine' +'Argen' +'Arena' +'Are' +'Arbeitszeit_' +'Arbeitsver' +'Arbeitsrecht' +'Arbeitspro' +'Arbeitsp' +'Arbeitsgruppe_' +'Arbeitsg' +'Arbeitsbe' +'Arbeit' +'Apr' +'Appro' +'Anzeigen' +'Anzeige' +'Anz' +'Anyone_' +'Anwesenheit_' +'Anwendungsbereich_' +'Anwender_' +'Anwa' +'Antrags' +'Anteils' +'Anteile_' +'Ansä' +'Ansprüchen_' +'Ansprech' +'Anson' +'Anschrift' +'Anschließend_' +'Ansatzes_' +'Anreise' +'Anregungen_' +'Anpassungen_' +'Annex' +'Annahmen_' +'Anmerkungen_' +'Anmerkung_' +'Anleihe' +'Ankunft_' +'Anku' +'Anklage_' +'Ani' +'Anhörung_' +'Angr' +'Angeles_' +'Angel' +'Angebots' +'Angeb' +'Ane' +'Andria_' +'Andorra' +'Anden' +'Andalusien' +'Andalusia' +'Anda' +'Anbau_' +'Anato' +'Analys' +'Amu' +'Amo' +'Ambitionen_' +'Amazon_' +'Ama' +'Aly' +'Alum' +'Altern' +'Alta_' +'Alr' +'Alps_' +'Alpi' +'Alltags' +'Alkohol' +'Algor' +'Algerie' +'Algar' +'Alegr' +'Alber' +'Albanian' +'Alb' +'Aktuell' +'Aktualisierung_' +'Aktivisten_' +'Aktionär' +'Aktionsprogramm_' +'Akk' +'Aki' +'Aka' +'Airp' +'Airconditioning_' +'Aid_' +'Agrars' +'Agenturen_' +'Agent_' +'Afri' +'Advanced_' +'Administrat' +'Ade' +'Addis_' +'Ada' +'Active_' +'Acid' +'Acht' +'Achsen' +'Accord' +'Abzug_' +'Abwicklung_' +'Abweichung' +'Abtreibung' +'Abtei' +'Abstimmungs' +'Absti' +'Absolvent' +'Abso' +'Absen' +'Abschl' +'Abl' +'Abhängig' +'Abh' +'Abgeordnete' +'Abfälle_' +'Abdullah' +'Abbe' +'Aa' +'ATM' +'AR_' +'AP_' +'AO' +'AMS' +'AMR_' +'ALE_' +'AD_' +'ADE' +'ACI' +'ACCE' +'ABS' +'A2' +'=_' +';&' +'90er_' +'83' +'750' +'70er_' +'681' +'67' +'63' +'5th_' +'520' +'52' +'4th_' +'45' +'43' +'3G_' +'3G' 
+'39' +'370' +'37' +'270_' +'220_' +'21s' +'202' +'201' +'199' +'1972_' +'1961_' +'1960s_' +'1960' +'1959_' +'1950er_' +'1946_' +'1939_' +'1936_' +'1907_' +'171' +'170' +'16th_' +'145_' +'142' +'127_' +'124_' +'121_' +'117' +'116' +'105_' +'102' +'101' +'0er_' +'07' +'020' +'007' +'/+_' +'/ ' +'......' +'....' +'.'_' +'->' +',..._' +',- ' +',,_' +', (_' +', $_' +'++' +'* ' +'):_' +'), ' +')) (' +'))' +'() ._' +'': _' +'')' +'''.' +''''_' +'%\\' +'$ ' +'">- _' +'"...' +'". _' +'" ._' +'" -' +'!! _' +'!! !' +' …' +' ”' +' ’_' +' ­' +' £_' +' [...]' +' = {_' +' = ' +' ;' +' -> _' +' ***' +' ). _' +' (“_' +' (.' +' ('' +' &#_' +' !!' +'−' +'ي' +'ט' +'ג' +'ь' +'щ' +'ц' +'σ' +'ş' +'œ' +'ě' +'ę' +'ā' +'õ' +'ñ' +'¿' +'º' +'~' +'$' +'™' +'†' +'–' +'ن' +'ل' +'ف' +'ر' +'ר' +'נ' +'Ж' +'Д' +'υ' +'ν' +'λ' +'ś' +'ń' +'ù' +'ì' +'Ñ' +'É' +'Ã' +'Á' +'§' +'–' +'&' +'ー' +'‚' +'م' +'ק' +'ד' +'Я' +'П' +'О' +'Л' +'Е' +'А' +'π' +'κ' +'θ' +'β' +'ū' +'Ś' +'ō' +'ć' +'æ' +'Ê' +'Â' +'¼' +'·' +'¶' +'´' +'¥' +'`' +'@' +'#' +'' +'년' +'語' +'简' +'本' +'日' +'文' +'年' +'中' +'•' +'ṳ' +'ศ' +'พ' +'ा' +'र' +'ى' +'ه' +'ص' +'ت' +'ب' +'פ' +'ס' +'ן' +'ו' +'ֿ' +'В' +'ω' +'χ' +'δ' +'Ω' +'̤' +'ư' +'ů' +'ř' +'ľ' +'ė' +'ĕ' +'ą' +'û' +'À' +'½' +'¹' +'­' +'¤' +'¡' +'’' +'\' +':' +'' +'fi' +'黵' +'黃' +'鰀' +'鋘' +'鋓' +'遝' +'蒸' +'致' +'美' +'网' +'紙' +'熨' +'斗' +'応' +'女' +'味' +'友' +'信' +'介' +'丨' +'一' +'ャ' +'バ' +'チ' +'ジ' +'カ' +'ん' +'ら' +'め' +'●' +'▼' +'→' +'※' +'ớ' +'ọ' +'ị' +'ẽ' +'ẻ' +'ấ' +'ी' +'ि' +'य' +'ब' +'त' +'छ' +'आ' +'ِ' +'ك' +'غ' +'ع' +'د' +'ج' +'إ' +'،' +'צ' +'ל' +'ה' +'Қ' +'Ғ' +'Э' +'Ш' +'Ц' +'Х' +'Р' +'М' +'φ' +'ζ' +'γ' +'Χ' +'Τ' +'Ι' +'Ε' +'̯' +'̆' +'ː' +'ˈ' +'ɾ' +'ɛ' +'ɐ' +'ſ' +'ű' +'ŭ' +'ő' +'Ő' +'ŏ' +'ň' +'İ' +'ī' +'đ' +'Đ' +'ă' +'ý' +'ã' +'à' +'Ô' +'Ó' +'È' +'Å' +'¾' +'µ' +'³' +'°' +'¬' +'¢' +'' +'™' +'—' +'“' +'' +'^' +'—' +'²' +'£' +'<' diff --git a/tensor2tensor/test_data/vocab.translate_ende_wmt8k.8192.subwords b/tensor2tensor/test_data/vocab.translate_ende_wmt8k.8192.subwords new file mode 100644 index 000000000..072c420ca --- /dev/null +++ b/tensor2tensor/test_data/vocab.translate_ende_wmt8k.8192.subwords @@ -0,0 +1,8190 @@ +'' +'' +'_' +', _' +'._' +'the_' +'s_' +'in_' +'of_' +'and_' +'to_' +'die_' +'der_' +'und_' +'a_' +'n_' +'en_' +'e_' +'-_' +'t_' +'is_' +'that_' +'zu_' +'d_' +'den_' +'es_' +'ed_' +'on_' +'ing_' +'for_' +'von_' +'r_' +'an_' +'ist_' +'er_' +'y_' +'. 
_' +'für_' +'be_' +'The_' +'are_' +'with_' +'as_' +'das_' +'it_' +'des_' +'ung_' +'auf_' +'mit_' +'eine_' +'dass_' +'nicht_' +'I_' +'im_' +'by_' +'not_' +'have_' +'this_' +' (_' +' – _' +'sich_' +'or_' +'was_' +'um_' +'ein_' +'dem_' +'werden_' +'Die_' +'will_' +'from_' +'we_' +'ly_' +'’_' +'at_' +': _' +'te_' +'Sie_' +'which_' +'ng_' +'als_' +'has_' +'m_' +'ten_' +'auch_' +'l_' +'you_' +'wir_' +'In_' +'sind_' +'ion_' +'wird_' +'o_' +') _' +'all_' +'so_' +'can_' +''_' +'sie_' +' - _' +'al_' +'einer_' +'its_' +'de_' +'hat_' +'wie_' +'also_' +'their_' +'haben_' +'European_' +'more_' +'would_' +'oder_' +'über_' +'ich_' +'but_' +'us_' +'einen_' +'?_' +'ungen_' +'one_' +'our_' +'g_' +'aus_' +'zur_' +'they_' +'bei_' +'k_' +'Das_' +'ation_' +'am_' +'2_' +'i_' +'been_' +'; _' +'1_' +'/_' +'ce_' +'nur_' +'Union_' +'should_' +'durch_' +'h_' +'EU_' +'It_' +'le_' +'einem_' +'A' +'tion_' +'5_' +'nach_' +'other_' +'noch_' +'do_' +'This_' +'können_' +' ' +'diese_' +'st_' +'zum_' +'only_' +' , _' +'there_' +'lich_' +'countries_' +'kann_' +'dieser_' +'ch_' +'war_' +'than_' +'We_' +'new_' +'- _' +'your_' +'man_' +'Europe_' +'vor_' +'se_' +'gen_' +'Der_' +'must_' +'3_' +'no_' +'z_' +'Mr_' +'like_' +'were_' +'ment_' +'I' +'ge_' +'wenn_' +'US_' +'Ich_' +'wurde_' +'O' +' "_' +'about_' +'4_' +'ne_' +'time_' +'E' +'re_' +'President_' +'if_' +'Es_' +'up_' +'ve_' +'aber_' +'A_' +'sein_' +'these_' +'ts_' +'ble_' +'who_' +'very_' +'et_' +'ers_' +' ._' +'c_' +'able_' +'Hotel_' +'world_' +'out_' +'S' +'uns_' +'Commission_' +'rs_' +'mehr_' +'such_' +'when_' +'But_' +'B' +'Wir_' +' “_' +'people_' +'he_' +'müssen_' +'P' +'ns_' +'ter_' +'into_' +'G' +'China_' +'his_' +'ihre_' +'most_' +')._' +' _' +'what_' +'now_' +'some_' +'D' +'ungs' +'p_' +'!_' +'any_' +'sehr_' +'Kommission_' +'many_' +'ies_' +'F' +',_' +'8_' +'way_' +'chen_' +'ive_' +'), _' +'% _' +' „_' +'0_' +'unter_' +'had_' +'ent_' +'" _' +'use_' +'T' +'S_' +'States_' +'C' +'w_' +'ry_' +'x_' +'them_' +'nd_' +'economic_' +'6_' +'eines_' +'well_' +'ty_' +'Herr_' +'d' +'me_' +'Er' +'da_' +'M' +'ischen_' +'K' +'diesem_' +'7_' +'need_' +'my_' +'da' +'ein' +'f_' +'zwischen_' +'years_' +'political_' +'Ab' +'(_' +'ions_' +'her_' +'between_' +'ar_' +'alle_' +'over_' +'hotel_' +'first_' +'gegen_' +'work_' +'che_' +'bis_' +'lichen_' +'even_' +'make_' +'policy_' +'N' +'two_' +'could_' +'L' +'muss_' +'anderen_' +'Di' +'Parliament_' +'9_' +'ting_' +'Ta' +'where_' +'keine_' +'hen_' +'ons_' +'ss_' +'ally_' +'system_' +'may_' +'ren_' +'sa' +'ern_' +'iert_' +'important_' +'ben_' +'Council_' +'gibt_' +'gi' +'heit_' +'ck_' +' _' +'ro' +'report_' +'Präsident_' +'just_' +'her' +'Europäischen_' +'Europa_' +'because_' +'If_' +'4' +'those_' +'An' +'U' +'R' +'gel' +'La' +'support_' +'do' +'ieren_' +'rt_' +'igen_' +'B_' +'z' +'nt_' +'immer_' +'ho' +'take_' +'“ _' +'vom_' +'seine_' +'pro' +'Bo' +'el_' +'dies_' +'sowie_' +'end_' +'Be' +'hi' +'liche_' +'country_' +'H' +'” _' +'year_' +'much_' +'k' +'W' +'C_' +'Wenn_' +'P_' +'dieses_' +'ange' +'ted_' +'government_' +'Member_' +'ke_' +'du' +'Lo' +'w' +'after_' +'own_' +'made_' +'u_' +'ments_' +'te' +'schen_' +'Ver' +'0' +'unter' +'ra' +'möchte_' +'ab' +'D_' +'market_' +'being_' +'ity_' +'ance_' +'To' +'' _' +'ru' +'right_' +'public_' +'long_' +'ate_' +'Welt_' +'Al' +'Un' +'sondern_' +'sen_' +'lu' +'ja' +'ors_' +'Zeit_' +'ds_' +'Menschen_' +'Jahren_' +'th_' +'international_' +'5' +'pa' +'ci' +'Ha' +':_' +'good_' +'cht_' +'how_' +'Le' +'financial_' +'b' +'ausge' +'na' +'ihrer_' +'Se' +'diesen_' +'USA_' +'wurden_' +'ke' +'ert_' +'en' +'andere_' 
+'For_' +'ve' +'Po' +'s' +'ic_' +'eu' +'la_' +'6' +'sti' +'age_' +'part_' +'len_' +'Diese_' +'same_' +'der' +'Li' +'Jahr_' +'De' +'As_' +'ls_' +'tri' +'no' +'Ne' +'both_' +'cy_' +'V' +'ür' +'t' +'ken_' +'information_' +'bar_' +'to' +'sten_' +'last_' +'against_' +'Ro' +'Länder_' +'through_' +'lo' +'ations_' +'Da' +'10_' +'über' +'ur' +'um' +'ien_' +'as' +'Do' +'8' +'rn_' +'over' +'Bericht_' +'unsere_' +'ri' +'keit_' +'global_' +'20_' +'Über' +'zu' +'i' +'et' +'dann_' +'aus' +'ig_' +'used_' +'nden_' +'Ber' +'2' +'würde_' +'gen' +'then_' +'ss' +'sollte_' +'king_' +'eri' +'ent' +'00_' +' [[_' +'national_' +'There_' +'too_' +'jedoch_' +'hier_' +'un' +'high_' +'does_' +'T_' +'mar' +'auf' +'ar' +'che' +'ba' +'Wi' +'damit_' +'Im_' +'seiner_' +'sch_' +'order_' +'or' +'less_' +'heute_' +'а' +'ng' +'ful_' +'ca' +'b_' +'Sa' +'ut' +'ta' +'men_' +'3' +'United_' +'O_' +'Ein' +'under_' +'sen' +'ed' +'Und_' +'är' +'social_' +'l' +'Fa' +'Ar' +'sche_' +'a' +'ische_' +'ia_' +'fen_' +'bar' +'n' +'In' +'go_' +'7' +'.' +'still_' +'m' +'growth_' +'eb' +'E_' +'example_' +'Ma' +'9' +'g' +'day_' +'al' +'ö' +'sp' +'ning_' +'ris' +'les_' +'inter' +'é' +'so' +'europäischen_' +'sta' +'see_' +'power_' +'neue_' +'ohne_' +'nen_' +'free_' +'Parlament_' +'Land_' +'Ba' +'rights_' +'nn' +'ner_' +'ha' +'That_' +'Mitgliedstaaten_' +'Mar' +'number_' +'ce' +'place_' +'nde_' +'könnte_' +'development_' +'ck' +'area_' +'And_' +'kommen_' +'Her' +'within_' +'while_' +'fact_' +'course_' +' | _' +'view_' +'point_' +'hr' +'before_' +'ac' +'ter' +'bereits_' +'Co' +'os_' +'op' +'per_' +'h' +'Frage_' +'’ _' +'ä' +'uf' +'room_' +'neuen_' +'se' +'ft_' +'come_' +'ad' +'30_' +'possible_' +'denen_' +'Unter' +'Entwicklung_' +'Ein_' +'Bi' +'199' +'tt' +'selbst_' +'aufge' +'wieder_' +'up' +'ma' +'he' +'far_' +'Mo' +'Aus' +'economy_' +'Auf' +'want_' +'set_' +'here_' +'gu' +'future_' +'sollten_' +'ks_' +'ger' +'ta_' +'stellen_' +'rec' +'od' +'ni' +'f' +'M_' +'wo_' +'tly_' +'mp' +'land_' +'es' +'bi' +'You_' +'nehmen_' +'iti' +'zwei_' +'sh_' +'dazu_' +'R_' +'Europäische_' +'under' +'ina' +'down_' +'..._' +'ti' +'say_' +'ens' +'con' +'Regierung_' +'Pa' +'ste' +'Vor' +'tra' +'pri' +'great_' +'already_' +'without_' +'red_' +'out' +'gra' +'gesch' +'liegt_' +'Zu' +'Aber_' +'ör' +'ure_' +'ht_' +'fi' +'put_' +'mo' +'il_' +'di' +'men' +'lassen_' +'large_' +'So' +'No' +'! 
_' +'während_' +'para' +'ous_' +'nce_' +'machen_' +'human_' +'Me' +'Go' +'ll_' +'bo' +'Sta' +'Mu' +'Man' +'000_' +'weil_' +'problem_' +'ko' +'е' +'today_' +'jetzt_' +'ihren_' +'ver' +'nd' +'ine_' +'igkeit_' +'ige_' +'F_' +'versch' +'line_' +'ihr_' +'go' +'get_' +'So_' +'15_' +'ig' +'ie_' +'hl' +'el' +'ban' +'Wa' +'Re' +'Ad' +'wäre_' +'therefore_' +'cannot_' +'believe_' +'service_' +'le' +'fa' +'den' +'Welt' +' '_' +'teil' +'sk' +'san' +'pi' +'is' +'finden_' +'We' +'Je' +'Fe' +'st' +'since_' +'ok' +'gef' +'Lage_' +'Ja' +'Finanz' +'Du' +'ssen_' +'fe' +'state_' +'re' +'Ge' +'crisis_' +'X' +'Ra' +'Ni' +'Bei' +'201' +'ves_' +'three_' +'ine' +'gs_' +'ger_' +'fort' +'er' +'c' +'G_' +'val' +'po' +'pen' +'ness_' +'fully_' +'find_' +'few_' +'change_' +'cal_' +'ary_' +'America_' +'u' +'si' +'inc' +'geht_' +'eren_' +'ch' +'bel' +'Ti' +'N_' +'Ländern_' +'Ka' +'viele_' +'net' +'na_' +'Pi' +'ßen_' +'us' +'ten' +'services_' +'process_' +'lt_' +'ki' +'issue_' +'help_' +'Unternehmen_' +'Jo' +'trade_' +'ori' +'it' +'including_' +'enden_' +'available_' +'Car' +'und' +'sit' +'politik_' +'dig' +'allen_' +')' +'tions_' +'sol' +'pol' +'level_' +'dis' +'case_' +'ult' +'p' +'cha' +'Maßnahmen_' +'uti' +'rk' +'agen_' +'real_' +'ne' +'know_' +'einige_' +'ber' +'Vi' +'Eine_' +'50_' +'viel_' +'que' +'om' +'means_' +'de' +'darauf_' +'bu' +'Fi' +'una' +'politischen_' +'o' +'denn_' +'dafür_' +'clear_' +'ät_' +'tru' +'rate_' +'next_' +'mich_' +'gest' +'different_' +'di_' +'city_' +'Pro' +'Jahre_' +'seit_' +'que_' +'car' +'areas_' +'Fu' +'ü' +'ps_' +'ngen_' +'mm' +'kan' +'ht' +'ex' +'em_' +'ea' +'sto' +'geben_' +'e' +'bes' +'and' +'о' +'   _' +'ster' +'problems_' +'par' +'gut_' +'When_' +'Staaten_' +'Rechts' +'Nach' +'L_' +'wa' +'stre' +'ran' +'ler_' +'health_' +'Politik_' +'ug' +'ou' +'ir' +'form_' +'best_' +'Z' +'Na' +'Mit_' +'Ihnen_' +'Gr' +'н' +'ssi' +'spa' +'sch' +'ret' +'mus' +'making_' +'led_' +'however_' +'better_' +'allem_' +'Su' +'Spe' +'Bel' +'org' +'nis' +'hin' +'ganz_' +'est_' +'ß' +'why_' +'tan' +'page_' +'ord' +'mit' +'lis' +'be' +'Mi' +'ms_' +'lly_' +'habe_' +'ua' +'tes_' +'on' +'ip' +'ings_' +'Frau_' +'Arbeits' +'tu' +'su' +'sicher' +'kan_' +'je' +'in' +'doch_' +'ding_' +'Dies_' +'zug' +'ul' +'ant_' +'Inter' +'ze_' +'politische_' +'hol' +'erung_' +'energy_' +'ely_' +'during_' +'ak' +'zi' +'mor' +'become_' +'They_' +'v_' +'sur' +'sin' +'rt' +'ren' +'pre' +'ner' +'back_' +'ari' +'access_' +'W_' +'ye' +'sehen_' +'ma_' +'Japan_' +'2009_' +'. 
' +'waren_' +'ur_' +'unserer_' +'tung_' +'ob_' +'nt' +'nach' +'j' +'hin_' +'führen_' +'etwas_' +'daß_' +'cht' +'Committee_' +'Cha' +'ya' +'war' +'taken_' +'man' +'durch' +'current_' +'ara' +'At' +'yl' +'wollen_' +'v' +'ura' +'ual_' +'qui' +'question_' +'pass' +'ling_' +'au' +'aff' +'yang_' +'inde' +'ile' +'To_' +'Si' +'Energie' +'Bar' +'tro' +'particular_' +'har' +'ence_' +'action_' +']]' +'18' +'small_' +'schaft_' +'markets_' +'lic' +'gt_' +'enti' +'did_' +'com_' +'ag' +'Ru' +'Rat_' +'Lu' +'Kont' +'Ho' +'vera' +'tä' +'ins' +'fre' +'co' +'These_' +'Israel_' +'x' +'vor' +'tig' +'think_' +'ise' +'eg' +'based_' +'anti' +'vi' +'ts' +'interest_' +'des' +'debate_' +'common_' +'beim_' +'Kon' +'Gi' +'Ges' +'Doch_' +'Commissioner_' +'mag' +'letzten_' +'again_' +'Is' +'Am' +'т' +'äu' +'nun_' +'ie' +'geb' +'bietet_' +'betr' +'bef' +'Tre' +'Germany_' +'At_' +'tz' +'security_' +'schon_' +'ppe' +'measures_' +'enc' +'each_' +'business_' +'ated_' +'Zimmer_' +'Bre' +'quality_' +'pe_' +'offers_' +'nes_' +'military_' +'eli' +'bre' +'Spa' +'sho' +'rn' +'rl' +'per' +'lten_' +'ging_' +'bur' +'Zeit' +'Wie_' +'Te' +'Auto' +'win' +'seinen_' +'möglich_' +'ite' +'im' +'Pe' +'Alt' +'y' +'stri' +'ra_' +'non_' +'major_' +'lä' +'la' +'ität_' +'erhalten_' +'eh' +'act' +'Ihre_' +'12_' +'ß_' +'tic' +'stay_' +'og' +'halten_' +'end' +'bew' +'another_' +'alt' +'Ko' +'Bereich_' +'Arbeit_' +'vo' +'tzt_' +'tin' +'situation_' +'multi' +'gem' +'bility_' +'Iran_' +'|_' +'rm' +'provide_' +'mu' +'ins_' +'dan_' +'Ende_' +'11_' +'result_' +'rather_' +'position_' +'nta' +'ns' +'ngs' +'main_' +'law_' +'lar' +'ert' +'ear' +'continue_' +'citizens_' +'said_' +'read' +'nts_' +'ll' +'kra' +'ers' +'ende_' +'Auss' +'wo' +'nit' +'mis' +'might_' +'kti' +'ier' +'ces_' +'ass' +'House_' +'un_' +'term_' +'steht_' +'ste_' +'oo' +'nst' +'nahme_' +'mer' +'ld' +'icher' +'hou' +'ehen_' +'ab_' +'American_' +'2008_' +'Ä' +'ym' +'worden_' +'recent_' +'ot' +'ory_' +'given_' +'ersch' +'ens_' +'Unterstützung_' +'San' +'Internet_' +'Euro_' +'whether_' +'using_' +'uns' +'systems_' +'stra' +'ph' +'ici' +'every_' +'ef' +'ct_' +'bet' +'V_' +'Ri' +'Probleme_' +'Nu' +'Gen' +'xi' +'tun_' +'pers' +'ol' +'nk' +'mon' +'ft' +'ess' +'einge' +'dec' +'bin_' +'außer' +'En' +'2000_' +'nl' +'mas' +'il' +'hm' +'halt' +'give_' +'for' +'ersten_' +'einfach_' +'ee' +'called_' +'ans_' +'Sc' +'El' +'trans' +'ses_' +'life_' +'ju' +'governments_' +'ents_' +'enter' +'debt_' +'Teil_' +'J' +'Auch_' +'ück' +'ze' +'stor' +'pt' +'pan' +'ons' +'ill' +'gri' +'ght_' +'führt_' +'disa' +'besteht_' +'Problem_' +'Gu' +'rooms_' +'proposal_' +'pe' +'needs_' +'hu' +'hei' +'anzu' +'Sol' +'Lin' +'Kü' +'Bank_' +'á' +'private_' +'ond' +'ler' +'iss' +'eigenen_' +'ei' +'budget_' +'Sch' +'Herrn_' +'р' +'weniger_' +'rü' +'rä' +'institutions_' +'ierung_' +'ial_' +'full_' +'ele' +'Schwe' +'Scha' +'On_' +'New_' +'For' +'Fall_' +'vis' +'rig' +'mir_' +'li' +'fest' +'fas' +'bli' +'ap' +'Y' +'Ob' +'Ga' +'Chi' +'vote_' +'verb' +'terms_' +'issues_' +'ec' +'du_' +'away_' +'Wei' +'Son' +'Nach_' +'än' +'xe' +'vers' +'rr' +'me' +'little_' +'least_' +'hope_' +'further_' +'erm' +'data_' +'around_' +'an' +'always_' +'Ziel_' +'Wirtschafts' +'Sicherheit_' +'Poli' +'Des' +'Als_' +'üt' +'off_' +'million_' +'kel' +'ian_' +'großen_' +'fast_' +'ain' +'adv' +'Verg' +'France_' +'spiel' +'sma' +'rea' +'oni' +'hy' +'era' +'Ste' +'Russia_' +'Rolle_' +'Che' +'üb' +'äh' +'ver_' +'tat' +'ring_' +'leg' +'kt_' +'ise_' +'insbesondere_' +'if' +'hatte_' +'fel' +'close_' +'World_' +'zurück' +'ys_' +'ud' +'tte_' +'tre' +'tom' +'sts_' +'open_' 
+'nis_' +'lit' +'group_' +'ere' +'deren_' +'bri' +']] _' +'With_' +'U_' +'Medi' +'Art_' +'All_' +', ' +'zwar_' +'sagen_' +'policies_' +'oa' +'mb' +'legal_' +'aren' +'ali' +'Ir' +'wind' +'we' +'ut_' +'nge' +'located_' +'link' +'ja_' +'int' +'inform' +'id' +'hen' +'eti' +'System_' +'Gar' +'Ex' +'Bürger_' +'zer' +'wis' +'unge' +'ther' +'order' +'necessary_' +'money_' +'lus' +'ben' +'What_' +'Tra' +'Namen_' +'Char' +'Ch' +'Air' +'é_' +'weit_' +'tze' +'stehen_' +'risk_' +'pp' +'investment_' +'foreign_' +'ero' +'drei_' +'bor' +'ana' +'allerdings_' +'age' +'Zusammenarbeit_' +'Wirtschaft_' +'Viel' +'He_' +'Gef' +'Fo' +'CO' +'Bedeutung_' +'2005_' +'women_' +'ving_' +'stellt_' +'rst' +'role_' +'person' +'oren_' +'itu' +'ita' +'große_' +'disc' +'chl' +'bringen_' +'ang_' +'Was_' +'Rück' +'Informationen_' +'Haupt' +'Cr' +'würden_' +'weiter_' +'wal' +'sec' +'schi' +'old_' +'nu' +'hold' +'exist' +'err' +'certain_' +'Par' +'Op' +'Lebens' +'2006_' +'zen_' +'writ' +'sed_' +'len' +'etwa_' +'einmal_' +'done_' +'dessen_' +'bs' +'ani' +'ago_' +'Wo' +'Verl' +'German_' +'Dis' +'Bra' +'verk' +'uni' +'une' +'start_' +'sector_' +'not' +'nb' +'going_' +'ges_' +'fla' +'conf' +'chr' +'agreement_' +'Zi' +'Ke' +'Hin' +'2007_' +'и' +'zahl' +'rules_' +'pat' +'ow_' +'ort_' +'increase_' +'ild' +'ihnen_' +'ien' +'expect' +'ere_' +'companies_' +'basis_' +'asi' +'app' +'Steuer' +'ying_' +'wel' +'unseren_' +'tou' +'ski' +'pro_' +'particularly_' +'oll' +'members_' +'kn' +'ess_' +'einzu' +'cu' +'create_' +'co_' +'century_' +'all' +'Sy' +'Per' +'Entwicklungs' +'Au' +'40_' +'25_' +'öffentlichen_' +'ute_' +'rates_' +'our' +'mes_' +'lt' +'local_' +'gehen_' +'ga' +'eth' +'erreichen_' +'cut' +'ct' +'ati' +'art' +'Vie' +'Va' +'Pal' +'Er_' +'Deutschland_' +'Cu' +'Can' +'Bei_' +'Ans' +'.”_' +'vie' +'stand' +'second_' +'quen' +'once_' +'oc' +'llen_' +'gh' +'fic' +'ffen_' +'eni' +'davon_' +'allow' +'Red' +'Neu' +'1' +'ы' +'yet_' +'west' +'vert' +'ture_' +'tor' +'together_' +'rel' +'onis' +'los' +'ku' +'io' +'him_' +'han' +'ever_' +'ect' +'dort_' +'cri' +'cke' +'Bea' +'tis' +'spo' +'soll_' +'ras' +'others_' +'one' +'mat' +'mal' +'könnten_' +'hoch' +'head' +'ffe' +'ded_' +'beh' +'Mit' +'Men' +'Har' +'Ele' +'ühr' +'ute' +'uss_' +'trag' +'ties_' +'staff_' +'setzen_' +'sent' +'sam' +'rac' +'port' +'mittel' +'lle' +'ka' +'hip_' +'fan' +'ez' +'dra' +'\u_' +'Ur' +'Or' +'Für_' +'Abs' +'"_' +' & _' +'с' +'uri' +'tw' +'serve' +'sche' +'needed_' +'meine' +'ite_' +'invest' +'hä' +'ensure_' +'early_' +'del' +'dan' +'dabei_' +'control_' +'conditions_' +'chi' +'book' +'ast' +'Schul' +'Millionen_' +'Community_' +'Auf_' +'2001_' +'weitere_' +'ui' +'though_' +'sou' +'sha' +'rde' +'pf' +'park' +'often_' +'location_' +'ing' +'gro' +'establish' +'bei' +'ate' +'alles_' +'alis' +'Kur' +'Gl' +'Ger' +'ön' +'zen' +'wissen_' +'wer' +'vote' +'sy' +'short_' +'ria' +'price_' +'ologi' +'meisten_' +'kt' +'isti' +'ions' +'inv' +'express' +'especially_' +'erh' +'cho' +'aid_' +'ade' +'Zug' +'September_' +'He' +'Gra' +'Gesch' +'Fragen_' +'Dar' +'wohl_' +'weise_' +'verst' +'sse' +'lei' +'kosten' +'hren_' +'fer' +'central_' +'Weise_' +'Weg_' +'One_' +'Ju' +'19' +') ' +'rd_' +'ps' +'os' +'oli' +'key_' +'kein_' +'itself_' +'isch_' +'greater_' +'geo' +'gegenüber_' +'fund' +'force_' +'cra' +'capital_' +'belie' +'Rahmen_' +'Euro' +'ya_' +'nne' +'never_' +'mme' +'mil' +'mand' +'look_' +'kon' +'kle' +'ket' +'ker_' +'gin' +'face_' +'export' +'eich' +'dre' +'decision_' +'cle' +'auszu' +'appear' +'aller_' +'akt' +'Vers' +'Stre' +'Sk' +'Grund' +'197' +'unc' +'ue' +'sogar_' +'sel' +'scha' 
+'pal' +'modern_' +'list_' +'ka_' +'ila' +'hea' +'handel' +'genu' +'date_' +'cooperation_' +'ant' +'ale' +'Stra' +'Staats' +'Recht_' +'Ki' +'However_' +' $_' +'zuge' +'zo' +'ys' +'spi' +'nti' +'nder_' +'lli' +'innerhalb_' +'inf' +'gez' +'gar' +'frei' +'costs_' +'cally_' +'bil' +'Vorschlag_' +'RI' +'Q' +'Kan' +'Ins' +'II_' +'Hal' +'welche_' +'water_' +'sure_' +'sb' +'regard' +'oi' +'nie' +'nes' +'ned_' +'name' +'lem' +'late_' +'install' +'hn' +'food_' +'ern' +'conte' +'cit' +'bro' +'alit' +'ala' +'aim' +'ad_' +'Zukunft_' +'Wachstum_' +'Stadt_' +'Kredit' +'Indeed_' +'2004_' +' ..._' +'wide_' +'third_' +'rp' +'rein' +'region_' +'reform_' +'profit' +'od_' +'mut' +'mic' +'mate' +'log' +'lines_' +'lea' +'kat' +'ionen_' +'hard_' +'fish' +'ekt' +'date' +'bank_' +'account_' +'Wer' +'Mittel' +'Industrie' +'Comm' +'Bef' +'Bau' +'vern' +'unt' +'tax_' +'rse' +'rit' +'rest' +'medi' +'leaders_' +'implement' +'ene' +'dia' +'dat' +'cken_' +'bek' +'ard' +'among_' +'Ze' +'UN_' +'The' +'Seite_' +'Erw' +'Beispiel_' +'18_' +'16_' +'100_' +'whole_' +'tal' +'rat' +'prä' +'protection_' +'present_' +'mel' +'low_' +'los_' +'lau' +'isierung_' +'ir_' +'hear' +'eve' +'dar' +'chu' +'ca_' +'bla' +'bis' +'bieten_' +'ator' +'alten_' +'ability_' +'Wh' +'Um_' +'Pla' +'Europas_' +'CA' +'yo' +'won' +'vielen_' +'shall_' +'run' +'nung_' +'nte' +'nig' +'nei' +'meine_' +'kommt_' +'income_' +'ide' +'hervor' +'gang_' +'following_' +'fee' +'excellent_' +'disp' +'demo' +'darüber_' +'children_' +'cer' +'ble' +'banks_' +'ain_' +'Web' +'Sie' +'Pol' +'Form' +'Ca' +'Bed' +'äg' +'wirtschaftlichen_' +'ssion_' +'several_' +'reason_' +'ic' +'having_' +'haus' +'get' +'ellen_' +'ek' +'dit' +'beste' +'Sicherheits' +'Seiten_' +'Klima' +'Cli' +'Cas' +'vil' +'ure' +'the' +'suggest' +'sat' +'progress_' +'ommen_' +'mä' +'mini' +'lat' +'kor' +'kons' +'ib' +'fur' +'dri' +'comm' +'bin' +'back' +'Verb' +'Nor' +'EC' +'Chinese_' +'Bu' +'Ap' +'xt' +'wirklich_' +'umge' +'tät_' +'side_' +'sca' +'regulat' +'really_' +'q' +'nationalen_' +'name_' +'möchten_' +'iff' +'gü' +'flu' +'europäische_' +'este' +'emp' +'ech' +'centre_' +'cel' +'ante' +'Zw' +'X_' +'UN' +'Mos' +'Hä' +'24_' +'was' +'value_' +'turn' +'stand_' +'sign' +'sbe' +'ro_' +'resources_' +'relati' +'ping_' +'nde' +'nahmen_' +'mal_' +'ied_' +'icht_' +'gh_' +'geta' +'function' +'erste' +'erk' +'environment_' +'eig' +'Region_' +'Möglichkeit_' +'K_' +'Gegen' +'%' +'ton_' +'something_' +'sm' +'reve' +'represent' +'radi' +'r' +'ona' +'nat' +'left_' +'ism_' +'internationalen_' +'home_' +'gese' +'gele' +'esti' +'down' +'democratic_' +'const' +'conse' +'blo' +'beg' +'ai' +'Zusammenhang_' +'US' +'Pu' +'Mon' +'Grund_' +'Frauen_' +'Flug' +'Bus' +'2003_' +'– _' +'zeug' +'zeit' +'views_' +'tern_' +'teil_' +'stu' +'sowohl_' +'ration_' +'ol_' +'include_' +'gs' +'größere' +'freie' +'dro' +'democracy_' +'daher_' +'Schl' +'Haushalts' +'Bild' +'working_' +'weiterhin_' +'unf' +'unbe' +'thr' +'super' +'states_' +'she' +'rou' +'production_' +'macht_' +'lan' +'kin' +'jo' +'icht' +'getr' +'fu' +'erg' +'emb' +'ehr' +'drive' +'def' +'comes_' +'com' +'ans' +'air_' +'Bush_' +'Ac' +'werk' +'upt' +'ster_' +'standards_' +'son_' +'single_' +'sei' +'regard_' +'play' +'ov' +'ogen' +'nds_' +'longer_' +'kun' +'ik' +'groß' +'gar_' +'entr' +'efforts_' +'bring' +'at' +'arbeiten_' +'ament' +'Süd' +'Rec' +'Mrs_' +'Kom' +'India_' +'Hu' +'Hi' +'Frei' +'Ea' +'14_' +''' +'"' +'л' +'üsse' +'wei' +'until_' +'tol' +'tic_' +'taking_' +'sal' +'ress' +'rep' +'poor_' +'play_' +'matter_' +'lin' +'lf' +'ind' +'hand_' +'hal' +'gek' +'fall_' +'dru' +'dep' 
+'compl' +'beschr' +'am' +'abge' +'Nicht' +'French_' +'Cont' +'Cam' +'Bri' +'20' +'д' +'äng' +'wish_' +'user_' +'unit' +'ue_' +'tional' +'tele' +'tch' +'stimm' +'speci' +'son' +'society_' +'sion_' +'sf' +'ser' +'sco' +'rif' +'reb' +'operation' +'nder' +'nda' +'mi' +'indem_' +'ik_' +'hil' +'happen' +'gie' +'exp' +'est' +'elle_' +'ede' +'col' +'cc' +'attack' +'ating_' +'anz' +'agree' +'Vo' +'Mor' +'60_' +'2002_' +'öl' +'wä' +'wirtschaftliche_' +'unterstützen_' +'th' +'temp' +'swe' +'sun' +'ssel' +'spe' +'sis' +'sing_' +'rar' +'press' +'partners' +'onen_' +'offer_' +'nomi' +'nk_' +'member_' +'likely_' +'let' +'lag' +'ion' +'ili' +'hte' +'gne' +'coa' +'cla' +'but' +'body_' +'Star' +'Rei' +'Pf' +'Os' +'No_' +'Kor' +'Imp' +'Group_' +'Central_' +'zus' +'yn' +'vors' +'uss' +'tell' +'sn' +'rd' +'period_' +'ow' +'ont' +'nz' +'meiner_' +'ls' +'lig' +'ick' +'hör' +'gre' +'gentlemen_' +'erz' +'entfernt_' +'difficult_' +'detail' +'dera' +'cur' +'chs' +'bou' +'bem' +'beiden_' +'ah' +'ack' +'Text' +'Kinder_' +'IN' +'Gäste' +'Groß' +'Gesetz' +'Den' +'/' +'äte' +'äche' +'zwei' +'zusammen' +'verf' +'target' +'reli' +'prices_' +'nä' +'min' +'lung_' +'ischer_' +'house_' +'gener' +'geh' +'fra' +'fiscal_' +'entw' +'claim' +'besonders_' +'approach_' +'Zust' +'Sto' +'Selbst' +'Res' +'Projekt' +'Parlaments_' +'Mittel_' +'Min' +'Kla' +'Hol' +'übern' +'zw' +'verschiedenen_' +'usi' +'ub' +'tzen_' +'sm_' +'sect' +'results_' +'respect_' +'research_' +'reich' +'rati' +'rage_' +'light_' +'leistung' +'land' +'ire' +'industry_' +'ih' +'firm' +'ete' +'erb' +'enta' +'developing_' +'cost_' +'Sub' +'Pat' +'Lei' +'Iraq_' +'International_' +'Fra' +'Dollar_' +' % _' +'wor' +'weis' +'vorge' +'vat' +'talk' +'special_' +'site_' +'seek' +'rid' +'resolution_' +'rei' +'ort' +'onal' +'mea' +'management_' +'lif' +'let_' +'leich' +'jun' +'ian' +'gep' +'follow' +'enk' +'eil' +'ds' +'dly_' +'created_' +'coming_' +'ber_' +'bea' +'arm' +'Tru' +'State_' +'Kar' +'Investitionen_' +'Ent' +'Dan' +'Buch' +'ätze_' +'vision_' +'vel' +'uld' +'uc' +'transport_' +'sub' +'star' +'self_' +'reiche' +'pt_' +'pose' +'pha' +'ot_' +'nü' +'mid' +'menta' +'manage' +'interests_' +'id_' +'ia' +'gat' +'enough_' +'ema' +'can' +'breakfast_' +'bol' +'best' +'ank' +'amm' +'Will' +'Wasser' +'System' +'Handels' +'Gleich' +'Geschäfts' +'Bro' +'Best' +'Ben' +'zz' +'wichtig_' +'wenig_' +'volle' +'ven_' +'ven' +'uer' +'suc' +'rze' +'rise_' +'punkt_' +'proposals_' +'pie' +'parties_' +'nichts_' +'miss' +'kr' +'ives_' +'indi' +'ihm_' +'igung_' +'ground_' +'gew' +'fe_' +'experience_' +'etzen_' +'ete_' +'etc_' +'dea' +'cul' +'cover' +'cher' +'bit' +'almost_' +'ak_' +'Vereinigten_' +'Obama_' +'Leben_' +'Fest' +'Ei' +'wider' +'use' +'sst' +'remain_' +'reflect' +'operat' +'ong' +'num' +'nen' +'near_' +'kte_' +'jene' +'ish_' +'inco' +'general_' +'family_' +'erste_' +'ena' +'deck' +'compa' +'cal' +'aufgrund_' +'arbeite' +'address_' +'Ziele_' +'Wor' +'OR' +'Fre' +'Fraktion_' +'Daten_' +'Ant' +'&_' +'ul_' +'table_' +'ship_' +'programme_' +'prof' +'products_' +'ora' +'merk' +'licher_' +'kü' +'ihn_' +'iden' +'ibe' +'history_' +'hinaus_' +'ged_' +'ftig' +'forward_' +'exc' +'erkenn' +'easy_' +'comp' +'cause_' +'call_' +'building_' +'billion_' +'anst' +'Wä' +'VI' +'Service_' +'Richtlinie_' +'Rats' +'Präsident' +'Privat' +'Ol' +'ME' +'Krise_' +'Grundlage_' +'19_' +'zun' +'zed_' +'za' +'ungsp' +'thus_' +'things_' +'show_' +'sei_' +'see' +'ron' +'questions_' +'ption_' +'port_' +'nuclear_' +'nommen_' +'mos' +'metr' +'known_' +'kleine' +'ilit' +'hes' +'fol' +'fle' +'feel_' +'extra' +'eva' +'erhöh' 
+'darin_' +'cor' +'chtig' +'beha' +'ase_' +'ame' +'ach' +'abs' +'Thema_' +'Sozial' +'Russland_' +'Regierungs' +'Regierungen_' +'NE' +'Kollegen_' +'Chance' +'''' +'vention' +'towards_' +'technology_' +'tar' +'swi' +'stati' +'spr' +'seinem_' +'schwer' +'sau' +'rs' +'rie' +'reas' +'ore' +'ml' +'maintain' +'lichkeit_' +'iz' +'imi' +'hold_' +'hat' +'gewäh' +'easi' +'dor' +'concern' +'cli' +'break' +'befindet_' +'Verfügung_' +'Tha' +'Standard' +'Sho' +'Plan' +'Pen' +'Mari' +'Landes_' +'LA' +'Betr' +'An_' +'wat' +'solution_' +'software_' +'select' +'proposed_' +'produ' +'pris' +'personal_' +'part' +'natürlich_' +'moderne' +'ländern_' +'leading_' +'lang' +'kung' +'keinen_' +'ize_' +'instrument' +'hor' +'graph' +'fäll' +'fin' +'din' +'demokratische' +'demand_' +'ction_' +'cker' +'cing_' +'border' +'berg' +'bas' +'aufzu' +'atz_' +'aba' +'Zusammen' +'War' +'Umwelt' +'Tur' +'Ph' +'Mitglieds' +'Lösung_' +'Lib' +'Leistung' +'Kommissar_' +'Ci' +'è' +'änder' +'zeichnet' +'ws_' +'workers_' +'wer_' +'wegen_' +'weg' +'want' +'version_' +'treat' +'themselves_' +'test' +'staatliche' +'schaffen_' +'roc' +'raum_' +'projects_' +'power' +'positi' +'open' +'months_' +'ji' +'entsch' +'eis' +'eib' +'dic' +'depend' +'cultural_' +'began' +'Sen' +'Programm_' +'Not' +'Macht_' +'Jahrhundert' +'Forschung' +'Eigen' +'Dienst' +'ät' +'ände' +'via_' +'tle' +'seri' +'revi' +'reso' +'res_' +'report' +'rema' +'regional_' +'publi' +'programm' +'program_' +'plan_' +'pay_' +'past_' +'mie' +'io_' +'ical_' +'hing_' +'hem' +'govern' +'erw' +'erst' +'enz' +'ende' +'dy' +'dari' +'cies_' +'chte_' +'bio' +'betrachte' +'ask_' +'ama' +'adopted_' +'Mont' +'Georg' +'Gemeinschaft_' +'Fer' +'East_' +'By_' +'Ausb' +'21_' +'15' +'ün' +'äre_' +'zentr' +'yc' +'tte' +'strategy_' +'sh' +'prospe' +'liberal' +'lege' +'lead_' +'ihrem_' +'ier_' +'higher_' +'gute_' +'directive_' +'beispielsweise_' +'bb' +'ata' +'ane' +'agree_' +'ag_' +'Stand' +'Natur' +'Mer' +'Man_' +'IS' +'Hand' +'H_' +'As' +'”' +'ziehen_' +'wie' +'welcome_' +'uch' +'tiv' +'tim' +'solid' +'shop' +'setzt_' +'serious_' +'sach' +'red' +'rece' +'rch' +'quar' +'pul' +'points_' +'ost' +'nci' +'nar' +'minist' +'levels_' +'lang_' +'import' +'ient_' +'gilt_' +'expan' +'ente' +'eng' +'empf' +'either_' +'eign' +'effective_' +'ee_' +'education_' +'chan' +'art_' +'anges' +'allow_' +'af' +'acht_' +'above_' +'Wal' +'Um' +'Transp' +'Trad' +'Situation_' +'Rea' +'Nord' +'Nachf' +'Met' +'Maß' +'Kultur' +'Bru' +'After_' +'198' +'zin' +'wirk' +'vielleicht_' +'verbesser' +'various_' +'town_' +'substan' +'stan' +'spen' +'specific_' +'sicher_' +'seite' +'seen_' +'schnell_' +'ry' +'related_' +'reduc' +'pun' +'plu' +'mt' +'länder_' +'lie' +'leader' +'lands_' +'kit' +'investi' +'ines' +'hed_' +'gm' +'gesp' +'gan_' +'freedom_' +'found_' +'farm' +'eten_' +'erv' +'erst_' +'deal_' +'cre' +'corr' +'broad' +'bleiben_' +'besser_' +'bedeutet_' +'av' +'anc' +'affect' +'TE' +'South_' +'Pra' +'Institutionen_' +'Indi' +'Aufs' +'Anst' +' -' +'zip' +'wand' +'van' +'urs' +'tum' +'tragen_' +'tm' +'spezi' +'sli' +'seve' +'rapporteur_' +'prec' +'ote_' +'ordnung_' +'nge_' +'mem' +'ly' +'iger_' +'hand' +'gk' +'erle' +'erforder' +'enjoy_' +'eld' +'eins' +'einges' +'economies_' +'ebenfalls_' +'direkt_' +'conven' +'chten_' +'cast' +'bilde' +'authorities_' +'ah_' +'Win' +'Technologie' +'Such' +'Sam' +'November_' +'Meinung_' +'Geb' +'Even_' +'Cor' +'Ansicht_' +'=' +'10' +'. 
_' +'änge' +'äl' +'à_' +'zuf' +'zeitig' +'whe' +'tn' +'ther_' +'suffer' +'stal' +'ssion' +'schw' +'rte' +'ress_' +'repr' +'remains_' +'reforms_' +'rag' +'provided_' +'pin' +'opportunity_' +'ntr' +'nten_' +'mot' +'ladies_' +'kind_' +'ized_' +'iv' +'iso' +'ismus_' +'ische' +'impact_' +'gung' +'groups_' +'gefähr' +'framework_' +'fern' +'equip' +'due_' +'document' +'cru' +'big_' +'big' +'bez' +'automati' +'atic' +'ater' +'arbeit' +'alte' +'ach_' +'accept' +'Verein' +'Schutz_' +'Qua' +'Markt' +'Kam' +'Fahr' +'Berei' +'And' +'Agen' +'works_' +'ways_' +'versi' +'univers' +'uh' +'tt_' +'top_' +'tit' +'strong_' +'simply_' +'significant_' +'sicht' +'shi' +'rus' +'rung_' +'rsch' +'ral_' +'quite_' +'polic' +'party_' +'organis' +'omi' +'oh' +'obe' +'nh' +'nan' +'lige' +'language_' +'imp' +'ily_' +'gliche' +'gestellt_' +'frag' +'fie' +'explo' +'erie' +'eigene' +'distri' +'deep' +'dd' +'cour' +'concerns_' +'climate_' +'cation' +'bie' +'besondere' +'benötig' +'behalf_' +'ausges' +'atu' +'ang' +'abzu' +'Restaurant_' +'Kosten_' +'Em' +'Dieser_' +'Da_' +'Cons' +'Bla' +'Berichte' +'All' +'Africa_' +'öse' +'ätz' +'Ö' +' ' +'zust' +'wahr' +'verd' +'verbr' +'uk' +'ttel' +'travel' +'tier' +'tial_' +'tax' +'seh' +'road_' +'rin' +'rekt' +'refer' +'record' +'rechts' +'presen' +'pea' +'pages_' +'office' +'offen_' +'nisse_' +'mmer_' +'mine' +'method' +'mer_' +'mass' +'makes_' +'liche' +'kont' +'ieb' +'globalen_' +'geri' +'essen' +'erl' +'einzige' +'direct' +'did' +'cat' +'bera' +'attempt' +'ates_' +'angeb' +'Str' +'Rom' +'RE' +'Produkt' +'Our_' +'Lab' +'Ku' +'Komp' +'Hilfe_' +'Fri' +'Einig' +'Einf' +'Dr' +'Demokratie_' +'DS' +'Bud' +'Ban' +'Ang' +'2010_' +'00' +'“_' +'ändig' +'wit' +'wi' +'upon_' +'uct' +'tor_' +'summ' +'suit' +'si_' +'she_' +'seems_' +'rz' +'ros' +'rma' +'rese' +'relations_' +'rechte' +'ppen_' +'pleas' +'oph' +'online_' +'nischen_' +'nal' +'lü' +'loc' +'keiten_' +'join' +'izie' +'inh' +'ile_' +'hers' +'grant' +'gr' +'ff_' +'ew' +'ement_' +'ell_' +'efficien' +'effect_' +'economi' +'eben_' +'ean_' +'cro' +'bra' +'bank' +'any' +'Treaty_' +'Of_' +'Menschenrechte_' +'Kre' +'Form_' +'Ed' +'DE' +'Con' +'Anf' +';_' +'80_' +'*' +')_' +'м' +'Änderung' +'zy' +'zurück_' +'ziert' +'yr' +'wen' +'verwe' +'va' +'unk' +'tung' +'true_' +'total_' +'tliche' +'subject_' +'sse_' +'sprechen_' +'schutz' +'scher_' +'safety_' +'rung' +'rh' +'prop' +'parliament' +'nutzen_' +'mount' +'loa' +'lk' +'limit' +'learn' +'later_' +'lant' +'lab' +'kli' +'jeder_' +'ini' +'ierte_' +'growing_' +'ght' +'förder' +'every' +'erfolgreich' +'enen_' +'dge' +'cou' +'chn' +'char' +'cas' +'bezieh' +'betre' +'bestimmte' +'appropriate_' +'ances_' +'amendments_' +'ail' +'abges' +'West_' +'Verm' +'Ref' +'Minister_' +'Lis' +'Kr' +'Ham' +'Geschichte_' +'Fort' +'Einw' +'Col' +'Alle_' +',' +'(' +'öhn' +'zeigt_' +'yp' +'young_' +'weit' +'til' +'tac' +'start' +'sses_' +'solche_' +'ruct' +'recently_' +'real' +'rasch' +'qua' +'protect' +'pli' +'phi' +'offer' +'off' +'nch' +'nce' +'nationale' +'media_' +'lim' +'legt_' +'lect' +'kal' +'irk' +'internationale_' +'intell' +'individual_' +'improv' +'held_' +'harm' +'grat' +'gewi' +'gal' +'gab_' +'format' +'forces_' +'euro_' +'erte_' +'ep' +'environmental_' +'ebenso_' +'dun' +'det' +'daran_' +'concerned_' +'conce' +'combin' +'care_' +'auto' +'aten_' +'archi' +'andere' +'Wieder' +'Wettbewerbs' +'Ukraine_' +'Strategie_' +'Sti' +'Schi' +'Rel' +'NG' +'Mass' +'Madam_' +'Hea' +'Gewi' +'Gesellschaft_' +'Gericht' +'GDP_' +'Fin' +'Erh' +'Dieses_' +'Bes' +'Außen' +'к' +'öt' +'Übers' +'zation_' +'wage' +'umfa' +'transp' +'sym' 
+'spending_' +'share_' +'rw' +'rre' +'rity_' +'rene' +'priorit' +'positive_' +'peace_' +'path' +'parts_' +'ously_' +'ore_' +'optim' +'opinion_' +'ob' +'ndlung' +'nati' +'mäßig' +'ks' +'kation' +'kam' +'isier' +'illegal' +'ierten_' +'gg' +'gan' +'fri' +'four_' +'eurozone_' +'employment_' +'discuss' +'conver' +'community_' +'ckt' +'cher_' +'changes_' +'attention_' +'air' +'agen' +'[_' +'Woche' +'Tat' +'Some_' +'Rest' +'Rates_' +'Pan' +'Off' +'Milliarden_' +'Meer' +'Jun' +'Hil' +'EUR_' +'Bürger' +'Bad' +'Ass' +'17_' +'überw' +'äs' +'tlich_' +'success_' +'sollen_' +'sie' +'popul' +'pla' +'perat' +'parti' +'pac' +'outside_' +'oun' +'orti' +'ola' +'nte_' +'nse' +'net_' +'nation_' +'mus_' +'maß' +'main' +'liste' +'km_' +'keine' +'jud' +'ize' +'ive' +'improve_' +'iegen' +'ieden' +'idea' +'ica' +'huge_' +'half_' +'gol' +'gewe' +'genau_' +'funds_' +'fragen_' +'field_' +'fail' +'exce' +'etzung_' +'erat' +'eo' +'entst' +'entire' +'endi' +'electr' +'dom_' +'destr' +'dent' +'danger' +'content_' +'cent' +'bru' +'block' +'beda' +'auss' +'ative_' +'ath' +'ann' +'ami' +'ambi' +'ale_' +'addition_' +'act_' +'Western_' +'Sou' +'Sin' +'See' +'Rot' +'Regi' +'Real' +'Miss' +'Kapital' +'Ira' +'ID' +'Fl' +'Bü' +'Bas' +' . _' +'zel' +'traditional_' +'tober_' +'station_' +'stadt_' +'run_' +'rich_' +'post' +'ple' +'passi' +'oil_' +'of' +'nächsten_' +'now' +'nothing_' +'nf' +'mun' +'mee' +'mani' +'legen_' +'leb' +'ition_' +'idea_' +'gemacht_' +'fü' +'fundamental_' +'flexib' +'fal' +'entl' +'eite' +'eid' +'egen_' +'drück' +'draw' +'die' +'deliver' +'compo' +'character' +'bekannt_' +'apartment' +'ache' +'Vorsch' +'Vis' +'Verbraucher' +'Val' +'Tu' +'Schulden' +'Schu' +'Mitte' +'Krieg_' +'Italy_' +'Hotels_' +'Herausforderung' +'Frank' +'Ec' +'Dem' +'Del' +'Bot' +'Beziehungen_' +'Bet' +': ' +'zit' +'zei' +'ws' +'words_' +'verh' +'usual' +'tet_' +'terr' +'tain' +'solche' +'schwierig' +'sc' +'regionale' +'population_' +'pool_' +'player' +'pl' +'orate' +'ole' +'medic' +'lot_' +'legislation_' +'komme' +'iva' +'institution' +'inent' +'ieh' +'high' +'gui' +'genie' +'gene' +'fäh' +'fs' +'exte' +'esp' +'eren' +'else' +'ebe' +'don' +'defi' +'darstell' +'currently_' +'competitive' +'bitte' +'bau_' +'amerikanischen_' +'alist' +'Turkey_' +'Tri' +'Sl' +'Sha' +'Sau' +'Ren' +'Paris_' +'Net' +'Mitglieder_' +'Mat' +'London_' +'Kommiss' +'Ihr_' +'Geld_' +'Führ' +'Frankreich_' +'Foto' +'DE_' +'Comp' +'Bevölkerung_' +'Besuch' +'23_' +'üh' +'wert' +'verle' +'verein' +'ung' +'trä' +'traf' +'trad' +'tool' +'times_' +'thing_' +'thank_' +'stische' +'sim' +'restaurant_' +'required_' +'reich_' +'recht' +'präsident' +'pil' +'photo' +'participat' +'nic' +'lti' +'letzte' +'leide' +'ktur' +'komple' +'inst' +'inge' +'individu' +'indicat' +'heart_' +'hap' +'hab' +'gl' +'gewa' +'gesagt_' +'faci' +'developed_' +'deutlich_' +'days_' +'chaft_' +'car_' +'bzw_' +'bat' +'adi' +'Während_' +'Uns' +'Tages' +'Members_' +'MA' +'Land' +'Isla' +'Genera' +'Et' +'Erd' +'Eins' +'Bur' +'British_' +'Besch' +'70_' +'.._' +'ül' +'Öl' +'züg' +'zig' +'zess' +'weltweite' +'vent' +'ungss' +'tst' +'tive_' +'tie' +'statt_' +'sia' +'sea_' +'schn' +'reta' +'rer' +'prot' +'plat' +'pic' +'new' +'my' +'minutes_' +'mente' +'mehrere' +'material' +'lte_' +'living_' +'line' +'itt' +'insta' +'insi' +'increas' +'immigra' +'hre' +'help' +'hel' +'goal' +'game_' +'flow' +'fit' +'ffer' +'facilities_' +'eta' +'erwei' +'deutsche' +'demand' +'cus' +'beschl' +'att' +'arti' +'aris' +'appro' +'ae' +'actually_' +'acht' +'abe' +'Zugang_' +'UK_' +'Sup' +'Regel' +'Produktion' +'Pac' +'Organisation' +'My' 
+'Moreover_' +'Let' +'Ide' +'Hei' +'Geld' +'Fern' +'Dienstleistungen_' +'DA' +'Bez' +'Bedingungen_' +'Auswirkungen_' +'Aus_' +'AS' +'35' +'13_' +'"._' +'üge' +'zie' +'zentrale' +'wesentlich' +'vict' +'union_' +'tur' +'transfer' +'tischen_' +'tha' +'text_' +'stü' +'smo' +'sagt' +'rö' +'rne' +'rapid' +'provid' +'product_' +'priv' +'principle_' +'politische' +'person_' +'orm' +'nämlich_' +'model_' +'mati' +'majority_' +'llen' +'lia' +'ktion_' +'jobs_' +'itte' +'intr' +'industrie' +'inder' +'imag' +'ichts' +'hätte_' +'hours_' +'hilfe' +'gte' +'gli' +'fort_' +'erten_' +'erreicht_' +'dist' +'demonstrat' +'control' +'cis' +'certainly_' +'bus_' +'bung_' +'bereit_' +'bed_' +'ausw' +'aue' +'ark' +'applica' +'aner' +'anders' +'ake' +'across_' +'Vorteil' +'Tod' +'Struktur' +'Sit' +'Sim' +'Schw' +'SS' +'Reze' +'Rep' +'Pl' +'Nah' +'MI' +'Lie' +'Gew' +'Gas' +'GE' +'Erfahrung' +'Ce' +'Bewe' +'Amerika' +'ünde' +'äum' +'zung_' +'ziel' +'zeigen_' +'zahlreiche' +'worte' +'wende' +'vorl' +'verg' +'turn_' +'träge' +'surp' +'stärker' +'sge' +'setz' +'rund_' +'rol' +'reserve' +'regist' +'reduce_' +'presiden' +'pres' +'potential_' +'por' +'plo' +'organisation' +'nya_' +'neighbo' +'lös' +'lose_' +'lo_' +'lik' +'lb' +'ktionen_' +'kenn' +'je_' +'ino' +'innen_' +'inflation_' +'indeed_' +'ika' +'igen' +'häng' +'heraus' +'hatten_' +'glaub' +'füh' +'fli' +'fl' +'extrem' +'exchange_' +'except' +'env' +'entsprechende' +'emerge' +'elect' +'einigen_' +'deshalb_' +'cop' +'coo' +'cons' +'colla' +'cases_' +'bt_' +'bleibt_' +'bere' +'benefits_' +'bene' +'batt' +'awa' +'asse' +'anti_' +'Windows_' +'Stelle' +'Sei' +'Schritt_' +'Schluss' +'Pre' +'Office_' +'Nic' +'National_' +'Mä' +'Markt_' +'Greece_' +'Bezug_' +'1999_' +',” _' +'і' +'üg' +'zunehmend' +'zil' +'whose_' +'werte' +'vita' +'unterstützt_' +'unserem_' +'umfassende' +'trotz' +'tend' +'sus' +'stability_' +'stabil' +'sive_' +'similar_' +'sier' +'sense_' +'selb' +'resi' +'reme' +'regulation_' +'range_' +'provision' +'nsch' +'ndet' +'natural_' +'moral' +'mod' +'mittel_' +'mische' +'mere' +'lässt_' +'länger' +'lon' +'lib' +'leite' +'kto' +'keyword_' +'jede' +'interess' +'immediate' +'hs_' +'house' +'historische' +'hafte' +'gemein' +'gebracht_' +'freundlich' +'financ' +'esc' +'erge' +'enb' +'element' +'ei_' +'ege' +'directly_' +'ding' +'ders' +'consider_' +'brauch' +'bereit' +'beautiful_' +'bahn' +'atz' +'appe' +'along_' +'abl' +'Umsetzung_' +'TV_' +'She' +'Scho' +'Sach' +'Menschen' +'Luft' +'Interesse_' +'Instrument' +'Imm' +'Hun' +'His' +'Erfolg_' +'Entscheidung_' +'Durch' +'Bundes' +'Aust' +'Ausl' +'Asia_' +'Aktion' +'Afrika' +'196' +'17' +'01' +'...' +'", _' +'í' +'yt' +'wichtige_' +'vol' +'unden_' +'ulat' +'tionen_' +'tik' +'ters_' +'stet' +'shed_' +'schön' +'schein' +'ruh' +'res' +'regi' +'referen' +'recommend' +'pit' +'package' +'oy' +'ote' +'opposit' +'nze' +'neu' +'ms' +'lle_' +'lern' +'leicht_' +'lation' +'jekt' +'ham' +'gua' +'gerade_' +'gegenwärtig' +'ge' +'ga_' +'ften_' +'fris' +'flo' +'five_' +'erfa' +'elections_' +'eilig' +'eder' +'eas' +'discussion' +'dama' +'contra' +'company_' +'breite' +'besi' +'becom' +'aut' +'are' +'application_' +'analys' +'Versch' +'Verha' +'Spiele' +'Sec' +'Republic' +'Prin' +'OS' +'Liste_' +'LI' +'Kun' +'Ihrer_' +'Haus_' +'Goo' +'Free' +'Far' +'Fac' +'Ev' +'DI' +'Cou' +'Cl' +'Cal' +'Berlin_' +'Bal' +'Ander' +'!' 
+' : _' +'whi' +'wenige' +'warm' +'vertei' +'ved_' +'understand_' +'ule' +'tter' +'trac' +'ton' +'tast' +'support' +'stic' +'starke' +'soci' +'slow' +'schritt' +'rvati' +'rule_' +'ruf' +'reib' +'pret' +'ple_' +'ones' +'offene' +'nglich' +'minim' +'minat' +'looking_' +'lla' +'liegen_' +'lediglich_' +'kis' +'kehr' +'joy' +'job_' +'hti' +'hn_' +'guide' +'grad' +'geführt_' +'front' +'ernst' +'ence' +'emerging_' +'eit' +'dem' +'deli' +'credi' +'contain' +'comple' +'communication' +'communi' +'bte' +'britische' +'boo' +'bear' +'ausl' +'atische' +'argument' +'amount_' +'ade_' +'Zwei' +'Wu' +'War_' +'Tatsache_' +'Stimme' +'Regul' +'RA' +'Prod' +'Port' +'Personen_' +'Kö' +'Krit' +'Gran' +'Gegens' +'Deutsch' +'April_' +'„_' +'ünft' +'welche' +'vic' +'ust' +'uer_' +'tr' +'sver' +'sup' +'speak' +'sor' +'ska' +'schl' +'rth' +'row' +'rich' +'release' +'rate' +'proce' +'prevent_' +'pekt' +'option' +'opportunities_' +'omm' +'om_' +'nz_' +'nut' +'mul' +'move_' +'mba' +'love' +'lay' +'kurz' +'krieg' +'komp' +'ject' +'item' +'iste' +'involved_' +'hund' +'handelt_' +'gge' +'führung_' +'fen' +'fach' +'experi' +'erse' +'erklärt' +'enn' +'einander' +'dung_' +'divers' +'disk' +'disease' +'dens' +'conflict_' +'clo' +'ches_' +'chaften_' +'center_' +'card_' +'capacity_' +'bring_' +'bevor' +'bad_' +'avoid' +'au_' +'astr' +'ano' +'ander' +'aktiv' +'achieve_' +'While_' +'Werk' +'Vertrag' +'Trans' +'Tag_' +'Sp' +'Schri' +'Reso' +'Prä' +'Portug' +'On' +'Nähe_' +'Muslim' +'Japan' +'January_' +'Institut' +'Geh' +'Fall' +'Ergebnis_' +'Erf' +'Ebene_' +'Debatte_' +'Anz' +'Agr' +'überzeug' +'ört' +'{{_' +'zut' +'wahrscheinlich_' +'ustr' +'ugh' +'uen_' +'uche' +'tz_' +'tti' +'territor' +'subs' +'stell' +'size_' +'sil' +'set' +'schnelle' +'rom' +'rent' +'rem' +'regions_' +'refu' +'rb' +'project_' +'politi' +'plant' +'peri' +'pati' +'osit' +'noti' +'moder' +'meeting_' +'mean_' +'mach' +'mac' +'lange_' +'komm' +'ker' +'ito' +'ient' +'identif' +'hom' +'hard' +'größte' +'gee' +'gas_' +'found' +'former_' +'find' +'festge' +'etzt_' +'eratur' +'elt' +'els_' +'ell' +'eit_' +'eher_' +'dollar' +'connect' +'compr' +'complete_' +'clu' +'cial_' +'benutz' +'baren_' +'balance_' +'assen_' +'arra' +'arme' +'anw' +'akte' +'adopt' +'acco' +'Zahl' +'Vors' +'Raum' +'Mitglied' +'Leg' +'Krieg' +'Kri' +'Kommuni' +'IC' +'Gründe' +'Frühstück' +'Dritte' +'Deshalb_' +'Beitritt' +'Austria' +'12' +'zlich' +'würdig' +'wr' +'vorher' +'violence_' +'verwendet_' +'verhindern_' +'verge' +'uses_' +'unver' +'typi' +'tigen_' +'tab' +'stock' +'stage' +'spielen_' +'some' +'save' +'rweise_' +'rti' +'rge' +'rf' +'response_' +'recogni' +'realis' +'put' +'pte' +'popular_' +'piel' +'passen' +'ose' +'nier' +'near' +'nc' +'nature_' +'moti' +'mobil' +'lier' +'ität' +'irr' +'inn' +'ience_' +'ichtet' +'ial' +'hop' +'hinter' +'heißt_' +'haupt' +'gramm' +'gn' +'focus_' +'findet_' +'fic_' +'ffen' +'favo' +'extensi' +'ehl' +'ega' +'edit' +'dürfen_' +'del_' +'competition_' +'clearly_' +'check_' +'cate' +'bald_' +'add_' +'Wirk' +'Vol' +'Verantwortung_' +'Sinn' +'Ser' +'Second' +'SA' +'Präsidenten_' +'Pri' +'Pres' +'National' +'La_' +'Jahres_' +'Interessen_' +'HI' +'Government_' +'Direct' +'CH' +'Afghanistan_' +' (' +'zähl' +'zeit_' +'willi' +'weak' +'var' +'urc' +'unser_' +'ufe' +'tö' +'trie' +'task' +'statt' +'stat' +'space_' +'show' +'sek' +'scheint_' +'ries_' +'ried' +'richtung' +'richt_' +'regul' +'rbe' +'rais' +'phe' +'oten_' +'ople' +'olu' +'night_' +'nie_' +'ming_' +'mail' +'lte' +'loy' +'ling' +'lichkeiten_' +'lagen_' +'jeden_' +'ium_' +'isten_' +'inten' +'insp' 
+'increasingly_' +'impe' +'image_' +'ight' +'hst' +'hnt' +'handl' +'halb_' +'großer_' +'gleichzeitig_' +'gemeinsame_' +'fix' +'finanzier' +'features_' +'face' +'existing_' +'everything_' +'event_' +'erba' +'ept' +'ehmen_' +'discover' +'digital' +'counter' +'clean_' +'civil_' +'chen' +'came_' +'bs_' +'befinden_' +'beach' +'anden_' +'alli' +'administration_' +'Wes' +'Us' +'Tr' +'Tho' +'Sprach' +'Sh' +'Reise' +'Park_' +'Mai' +'King' +'Irak_' +'Gewalt_' +'Gan' +'Erklärung' +'Daten' +'CE' +'Bor' +'Bil' +'26' +'... _' +' ( _' +'üs' +'öko' +'ähr' +'   . ' +'wirtschaftlich' +'volu' +'verz' +'try_' +'train' +'tnis' +'thi' +'teile' +'tau' +'tal_' +'tage' +'stro' +'stei' +'sozialen_' +'sieh' +'school' +'rv' +'rio' +'richten_' +'raf' +'provides_' +'poten' +'plane' +'obwohl_' +'observ' +'negotiations_' +'neg' +'minal' +'militärische' +'markt_' +'list' +'lde' +'ktr' +'kom' +'ken' +'ities_' +'höchst' +'host' +'hof_' +'halte' +'gesellschaft' +'gende' +'ged' +'fung' +'fische' +'fight_' +'fat' +'expens' +'erfolg' +'enf' +'ef_' +'eck' +'direct_' +'dar_' +'culture_' +'computer_' +'care' +'bestimm' +'beitr' +'bau' +'ants_' +'allgemeine' +'Verh' +'Ven' +'Temp' +'Teil' +'Tag' +'Sw' +'Stat' +'Som' +'Sat' +'Pet' +'Mexi' +'Mal' +'Kop' +'Kinder' +'Kampf_' +'Jede' +'Eb' +'Boo' +'195' +'�_' +'you' +'weisen_' +'wart' +'vin' +'verwa' +'verfügt_' +'unkt_' +'uel' +'training_' +'takes_' +'stun' +'stic_' +'squ' +'six_' +'ront' +'ring' +'rg' +'rence_' +'remi' +'recht_' +'quot' +'prepare' +'pet' +'pel' +'partner_' +'othe' +'original' +'oben_' +'nnen_' +'nke' +'network_' +'mpf' +'mont' +'liz' +'live_' +'lich' +'lam' +'kre' +'ional_' +'internal_' +'interest' +'instead_' +'inis' +'igu' +'generation' +'gegeben_' +'foo' +'fied_' +'ff' +'essi' +'ensi' +'ener' +'emi' +'einger' +'echt' +'dl' +'dict' +'defen' +'decisions_' +'comment' +'circu' +'call' +'bod' +'betrifft_' +'atten' +'angeh' +'address' +'achten' +'] _' +'Zins' +'Wü' +'Werte' +'Wachstums' +'Türkei_' +'Straße' +'Sorge' +'Schwi' +'Sal' +'Reserv' +'Para' +'North_' +'NI' +'Märkte' +'Mot' +'MP' +'Idee' +'Hy' +'Hier_' +'Hel' +'Gal' +'Engl' +'Cla' +'Bereichen_' +'Banken_' +'Aussprache_' +'Absch' +' -_' +'ührung_' +'ästinens' +'äge_' +'zustellen_' +'zuk' +'xa' +'wn_' +'wing_' +'wide' +'vorschl' +'verw' +'unterst' +'unterschiedliche' +'tg' +'stl' +'sten' +'standard_' +'soft_' +'ria_' +'rce' +'prü' +'prove' +'prob' +'ped_' +'och' +'nv' +'neuer' +'nel_' +'meng' +'meet_' +'manufactur' +'mals_' +'lution' +'look' +'logis' +'lm' +'legitim' +'lah_' +'kten_' +'keep_' +'ked_' +'jeweil' +'involv' +'integration_' +'iesen_' +'ichen_' +'iche' +'hle' +'geg' +'funktionier' +'forma' +'fon' +'fo' +'fina' +'file_' +'fet' +'extremely_' +'extend' +'ext' +'exam' +'ession' +'ese_' +'entscheidende' +'enha' +'eme' +'elli' +'ehen' +'echte' +'div' +'dev' +'deine' +'debat' +'cs_' +'close' +'class' +'carrie' +'bot' +'bild' +'bestä' +'bereich' +'below_' +'aufs' +'activities_' +'accu' +'Zu_' +'Ye' +'Y_' +'Währungs' +'Seh' +'San_' +'Russ' +'Roman' +'Ple' +'Partei_' +'Möglichkeiten_' +'Mode' +'Manage' +'Las' +'Konflikt' +'Inf' +'Home' +'Gesundheits' +'Einsatz_' +'BIP_' +'Av' +'Aspekt' +'Allerdings_' +'40' +'194' +' ‘_' +' ''_' +'ämpf' +'}}' +'za_' +'weiß_' +'weiteren_' +'week_' +'wee' +'visit' +'vili' +'verfolg' +'varia' +'values_' +'unr' +'ually_' +'tradition' +'tische_' +'tho' +'tand_' +'suppl' +'simple_' +'sem' +'scr' +'return_' +'rest_' +'reit' +'reg' +'reco' +'rauch' +'rai' +'quest' +'productiv' +'prevent' +'perhaps_' +'obacht' +'nin' +'nia' +'nel' +'memb' +'manch' +'lung' +'ller_' +'law' +'langfristig' 
+'lage_' +'lad' +'jahr' +'iro' +'ira' +'intend' +'infrastructure_' +'increased_' +'included_' +'ice_' +'höhere' +'hung_' +'hohen_' +'glei' +'gla' +'ges' +'gebe' +'fun' +'fuel' +'fehl' +'evi' +'effective' +'doing_' +'dio' +'difference' +'devi' +'currency_' +'cos' +'continue' +'contains_' +'consider' +'commitment_' +'collecti' +'chtli' +'brauchen_' +'besten_' +'bessere' +'bedi' +'ativen_' +'ationen_' +'alle' +'ahren_' +'absolute' +'\' +'You' +'Wohl' +'Tele' +'Staat_' +'Spain_' +'Roo' +'Richtung_' +'Rat' +'Qualität' +'Pap' +'Ort_' +'Minde' +'Install' +'Exp' +'Dur' +'Cre' +'Booking_' +'Auff' +'Arme' +'Arab' +'€' +'в' +'ühl' +'ös' +'ßt_' +'Ökonom' +'   – _' +'zweite' +'zusätzliche' +'yb' +'wirksam' +'wic' +'wert_' +'verur' +'vergangenen_' +'uli' +'tü' +'toward_' +'took_' +'theor' +'tatsächlich_' +'sung_' +'ständig' +'step_' +'statement' +'stag' +'signa' +'share' +'sell' +'reviews_' +'responsible_' +'respect' +'requirement' +'representative' +'relax' +'recover' +'rds_' +'rap' +'rad' +'pu' +'prech' +'prac' +'poverty_' +'pir' +'pay' +'notwendig_' +'negara_' +'möglicherweise_' +'mous' +'mission' +'mbe' +'lou' +'les' +'lend' +'iona' +'importance_' +'igt_' +'ific' +'ideal_' +'ichten_' +'hätten_' +'humanit' +'hende' +'gam' +'favour_' +'ew_' +'essential_' +'esi' +'enge' +'emphasis' +'effects_' +'door' +'dest' +'design_' +'declar' +'customers_' +'constructi' +'connection' +'cks_' +'chw' +'chinesischen_' +'board_' +'bly_' +'beein' +'bean' +'bare_' +'assess' +'arr' +'agenda_' +'Zus' +'Wir' +'Veran' +'Stabilität' +'Software_' +'Sea' +'Prof' +'Prim' +'Netz' +'König' +'Kn' +'Kir' +'Funktion' +'Freiheit' +'Fran' +'Einh' +'Conf' +'Bahn' +'Anla' +'AC' +'32' +'…' +'“, _' +'“' +'ße' +'zusammen_' +'wären_' +'wors' +'wir' +'vorges' +'vollständig' +'vas' +'user' +'urb' +'unw' +'ungsv' +'ular' +'uelle' +'tter_' +'tren' +'touris' +'telle' +'structure' +'streng' +'sprach' +'soziale_' +'south' +'sla' +'schä' +'schwa' +'richtig' +'reject' +'react' +'quis' +'qualifi' +'pus' +'pra' +'performance_' +'opi' +'oft_' +'ocat' +'ndo' +'moment_' +'mili' +'menti' +'male' +'logi' +'leich_' +'legislat' +'leben_' +'leave' +'lai' +'lack_' +'kontroll' +'kleinen_' +'klar_' +'kla' +'kers_' +'isation_' +'introduce' +'ignor' +'hö' +'grün' +'grenz' +'gericht' +'gang' +'fünf_' +'französische' +'folgende' +'fil' +'fertig' +'ey_' +'erke' +'era_' +'elle' +'egel' +'domin' +'dli' +'deut' +'deal' +'concept' +'colo' +'coh' +'cin' +'ching_' +'boa' +'bel_' +'beginn' +'bede' +'beach_' +'ball' +'bal' +'atur' +'ation' +'artige' +'arian_' +'applie' +'ape' +'apa' +'ansch' +'alter' +'airs_' +'active_' +'achi' +'Wert_' +'Weiter' +'Vorb' +'Video' +'Unterk' +'Techn' +'Sektor' +'Ran' +'Party_' +'Partei' +'Oc' +'Musik' +'Minister' +'Mill' +'Mil' +'MO' +'Justi' +'Ind' +'Höhe' +'Großbritannien_' +'Grenzen_' +'Gem' +'Finanzierung' +'Einkommen' +'EA' +'Design' +'Dep' +'Chinas_' +'Beha' +'Aufg' +'1980' +'0er_' +'п' +'ützt' +'wünsche' +'wirtschaft_' +'wichtigen_' +'weltweit_' +'vorgeschlagen' +'voran' +'vertrete' +'verlang' +'verbind' +'unately_' +'ual' +'treffen_' +'tings_' +'technologie' +'steigen' +'slo' +'sierung_' +'sibl' +'short' +'rier' +'restrict' +'responsibility_' +'require_' +'reif' +'reasons_' +'pursu' +'prefer' +'places_' +'permi' +'perce' +'opol' +'nimmt_' +'negative_' +'mst' +'med_' +'map' +'läss' +'ln' +'lateral' +'kurze' +'kap' +'isin' +'influence' +'iken_' +'igkeit' +'ielt' +'ied' +'gst' +'gori' +'gleichen_' +'gleich_' +'gewisse' +'gerecht' +'gap' +'fore' +'forder' +'finanzielle' +'external_' +'embe' +'develop_' +'derzeit_' +'denk' +'deb' +'darf_' 
+'conclude' +'campaign' +'burg_' +'begrüße' +'azi' +'aspect' +'animal' +'amerikanische' +'alternative' +'akzeptier' +'York_' +'Vor_' +'Verk' +'Univers' +'Today_' +'TO' +'Stunde' +'Spi' +'Schla' +'Richt' +'Preis_' +'Pass' +'Ot' +'Meine' +'Marke' +'Kra' +'It' +'Invest' +'Ihre' +'Gold' +'Fehler' +'Eff' +'Dor' +'Cat' +'CD_' +'Beschäftig' +'Außerdem_' +'Argentin' +'Arbeit' +'Anl' +'Ange' +'Alb' +'AR' +'--' +'-, _' +''' ' +'” ' +'—' +'és' +'änk' +'ändern_' +'äfte' +'zial' +'zar' +'wes' +'welt' +'vir' +'viert' +'uß' +'urf' +'ture' +'tia' +'threat' +'team' +'tant' +'surround' +'successful_' +'student' +'strong' +'stoff' +'stab' +'spar' +'sof' +'schul' +'schr' +'rim' +'revolution' +'reno' +'remov' +'religio' +'purchas' +'protect_' +'promise' +'professional' +'president_' +'practical' +'pos' +'oppo' +'odi' +'occ' +'nom' +'national' +'nal_' +'mpe' +'monitor' +'mbi' +'massive' +'lth' +'lf_' +'largest_' +'kö' +'kul' +'jenigen_' +'ivit' +'insur' +'initiat' +'implementation_' +'ierung' +'hl_' +'hing' +'gue_' +'gle_' +'gesamt_' +'gebi' +'gari' +'friendly_' +'forg' +'fest_' +'fahr' +'factor' +'eug' +'entsp' +'enthalt' +'elf' +'eigentlich' +'eigen' +'dte' +'double_' +'dies' +'dialog' +'decades_' +'contract' +'confi' +'colleague' +'challenges_' +'chai' +'capa' +'bul' +'bracht' +'blin' +'bers' +'authorit' +'attr' +'arriv' +'arin' +'advantage' +'ada_' +'accessi' +'Worte' +'Vorauss' +'Von_' +'Verkehrs' +'Ve' +'Time' +'Tie' +'Ther' +'Tatsächlich_' +'Stell' +'SE' +'Ris' +'Preise' +'Pers' +'Nation' +'My_' +'Monate' +'Modell' +'Koo' +'Konsu' +'Konferenz' +'Koh' +'Kern' +'Kenn' +'Interna' +'Haf' +'Fälle' +'Es' +'ES' +'Dri' +'Denn' +'Blo' +'Bl' +'Ausf' +'Aufgabe_' +'Am_' +'> _' +'45_' +'-' +'üns' +'überl' +'zweiten' +'xim' +'werde_' +'weapon' +'wai' +'verse' +'vermi' +'ures_' +'ument' +'tten_' +'translat' +'tens' +'sub_' +'spri' +'spec' +'soon_' +'schlecht' +'rolle' +'respond' +'refugee' +'redi' +'rative' +'ragen_' +'rag_' +'promote' +'pressure_' +'option_' +'ock' +'occur' +'neu_' +'nehmer' +'mechanism' +'lve' +'kräfte' +'ko_' +'ki_' +'journ' +'jedes_' +'isten' +'indung' +'immun' +'igi' +'hmen_' +'grund' +'greif' +'glaube_' +'gas' +'events_' +'established_' +'ering_' +'equal' +'encourage' +'enabl' +'ellung_' +'eint' +'einfache' +'dw' +'doubt' +'despite_' +'demi' +'decline' +'cti' +'credit_' +'comfortable_' +'bun' +'built_' +'bran' +'bond' +'benefit_' +'bedro' +'bed' +'bare' +'ativ' +'assist' +'although_' +'agr' +'aft_' +'abi' +'Wahl_' +'Verfahren_' +'Verbesserung' +'Test' +'Serb' +'Risiko' +'Regionen_' +'Note' +'Nachbar' +'May_' +'Mal_' +'Jahrzehnt' +'Insel' +'Inflation' +'Ihren_' +'High' +'Haus' +'Grün' +'Gesamt' +'Flo' +'Fischer' +'Enterprise_' +'Eng' +'Einrichtung' +'Britain_' +'Behörden_' +'Begr' +'Balk' +'Ausschuss_' +'Amerika_' +'Ale' +'22_' +'* _' +'%._' +'übera' +'ächt' +'zb' +'yer' +'xu' +'wäh' +'wan' +'vorzu' +'verschiedene_' +'uring_' +'ug_' +'top' +'tid' +'tec' +'sw' +'strategic_' +'sterda' +'stark_' +'serv' +'ser_' +'secu' +'ritt' +'richte' +'reach_' +'quie' +'preis' +'precise' +'post_' +'oti' +'orary_' +'olge_' +'official_' +'official' +'mü' +'mens' +'meaning' +'ltige' +'lose' +'lne' +'liv' +'lett' +'immen' +'igne' +'has' +'gy' +'guarantee_' +'größten_' +'grundlegende' +'gern_' +'genommen_' +'gelegen' +'gehört_' +'führte' +'forms_' +'forme' +'fine' +'film' +'fig' +'employe' +'ela' +'einzelnen_' +'einig' +'effizien' +'dynami' +'designed_' +'design' +'describe' +'dern' +'degr' +'deci' +'dal' +'cycl' +'contact' +'con_' +'client' +'chaf' +'centr' +'cance' +'bill' +'bewertung' +'behind_' +'base_' +'author' 
+'auft' +'assung' +'asion' +'arity_' +'anne' +'angs' +'activi' +'Wissenschaft' +'Website_' +'Verf' +'Verbindung_' +'Ungl' +'Tw' +'Teile' +'TI' +'Strateg' +'Sport' +'Spani' +'Russian_' +'Rechte_' +'RO' +'Presidency_' +'Position' +'Ort' +'Ok' +'Micro' +'Mag' +'Mach' +'LO' +'Kata' +'Kat' +'Kal' +'Initiative_' +'Hostel' +'Hon' +'Griechenland' +'Folge' +'Democra' +'Court_' +'City_' +'Christ' +'Binnen' +'Arch' +'Arbeitspl' +'Angebot' +'Amt' +'Abstimmung_' +'Abschl' +'200_' +'.)' +'äß' +'älte' +'®' +'woll' +'wil' +'walk_' +'vu' +'vio' +'villa' +'verstärk' +'verlie' +'verantwort' +'uz' +'ums_' +'ums' +'umb' +'ude_' +'type_' +'traditionelle' +'tp' +'tot' +'tige_' +'tief' +'terrorism_' +'strengthen' +'sensi' +'schließlich_' +'sam_' +'rz_' +'risks_' +'relative' +'regime_' +'rdan' +'rc' +'rang' +'rain' +'quat' +'possibilit' +'picture' +'pfel' +'pert' +'pect' +'pate' +'ordina' +'ny_' +'nor_' +'nks_' +'nice_' +'ngt' +'nahe_' +'model' +'migra' +'meri' +'labor_' +'konzentrier' +'jüngste' +'izi' +'ization_' +'ists_' +'ious_' +'inat' +'imm' +'ikan' +'ifi' +'ieg' +'green' +'gehören_' +'gege' +'gain' +'fos' +'fahre' +'euro' +'etzung' +'erneut' +'ermöglichen_' +'erfüll' +'epi' +'entlich' +'eat' +'diplomat' +'dien' +'crime' +'contribut' +'confirm' +'chaftliche' +'candidate' +'blu' +'bisher_' +'bh' +'beri' +'beginning_' +'became_' +'ausgew' +'attracti' +'associat' +'approv' +'anis' +'anderer_' +'amp' +'amo' +'ace_' +'account' +'abst' +'Zum_' +'Zentralbank' +'Wege' +'Wahr' +'Verfassung_' +'Umst' +'Umf' +'Uhr_' +'Ts' +'Th' +'Stu' +'St' +'Spanien_' +'Schä' +'Schließ' +'Platz_' +'Phil' +'PS' +'PE' +'ON_' +'Mona' +'MB' +'Lea' +'Late' +'Konse' +'Jac' +'Italien' +'Inte' +'Guest' +'First_' +'Firm' +'Fed_' +'Fakt' +'Ever' +'Erst' +'Ents' +'Club' +'Bran' +'Bemühungen_' +'Barr' +'Bank' +'Armut_' +'Anti' +'Anre' +'Anna' +'Akt' +'Aff' +'Acco' +'? ' +'. 
- (_' +' =' +'öst' +'öffentlich' +'ó' +'ätzlich' +'ältnis' +'wn' +'wichtigsten_' +'weig' +'wandel' +'voll_' +'visit_' +'video_' +'veränder' +'verbunden_' +'uner' +'tural' +'threat_' +'thought_' +'thin' +'stop_' +'steps_' +'stellung_' +'sport' +'sion' +'side' +'shows_' +'shift' +'shar' +'sess' +'sar' +'rück' +'root' +'receive_' +'qualit' +'prüf' +'process' +'probably_' +'practice_' +'plann' +'pain' +'osse' +'music_' +'move' +'messen' +'mental_' +'measure' +'md' +'lower_' +'lion' +'konkret' +'kee' +'island' +'ish' +'internet_' +'integrat' +'ink' +'ilung_' +'ible_' +'hoste' +'hlen' +'globali' +'gemeinsam_' +'gehe' +'gain_' +'fallen_' +'eure' +'ered_' +'ene_' +'edl' +'druck_' +'constitution' +'complex_' +'compet' +'committee_' +'closed_' +'cki' +'bür' +'bein' +'beide' +'ay_' +'aufr' +'angesichts_' +'angen_' +'amend' +'alu' +'acy_' +'acce' +'able' +'Zeitp' +'Zahl_' +'Yet_' +'Ya' +'Vorschläge_' +'Version_' +'Verhandlungen_' +'Up' +'Ter' +'Stä' +'Studie' +'Stud' +'Sig' +'Regime' +'Programme' +'Politiker_' +'Person' +'Partners' +'Parl' +'Pakistan' +'Ober' +'Mus' +'More_' +'Mess' +'Mehrheit_' +'Mas' +'Les' +'Lat' +'Krankheit' +'How_' +'Hoch' +'Führung_' +'Freund' +'Fou' +'Familien' +'Eurozone_' +'Ergebnisse_' +'Dialog' +'Dav' +'Christi' +'Blu' +'Bar_' +'Arti' +'Ansatz_' +'Aben' +'AT' +'zte' +'zone_' +'zers' +'zehn_' +'word_' +'wobei_' +'weitere' +'voi' +'verm' +'untersch' +'understand' +'unabhängig' +'tätig' +'tions' +'tel' +'sze' +'systeme' +'study_' +'started_' +'sle' +'sili' +'secure' +'seas' +'screen' +'schaften_' +'sce' +'sation' +'rten_' +'rien' +'result' +'reform' +'receive' +'reality_' +'purpose_' +'pruch' +'programs_' +'prev' +'phone_' +'oso' +'onne' +'olge' +'nötig' +'nächste' +'nian' +'nent' +'müss' +'monetary_' +'modifi' +'mode' +'mind' +'met_' +'met' +'mentioned_' +'mber_' +'lter' +'lst' +'liti' +'limited_' +'ley_' +'konnte_' +'kk' +'kill' +'kannt' +'kame' +'jet' +'iu' +'isi' +'iri' +'interven' +'ined_' +'improvement' +'iet' +'ieben' +'ide_' +'iar' +'hono' +'highly_' +'hic' +'heiten_' +'gute' +'guests_' +'gelang' +'fru' +'friend' +'freu' +'fly' +'figure' +'fair' +'expl' +'esta' +'esen' +'erte' +'erreich' +'erf' +'ereign' +'engine' +'endl' +'enable_' +'einst' +'eichne' +'dt' +'desir' +'derartige' +'corp' +'confidence_' +'code_' +'chst_' +'chance' +'busi' +'bilit' +'berücksichtig' +'beruh' +'ba_' +'aur' +'atr' +'assum' +'ars' +'anf' +'alo' +'ada' +'according_' +'accept_' +'Zweiten' +'Widers' +'Vertr' +'Versuch' +'Stadt' +'ST' +'Reformen_' +'Que' +'Prozess_' +'Oste' +'Nutzung' +'Nieder' +'Nationen_' +'NATO_' +'Mehr' +'Mail' +'Kunden_' +'Joh' +'Indien_' +'Handel_' +'From_' +'Fr' +'Film' +'Ext' +'Est' +'Entw' +'Entschließung' +'Egypt' +'Economi' +'Druck_' +'Diskussion' +'Dabei_' +'Cap' +'Bis' +'Augen' +'Antwort_' +'AL' +'" ' +' / _' +'örder' +'ähnlich' +'Überw' +'zuv' +'zum' +'zt_' +'wichtige' +'verpflichte' +'vel_' +'ution' +'urs_' +'unternehmen_' +'undene' +'unan' +'ume' +'tli' +'tle_' +'test_' +'teri' +'tb' +'tation_' +'sustainable_' +'sustain' +'stärk' +'stä' +'style_' +'struc' +'stelle' +'status_' +'sre' +'smus_' +'shown_' +'seb' +'schla' +'rieb' +'ric' +'repu' +'rbeite' +'raise_' +'pur' +'propos' +'prom' +'private' +'previous_' +'praktisch' +'ples_' +'plans_' +'ono' +'onic' +'olog' +'oliti' +'oint' +'offizielle' +'obje' +'nr' +'nme' +'nesses_' +'ndlich' +'mein' +'matters_' +'marke' +'lec' +'lac' +'konf' +'kau' +'ist' +'irgend' +'ining' +'ini_' +'industrial_' +'impli' +'ime' +'identi' +'ibi' +'histori' +'helfen_' +'glo' +'geschaff' +'garde' +'fähigkeit_' +'fts' +'fond' +'ffn' +'fei' 
+'featur' +'falt' +'fac' +'engage' +'dure' +'dramati' +'discr' +'dim' +'definit' +'creating_' +'creat' +'context_' +'consumers_' +'consequences_' +'congr' +'conclusion' +'committe' +'color' +'cities_' +'buy' +'brid' +'bezüglich' +'bewusst' +'beit' +'basic_' +'audi' +'atis' +'apply' +'answer_' +'anb' +'allein' +'ahr' +'ahl_' +'ahl' +'accomp' +'Wohn' +'Waffen' +'Verteidigung' +'Verpflichtung' +'Them' +'Server' +'Sche' +'SI' +'Revolution' +'Qu' +'Prozent_' +'Prote' +'Post' +'PA' +'Opfer' +'Only_' +'LE' +'Kolleg' +'Kli' +'Kle' +'June_' +'Jan' +'Islami' +'Heu' +'Further' +'English_' +'Development_' +'Dec' +'Bew' +'Beschl' +'90_' +'75' +'500_' +'27_' +'..." _' +', “_' +' –' +'ón_' +'zier' +'xis' +'wieder' +'vorsi' +'vertra' +'varie' +'unique_' +'ucat' +'ths_' +'tem' +'table' +'ssive_' +'sozi' +'sov' +'som' +'separat' +'seines_' +'search' +'scienti' +'schre' +'schließ' +'räum' +'rupti' +'reu' +'rete' +'repea' +'reichen_' +'promoti' +'primar' +'presented_' +'phen' +'pen_' +'payment' +'nung' +'nto' +'nahm' +'movi' +'mix' +'machine' +'läuf' +'ließ' +'laut' +'launch' +'las_' +'krise_' +'konnten_' +'kol' +'knowledge_' +'isse_' +'inz' +'int_' +'inner' +'ihres_' +'igte' +'icul' +'hun' +'hne_' +'heit' +'graphic' +'gleiche' +'gkeiten_' +'gewor' +'gesetzt_' +'geringe' +'fte_' +'fello' +'eye' +'erla' +'erforderlich_' +'ells' +'eind' +'eif' +'effort_' +'eciat' +'draft' +'dauer' +'critic' +'cript' +'creation_' +'count' +'commi' +'cious' +'cio' +'chtige' +'choose_' +'change' +'caused_' +'categories_' +'capita' +'beyond_' +'ay' +'ausreich' +'ausf' +'asset' +'artic' +'argue' +'alen_' +'aktuell' +'airport' +'aging_' +'adapt' +'actions_' +'Zo' +'Zimmer' +'Wort_' +'Wettbewerb' +'Weste' +'Well' +'Vertrauen' +'Unterschied' +'Unsere' +'Stil' +'Social' +'Seit_' +'Punkt_' +'Prov' +'Play' +'Pho' +'Paket' +'Nutze' +'Now_' +'NA' +'Mod' +'Mich' +'Mein' +'Mac' +'Leit' +'Kong' +'Innovation' +'Gruppe_' +'Grunds' +'Entscheidungen_' +'End' +'Einz' +'Dro' +'Don' +'Demokrat' +'Defi' +'Constitution' +'Bul' +'Brü' +'Berichterstatter_' +'Ausga' +'Artikel_' +'Art' +'Arbeitnehmer' +'Arab_' +'öffentliche_' +'ñ' +'ärk' +'äch' +'Überein' +'   ' +'zufü' +'zog' +'ziell' +'zeichn' +'wendig' +'website' +'warn' +'ware_' +'vorha' +'virtu' +'vier_' +'uste' +'urg_' +'tut' +'tto' +'tsch' +'treatment_' +'tral' +'tors_' +'terrorist' +'sätz' +'suff' +'studie' +'spir' +'seem_' +'schli' +'saying_' +'sale' +'sa_' +'rot' +'relevan' +'reis' +'rdn' +'rau' +'raelis' +'quid' +'print' +'politicians_' +'platz' +'pfe' +'pap' +'ott' +'orati' +'oral' +'nos' +'normal' +'neues' +'nec' +'ndig' +'ndel' +'nachhaltige' +'minute' +'message' +'mes' +'meist' +'mbo' +'mann' +'lost_' +'losse' +'lives_' +'lien' +'latest_' +'lande' +'kur' +'ktive' +'kart' +'kar' +'joint' +'itut' +'inu' +'innovat' +'inier' +'ida' +'hten' +'hohe_' +'hinzu' +'guten_' +'guest_' +'gruppe' +'gno' +'gewährleisten_' +'gewinn' +'gesamten_' +'gebu' +'foc' +'floor' +'finance_' +'ffee' +'failure_' +'explain' +'evidence_' +'erwart' +'entwickeln_' +'entsprechen' +'entscheide' +'enthält_' +'ectio' +'dritte' +'dr' +'divid' +'dish' +'dier' +'darum_' +'dank' +'dadurch_' +'crit' +'convi' +'chlossen' +'challenge_' +'bus' +'bezahl' +'begin' +'bee' +'bathroom' +'basier' +'aware_' +'aufe' +'asked_' +'annt' +'alone_' +'adver' +'Ziel' +'Vertreter' +'Tou' +'TA' +'Stärk' +'Spiel_' +'Sitz' +'Rad' +'Putin_' +'NT' +'Loca' +'Last' +'Key' +'Jugend' +'Infrastruktur' +'Human' +'Hot' +'Hinblick_' +'General_' +'Gelegenheit_' +'Gefahr_' +'Gebiet_' +'Förderung_' +'Europä' +'Europeans_' +'Dra' +'Dinge' +'Darüber_' +'Dank_' 
+'Damit_' +'CI' +'Besi' +'Beginn_' +'Barcelona_' +'Ausschu' +'August_' +'Anschl' +'Angelegenheit' +'Alli' +'Aktivität' +'Act' +'Abkommen_' +'23' +' " _' +'änkt' +'ält' +'Öffentlichkeit_' +'Änderungsanträge' +'Änderungsantrag' +'zule' +'zeuge_' +'ype' +'xit' +'worth' +'wohn' +'wealth_' +'wac' +'vig' +'viele' +'verwenden_' +'va_' +'unmi' +'unemployment_' +'uation' +'uar' +'trust' +'technologi' +'technical' +'tand' +'tag_' +'spoliti' +'source_' +'sort' +'sorg' +'sometimes_' +'solutions_' +'setting_' +'schütz' +'rising_' +'riere' +'rding_' +'quickly_' +'py' +'prochen' +'politics_' +'please_' +'pem' +'pani' +'ows_' +'oss' +'osp' +'objective_' +'obi' +'nsti' +'nste' +'note_' +'nische_' +'nich' +'nent_' +'minor' +'minister_' +'min_' +'mi_' +'markt' +'luxur' +'linien' +'lini' +'leiste' +'led' +'laufen' +'larger_' +'künft' +'kriti' +'kos' +'kes_' +'jährlich' +'integri' +'innovation' +'illi' +'icat' +'iat' +'hme' +'hind' +'hier' +'heri' +'haf' +'great' +'geä' +'geste' +'gestalt' +'genannten_' +'gemeins' +'gefa' +'gaben_' +'fs_' +'ffi' +'exac' +'erwartet' +'eru' +'equent' +'entge' +'ender' +'elbe' +'ego' +'dringend' +'don_' +'dire' +'develop' +'determine' +'deter' +'damage_' +'correct' +'contribution' +'consult' +'condition' +'child' +'chec' +'charge' +'ced_' +'carry' +'build_' +'br' +'boost' +'blick' +'bell' +'bauen_' +'average_' +'amen' +'aktuellen_' +'aktive' +'agricultural_' +'admi' +'achieved_' +'aben_' +'Währung' +'Wit' +'Wand' +'Volks' +'Verst' +'Verbre' +'University_' +'Terror' +'Stern' +'St_' +'Schr' +'Schlusselwortern_' +'Sar' +'SQL_' +'Regeln_' +'Rechn' +'Punkte' +'Produkte_' +'Personal' +'Muse' +'Middle_' +'March_' +'Lösung' +'Liberal' +'Lang' +'Karte' +'Investi' +'Innen' +'Globali' +'Global' +'Gebiet' +'Flughafen' +'Export' +'Exper' +'Empf' +'Emi' +'Disk' +'Datei_' +'DVD' +'Conven' +'Cong' +'Bildung_' +'BE' +'Arbeitslos' +'Anwend' +'Alternativ' +'31_' +'1990_' +'193' +'11' +' ".' 
+'“._' +'öß' +'öge' +'öffnet' +'äußerst_' +'äußer' +'äss' +'ystem' +'ysi' +'wund' +'wort' +'wood' +'ward_' +'viel' +'verständlich_' +'uten_' +'urgen' +'unte' +'twort' +'tially_' +'throughout_' +'teach' +'system' +'stru' +'spro' +'spread_' +'speech' +'settle' +'send_' +'sd' +'russische' +'rob' +'rische' +'riff' +'richtige' +'requir' +'request_' +'replac' +'regel' +'reduction' +'rede' +'recognize' +'race' +'pou' +'pas' +'ors' +'orient' +'ope' +'ohn' +'occup' +'obligat' +'nissen_' +'nied' +'nfall' +'movement_' +'mitte' +'mark_' +'löse' +'lp' +'lor' +'lls_' +'llo' +'lies' +'lange' +'laden_' +'klare' +'jede_' +'iven' +'iten_' +'itali' +'isla' +'intensiv' +'infl' +'independent_' +'immt_' +'icient' +'hotel' +'hie' +'heut' +'ggl' +'gesamt' +'gent' +'ganzen_' +'führende' +'fy' +'fte' +'fores' +'fordert' +'fire' +'fer_' +'familie' +'fair_' +'failed_' +'eventu' +'ermöglicht_' +'equal_' +'ep_' +'ensur' +'enlargement_' +'emo' +'einschließ' +'eding' +'ecke' +'ea_' +'dliche' +'distribution_' +'direction_' +'digen_' +'dienst' +'det_' +'depr' +'danken_' +'cts_' +'conti' +'confe' +'commerc' +'cket' +'chter' +'chsel' +'choice_' +'chine' +'cen' +'buch' +'brie' +'beschä' +'begre' +'aul' +'att_' +'appr' +'angene' +'afr' +'Zur' +'Zentrum' +'Zen' +'Za' +'WT' +'Volkswirtschaft' +'Vereinbar' +'Unternehmens' +'Una' +'Thu' +'Thr' +'Terrorismus_' +'Summ' +'Spr' +'Rooms_' +'Republik' +'Prozess' +'Progr' +'Priorität' +'Palestinian_' +'PR' +'PC_' +'Organ' +'Option' +'Nam' +'Mü' +'Minuten_' +'Methode' +'Lit' +'Lauf' +'J_' +'Inha' +'IT_' +'IMF_' +'Herren_' +'Haushalt' +'Gru' +'Greek' +'Glaub' +'Funktion_' +'Friedens' +'Fle' +'Fla' +'Fir' +'Financ' +'Fel' +'Dre' +'Dez' +'Datei' +'Computer' +'Cod' +'Bestimm' +'Anfang_' +'Amendment' +'Abgeordneten_' +'AN' +'36' +'š' +'ären_' +'zieren' +'zahlen' +'yi' +'wun' +'wissens' +'wiederh' +'who' +'völlig_' +'vorb' +'vora' +'voll' +'veröffentlicht' +'versu' +'verstehen_' +'verringer' +'verlo' +'verha' +'vac' +'usa' +'urte' +'urg' +'ungsb' +'ultimat' +'tschaft' +'transit' +'thousand' +'tere' +'tag' +'supported_' +'suchen' +'subsid' +'street' +'strategie' +'strategi' +'sst_' +'specific' +'sound_' +'signifi' +'sende' +'sav' +'rte_' +'roll' +'rlei' +'rheit' +'resolve' +'relationship' +'rdin' +'ration' +'programmes_' +'procedure_' +'principles_' +'pon' +'perform' +'perf' +'paid_' +'own' +'ose_' +'orte' +'organ' +'omen' +'obvious' +'obt' +'nsta' +'nsi' +'nor' +'ndi' +'mögliche' +'mp_' +'mite' +'menu' +'ltung' +'lokale' +'lity_' +'ließen' +'licht' +'lehn' +'kürz' +'kern' +'initiative_' +'initi' +'includes_' +'impos' +'hält_' +'hum' +'holiday' +'hnen_' +'historical' +'herrsch' +'helpful' +'hai' +'ground' +'grade' +'got' +'gemeinsamen_' +'garantier' +'ganz' +'gabe_' +'fication_' +'feld' +'fear_' +'falsch' +'execut' +'exclusive' +'ette' +'erkläre' +'erinner' +'erho' +'enthalten_' +'enorme' +'elega' +'einzig' +'einhei' +'eingeh' +'eindeutig' +'efo' +'effektiv' +'durchgeführt' +'domestic_' +'distin' +'display' +'denken' +'deficit_' +'ctions_' +'critici' +'coordinat' +'consumer_' +'consist' +'cia' +'bestehende' +'bekommen_' +'bekannt' +'behavio' +'behandel' +'bai' +'ausschu' +'aussch' +'atte' +'anything_' +'anlage' +'anh' +'agier' +'advanced_' +'adjust' +'achung' +'absch' +'aa' +'[' +'Zei' +'Vorg' +'Volk' +'Urlaub' +'Umge' +'Ty' +'Trot' +'Touris' +'Team' +'Stran' +'Since_' +'Ressourcen_' +'Rent' +'Partner' +'Pala' +'Notwendigkeit_' +'Nebe' +'Natürlich_' +'Medit' +'Mad' +'Kraft_' +'Kosovo' +'Korea_' +'Konvent' +'Kontrolle_' +'Kompr' +'Israeli_' +'Integration' +'Informations' +'Hof' +'Hand_' 
+'Gemeins' +'Fortschritte_' +'Flü' +'Februar' +'Erweiterung_' +'Ersten' +'Einst' +'EN_' +'Dow' +'Dok' +'Debian_' +'Dah' +'Beste' +'Beitrag' +'Bedr' +'Auswa' +'Ausdruck_' +'Auft' +'Aufmerksamkeit_' +'Akti' +'African' +'33' +'29_' +'28_' +'160' +'.  ' +', "' +' [_' +' ) ' +'übersch' +'übernachten_' +'ögen' +'äne' +'ße_' +'zep' +'worked_' +'weiter' +'wechs' +'web_' +'versuche' +'verfügen_' +'uzie' +'unli' +'uble' +'trib' +'trete' +'trei' +'tert' +'tellung_' +'tel_' +'technische' +'techn' +'sv' +'success' +'submitted_' +'staaten_' +'ssen' +'speed_' +'sorge' +'sichtlich_' +'seat' +'schu' +'schrift' +'schem' +'scale' +'safe_' +'räge' +'round' +'ries' +'rf_' +'return' +'restaura' +'rende' +'ref' +'reached_' +'ragend' +'rable_' +'propert' +'proper' +'produce_' +'predict' +'pho' +'pes' +'permanen' +'perfect_' +'pende' +'outs' +'omiss' +'ome' +'ohner' +'oduct' +'objectives_' +'nzi' +'nort' +'normale' +'niedrige' +'neben' +'nachd' +'möglich' +'mär' +'mobile_' +'mmi' +'mind_' +'menschliche' +'länd' +'lz' +'load_' +'lief' +'liebe' +'ld_' +'last' +'lass' +'käm' +'kurs' +'klas' +'ition' +'ision_' +'institutional_' +'inander' +'iger' +'ießen' +'iell' +'ice' +'hoffe_' +'hme_' +'hid' +'goods_' +'glich' +'giving_' +'geschlossen' +'gesche' +'geru' +'genannt' +'gelte' +'formul' +'force' +'folgen' +'final_' +'fft' +'famous_' +'expert' +'erran' +'erlaub' +'erheblich' +'entwickelte' +'entwickelt_' +'ensw' +'eln_' +'eland_' +'doc' +'diff' +'deser' +'dde' +'consumption_' +'considered_' +'considerabl' +'conduc' +'compare' +'cod' +'class_' +'chun' +'chinesisch' +'charge_' +'bud' +'bon_' +'bind' +'bewer' +'bedeutend' +'ava' +'ausgestattet' +'atl' +'announc' +'angen' +'ande' +'ahe' +'aggr' +'administ' +'additional_' +'accommodation_' +'abhäng' +'Whe' +'Wann_' +'Vorsitz' +'Viele_' +'Verwendung' +'Verwaltung' +'Verfass' +'Tage_' +'Syria' +'Swe' +'Sun' +'Spie' +'Sicht' +'Security_' +'Schiff' +'Run' +'Rou' +'Rights_' +'Problem' +'Ord' +'Many_' +'Mala' +'Lisbon_' +'Lehr' +'Landw' +'Jul' +'Informati' +'Hinter' +'Herze' +'Gua' +'Gree' +'Golf' +'Ged' +'Fro' +'Friede' +'Forderung' +'Folgen_' +'Find' +'Federal_' +'Entscheid' +'Eink' +'ER' +'Damen_' +'Com' +'Colo' +'Blick_' +'Bild_' +'Betri' +'Bekämpfung_' +'Ausgaben_' +'Asien' +'Appl' +'Anwendung_' +'Angesichts_' +'Anal' +'Americans_' +': „' +': "' +'300_' +'16' +'03' +'+_' +'#_' +'ürf' +'übert' +'überg' +'öc' +'ée' +'ändische' +'ältig' +' % _' +'  ' +'zudem_' +'zing_' +'zimmer' +'ystem_' +'yal' +'wrong_' +'world' +'work' +'weite' +'weder_' +'wed' +'wachsende' +'wa_' +'völk' +'vy' +'vot' +'vermeid' +'verla' +'veau' +'untuk_' +'unl' +'unis' +'tzu' +'typ' +'transparent' +'term' +'tell_' +'tail' +'säch' +'swei' +'surviv' +'supply' +'strukt' +'steu' +'sta_' +'sra' +'später_' +'speak_' +'situated_' +'sight' +'shing' +'server_' +'schaft' +'sabo' +'respons' +'residen' +'rali' +'quantit' +'prüfen' +'produkti' +'produced_' +'produce' +'prem' +'preise' +'ppin' +'pot' +'plac' +'physic' +'persönliche' +'pension' +'owing' +'orn' +'organiza' +'olg' +'old' +'offens' +'offe' +'obs' +'nsw' +'nken_' +'ngs_' +'news_' +'negotia' +'mittle' +'meter_' +'mete' +'maxi' +'lesso' +'lent' +'leben' +'kulturell' +'kommende' +'klu' +'kennen' +'kauf' +'jährig' +'justif' +'justi' +'its' +'ism' +'irtschaft' +'ior_' +'international' +'intern' +'impre' +'impose' +'ierend' +'ics' +'ichtlich_' +'häufig_' +'hrte' +'hibi' +'heless_' +'hebe' +'hd' +'gän' +'guarantee' +'grund_' +'globale_' +'gets_' +'gesund' +'gers' +'generat' +'gei' +'geben' +'füg' +'fä' +'früher' +'frü' +'folgt' +'flight' +'files' +'fare_' +'fand' 
+'falls' +'exer' +'evo' +'europä' +'erzielt' +'erzi' +'erwä' +'erungen_' +'eria_' +'ergr' +'erent' +'erence_' +'ereit' +'equi' +'enor' +'emissions_' +'electi' +'eiten_' +'ehemalige' +'dür' +'dne' +'dle_' +'distanc' +'diffic' +'depart' +'deleg' +'defini' +'defin' +'defens' +'decided_' +'death_' +'ctu' +'craft' +'continu' +'comprehen' +'completely_' +'combat' +'chrift' +'chma' +'chea' +'chani' +'ces' +'caus' +'camera' +'brought_' +'beweg' +'besuch' +'berü' +'begr' +'bege' +'bby' +'balance' +'aust' +'atm' +'arc' +'annual_' +'angenomme' +'altung' +'akan_' +'afte' +'added_' +'achstum' +'Wirtschaftsw' +'Ware' +'Wahle' +'Vill' +'Veränderungen_' +'Verordnung' +'Untersuch' +'Umwelt_' +'Tun' +'Terr' +'Statu' +'Station_' +'Soli' +'Services_' +'Schaffung_' +'SP' +'Robe' +'Ric' +'Reihe_' +'Reform_' +'Rahm' +'Quell' +'Pub' +'Prot' +'Premi' +'Politik' +'Open' +'Män' +'Milit' +'Linux' +'Konzept' +'Kin' +'Kap' +'Juli' +'Jav' +'Italian' +'Instead_' +'Inde' +'Here_' +'HE' +'Gren' +'Great' +'Gest' +'Gemeinschafts' +'Gebä' +'Gaz' +'Fäh' +'Fund_' +'Fisch' +'Finanzm' +'Finally_' +'Famili' +'Ers' +'Einfluss_' +'Durch_' +'December_' +'Dazu_' +'Centr' +'Center' +'Beweg' +'Benutzer' +'Basi' +'Asi' +'Apartment' +'Ante' +'Ann' +'Angriff' +'Alle' +'1791_' +'1781' +'’' +'у' +'»' +'&' +'+' +'$' +'Ü' +'қ' +']' +'б' +'«' +'–' +'ç' +';' +'­' +'з' +'й' +'č' +':' +'я' +'г' +'ž' +'ж' +'™' +'ı' +'ô' +'‘' +'{' +'?' +'`' +'ú' +'ь' +'ê' +'}' +'@' +'•' +'ң' +'ш' +'·' +'>' +'|' +'ł' +'ã' +'°' +'х' +'´' +'α' +'å' +'ө' +'ğ' +'ø' +'²' +'ч' +'â' +'ο' +'ε' +'�' +'„' +'ц' +'ë' +'א' +'ұ' +'ә' +'ғ' +'э' +'ń' +'ć' +'Ã' +'ү' +'Б' +'ι' +'ע' +'ю' +'μ' +'Č' +'ф' +'С' +'И' +'τ' +'Š' +'ý' +'©' +'#' +'†' +'ا' +'י' +'Т' +'К' +'Г' +'ρ' +'Ž' +'ż' +'ò' +'ï' +'î' +'£' +'−' +'ي' +'ט' +'ג' +'щ' +'σ' +'ş' +'œ' +'ě' +'ę' +'ā' +'õ' +'¿' +'º' +'~' +'ن' +'ل' +'ف' +'ر' +'ר' +'נ' +'Ж' +'Д' +'υ' +'ν' +'λ' +'ś' +'ù' +'ì' +'Ñ' +'É' +'Á' +'§' +'–' +'ー' +'‚' +'م' +'ק' +'ד' +'Я' +'П' +'О' +'Л' +'Е' +'А' +'π' +'κ' +'θ' +'β' +'ū' +'Ś' +'ō' +'æ' +'Ê' +'Â' +'¼' +'¶' +'¥' +'' +'년' +'語' +'简' +'本' +'日' +'文' +'年' +'中' +'ṳ' +'ศ' +'พ' +'ा' +'र' +'ى' +'ه' +'ص' +'ت' +'ب' +'פ' +'ס' +'ן' +'ו' +'ֿ' +'В' +'ω' +'χ' +'δ' +'Ω' +'̤' +'ư' +'ů' +'ř' +'ľ' +'ė' +'ĕ' +'ą' +'û' +'À' +'½' +'¹' +'¤' +'¡' +'’' +':' +'' +'fi' +'黵' +'黃' +'鰀' +'鋘' +'鋓' +'遝' +'蒸' +'致' +'美' +'网' +'紙' +'熨' +'斗' +'応' +'女' +'味' +'友' +'信' +'介' +'丨' +'一' +'ャ' +'バ' +'チ' +'ジ' +'カ' +'ん' +'ら' +'め' +'●' +'▼' +'→' +'※' +'ớ' +'ọ' +'ị' +'ẽ' +'ẻ' +'ấ' +'ी' +'ि' +'य' +'ब' +'त' +'छ' +'आ' +'ِ' +'ك' +'غ' +'ع' +'د' +'ج' +'إ' +'،' +'צ' +'ל' +'ה' +'Қ' +'Ғ' +'Э' +'Ш' +'Ц' +'Х' +'Р' +'М' +'φ' +'ζ' +'γ' +'Χ' +'Τ' +'Ι' +'Ε' +'̯' +'̆' +'ː' +'ˈ' +'ɾ' +'ɛ' +'ɐ' +'ſ' +'ű' +'ŭ' +'ő' +'Ő' +'ŏ' +'ň' +'İ' +'ī' +'đ' +'Đ' +'ă' +'à' +'Ô' +'Ó' +'È' +'Å' +'¾' +'µ' +'³' +'¬' +'¢' +'' +'™' +'—' +'“' +'' +'^' +'<' diff --git a/tensor2tensor/utils/__init__.py b/tensor2tensor/utils/__init__.py index 27d533abc..ff174dd63 100644 --- a/tensor2tensor/utils/__init__.py +++ b/tensor2tensor/utils/__init__.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tensor2tensor/utils/adafactor.py b/tensor2tensor/utils/adafactor.py new file mode 100644 index 000000000..d54d9adf9 --- /dev/null +++ b/tensor2tensor/utils/adafactor.py @@ -0,0 +1,360 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Optimization."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+from tensor2tensor.layers import common_layers
+from tensor2tensor.utils import quantization
+
+import tensorflow.compat.v1 as tf
+
+
+class AdafactorOptimizer(tf.train.Optimizer):
+  """Optimizer that implements the Adafactor algorithm.
+
+  Adafactor is described in https://arxiv.org/abs/1804.04235.
+
+  Adafactor is most similar to Adam (Kingma and Ba); the major differences are:
+
+  1. For a two-dimensional AxB weight matrix, Adafactor uses only A+B auxiliary
+     parameters to maintain the second-moment estimator, instead of AB.
+     This is advantageous on memory-limited systems. In addition, beta1
+     (momentum) is set to zero by default, saving an additional auxiliary
+     parameter per weight. Variables with >=3 dimensions are treated as
+     collections of two-dimensional matrices - factorization is over the final
+     two dimensions.
+
+  2. Adafactor incorporates "update-clipping" - a scale-invariant analog of
+     gradient clipping. This adds stability.
+
+  3. Adafactor does not require an external "learning rate". By default, it
+     incorporates a relative-update-scale schedule, corresponding to
+     inverse-square-root learning-rate-decay in Adam. We hope this works well
+     for most applications.
+
+  ALGORITHM:
+
+  parameter -= absolute_update_scale * clip(grad / grad_scale)
+
+  where:
+
+    absolute_update_scale := relative_update_scale * parameter_scale
+    relative_update_scale := min((step_num + 1)**-0.5, 1e-2)
+    parameter_scale := max(rms(var), epsilon2)
+    clip(x) := x / max(1.0, rms(x))
+    grad_scale := tf.sqrt(v)   (v is the second-moment estimator)
+
+  The second-moment estimator v is maintained in a manner similar to Adam:
+  We initialize
+  ```
+  if var is 2-dimensional:
+    v_r <- zeros([num_rows])
+    v_c <- zeros([num_cols])
+  if var is 0-dimensional or 1-dimensional:
+    v <- zeros(shape(var))
+  ```
+
+  The update rule is as follows:
+  ```
+  decay_rate = 1 - (step_num + 1) ^ -0.8
+  grad_squared = tf.square(grad) + epsilon1
+  if var is 2-dimensional:
+    v_r <- decay_rate * v_r + (1 - decay_rate) * reduce_mean(grad_squared, 1)
+    v_c <- decay_rate * v_c + (1 - decay_rate) * reduce_mean(grad_squared, 0)
+    v = outer_prod(v_r, v_c) / reduce_mean(v_r)
+  if var is 0-dimensional or 1-dimensional:
+    v <- decay_rate * v + (1 - decay_rate) * grad_squared
+  ```
+
+  For variables with >=3 dimensions, we factorize the second-moment accumulator
+  over the final 2 dimensions. See the code for details.
+
+
+  Several parts of this algorithm are configurable from the initializer.
+
+    multiply_by_parameter_scale: If True, then compute absolute_update_scale
+      as described above. If False, let absolute_update_scale be the externally
+      supplied learning_rate.
+    learning_rate: represents relative_update_scale if
+      multiply_by_parameter_scale==True, or absolute_update_scale if
+      multiply_by_parameter_scale==False.
+ decay_rate: Decay rate of the second moment estimator (varies by step_num). + This should be set to a function such that: + 1-1/(step_num + 1) <= decay_rate(step_num) < 1.0 + beta1: enables momentum, as in Adam. Uses extra memory if nonzero. + clipping_threshold: should be >=1.0 or None for no update clipping + factored: whether to factor the second-moment estimator. True means + less memory usage. + + """ + + def __init__(self, + multiply_by_parameter_scale=True, + learning_rate=None, + decay_rate=None, + beta1=0.0, + clipping_threshold=1.0, + factored=True, + simulated_quantize_bits=None, + parameter_encoding=None, + use_locking=False, + name="Adafactor", + epsilon1=1e-30, + epsilon2=1e-3): + """Construct a new Adafactor optimizer. + + See class comment. + + Args: + multiply_by_parameter_scale: a boolean + learning_rate: an optional Scalar or callable. + decay_rate: an optional Scalar. + beta1: a float value between 0 and 1 + clipping_threshold: an optional float >= 1 + factored: a boolean - whether to use factored second-moment estimator + for 2d variables + simulated_quantize_bits: train with simulated quantized parameters + (experimental) + parameter_encoding: a ParameterEncoding object to use in the case of + bfloat16 variables. + use_locking: If True use locks for update operations. + name: Optional name for the operations created when applying gradients. + Defaults to "AdafactorOptimizer". + epsilon1: Regularization constant for squared gradient. + epsilon2: Regularization constant for parameter scale. + + Raises: + ValueError: if absolute_update_scale and relative_update_scale_fn are both + present or both absent. + """ + super(AdafactorOptimizer, self).__init__(use_locking, name) + self._multiply_by_parameter_scale = multiply_by_parameter_scale + if learning_rate is None: + learning_rate = self._learning_rate_default(multiply_by_parameter_scale) + self._learning_rate = learning_rate + if decay_rate is None: + decay_rate = self._decay_rate_default() + self._decay_rate = decay_rate + self._beta1 = beta1 + self._clipping_threshold = clipping_threshold + self._factored = factored + self._simulated_quantize_bits = simulated_quantize_bits + self._parameter_encoding = parameter_encoding + self._quantization_noise = quantization.noise_from_step_num() + self._epsilon1 = epsilon1 + self._epsilon2 = epsilon2 + + def _should_use_factored_second_moment_estimate(self, shape): + """Should we use a factored second moment estimator. + + Based on the shape of the variable. 
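To make the update rule quoted in the class docstring above concrete, here is a minimal NumPy sketch of one factored Adafactor step for a 2-D weight matrix. It follows the formulas given above (decay rate `1 - (step + 1)^-0.8`, relative update scale `min((step + 1)^-0.5, 1e-2)`, RMS update clipping); the helper name `adafactor_step` and the toy shapes are illustrative only, not part of the library.

```python
import numpy as np

def adafactor_step(var, grad, v_r, v_c, step_num,
                   epsilon1=1e-30, epsilon2=1e-3, clipping_threshold=1.0):
  """One illustrative factored Adafactor update for a 2-D matrix."""
  decay_rate = 1.0 - (step_num + 1.0) ** -0.8
  grad_squared = np.square(grad) + epsilon1

  # Factored second-moment estimator: one row vector and one column vector.
  v_r = decay_rate * v_r + (1.0 - decay_rate) * grad_squared.mean(axis=1)
  v_c = decay_rate * v_c + (1.0 - decay_rate) * grad_squared.mean(axis=0)
  v = np.outer(v_r, v_c) / v_r.mean()

  # Scale the gradient, clip the update by its RMS, then apply the
  # relative update scale times the parameter scale.
  x = grad / np.sqrt(v)
  x /= max(1.0, np.sqrt(np.mean(np.square(x))) / clipping_threshold)
  relative_update_scale = min((step_num + 1.0) ** -0.5, 1e-2)
  parameter_scale = max(np.sqrt(np.mean(np.square(var))), epsilon2)
  return var - relative_update_scale * parameter_scale * x, v_r, v_c

# Toy usage with a 4x3 weight matrix.
rng = np.random.RandomState(0)
w, g = rng.randn(4, 3), rng.randn(4, 3)
w, v_r, v_c = adafactor_step(w, g, np.zeros(4), np.zeros(3), step_num=0)
```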
+ + Args: + shape: a list of integers + Returns: + a boolean + """ + return self._factored and len(shape) >= 2 + + def _create_slots(self, var_list): + for var in var_list: + shape = var.get_shape().as_list() + if self._beta1: + self._zeros_slot(var, "m", self._name) + if self._should_use_factored_second_moment_estimate(shape): + r_val = tf.zeros(shape[:-1], dtype=tf.float32) + c_val = tf.zeros(shape[:-2] + shape[-1:], dtype=tf.float32) + self._get_or_make_slot(var, r_val, "vr", self._name) + self._get_or_make_slot(var, c_val, "vc", self._name) + else: + v_val = tf.zeros(shape, dtype=tf.float32) + self._get_or_make_slot(var, v_val, "v", self._name) + + def _apply_dense(self, grad, var): + return self._resource_apply_dense(grad, var) + + def _apply_sparse(self, grad, var): + return self._apply_dense(tf.convert_to_tensor(grad), var) + + def _resource_apply_sparse(self, grad, handle, indices): + return self._resource_apply_dense( + tf.convert_to_tensor(tf.IndexedSlices(grad, indices, tf.shape(handle))), + handle) + + def _parameter_scale(self, var): + """Estimate the scale of the parameters from the current values. + + We include a minimum value of 0.001 to give it a chance to escape 0 + if it was zero-initialized. + + Instead of using the value, we could impute the scale from the shape, + as initializers do. + + Args: + var: a variable or Tensor. + Returns: + a Scalar + """ + return tf.maximum(reduce_rms(var), self._epsilon2) + + def _resource_apply_dense(self, grad, handle): + var = handle + grad = tf.to_float(grad) + grad_squared = tf.square(grad) + self._epsilon1 + grad_squared_mean = tf.reduce_mean(grad_squared) + decay_rate = self._call_if_callable(self._decay_rate) + update_scale = self._call_if_callable(self._learning_rate) + update_scale = tf.convert_to_tensor(update_scale, name="update_scale") + update_scale = tf.cast(update_scale, grad_squared_mean.dtype.base_dtype) + old_val = var + if var.dtype.base_dtype == tf.bfloat16: + old_val = tf.to_float(self._parameter_encoding.decode(old_val)) + if self._multiply_by_parameter_scale: + update_scale *= tf.to_float(self._parameter_scale(old_val)) + # HACK: Make things dependent on grad. + # This confounds the XLA rewriter and keeps it from fusing computations + # across different variables. This fusion is a bad for HBM usage, since + # it causes the gradients to persist in memory. 
+ decay_rate += grad_squared_mean * 1e-30 + update_scale += grad_squared_mean * 1e-30 + # END HACK + mixing_rate = 1.0 - decay_rate + shape = var.get_shape().as_list() + updates = [] + if self._should_use_factored_second_moment_estimate(shape): + grad_squared_row_mean = tf.reduce_mean(grad_squared, -1) + grad_squared_col_mean = tf.reduce_mean(grad_squared, -2) + vr = self.get_slot(var, "vr") + new_vr = (decay_rate * vr + mixing_rate * grad_squared_row_mean) + vc = self.get_slot(var, "vc") + new_vc = (decay_rate * vc + mixing_rate * grad_squared_col_mean) + vr_update = tf.assign(vr, new_vr, use_locking=self._use_locking) + vc_update = tf.assign(vc, new_vc, use_locking=self._use_locking) + updates = [vr_update, vc_update] + long_term_mean = tf.reduce_mean(new_vr, -1, keepdims=True) + r_factor = tf.rsqrt(new_vr / long_term_mean) + c_factor = tf.rsqrt(new_vc) + x = grad * tf.expand_dims(r_factor, -1) * tf.expand_dims(c_factor, -2) + else: + v = self.get_slot(var, "v") + new_v = decay_rate * v + mixing_rate * grad_squared + v_update = tf.assign(v, new_v, use_locking=self._use_locking) + updates = [v_update] + x = grad * tf.rsqrt(new_v) + if self._clipping_threshold is not None: + clipping_denom = tf.maximum(1.0, reduce_rms(x) / self._clipping_threshold) + x /= clipping_denom + subtrahend = update_scale * x + if self._beta1: + m = self.get_slot(var, "m") + new_m = self._beta1 * tf.to_float(m) + (1.0 - self._beta1) * subtrahend + subtrahend = new_m + new_m = common_layers.cast_like(new_m, var) + updates.append(tf.assign(m, new_m, use_locking=self._use_locking)) + new_val = tf.to_float(old_val) - subtrahend + if var.dtype.base_dtype == tf.bfloat16: + new_val = self._parameter_encoding.encode( + new_val, self._quantization_noise) + if self._simulated_quantize_bits: + new_val = quantization.simulated_quantize( + var - subtrahend, self._simulated_quantize_bits, + self._quantization_noise) + new_val = tf.cast(new_val, var.dtype) + var_update = tf.assign(var, new_val, use_locking=self._use_locking) + updates = [var_update] + updates + return tf.group(*updates) + + def _decay_rate_default(self): + return adafactor_decay_rate_pow(0.8) + + def _learning_rate_default(self, multiply_by_parameter_scale): + learning_rate = tf.minimum(tf.rsqrt(step_num() + 1.0), 0.01) + if not multiply_by_parameter_scale: + learning_rate *= 0.05 + return learning_rate + + +def adafactor_decay_rate_adam(beta2): + """Second-moment decay rate like Adam, subsuming the correction factor. + + Args: + beta2: a float between 0 and 1 + Returns: + a scalar + """ + t = tf.to_float(tf.train.get_or_create_global_step()) + 1.0 + decay = beta2 * (1.0 - tf.pow(beta2, t - 1.0)) / (1.0 - tf.pow(beta2, t)) + # decay = tf.cond(tf.equal(t, 1.0), lambda: beta2, lambda: decay) + return decay + + +def adafactor_decay_rate_pow(exponent): + """Second moment decay rate where memory-length grows as step_num^exponent. + + Args: + exponent: a float between 0 and 1 + Returns: + a scalar + """ + return 1.0 - tf.pow((step_num() + 1.0), -exponent) + + +def step_num(): + return tf.to_float(tf.train.get_or_create_global_step()) + + +def adafactor_optimizer_from_hparams(hparams, lr): + """Create an Adafactor optimizer based on model hparams. + + Args: + hparams: model hyperparameters + lr: learning rate scalar. 
+ Returns: + an AdafactorOptimizer + Raises: + ValueError: on illegal values + """ + if hparams.optimizer_adafactor_decay_type == "adam": + decay_rate = adafactor_decay_rate_adam( + hparams.optimizer_adafactor_beta2) + elif hparams.optimizer_adafactor_decay_type == "pow": + decay_rate = adafactor_decay_rate_pow( + hparams.optimizer_adafactor_memory_exponent) + else: + raise ValueError("unknown optimizer_adafactor_decay_type") + if hparams.weight_dtype == "bfloat16": + parameter_encoding = quantization.EighthPowerEncoding() + else: + parameter_encoding = None + return AdafactorOptimizer( + multiply_by_parameter_scale=( + hparams.optimizer_adafactor_multiply_by_parameter_scale), + learning_rate=lr, + decay_rate=decay_rate, + beta1=hparams.optimizer_adafactor_beta1, + clipping_threshold=hparams.optimizer_adafactor_clipping_threshold, + factored=hparams.optimizer_adafactor_factored, + simulated_quantize_bits=getattr( + hparams, "simulated_parameter_quantize_bits", 0), + parameter_encoding=parameter_encoding, + use_locking=False, + name="Adafactor") + + +def reduce_rms(x): + return tf.sqrt(tf.reduce_mean(tf.square(x))) diff --git a/tensor2tensor/utils/adafactor_test.py b/tensor2tensor/utils/adafactor_test.py new file mode 100644 index 000000000..924296866 --- /dev/null +++ b/tensor2tensor/utils/adafactor_test.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for adafactor.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.utils import adafactor + +import tensorflow as tf + + +class AdafactorTest(tf.test.TestCase): + + def testCallableLearningRate(self): + def lr(): + return 0.01 + + opt = adafactor.AdafactorOptimizer(learning_rate=lr) + v1 = tf.Variable([1., 2.]) + v2 = tf.Variable([3., 4.]) + with tf.GradientTape() as tape: + tape.watch([v1, v2]) + loss = v1 * v2 + v1_grad, v2_grad = tape.gradient(loss, [v1, v2]) + opt.apply_gradients(((v1_grad, v1), (v2_grad, v2))) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/utils/adv_attack_utils.py b/tensor2tensor/utils/adv_attack_utils.py new file mode 100644 index 000000000..472bf8ecd --- /dev/null +++ b/tensor2tensor/utils/adv_attack_utils.py @@ -0,0 +1,200 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
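For completeness, a sketch of how `adafactor_optimizer_from_hparams` defined above might be driven. The plain `SimpleNamespace` here stands in for the real hyperparameter object, and the field values shown are assumptions for illustration rather than recommended settings.

```python
from types import SimpleNamespace

from tensor2tensor.utils import adafactor

# Stand-in for the model's hparams; only the fields read by the factory above.
hparams = SimpleNamespace(
    optimizer_adafactor_decay_type="pow",      # or "adam"
    optimizer_adafactor_memory_exponent=0.8,   # used by the "pow" schedule
    optimizer_adafactor_beta2=0.999,           # used by the "adam" schedule
    optimizer_adafactor_multiply_by_parameter_scale=True,
    optimizer_adafactor_beta1=0.0,
    optimizer_adafactor_clipping_threshold=1.0,
    optimizer_adafactor_factored=True,
    weight_dtype="float32")

# Passing lr=None lets the optimizer fall back to its internal schedule.
opt = adafactor.adafactor_optimizer_from_hparams(hparams, lr=None)
```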
+ +"""Utilities to assist in performing adversarial attack using Cleverhans.""" + +from cleverhans import attacks +from cleverhans import model +from cleverhans import utils_tf + +import numpy as np + +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_attack +def fgsm(): + return attacks.FastGradientMethod + + +@registry.register_attack +def madry(): + return attacks.MadryEtAl + + +@registry.register_attack +def random(): + return RandomAttack + + +class T2TAttackModel(model.Model): + """Wrapper of Cleverhans Model object.""" + + def __init__(self, model_fn, features, params, config, scope=None): + self._model_fn = model_fn + self._params = params + self._config = config + self._logits_dict = {} + self._additional_features = features + self._scope = scope + + def fprop(self, x): + if x.name in self._logits_dict: + return self._logits_dict[x.name] + + x = tf.map_fn(tf.image.per_image_standardization, x) + self._additional_features['inputs'] = x + + if self._scope is None: + scope = tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE) + else: + scope = tf.variable_scope(self._scope, reuse=tf.AUTO_REUSE) + + with scope: + logits = self._model_fn( + self._additional_features, + None, + 'attack', + params=self._params, + config=self._config) + self._logits_dict[x.name] = logits + + return {model.Model.O_LOGITS: tf.reshape(logits, [-1, logits.shape[-1]])} + + +class RandomAttack(attacks.FastGradientMethod): + """Blackbox random sample attack.""" + + def __init__(self, m, back='tf', sess=None): + if not isinstance(m, model.Model): + m = model.CallableModelWrapper(m, 'probs') + + super(RandomAttack, self).__init__(m, back, sess) + self.feedable_kwargs = { + 'eps': np.float32, + 'num_samples': np.float32, + 'num_batches': np.float32, + 'y': np.float32, + 'y_target': np.float32, + 'clip_min': np.float32, + 'clip_max': np.float32 + } + self.structural_kwargs = ['ord'] + + def generate(self, x, **kwargs): + # Parse and save attack-specific parameters + assert self.parse_params(**kwargs) + + labels, _ = self.get_or_guess_labels(x, kwargs) + + x_shape = x.shape.as_list() + deltas_shape = [x_shape[0], self.num_samples] + x_shape[1:] + + def cond(i, old_adv_x, old_loss): + del old_adv_x, old_loss + return tf.less(i, self.num_batches) + + def body(i, old_adv_x, old_loss, labels=labels): + """Find example with max loss value amongst batch of perturbations.""" + deltas = tf.random_uniform(deltas_shape) + + # generate uniform samples from the l^p unit ball interior + if self.ord == np.inf: + deltas *= 2. * self.eps + deltas -= self.eps + elif self.ord == 1: + # ref: https://mathoverflow.net/questions/9185/how-to-generate-random-points-in-ell-p-balls pylint: disable=line-too-long + exp = -tf.log(deltas) + shift = -tf.log(tf.random_uniform(deltas_shape[:2])) + norm = tf.reduce_sum(tf.abs(exp), range(2, len(deltas_shape) - 2)) + scale = tf.reshape(shift + norm, + deltas_shape[:2] + [1] * (len(deltas_shape) - 2)) + deltas = exp / scale + elif self.ord == 2: + # ref: https://blogs.sas.com/content/iml/2016/04/06/generate-points-uniformly-in-ball.html pylint: disable=line-too-long + dims = tf.reduce_prod(deltas_shape[2:]) + deltas = tf.pow(deltas, 1. 
/ dims) + normal = tf.random_normal(deltas) + normal /= tf.sqrt( + tf.reduce_sum(normal**2, axis=range(2, + len(deltas_shape) - 2)), + keepdims=True) + deltas *= normal + else: + raise NotImplementedError('Only L-inf, L1 and L2 norms are ' + 'currently implemented.') + + adv_x = tf.expand_dims(x, 1) + deltas + labels = tf.expand_dims(labels, 1) + labels = tf.tile(labels, [1, self.num_samples, 1]) + + if (self.clip_min is not None) and (self.clip_max is not None): + adv_x = tf.clip_by_value(adv_x, self.clip_min, self.clip_max) + + adv_x_r = tf.reshape(adv_x, [-1] + deltas_shape[2:]) + preds = self.model.get_probs(adv_x_r) + preds_shape = preds.shape.as_list() + preds = tf.reshape(preds, deltas_shape[:2] + preds_shape[1:]) + + if labels is None: + # Using model predictions as ground truth to avoid label leaking + preds_max = tf.reduce_max(preds, -1, keep_dims=True) + labels = tf.to_float(tf.equal(preds, preds_max)) + labels = tf.stop_gradient(labels) + labels = labels / tf.reduce_sum(labels, -1, keep_dims=True) + + # Compute loss + loss = utils_tf.model_loss(labels, preds, mean=False) + if self.y_target is not None: + loss = -loss + + # find the maximum loss value + input_idx = tf.one_hot(tf.argmax(loss, axis=1), self.num_samples, axis=1) + loss = tf.reduce_sum(loss * input_idx, axis=1) + input_idx = tf.reshape(input_idx, + deltas_shape[:2] + [1] * (len(deltas_shape) - 2)) + adv_x = tf.reduce_sum(adv_x * input_idx, axis=1) + + condition = tf.greater(old_loss, loss) + new_loss = tf.where(condition, old_loss, loss) + new_adv_x = tf.where(condition, old_adv_x, adv_x) + print(new_loss, new_adv_x) + + return i + 1, new_adv_x, new_loss + + _, adv_x, _ = tf.while_loop( + cond, body, + [tf.zeros([]), + tf.zeros_like(x), -1e10 * tf.ones(x_shape[0])], back_prop=False) + + return adv_x + + def parse_params( + self, + eps=0.3, + num_samples=100, + num_batches=100, + ord=np.inf, # pylint: disable=redefined-builtin + y=None, + y_target=None, + clip_min=None, + clip_max=None, + **kwargs): + self.num_samples = num_samples + self.num_batches = num_batches + return super(RandomAttack, self).parse_params(eps, ord, y, y_target, + clip_min, clip_max, **kwargs) diff --git a/tensor2tensor/utils/avg_checkpoints.py b/tensor2tensor/utils/avg_checkpoints.py index 4e5286f62..e0be08ed4 100644 --- a/tensor2tensor/utils/avg_checkpoints.py +++ b/tensor2tensor/utils/avg_checkpoints.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,18 +18,20 @@ from __future__ import division from __future__ import print_function -# Dependency imports - +import os import numpy as np import six from six.moves import zip # pylint: disable=redefined-builtin -import tensorflow as tf +import tensorflow.compat.v1 as tf flags = tf.flags FLAGS = flags.FLAGS flags.DEFINE_string("checkpoints", "", "Comma-separated list of checkpoints to average.") +flags.DEFINE_integer("num_last_checkpoints", 0, + "Averages the last N saved checkpoints." + " If the checkpoints flag is set, this is ignored.") flags.DEFINE_string("prefix", "", "Prefix (e.g., directory) to append to each checkpoint.") flags.DEFINE_string("output_path", "/tmp/averaged.ckpt", @@ -40,30 +43,45 @@ def checkpoint_exists(path): tf.gfile.Exists(path + ".index")) -def main(unused_argv): - # Get the checkpoints list from flags and run some basic checks. 
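The `RandomAttack` above draws perturbations uniformly from an l^p ball, following the recipes in the references linked in the code. As a plain NumPy illustration (not the library's TF implementation), uniform samples in the L-inf and L2 balls can be drawn like this:

```python
import numpy as np

def sample_linf_ball(batch, dim, eps, rng):
  """Uniform in the L-inf ball: each coordinate independently in [-eps, eps]."""
  return rng.uniform(-eps, eps, size=(batch, dim))

def sample_l2_ball(batch, dim, eps, rng):
  """Uniform in the L2 ball: random direction, radius scaled by U^(1/dim)."""
  direction = rng.randn(batch, dim)
  direction /= np.linalg.norm(direction, axis=1, keepdims=True)
  radius = eps * rng.rand(batch, 1) ** (1.0 / dim)
  return direction * radius

rng = np.random.RandomState(0)
deltas = sample_l2_ball(batch=4, dim=784, eps=0.3, rng=rng)
assert np.all(np.linalg.norm(deltas, axis=1) <= 0.3)
```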
- checkpoints = [c.strip() for c in FLAGS.checkpoints.split(",")] - checkpoints = [c for c in checkpoints if c] - if not checkpoints: - raise ValueError("No checkpoints provided for averaging.") - if flags.FLAGS.prefix: - checkpoints = [FLAGS.prefix + c for c in checkpoints] +def main(_): + if FLAGS.checkpoints: + # Get the checkpoints list from flags and run some basic checks. + checkpoints = [c.strip() for c in FLAGS.checkpoints.split(",")] + checkpoints = [c for c in checkpoints if c] + if not checkpoints: + raise ValueError("No checkpoints provided for averaging.") + if FLAGS.prefix: + checkpoints = [FLAGS.prefix + c for c in checkpoints] + else: + assert FLAGS.num_last_checkpoints >= 1, "Must average at least one model" + assert FLAGS.prefix, ("Prefix must be provided when averaging last" + " N checkpoints") + checkpoint_state = tf.train.get_checkpoint_state( + os.path.dirname(FLAGS.prefix)) + # Checkpoints are ordered from oldest to newest. + checkpoints = checkpoint_state.all_model_checkpoint_paths[ + -FLAGS.num_last_checkpoints:] + checkpoints = [c for c in checkpoints if checkpoint_exists(c)] if not checkpoints: - raise ValueError( - "None of the provided checkpoints exist. %s" % FLAGS.checkpoints) + if FLAGS.checkpoints: + raise ValueError( + "None of the provided checkpoints exist. %s" % FLAGS.checkpoints) + else: + raise ValueError("Could not find checkpoints at %s" % + os.path.dirname(FLAGS.prefix)) # Read variables from all checkpoints and average them. tf.logging.info("Reading variables and averaging checkpoints:") for c in checkpoints: tf.logging.info("%s ", c) - var_list = tf.contrib.framework.list_variables(checkpoints[0]) + var_list = tf.train.list_variables(checkpoints[0]) var_values, var_dtypes = {}, {} for (name, shape) in var_list: if not name.startswith("global_step"): var_values[name] = np.zeros(shape) for checkpoint in checkpoints: - reader = tf.contrib.framework.load_checkpoint(checkpoint) + reader = tf.train.load_checkpoint(checkpoint) for name in var_values: tensor = reader.get_tensor(name) var_dtypes[name] = tensor.dtype @@ -72,10 +90,11 @@ def main(unused_argv): for name in var_values: # Average. var_values[name] /= len(checkpoints) - tf_vars = [ - tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[name]) - for v in var_values - ] + with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE): + tf_vars = [ + tf.get_variable(v, shape=var_values[v].shape, dtype=var_dtypes[v]) + for v in var_values + ] placeholders = [tf.placeholder(v.dtype, shape=v.shape) for v in tf_vars] assign_ops = [tf.assign(v, p) for (v, p) in zip(tf_vars, placeholders)] global_step = tf.Variable( @@ -84,14 +103,14 @@ def main(unused_argv): # Build a model consisting only of variables, set them to the average values. with tf.Session() as sess: - sess.run(tf.initialize_all_variables()) + sess.run(tf.global_variables_initializer()) for p, assign_op, (name, value) in zip(placeholders, assign_ops, six.iteritems(var_values)): sess.run(assign_op, {p: value}) # Use the built saver to save the averaged checkpoint. 
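The script logic above boils down to an elementwise mean over the variables of the selected checkpoints (chosen either via `--checkpoints` or as the last `--num_last_checkpoints` found under `--prefix`). A toy NumPy illustration of that arithmetic, with made-up variable names:

```python
import numpy as np

# Three fake "checkpoints", each mapping variable name -> value.
checkpoints = [
    {"body/w": np.array([1.0, 2.0]), "body/b": np.array([0.0])},
    {"body/w": np.array([3.0, 4.0]), "body/b": np.array([3.0])},
    {"body/w": np.array([5.0, 6.0]), "body/b": np.array([6.0])},
]

averaged = {
    name: sum(ckpt[name] for ckpt in checkpoints) / len(checkpoints)
    for name in checkpoints[0]
}
print(averaged["body/w"], averaged["body/b"])  # [3. 4.] [3.]
```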
- saver.save(sess, flags.FLAGS.output_path, global_step=global_step) + saver.save(sess, FLAGS.output_path, global_step=global_step) - tf.logging.info("Averaged checkpoints saved in %s", flags.FLAGS.output_path) + tf.logging.info("Averaged checkpoints saved in %s", FLAGS.output_path) if __name__ == "__main__": diff --git a/tensor2tensor/utils/beam_search.py b/tensor2tensor/utils/beam_search.py index eacbf467f..43df9db90 100644 --- a/tensor2tensor/utils/beam_search.py +++ b/tensor2tensor/utils/beam_search.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,14 +13,20 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Implemetation of beam seach with penalties.""" +"""Implementation of beam search with penalties.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function -# Dependency imports -import tensorflow as tf +import math +import numpy as np + +from tensor2tensor.layers import common_layers +import tensorflow.compat.v1 as tf + +from tensorflow.python.ops import inplace_ops +from tensorflow.python.util import nest # Assuming EOS_ID is 1 EOS_ID = 1 @@ -27,12 +34,64 @@ INF = 1. * 1e7 -def log_prob_from_logits(logits): - return logits - tf.reduce_logsumexp(logits, axis=2, keep_dims=True) +def _merge_beam_dim(tensor): + """Reshapes first two dimensions in to single dimension. + + Args: + tensor: Tensor to reshape of shape [A, B, ...] + + Returns: + Reshaped tensor of shape [A*B, ...] + """ + shape = common_layers.shape_list(tensor) + shape[0] *= shape[1] # batch -> batch * beam_size + shape.pop(1) # Remove beam dim + return tf.reshape(tensor, shape) + + +def _unmerge_beam_dim(tensor, batch_size, beam_size): + """Reshapes first dimension back to [batch_size, beam_size]. + + Args: + tensor: Tensor to reshape of shape [batch_size*beam_size, ...] + batch_size: Tensor, original batch size. + beam_size: int, original beam size. + + Returns: + Reshaped tensor of shape [batch_size, beam_size, ...] + """ + shape = common_layers.shape_list(tensor) + new_shape = [batch_size] + [beam_size] + shape[1:] + return tf.reshape(tensor, new_shape) + + +def _expand_to_beam_size(tensor, beam_size): + """Tiles a given tensor by beam_size. + + Args: + tensor: tensor to tile [batch_size, ...] + beam_size: How much to tile the tensor by. + + Returns: + Tiled tensor [batch_size, beam_size, ...] + """ + tensor = tf.expand_dims(tensor, axis=1) + tile_dims = [1] * tensor.shape.ndims + tile_dims[1] = beam_size + + return tf.tile(tensor, tile_dims) + + +def get_state_shape_invariants(tensor): + """Returns the shape of the tensor but sets middle dims to None.""" + shape = tensor.shape.as_list() + for i in range(1, len(shape) - 1): + shape[i] = None + return tf.TensorShape(shape) def compute_batch_indices(batch_size, beam_size): - """Computes the i'th coodinate that contains the batch index for gathers. + """Computes the i'th coordinate that contains the batch index for gathers. Batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. It says which batch the beam item is in. 
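The helpers above (`_expand_to_beam_size`, `_merge_beam_dim`, `_unmerge_beam_dim`) perform a shape round trip on decoding states so the model can be called once over all beams. A small NumPy sketch of the same reshapes, with arbitrary toy sizes:

```python
import numpy as np

batch_size, beam_size, hidden = 2, 3, 4
state = np.arange(batch_size * hidden, dtype=np.float32).reshape(batch_size, hidden)

# _expand_to_beam_size: [batch, ...] -> [batch, beam, ...]
expanded = np.tile(state[:, None, :], (1, beam_size, 1))        # (2, 3, 4)
# _merge_beam_dim: fold batch and beam together before calling the model.
merged = expanded.reshape(batch_size * beam_size, hidden)        # (6, 4)
# _unmerge_beam_dim: recover the per-beam structure afterwards.
unmerged = merged.reshape(batch_size, beam_size, hidden)         # (2, 3, 4)
assert np.array_equal(unmerged, expanded)
```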
This will create the i of the i,j coordinate @@ -49,14 +108,215 @@ def compute_batch_indices(batch_size, beam_size): return batch_pos -def compute_topk_scores_and_seq(sequences, scores, scores_to_gather, flags, - beam_size, batch_size): +def fast_tpu_gather(params, indices, name=None): + """Fast gather implementation for models running on TPU. + + This function use one_hot and batch matmul to do gather, which is faster + than gather_nd on TPU. For params that have dtype of int32 (sequences to + gather from), batch_gather is used to keep accuracy. + + Args: + params: A tensor from which to gather values. + [batch_size, original_size, ...] + indices: A tensor used as the index to gather values. + [batch_size, selected_size]. + name: A string, name of the operation (optional). + + Returns: + gather_result: A tensor that has the same rank as params. + [batch_size, selected_size, ...] + """ + with tf.name_scope(name): + dtype = params.dtype + + def _gather(params, indices): + """Fast gather using one_hot and batch matmul.""" + if dtype != tf.float32: + params = tf.to_float(params) + shape = common_layers.shape_list(params) + indices_shape = common_layers.shape_list(indices) + ndims = params.shape.ndims + # Adjust the shape of params to match one-hot indices, which is the + # requirement of Batch MatMul. + if ndims == 2: + params = tf.expand_dims(params, axis=-1) + if ndims > 3: + params = tf.reshape(params, [shape[0], shape[1], -1]) + gather_result = tf.matmul( + tf.one_hot(indices, shape[1], dtype=params.dtype), params) + if ndims == 2: + gather_result = tf.squeeze(gather_result, axis=-1) + if ndims > 3: + shape[1] = indices_shape[1] + gather_result = tf.reshape(gather_result, shape) + if dtype != tf.float32: + gather_result = tf.cast(gather_result, dtype) + return gather_result + + # If the dtype is int, use the gather instead of one_hot matmul to avoid + # precision loss. The max int value can be represented by bfloat16 in MXU is + # 256, which is smaller than the possible id values. Encoding/decoding can + # potentially used to make it work, but the benenfit is small right now. + if dtype.is_integer: + gather_result = tf.batch_gather(params, indices) + else: + gather_result = _gather(params, indices) + + return gather_result + + +def _create_make_unique(inputs): + """Replaces the lower bits of each element with iota. + + The iota is used to derive the index, and also serves the purpose to + make each element unique to break ties. + + Args: + inputs: A tensor with rank of 2 and dtype of tf.float32. + [batch_size, original_size]. + + Returns: + A tensor after element wise transformation, with dtype the same as inputs. + [batch_size, original_size]. + + Raises: + ValueError: If the rank of the input tensor does not equal 2. + """ + if inputs.shape.ndims != 2: + raise ValueError("Input of top_k_with_unique must be rank-2 " + "but got: %s" % inputs.shape) + + height = inputs.shape[0] + width = inputs.shape[1] + zeros = tf.zeros([height, width], dtype=tf.int32) + + # Count_mask is used to mask away the low order bits to ensure that every + # element is distinct. + log2_ceiling = int(math.ceil(math.log(int(width), 2))) + next_power_of_two = 1 << log2_ceiling + count_mask = ~(next_power_of_two - 1) + count_mask_r0 = tf.constant(count_mask) + count_mask_r2 = tf.fill([height, width], count_mask_r0) + + # Smallest_normal is the bit representation of the smallest positive normal + # floating point number. The sign is zero, exponent is one, and the fraction + # is zero. 
+ smallest_normal = 1 << 23 + smallest_normal_r0 = tf.constant(smallest_normal, dtype=tf.int32) + smallest_normal_r2 = tf.fill([height, width], smallest_normal_r0) + + # Low_bit_mask is used to mask away the sign bit when computing the absolute + # value. + low_bit_mask = ~(1 << 31) + low_bit_mask_r0 = tf.constant(low_bit_mask, dtype=tf.int32) + low_bit_mask_r2 = tf.fill([height, width], low_bit_mask_r0) + + iota = tf.tile(tf.expand_dims(tf.range(width, dtype=tf.int32), 0), + [height, 1]) + + # Compare the absolute value with positive zero to handle negative zero. + input_r2 = tf.bitcast(inputs, tf.int32) + abs_r2 = tf.bitwise.bitwise_and(input_r2, low_bit_mask_r2) + if_zero_r2 = tf.equal(abs_r2, zeros) + smallest_normal_preserving_sign_r2 = tf.bitwise.bitwise_or( + input_r2, smallest_normal_r2) + input_no_zeros_r2 = tf.where( + if_zero_r2, smallest_normal_preserving_sign_r2, input_r2) + + # Discard the low-order bits and replace with iota. + and_r2 = tf.bitwise.bitwise_and(input_no_zeros_r2, count_mask_r2) + or_r2 = tf.bitwise.bitwise_or(and_r2, iota) + return tf.bitcast(or_r2, tf.float32) + + +def _create_topk_unique(inputs, k): + """Creates the top k values in sorted order with indices. + + Args: + inputs: A tensor with rank of 2. [batch_size, original_size]. + k: An integer, number of top elements to select. + + Returns: + topk_r2: A tensor, the k largest elements. [batch_size, k]. + topk_indices_r2: A tensor, indices of the top k values. [batch_size, k]. + """ + height = inputs.shape[0] + width = inputs.shape[1] + neg_inf_r0 = tf.constant(-np.inf, dtype=tf.float32) + ones = tf.ones([height, width], dtype=tf.float32) + neg_inf_r2 = ones * neg_inf_r0 + inputs = tf.where(tf.is_nan(inputs), neg_inf_r2, inputs) + + # Select the current largest value k times and keep them in topk_r2. The + # selected largest values are marked as the smallest value to avoid being + # selected again. + tmp = inputs + topk_r2 = tf.zeros([height, k], dtype=tf.float32) + for i in range(k): + kth_order_statistic = tf.reduce_max(tmp, axis=1, keepdims=True) + k_mask = tf.tile(tf.expand_dims(tf.equal(tf.range(k), tf.fill([k], i)), 0), + [height, 1]) + topk_r2 = tf.where(k_mask, tf.tile(kth_order_statistic, [1, k]), topk_r2) + ge_r2 = tf.greater_equal(inputs, tf.tile(kth_order_statistic, [1, width])) + tmp = tf.where(ge_r2, neg_inf_r2, inputs) + + log2_ceiling = int(math.ceil(math.log(float(int(width)), 2))) + next_power_of_two = 1 << log2_ceiling + count_mask = next_power_of_two - 1 + mask_r0 = tf.constant(count_mask) + mask_r2 = tf.fill([height, k], mask_r0) + topk_r2_s32 = tf.bitcast(topk_r2, tf.int32) + topk_indices_r2 = tf.bitwise.bitwise_and(topk_r2_s32, mask_r2) + return topk_r2, topk_indices_r2 + + +def top_k_with_unique(inputs, k): + """Finds the values and indices of the k largests entries. + + Instead of doing sort like tf.nn.top_k, this function finds the max value + k times. The running time is proportional to k, which is be faster when k + is small. The current implementation supports only inputs of rank 2. + In addition, iota is used to replace the lower bits of each element, this + makes the selection more stable when there are equal elements. The + overhead is that output values are approximated. + + Args: + inputs: A tensor with rank of 2. [batch_size, original_size]. + k: An integer, number of top elements to select. + + Returns: + top_values: A tensor, the k largest elements in sorted order. + [batch_size, k]. + indices: A tensor, indices of the top_values. [batch_size, k]. 
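Setting aside the bit-level iota trick used above for tie breaking, the core idea of `_create_topk_unique` is to take the row-wise maximum k times, masking out each winner before the next pass. A conceptual NumPy sketch of that loop (the helper below is illustrative, not part of the library):

```python
import numpy as np

def topk_by_repeated_max(scores, k):
  """Row-wise top-k by taking the max k times and masking each winner."""
  scores = scores.astype(np.float64).copy()
  rows = np.arange(scores.shape[0])
  values = np.empty((scores.shape[0], k))
  indices = np.empty((scores.shape[0], k), dtype=np.int64)
  for i in range(k):
    idx = scores.argmax(axis=1)
    values[:, i] = scores[rows, idx]
    indices[:, i] = idx
    scores[rows, idx] = -np.inf  # mask the winner for the next pass
  return values, indices

vals, idx = topk_by_repeated_max(np.array([[0.1, 0.9, 0.4],
                                           [0.3, 0.2, 0.8]]), k=2)
# vals -> [[0.9, 0.4], [0.8, 0.3]], idx -> [[1, 2], [2, 0]]
```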
+ """ + unique_inputs = _create_make_unique(tf.cast(inputs, tf.float32)) + top_values, indices = _create_topk_unique(unique_inputs, k) + top_values = tf.cast(top_values, inputs.dtype) + return top_values, indices + + +def compute_topk_scores_and_seq(sequences, + scores, + scores_to_gather, + flags, + beam_size, + batch_size, + prefix="default", + states_to_gather=None, + use_tpu=False, + use_top_k_with_unique=True): """Given sequences and scores, will gather the top k=beam size sequences. This function is used to grow alive, and finished. It takes sequences, scores, and flags, and returns the top k from sequences, scores_to_gather, and flags based on the values in scores. + This method permits easy introspection using tfdbg. It adds three named ops + that are prefixed by `prefix`: + - _topk_seq: the tensor for topk_seq returned by this method. + - _topk_flags: the tensor for topk_finished_flags returned by this method. + - _topk_scores: the tensor for tokp_gathered_scores returned by this method. + Args: sequences: Tensor of sequences that we need to gather from. [batch_size, beam_size, seq_length] @@ -66,35 +326,71 @@ def compute_topk_scores_and_seq(sequences, scores, scores_to_gather, flags, [batch_size, beam_size]. We will return the gathered scores from here. Scores to gather is different from scores because for grow_alive, we will need to return log_probs, while for grow_finished, we will need to return - the length penalized scors. + the length penalized scores. flags: Tensor of bools for sequences that say whether a sequence has reached EOS or not beam_size: int batch_size: int + prefix: string that will prefix unique names for the ops run. + states_to_gather: dict (possibly nested) of decoding states. + use_tpu: A bool, whether to compute topk scores and sequences on TPU. + use_top_k_with_unique: bool, whether to use a fast (but decreased precision) + top_k during TPU beam search. + Returns: Tuple of (topk_seq [batch_size, beam_size, decode_length], topk_gathered_scores [batch_size, beam_size], topk_finished_flags[batch_size, beam_size]) """ - _, topk_indexes = tf.nn.top_k(scores, k=beam_size) - # The next three steps are to create coordinates for tf.gather_nd to pull - # out the topk sequences from sequences based on scores. - # batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. It says which - # batch the beam item is in. This will create the i of the i,j coordinate - # needed for the gather - batch_pos = compute_batch_indices(batch_size, beam_size) - - # top coordinates will give us the actual coordinates to do the gather. - # stacking will create a tensor of dimension batch * beam * 2, where the - # last dimension contains the i,j gathering coordinates. - top_coordinates = tf.stack([batch_pos, topk_indexes], axis=2) - - # Gather up the highest scoring sequences - topk_seq = tf.gather_nd(sequences, top_coordinates) - topk_flags = tf.gather_nd(flags, top_coordinates) - topk_gathered_scores = tf.gather_nd(scores_to_gather, top_coordinates) - return topk_seq, topk_gathered_scores, topk_flags + if not use_tpu: + _, topk_indexes = tf.nn.top_k(scores, k=beam_size) + # The next three steps are to create coordinates for tf.gather_nd to pull + # out the topk sequences from sequences based on scores. + # batch pos is a tensor like [[0,0,0,0,],[1,1,1,1],..]. It says which + # batch the beam item is in. 
This will create the i of the i,j coordinate + # needed for the gather + batch_pos = compute_batch_indices(batch_size, beam_size) + + # top coordinates will give us the actual coordinates to do the gather. + # stacking will create a tensor of dimension batch * beam * 2, where the + # last dimension contains the i,j gathering coordinates. + top_coordinates = tf.stack([batch_pos, topk_indexes], axis=2) + + # Gather up the highest scoring sequences. For each operation added, give + # it a concrete name to simplify observing these operations with tfdbg. + # Clients can capture these tensors by watching these node names. + def gather(tensor, name): + return tf.gather_nd(tensor, top_coordinates, name=(prefix + name)) + topk_seq = gather(sequences, "_topk_seq") + topk_flags = gather(flags, "_topk_flags") + topk_gathered_scores = gather(scores_to_gather, "_topk_scores") + if states_to_gather: + topk_gathered_states = nest.map_structure( + lambda state: gather(state, "_topk_states"), states_to_gather) + else: + topk_gathered_states = states_to_gather + else: + if use_top_k_with_unique: + _, topk_indexes = top_k_with_unique(scores, k=beam_size) + else: + _, topk_indexes = tf.nn.top_k(scores, k=beam_size) + # Gather up the highest scoring sequences. For each operation added, give + # it a concrete name to simplify observing these operations with tfdbg. + # Clients can capture these tensors by watching these node names. + topk_seq = fast_tpu_gather(sequences, topk_indexes, prefix + "_topk_seq") + topk_flags = fast_tpu_gather(flags, topk_indexes, prefix + "_topk_flags") + topk_gathered_scores = fast_tpu_gather(scores_to_gather, topk_indexes, + prefix + "_topk_scores") + if states_to_gather: + topk_gathered_states = nest.map_structure( + # pylint: disable=g-long-lambda + lambda state: fast_tpu_gather(state, topk_indexes, + prefix + "_topk_states"), + states_to_gather) + else: + topk_gathered_states = states_to_gather + return topk_seq, topk_gathered_scores, topk_flags, topk_gathered_states def beam_search(symbols_to_logits_fn, @@ -103,14 +399,37 @@ def beam_search(symbols_to_logits_fn, decode_length, vocab_size, alpha, - eos_id=EOS_ID): + states=None, + eos_id=EOS_ID, + stop_early=True, + use_tpu=False, + use_top_k_with_unique=True): """Beam search with length penalties. - Uses an interface specific to the sequence cnn models; - Requires a function that can take the currently decoded sybmols and return + Requires a function that can take the currently decoded symbols and return the logits for the next symbol. The implementation is inspired by https://arxiv.org/abs/1609.08144. + When running, the beam search steps can be visualized by using tfdbg to watch + the operations generating the output ids for each beam step. These operations + have the pattern: + (alive|finished)_topk_(seq,scores) + + Operations marked `alive` represent the new beam sequences that will be + processed in the next step. Operations marked `finished` represent the + completed beam sequences, which may be padded with 0s if no beams finished. + + Operations marked `seq` store the full beam sequence for the time step. + Operations marked `scores` store the sequence's final log scores. + + The beam search steps will be processed sequentially in order, so when + capturing observed from these operations, tensors, clients can make + assumptions about which step is being recorded. 
+ + WARNING: Assumes 2nd dimension of tensors in `states` and not invariant, this + means that the shape of the 2nd dimension of these tensors will not be + available (i.e. set to None) inside symbols_to_logits_fn. + Args: symbols_to_logits_fn: Interface to the model, to provide logits. Shoud take [batch_size, decoded_ids] and return [batch_size, vocab_size] @@ -122,27 +441,40 @@ def beam_search(symbols_to_logits_fn, vocab_size: Size of the vocab, must equal the size of the logits returned by symbols_to_logits_fn alpha: alpha for length penalty. + states: dict (possibly nested) of decoding states. eos_id: ID for end of sentence. + stop_early: a boolean - stop once best sequence is provably determined. + use_tpu: A bool, whether to do beam search on TPU. + use_top_k_with_unique: bool, whether to use a fast (but decreased precision) + top_k during TPU beam search. + Returns: Tuple of (decoded beams [batch_size, beam_size, decode_length] - decoding probablities [batch_size, beam_size]) + decoding probabilities [batch_size, beam_size]) """ - batch_size = tf.shape(initial_ids)[0] + batch_size = common_layers.shape_list(initial_ids)[0] # Assume initial_ids are prob 1.0 - initial_log_probs = tf.constant([[0.] + [-float("inf")] * (beam_size - 1)]) + initial_log_probs = tf.constant([[0.] + [-INF] * (beam_size - 1)]) # Expand to beam_size (batch_size, beam_size) alive_log_probs = tf.tile(initial_log_probs, [batch_size, 1]) - # Expand each batch to beam_size - alive_seq = tf.tile(tf.expand_dims(initial_ids, 1), [1, beam_size]) - alive_seq = tf.expand_dims(alive_seq, 2) # (batch_size, beam_size, 1) + # Expand each batch and state to beam_size + alive_seq = _expand_to_beam_size(initial_ids, beam_size) + alive_seq = tf.expand_dims(alive_seq, axis=2) # (batch_size, beam_size, 1) + if use_tpu: + alive_seq = tf.tile(alive_seq, [1, 1, decode_length + 1]) + if states: + states = nest.map_structure( + lambda state: _expand_to_beam_size(state, beam_size), states) + else: + states = {} # Finished will keep track of all the sequences that have finished so far # Finished log probs will be negative infinity in the beginning # finished_flags will keep track of booleans - finished_seq = tf.zeros(tf.shape(alive_seq), tf.int32) + finished_seq = tf.zeros(common_layers.shape_list(alive_seq), tf.int32) # Setting the scores of the initial to negative infinity. 
finished_scores = tf.ones([batch_size, beam_size]) * -INF finished_flags = tf.zeros([batch_size, beam_size], tf.bool) @@ -169,11 +501,12 @@ def grow_finished(finished_seq, finished_scores, finished_flags, curr_seq, log probs of these sequences, Finished flags of these sequences) """ - # First append a column of 0'ids to finished to make the same length with - # finished scores - finished_seq = tf.concat( - [finished_seq, - tf.zeros([batch_size, beam_size, 1], tf.int32)], axis=2) + if not use_tpu: + # First append a column of 0'ids to finished to make the same length with + # finished scores + finished_seq = tf.concat( + [finished_seq, + tf.zeros([batch_size, beam_size, 1], tf.int32)], axis=2) # Set the scores of the unfinished seq in curr_seq to large negative # values @@ -183,10 +516,17 @@ def grow_finished(finished_seq, finished_scores, finished_flags, curr_seq, curr_finished_scores = tf.concat([finished_scores, curr_scores], axis=1) curr_finished_flags = tf.concat([finished_flags, curr_finished], axis=1) return compute_topk_scores_and_seq( - curr_finished_seq, curr_finished_scores, curr_finished_scores, - curr_finished_flags, beam_size, batch_size) - - def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished): + curr_finished_seq, + curr_finished_scores, + curr_finished_scores, + curr_finished_flags, + beam_size, + batch_size, + "grow_finished", + use_tpu=use_tpu, + use_top_k_with_unique=use_top_k_with_unique) + + def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished, states): """Given sequences and scores, will gather the top k=beam size sequences. Args: @@ -197,6 +537,7 @@ def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished): [batch_size, beam_size] curr_finished: Finished flags for each of these sequences. [batch_size, beam_size] + states: dict (possibly nested) of decoding states. Returns: Tuple of (Topk sequences based on scores, @@ -207,10 +548,11 @@ def grow_alive(curr_seq, curr_scores, curr_log_probs, curr_finished): # values curr_scores += tf.to_float(curr_finished) * -INF return compute_topk_scores_and_seq(curr_seq, curr_scores, curr_log_probs, - curr_finished, beam_size, batch_size) + curr_finished, beam_size, batch_size, + "grow_alive", states, use_tpu=use_tpu) - def grow_topk(i, alive_seq, alive_log_probs): - r"""Inner beam seach loop. + def grow_topk(i, alive_seq, alive_log_probs, states): + r"""Inner beam search loop. This function takes the current alive sequences, and grows them to topk sequences where k = 2*beam. We use 2*beam because, we could have beam_size @@ -226,67 +568,104 @@ def grow_topk(i, alive_seq, alive_log_probs): i: loop index alive_seq: Topk sequences decoded so far [batch_size, beam_size, i+1] alive_log_probs: probabilities of these sequences. [batch_size, beam_size] + states: dict (possibly nested) of decoding states. 
Returns: Tuple of (Topk sequences extended by the next word, The log probs of these sequences, The scores with length penalty of these sequences, - Flags indicating which of these sequences have finished decoding) + Flags indicating which of these sequences have finished decoding, + dict of transformed decoding states) """ # Get the logits for all the possible next symbols - flat_ids = tf.reshape(alive_seq, [batch_size * beam_size, -1]) + if use_tpu and states: + flat_ids = tf.reshape( + tf.slice(alive_seq, [0, 0, i], [batch_size, beam_size, 1]), + [batch_size * beam_size, -1]) + else: + flat_ids = tf.reshape(alive_seq, [batch_size * beam_size, -1]) # (batch_size * beam_size, decoded_length) - flat_logits = symbols_to_logits_fn(flat_ids) - logits = tf.reshape(flat_logits, (batch_size, beam_size, -1)) + if states: + flat_states = nest.map_structure(_merge_beam_dim, states) + flat_logits, flat_states = symbols_to_logits_fn(flat_ids, i, flat_states) + states = nest.map_structure( + lambda t: _unmerge_beam_dim(t, batch_size, beam_size), flat_states) + elif use_tpu: + flat_logits = symbols_to_logits_fn(flat_ids, i) + else: + flat_logits = symbols_to_logits_fn(flat_ids) + + logits = tf.reshape(flat_logits, [batch_size, beam_size, -1]) # Convert logits to normalized log probs - candidate_log_probs = log_prob_from_logits(logits) + candidate_log_probs = common_layers.log_prob_from_logits(logits) - # Multiply the probabilites by the current probabilites of the beam. + # Multiply the probabilities by the current probabilities of the beam. # (batch_size, beam_size, vocab_size) + (batch_size, beam_size, 1) log_probs = candidate_log_probs + tf.expand_dims(alive_log_probs, axis=2) length_penalty = tf.pow(((5. + tf.to_float(i + 1)) / 6.), alpha) curr_scores = log_probs / length_penalty - # Flatten out (beam_size, vocab_size) probs in to a list of possibilites + # Flatten out (beam_size, vocab_size) probs in to a list of possibilities flat_curr_scores = tf.reshape(curr_scores, [-1, beam_size * vocab_size]) - topk_scores, topk_ids = tf.nn.top_k(flat_curr_scores, k=beam_size * 2) + if use_tpu and use_top_k_with_unique: + topk_scores, topk_ids = top_k_with_unique( + flat_curr_scores, k=beam_size * 2) + else: + topk_scores, topk_ids = tf.nn.top_k(flat_curr_scores, k=beam_size * 2) - # Recovering the log probs becuase we will need to send them back + # Recovering the log probs because we will need to send them back topk_log_probs = topk_scores * length_penalty # Work out what beam the top probs are in. topk_beam_index = topk_ids // vocab_size topk_ids %= vocab_size # Unflatten the ids - # The next three steps are to create coordinates for tf.gather_nd to pull - # out the correct seqences from id's that we need to grow. - # We will also use the coordinates to gather the booleans of the beam items - # that survived. - batch_pos = compute_batch_indices(batch_size, beam_size * 2) - - # top beams will give us the actual coordinates to do the gather. - # stacking will create a tensor of dimension batch * beam * 2, where the - # last dimension contains the i,j gathering coordinates. 
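The score used above divides the summed log probability by the length penalty `((5 + length) / 6) ** alpha` from GNMT. A quick worked example in plain Python (the values are arbitrary) shows how a larger `alpha` favours longer sequences for the same total log probability:

```python
def length_penalty(length, alpha):
  return ((5.0 + length) / 6.0) ** alpha

log_prob = -6.0  # summed log probability of a candidate sequence
for alpha in (0.0, 0.6, 1.0):
  scores = [round(log_prob / length_penalty(n, alpha), 3) for n in (5, 10, 20)]
  print(alpha, scores)
# With alpha=0.0 every length scores -6.0 (no normalization); with alpha=0.6
# the score rises from about -4.42 at length 5 to about -2.55 at length 20.
```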
- topk_coordinates = tf.stack([batch_pos, topk_beam_index], axis=2) - - # Gather up the most probable 2*beams both for the ids and finished_in_alive - # bools - topk_seq = tf.gather_nd(alive_seq, topk_coordinates) - - # Append the most probable alive - topk_seq = tf.concat([topk_seq, tf.expand_dims(topk_ids, axis=2)], axis=2) + if not use_tpu: + # The next three steps are to create coordinates for tf.gather_nd to pull + # out the correct sequences from id's that we need to grow. + # We will also use the coordinates to gather the booleans of the beam + # items that survived. + batch_pos = compute_batch_indices(batch_size, beam_size * 2) + + # top beams will give us the actual coordinates to do the gather. + # stacking will create a tensor of dimension batch * beam * 2, where the + # last dimension contains the i,j gathering coordinates. + topk_coordinates = tf.stack([batch_pos, topk_beam_index], axis=2) + + # Gather up the most probable 2*beams both for the ids and + # finished_in_alive bools + topk_seq = tf.gather_nd(alive_seq, topk_coordinates) + if states: + states = nest.map_structure( + lambda state: tf.gather_nd(state, topk_coordinates), states) + + # Append the most probable alive + topk_seq = tf.concat([topk_seq, tf.expand_dims(topk_ids, axis=2)], axis=2) + else: + # Gather up the most probable 2*beams both for the ids and + # finished_in_alive bools + topk_seq = fast_tpu_gather(alive_seq, topk_beam_index) + + if states: + states = nest.map_structure( + lambda state: fast_tpu_gather(state, topk_beam_index), states) + + # Update the most probable alive + topk_seq = tf.transpose(topk_seq, perm=[2, 0, 1]) + topk_seq = inplace_ops.alias_inplace_update(topk_seq, i + 1, topk_ids) + topk_seq = tf.transpose(topk_seq, perm=[1, 2, 0]) topk_finished = tf.equal(topk_ids, eos_id) - return topk_seq, topk_log_probs, topk_scores, topk_finished + return topk_seq, topk_log_probs, topk_scores, topk_finished, states def inner_loop(i, alive_seq, alive_log_probs, finished_seq, finished_scores, - finished_flags): - """Inner beam seach loop. + finished_flags, states): + """Inner beam search loop. There are three groups of tensors, alive, finished, and topk. The alive group contains information about the current alive sequences @@ -317,6 +696,7 @@ def inner_loop(i, alive_seq, alive_log_probs, finished_seq, finished_scores, [batch_size, beam_size] finished_flags: finished bools for each of these sequences. [batch_size, beam_size] + states: dict (possibly nested) of decoding states. Returns: Tuple of @@ -325,30 +705,32 @@ def inner_loop(i, alive_seq, alive_log_probs, finished_seq, finished_scores, Log probs of the alive sequences, New finished sequences, Scores of the new finished sequences, - Flags inidicating which sequence in finished as reached EOS) + Flags indicating which sequence in finished as reached EOS, + dict of final decoding states) """ # Each inner loop, we carry out three steps: # 1. Get the current topk items. # 2. Extract the ones that have finished and haven't finished # 3. Recompute the contents of finished based on scores. 
- topk_seq, topk_log_probs, topk_scores, topk_finished = grow_topk( - i, alive_seq, alive_log_probs) - alive_seq, alive_log_probs, _ = grow_alive(topk_seq, topk_scores, - topk_log_probs, topk_finished) - finished_seq, finished_scores, finished_flags = grow_finished( + topk_seq, topk_log_probs, topk_scores, topk_finished, states = grow_topk( + i, alive_seq, alive_log_probs, states) + alive_seq, alive_log_probs, _, states = grow_alive( + topk_seq, topk_scores, topk_log_probs, topk_finished, states) + finished_seq, finished_scores, finished_flags, _ = grow_finished( finished_seq, finished_scores, finished_flags, topk_seq, topk_scores, topk_finished) return (i + 1, alive_seq, alive_log_probs, finished_seq, finished_scores, - finished_flags) + finished_flags, states) - def _is_finished(i, unused_alive_seq, alive_log_probs, unused_finished_seq, - finished_scores, finished_in_finished): + def _is_not_finished(i, unused_alive_seq, alive_log_probs, + unused_finished_seq, finished_scores, + unused_finished_in_finished, unused_states): """Checking termination condition. We terminate when we decoded up to decode_length or the lowest scoring item - in finished has a greater score that the higest prob item in alive divided + in finished has a greater score that the highest prob item in alive divided by the max length penalty Args: @@ -356,50 +738,63 @@ def _is_finished(i, unused_alive_seq, alive_log_probs, unused_finished_seq, alive_log_probs: probabilities of the beams. [batch_size, beam_size] finished_scores: scores for each of these sequences. [batch_size, beam_size] - finished_in_finished: finished bools for each of these sequences. - [batch_size, beam_size] Returns: Bool. """ max_length_penalty = tf.pow(((5. + tf.to_float(decode_length)) / 6.), alpha) - # The best possible score of the most likley alive sequence + # The best possible score of the most likely alive sequence. lower_bound_alive_scores = alive_log_probs[:, 0] / max_length_penalty - # Now to compute the lowest score of a finished sequence in finished - # If the sequence isn't finished, we multiply it's score by 0. since - # scores are all -ve, taking the min will give us the score of the lowest - # finished item. - lowest_score_of_fininshed_in_finished = tf.reduce_min( - finished_scores * tf.to_float(finished_in_finished), axis=1) - # If none of the sequences have finished, then the min will be 0 and - # we have to replace it by -ve INF if it is. The score of any seq in alive - # will be much higher than -ve INF and the termination condition will not - # be met. - lowest_score_of_fininshed_in_finished += ( - (1. - tf.to_float(tf.reduce_any(finished_in_finished, 1))) * -INF) + if not stop_early: + # by considering the min score (in the top N beams) we ensure that + # the decoder will keep decoding until there is at least one beam + # (in the top N) that can be improved (w.r.t. the alive beams). + # any unfinished beam will have score -INF - thus the min + # will always be -INF if there is at least one unfinished beam - + # which means the bound_is_met condition cannot be true in this case. + lowest_score_of_finished_in_finished = tf.reduce_min(finished_scores) + else: + # by taking the max score we only care about the first beam; + # as soon as this first beam cannot be beaten from the alive beams + # the beam decoder can stop. + # similarly to the above, if the top beam is not completed, its + # finished_score is -INF, thus it will not activate the + # bound_is_met condition. (i.e., decoder will keep going on). 
+ # note we need to find the max for every sequence eparately - so, we need + # to keep the batch dimension (see axis=1) + lowest_score_of_finished_in_finished = tf.reduce_max(finished_scores, + axis=1) bound_is_met = tf.reduce_all( - tf.greater(lowest_score_of_fininshed_in_finished, + tf.greater(lowest_score_of_finished_in_finished, lower_bound_alive_scores)) return tf.logical_and( tf.less(i, decode_length), tf.logical_not(bound_is_met)) + inner_shape = tf.TensorShape([None, None, None]) + if use_tpu: + inner_shape = tf.TensorShape([batch_size, beam_size, decode_length + 1]) + if use_tpu: + state_struc = nest.map_structure(lambda state: state.get_shape(), states) + else: + state_struc = nest.map_structure(get_state_shape_invariants, states) (_, alive_seq, alive_log_probs, finished_seq, finished_scores, - finished_flags) = tf.while_loop( - _is_finished, + finished_flags, states) = tf.while_loop( + _is_not_finished, inner_loop, [ tf.constant(0), alive_seq, alive_log_probs, finished_seq, - finished_scores, finished_flags + finished_scores, finished_flags, states ], shape_invariants=[ tf.TensorShape([]), - tf.TensorShape([None, None, None]), + inner_shape, alive_log_probs.get_shape(), - tf.TensorShape([None, None, None]), + inner_shape, finished_scores.get_shape(), - finished_flags.get_shape() + finished_flags.get_shape(), + state_struc ], parallel_iterations=1, back_prop=False) @@ -416,4 +811,4 @@ def _is_finished(i, unused_alive_seq, alive_log_probs, unused_finished_seq, tf.reduce_any(finished_flags, 1), finished_seq, alive_seq) finished_scores = tf.where( tf.reduce_any(finished_flags, 1), finished_scores, alive_log_probs) - return finished_seq, finished_scores + return finished_seq, finished_scores, states diff --git a/tensor2tensor/utils/beam_search_test.py b/tensor2tensor/utils/beam_search_test.py index 33439b41f..d83f7c94d 100644 --- a/tensor2tensor/utils/beam_search_test.py +++ b/tensor2tensor/utils/beam_search_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,13 +18,10 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function - -# Dependency imports - import numpy as np from tensor2tensor.utils import beam_search -import tensorflow as tf +import tensorflow.compat.v1 as tf class BeamSearchTest(tf.test.TestCase): @@ -40,13 +38,13 @@ def symbols_to_logits(_): # Just return random logits return tf.random_uniform((batch_size * beam_size, vocab_size)) - final_ids, final_probs = beam_search.beam_search( + final_ids, final_probs, _ = beam_search.beam_search( symbols_to_logits, initial_ids, beam_size, decode_length, vocab_size, 0.) 
self.assertEqual(final_ids.get_shape().as_list(), [None, beam_size, None]) - self.assertEqual(final_probs.get_shape().as_list(), [None, beam_size]) + self.assertEqual(final_probs.get_shape().as_list(), [batch_size, beam_size]) def testComputeTopkScoresAndSeq(self): batch_size = 2 @@ -60,8 +58,9 @@ def testComputeTopkScoresAndSeq(self): flags = tf.constant([[True, False, False, True], [False, False, False, True]]) - topk_seq, topk_scores, topk_flags = beam_search.compute_topk_scores_and_seq( - sequences, scores, scores, flags, beam_size, batch_size) + topk_seq, topk_scores, topk_flags, _ = ( + beam_search.compute_topk_scores_and_seq( + sequences, scores, scores, flags, beam_size, batch_size)) with self.test_session(): topk_seq = topk_seq.eval() @@ -115,7 +114,7 @@ def symbols_to_logits(ids): logits = tf.to_float(tf.log(probabilities[pos - 1, :])) return logits - final_ids, final_probs = beam_search.beam_search( + final_ids, final_probs, _ = beam_search.beam_search( symbols_to_logits, initial_ids, beam_size, @@ -130,7 +129,7 @@ def symbols_to_logits(ids): self.assertAllEqual([[[0, 0, 1]]], ids) self.assertAllClose([[0.7 * 0.6]], np.exp(probs)) - def testNotGreedyBeamTwo(self): + def testNotGreedyBeamTwoWithStopEarly(self): batch_size = 1 beam_size = 2 vocab_size = 3 @@ -146,18 +145,58 @@ def symbols_to_logits(ids): logits = tf.to_float(tf.log(probabilities[pos - 1, :])) return logits - final_ids, final_probs = beam_search.beam_search( + final_ids, final_probs, _ = beam_search.beam_search( symbols_to_logits, initial_ids, beam_size, decode_length, vocab_size, 0.0, - eos_id=1) + eos_id=1, + stop_early=True) # default value, but just to make this explicit + + with self.test_session(): + ids = final_ids.eval() + probs = final_probs.eval() + # given stop_early = True, the only 'assurance' is w.r.t. 
the first beam + # (i.e., other beams may not even be completed) + # so, we check only the first beam + first_beam = ids[:, 0] + first_probs = probs[:, 0] + self.assertAllEqual([[0, 2, 1]], first_beam) + self.assertAllClose([0.8 * 0.5], np.exp(first_probs)) + + def testNotGreedyBeamTwoWithoutStopEarly(self): + batch_size = 1 + beam_size = 2 + vocab_size = 3 + decode_length = 3 + + initial_ids = tf.constant([0] * batch_size) # GO + probabilities = tf.constant([[[0.1, 0.1, 0.8], [0.1, 0.1, 0.8]], + [[0.4, 0.5, 0.1], [0.2, 0.4, 0.4]], + [[0.05, 0.9, 0.05], [0.4, 0.4, 0.2]]]) + + def symbols_to_logits(ids): + pos = tf.shape(ids)[1] + logits = tf.to_float(tf.log(probabilities[pos - 1, :])) + return logits + + final_ids, final_probs, _ = beam_search.beam_search( + symbols_to_logits, + initial_ids, + beam_size, + decode_length, + vocab_size, + 0.0, + eos_id=1, + stop_early=False) with self.test_session(): ids = final_ids.eval() probs = final_probs.eval() + # given stop_early = False, the algorithm will return all the beams + # so we can test all of them here self.assertAllEqual([[[0, 2, 1, 0], [0, 2, 0, 1]]], ids) self.assertAllClose([[0.8 * 0.5, 0.8 * 0.4 * 0.9]], np.exp(probs)) @@ -175,7 +214,7 @@ def symbols_to_logits(ids): logits = tf.to_float(tf.log(probabilities[pos - 1, :])) return logits - final_ids, final_probs = beam_search.beam_search( + final_ids, final_probs, _ = beam_search.beam_search( symbols_to_logits, initial_ids, beam_size, @@ -215,7 +254,7 @@ def symbols_to_logits(ids): logits = tf.to_float(tf.log(probabilities[pos - 1, :])) return logits - final_ids, final_scores = beam_search.beam_search( + final_ids, final_scores, _ = beam_search.beam_search( symbols_to_logits, initial_ids, beam_size, @@ -258,7 +297,7 @@ def symbols_to_logits(ids): return logits # Disable early stopping - final_ids, final_scores = beam_search.beam_search( + final_ids, final_scores, _ = beam_search.beam_search( symbols_to_logits, initial_ids, beam_size, @@ -276,6 +315,185 @@ def symbols_to_logits(ids): ]], scores) self.assertAllEqual([[[0, 2, 0, 1], [0, 2, 1, 0]]], ids) + def testStates(self): + batch_size = 1 + beam_size = 1 + vocab_size = 2 + decode_length = 3 + + initial_ids = tf.constant([0] * batch_size) # GO + probabilities = tf.constant([[[0.7, 0.3]], [[0.4, 0.6]], [[0.5, 0.5]]]) + + expected_states = tf.constant([[[0.]], [[1.]]]) + + def symbols_to_logits(ids, _, states): + pos = tf.shape(ids)[1] - 1 + # We have to assert the values of state inline here since we can't fetch + # them out of the loop! 
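A by-hand check of the scores asserted in the two tests above, using their probability table (illustrative arithmetic only): the top beam is `0 2 1` and the runner-up is `0 2 0 1`.

```python
import numpy as np

p_after_go = [0.1, 0.1, 0.8]     # step 1: token 2 is most likely
p_after_2 = [0.4, 0.5, 0.1]      # step 2: EOS (id 1) beats token 0
p_after_2_0 = [0.05, 0.9, 0.05]  # step 3: EOS (id 1)

beam_1 = p_after_go[2] * p_after_2[1]                    # "0 2 1"   -> 0.8 * 0.5
beam_2 = p_after_go[2] * p_after_2[0] * p_after_2_0[1]   # "0 2 0 1" -> 0.8 * 0.4 * 0.9
print(np.log([beam_1, beam_2]))  # what final_probs holds before exponentiating
```

With `stop_early=True` only the first of these is guaranteed to be fully decoded, which is why that test checks just the top beam.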
+ with tf.control_dependencies( + [tf.assert_equal(states["state"], expected_states[pos])]): + logits = tf.to_float(tf.log(probabilities[pos, :])) + + states["state"] += 1 + return logits, states + + states = { + "state": tf.zeros((batch_size, 1)), + } + states["state"] = tf.placeholder_with_default( + states["state"], shape=(None, 1)) + + final_ids, _, _ = beam_search.beam_search( + symbols_to_logits, + initial_ids, + beam_size, + decode_length, + vocab_size, + 0.0, + eos_id=1, + states=states) + + with self.test_session() as sess: + # Catch and fail so that the testing framework doesn't think it's an error + try: + sess.run(final_ids) + except tf.errors.InvalidArgumentError as e: + raise AssertionError(e.message) + + def testStatesAfterLoop(self): + batch_size = 1 + beam_size = 1 + vocab_size = 2 + decode_length = 3 + + initial_ids = tf.constant([0] * batch_size) # GO + probabilities = tf.constant([[[0.7, 0.3]], [[0.4, 0.6]], [[0.5, 0.5]]]) + + def symbols_to_logits(ids, _, states): + pos = tf.shape(ids)[1] - 1 + logits = tf.to_float(tf.log(probabilities[pos, :])) + states["state"] += 1 + return logits, states + + states = { + "state": tf.zeros((batch_size, 1)), + } + states["state"] = tf.placeholder_with_default( + states["state"], shape=(None, 1)) + + _, _, final_states = beam_search.beam_search( + symbols_to_logits, + initial_ids, + beam_size, + decode_length, + vocab_size, + 0.0, + eos_id=1, + states=states) + + with self.test_session() as sess: + final_states = sess.run(final_states) + self.assertAllEqual([[[2]]], final_states["state"]) + + def testStateBeamTwo(self): + batch_size = 1 + beam_size = 2 + vocab_size = 3 + decode_length = 3 + + initial_ids = tf.constant([0] * batch_size) # GO + probabilities = tf.constant([[[0.1, 0.1, 0.8], [0.1, 0.1, 0.8]], + [[0.4, 0.5, 0.1], [0.2, 0.4, 0.4]], + [[0.05, 0.9, 0.05], [0.4, 0.4, 0.2]]]) + + # The top beam is always selected so we should see the top beam's state + # at each position, which is the one thats getting 3 added to it each step. + expected_states = tf.constant([[[0.], [0.]], [[3.], [3.]], [[6.], [6.]]]) + + def symbols_to_logits(ids, _, states): + pos = tf.shape(ids)[1] - 1 + + # We have to assert the values of state inline here since we can't fetch + # them out of the loop! + with tf.control_dependencies( + [tf.assert_equal(states["state"], expected_states[pos])]): + logits = tf.to_float(tf.log(probabilities[pos, :])) + + states["state"] += tf.constant([[3.], [7.]]) + return logits, states + + states = { + "state": tf.zeros((batch_size, 1)), + } + states["state"] = tf.placeholder_with_default( + states["state"], shape=(None, 1)) + + final_ids, _, _ = beam_search.beam_search( + symbols_to_logits, + initial_ids, + beam_size, + decode_length, + vocab_size, + 0.0, + eos_id=1, + states=states) + + with self.test_session() as sess: + # Catch and fail so that the testing framework doesn't think it's an error + try: + sess.run(final_ids) + except tf.errors.InvalidArgumentError as e: + raise AssertionError(e.message) + + def testTPUBeam(self): + batch_size = 1 + beam_size = 2 + vocab_size = 3 + decode_length = 3 + + initial_ids = tf.constant([0] * batch_size) # GO + probabilities = tf.constant([[[0.1, 0.1, 0.8], [0.1, 0.1, 0.8]], + [[0.4, 0.5, 0.1], [0.2, 0.4, 0.4]], + [[0.05, 0.9, 0.05], [0.4, 0.4, 0.2]]]) + + # The top beam is always selected so we should see the top beam's state + # at each position, which is the one thats getting 3 added to it each step. 
+ expected_states = tf.constant([[[0.], [0.]], [[3.], [3.]], [[6.], [6.]]]) + + def symbols_to_logits(_, i, states): + # We have to assert the values of state inline here since we can't fetch + # them out of the loop! + with tf.control_dependencies( + [tf.assert_equal(states["state"], expected_states[i])]): + logits = tf.to_float(tf.log(probabilities[i, :])) + + states["state"] += tf.constant([[3.], [7.]]) + return logits, states + + states = { + "state": tf.zeros((batch_size, 1)), + } + states["state"] = tf.placeholder_with_default( + states["state"], shape=(None, 1)) + + final_ids, _, _ = beam_search.beam_search( + symbols_to_logits, + initial_ids, + beam_size, + decode_length, + vocab_size, + 3.5, + eos_id=1, + states=states, + use_tpu=True) + + with self.test_session() as sess: + # Catch and fail so that the testing framework doesn't think it's an error + try: + sess.run(final_ids) + except tf.errors.InvalidArgumentError as e: + raise AssertionError(e.message) + self.assertAllEqual([[[0, 2, 0, 1], [0, 2, 1, 0]]], final_ids) if __name__ == "__main__": tf.test.main() diff --git a/tensor2tensor/utils/bleu_hook.py b/tensor2tensor/utils/bleu_hook.py index eb8749b3f..9baadc4b9 100644 --- a/tensor2tensor/utils/bleu_hook.py +++ b/tensor2tensor/utils/bleu_hook.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,20 +20,25 @@ import collections import math - -# Dependency imports - +import os +import re +import sys +import time +import unicodedata import numpy as np +import six # pylint: disable=redefined-builtin -from six.moves import xrange +from six.moves import range from six.moves import zip # pylint: enable=redefined-builtin -import tensorflow as tf +from tensor2tensor.data_generators import text_encoder + +import tensorflow.compat.v1 as tf def _get_ngrams(segment, max_order): - """Extracts all n-grams upto a given maximum order from an input segment. + """Extracts all n-grams up to a given maximum order from an input segment. Args: segment: text segment from which n-grams will be extracted. @@ -40,12 +46,12 @@ def _get_ngrams(segment, max_order): methods. Returns: - The Counter containing all n-grams upto max_order in segment + The Counter containing all n-grams up to max_order in segment with a count of how many times each n-gram occurred. 
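The states tests above pin down the new `states` plumbing: when a `states` dict is passed, `symbols_to_logits_fn` is called as `fn(ids, step, states)` and must return `(logits, updated_states)` with the same structure, so the search can gather and reorder state along with the beams. A minimal caller sketch following the call pattern in those tests (dummy logits and a hypothetical `"step"` entry):

```python
import tensorflow.compat.v1 as tf
from tensor2tensor.utils import beam_search

def symbols_to_logits(ids, i, states):
  del i  # unused in this toy example
  # ids is [batch * beam, decoded_length]; return [batch * beam, vocab] logits.
  logits = tf.zeros([tf.shape(ids)[0], 5])
  states["step"] += 1.0  # any per-beam tensor can be threaded through
  return logits, states

initial_ids = tf.constant([0])            # batch_size = 1, GO symbol
states = {"step": tf.zeros((1, 1))}
# As in the tests, relax the leading dimension so it can grow to batch * beam.
states["step"] = tf.placeholder_with_default(states["step"], shape=(None, 1))

final_ids, final_scores, final_states = beam_search.beam_search(
    symbols_to_logits, initial_ids, 2, 4, 5, 0.0, eos_id=1, states=states)
```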
""" ngram_counts = collections.Counter() - for order in xrange(1, max_order + 1): - for i in xrange(0, len(segment) - order + 1): + for order in range(1, max_order + 1): + for i in range(0, len(segment) - order + 1): ngram = tuple(segment[i:i + order]) ngram_counts[ngram] += 1 return ngram_counts @@ -91,11 +97,16 @@ def compute_bleu(reference_corpus, matches_by_order[len(ngram) - 1] += overlap[ngram] for ngram in translation_ngram_counts: possible_matches_by_order[len(ngram)-1] += translation_ngram_counts[ngram] - precisions = [0] * max_order - for i in xrange(0, max_order): + smooth = 1.0 + for i in range(0, max_order): if possible_matches_by_order[i] > 0: precisions[i] = matches_by_order[i] / possible_matches_by_order[i] + if matches_by_order[i] > 0: + precisions[i] = matches_by_order[i] / possible_matches_by_order[i] + else: + smooth *= 2 + precisions[i] = 1.0 / (smooth * possible_matches_by_order[i]) else: precisions[i] = 0.0 @@ -104,20 +115,203 @@ def compute_bleu(reference_corpus, geo_mean = math.exp(p_log_sum/max_order) if use_bp: - ratio = translation_length / reference_length - bp = math.exp(1 - 1. / ratio) if ratio < 1.0 else 1.0 - + if not reference_length: + bp = 1.0 + else: + ratio = translation_length / reference_length + if ratio <= 0.0: + bp = 0.0 + elif ratio >= 1.0: + bp = 1.0 + else: + bp = math.exp(1 - 1. / ratio) bleu = geo_mean * bp return np.float32(bleu) -def padded_bleu_score(predictions, - labels, **unused_kwargs): - """Bleu score computation between labels and predictions on non-0s.""" +def bleu_score(predictions, labels, **unused_kwargs): + """BLEU score computation between labels and predictions. + + An approximate BLEU scoring method since we do not glue word pieces or + decode the ids and tokenize the output. By default, we use ngram order of 4 + and use brevity penalty. Also, this does not have beam search. + + Args: + predictions: tensor, model predictions + labels: tensor, gold output. + + Returns: + bleu: int, approx bleu score + """ outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) # Convert the outputs and labels to a [batch_size, input_length] tensor. - outputs = tf.squeeze(outputs) - labels = tf.squeeze(labels) + outputs = tf.squeeze(outputs, axis=[-1, -2]) + labels = tf.squeeze(labels, axis=[-1, -2]) bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32) return bleu, tf.constant(1.0) + + +class UnicodeRegex(object): + """Ad-hoc hack to recognize all punctuation and symbols.""" + + def __init__(self): + punctuation = self.property_chars("P") + self.nondigit_punct_re = re.compile(r"([^\d])([" + punctuation + r"])") + self.punct_nondigit_re = re.compile(r"([" + punctuation + r"])([^\d])") + self.symbol_re = re.compile("([" + self.property_chars("S") + "])") + + def property_chars(self, prefix): + return "".join(six.unichr(x) for x in range(sys.maxunicode) + if unicodedata.category(six.unichr(x)).startswith(prefix)) + + +uregex = UnicodeRegex() + + +def bleu_tokenize(string): + r"""Tokenize a string following the official BLEU implementation. + + See https://github.com/moses-smt/mosesdecoder/" + "blob/master/scripts/generic/mteval-v14.pl#L954-L983 + In our case, the input string is expected to be just one line + and no HTML entities de-escaping is needed. + So we just tokenize on punctuation and symbols, + except when a punctuation is preceded and followed by a digit + (e.g. a comma/dot as a thousand/decimal separator). + + Note that a number (e.g. a year) followed by a dot at the end of sentence + is NOT tokenized, + i.e. 
the dot stays with the number because `s/(\p{P})(\P{N})/ $1 $2/g` + does not match this case (unless we add a space after each sentence). + However, this error is already in the original mteval-v14.pl + and we want to be consistent with it. + + Args: + string: the input string + + Returns: + a list of tokens + """ + string = uregex.nondigit_punct_re.sub(r"\1 \2 ", string) + string = uregex.punct_nondigit_re.sub(r" \1 \2", string) + string = uregex.symbol_re.sub(r" \1 ", string) + return string.split() + + +def bleu_wrapper(ref_filename, hyp_filename, case_sensitive=False): + """Compute BLEU for two files (reference and hypothesis translation).""" + ref_lines = text_encoder.native_to_unicode( + tf.gfile.Open(ref_filename, "r").read()).split("\n") + hyp_lines = text_encoder.native_to_unicode( + tf.gfile.Open(hyp_filename, "r").read()).split("\n") + assert len(ref_lines) == len(hyp_lines), ("{} != {}".format( + len(ref_lines), len(hyp_lines))) + if not case_sensitive: + ref_lines = [x.lower() for x in ref_lines] + hyp_lines = [x.lower() for x in hyp_lines] + ref_tokens = [bleu_tokenize(x) for x in ref_lines] + hyp_tokens = [bleu_tokenize(x) for x in hyp_lines] + return compute_bleu(ref_tokens, hyp_tokens) + + +StepFile = collections.namedtuple("StepFile", "filename mtime ctime steps") + + +def _try_twice_tf_glob(pattern): + """Glob twice, first time possibly catching `NotFoundError`. + + tf.gfile.Glob may crash with + + ``` + tensorflow.python.framework.errors_impl.NotFoundError: + xy/model.ckpt-1130761_temp_9cb4cb0b0f5f4382b5ea947aadfb7a40; + No such file or directory + ``` + + Standard glob.glob does not have this bug, but does not handle multiple + filesystems (e.g. `gs://`), so we call tf.gfile.Glob, the first time possibly + catching the `NotFoundError`. + + Args: + pattern: str, glob pattern. + + Returns: + list matching filepaths. + """ + try: + return tf.gfile.Glob(pattern) + except tf.errors.NotFoundError: + return tf.gfile.Glob(pattern) + + +def _read_stepfiles_list(path_prefix, path_suffix=".index", min_steps=0): + """Return list of StepFiles sorted by step from files at path_prefix.""" + stepfiles = [] + for filename in _try_twice_tf_glob(path_prefix + "*-[0-9]*" + path_suffix): + basename = filename[:-len(path_suffix)] if path_suffix else filename + try: + steps = int(basename.rsplit("-")[-1]) + except ValueError: # The -[0-9]* part is not an integer. + continue + if steps < min_steps: + continue + if not os.path.exists(filename): + tf.logging.info(filename + " was deleted, so skipping it") + continue + stepfiles.append(StepFile(basename, os.path.getmtime(filename), + os.path.getctime(filename), steps)) + return sorted(stepfiles, key=lambda x: -x.steps) + + +def stepfiles_iterator(path_prefix, wait_minutes=0, min_steps=0, + path_suffix=".index", sleep_sec=10): + """Continuously yield new files with steps in filename as they appear. + + This is useful for checkpoint files or other files whose names differ just in + an integer marking the number of steps and match the wildcard path_prefix + + "*-[0-9]*" + path_suffix. + + Unlike `tf.contrib.training.checkpoints_iterator`, this implementation always + starts from the oldest files (and it cannot miss any file). Note that the + oldest checkpoint may be deleted anytime by Tensorflow (if set up so). It is + up to the user to check that the files returned by this generator actually + exist. + + Args: + path_prefix: The directory + possible common filename prefix to the files. 
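Concretely, the tokenizer above splits punctuation and symbols into their own tokens except when a punctuation mark sits between two digits, so decimal and thousands separators stay attached:

```python
from tensor2tensor.utils import bleu_hook

print(bleu_hook.bleu_tokenize(u"Hello, world!"))
# ['Hello', ',', 'world', '!']
print(bleu_hook.bleu_tokenize(u"It cost 1,234.5 dollars."))
# ['It', 'cost', '1,234.5', 'dollars', '.']
```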
+ wait_minutes: The maximum amount of minutes to wait between files. + min_steps: Skip files with lower global step. + path_suffix: Common filename suffix (after steps), including possible + extension dot. + sleep_sec: How often to check for new files. + + Yields: + named tuples (filename, mtime, ctime, steps) of the files as they arrive. + """ + # Wildcard D*-[0-9]* does not match D/x-1, so if D is a directory let + # path_prefix="D/". + if not path_prefix.endswith(os.sep) and os.path.isdir(path_prefix): + path_prefix += os.sep + stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps) + tf.logging.info("Found %d files with steps: %s", + len(stepfiles), + ", ".join(str(x.steps) for x in reversed(stepfiles))) + exit_time = time.time() + wait_minutes * 60 + while True: + if not stepfiles and wait_minutes: + tf.logging.info( + "Waiting till %s if a new file matching %s*-[0-9]*%s appears", + time.asctime(time.localtime(exit_time)), path_prefix, path_suffix) + while True: + stepfiles = _read_stepfiles_list(path_prefix, path_suffix, min_steps) + if stepfiles or time.time() > exit_time: + break + time.sleep(sleep_sec) + if not stepfiles: + return + + stepfile = stepfiles.pop() + exit_time, min_steps = (stepfile.ctime + wait_minutes * 60, + stepfile.steps + 1) + yield stepfile diff --git a/tensor2tensor/utils/bleu_hook_test.py b/tensor2tensor/utils/bleu_hook_test.py index 1838affd6..47b607a3c 100644 --- a/tensor2tensor/utils/bleu_hook_test.py +++ b/tensor2tensor/utils/bleu_hook_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,17 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. +# coding=utf-8 """Tests for tensor2tensor.utils.bleu_hook.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function -# Dependency imports +import os +import tempfile +import six +from tensor2tensor.data_generators import text_encoder from tensor2tensor.utils import bleu_hook -import tensorflow as tf +import tensorflow.compat.v1 as tf class BleuHookTest(tf.test.TestCase): @@ -38,8 +43,9 @@ def testComputeNotEqual(self): translation_corpus = [[1, 2, 3, 4]] reference_corpus = [[5, 6, 7, 8]] bleu = bleu_hook.compute_bleu(reference_corpus, translation_corpus) - actual_bleu = 0.0 - self.assertEqual(bleu, actual_bleu) + # The smoothing prevents 0 for small corpora + actual_bleu = 0.0798679 + self.assertAllClose(bleu, actual_bleu, atol=1e-03) def testComputeMultipleBatch(self): translation_corpus = [[1, 2, 3, 4], [5, 6, 7, 0]] @@ -52,8 +58,50 @@ def testComputeMultipleNgrams(self): reference_corpus = [[1, 2, 1, 13], [12, 6, 7, 4, 8, 9, 10]] translation_corpus = [[1, 2, 1, 3], [5, 6, 7, 4]] bleu = bleu_hook.compute_bleu(reference_corpus, translation_corpus) - actual_bleu = 0.486 + actual_bleu = 0.3436 self.assertAllClose(bleu, actual_bleu, atol=1e-03) -if __name__ == '__main__': + def testBleuTokenize(self): + self.assertEqual(bleu_hook.bleu_tokenize(u"hi, “there”"), + [u"hi", u",", u"“", u"there", u"”"]) + + def _generate_test_data(self, name, hyps, refs): + """Writes test data to temporary files. 
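A hypothetical use of the checkpoint-watching generator described above, e.g. for continuous evaluation (the path and thresholds are made up for illustration):

```python
from tensor2tensor.utils import bleu_hook

# Yields StepFile(filename, mtime, ctime, steps) tuples, oldest first, and
# waits up to 60 minutes for a new checkpoint before giving up.
for ckpt in bleu_hook.stepfiles_iterator(
    "/tmp/t2t_train/model.ckpt", wait_minutes=60, min_steps=1000):
  print("new checkpoint at step %d: %s" % (ckpt.steps, ckpt.filename))
```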
+ + Args: + name: str, used for making temp files unique across tests + hyps: list of unicode strings serving as translation hypotheses + refs: list of unicode strings serving as references + + Returns: + hyp_file: path to temporary file containing the hypotheses + refs_file: path to temporary file containing the references + """ + assert len(hyps) == len(refs) + hyp_file = os.path.join(tempfile.gettempdir(), "{}.hyps".format(name)) + refs_file = os.path.join(tempfile.gettempdir(), "{}.refs".format(name)) + for filename, items in zip([hyp_file, refs_file], [hyps, refs]): + with (open(filename, "wb") + if six.PY2 else open(filename, "w", encoding="utf-8")) as out: + content = text_encoder.unicode_to_native(u"\n".join(items)) + out.write(content) + return hyp_file, refs_file + + def testBleuWrapper(self): + hyp_filename, ref_filename = self._generate_test_data( + "standard", [u"a b a c", u"e f g d"], [u"a b a z", u"y f g d k l m"]) + bleu = bleu_hook.bleu_wrapper(ref_filename, hyp_filename) + actual_bleu = 0.3436 + self.assertAllClose(bleu, actual_bleu, atol=1e-03) + + def testBleuWrapperWithUnicodeLineSeparator(self): + hyp_filename, ref_filename = self._generate_test_data( + "unicode-linesep", [u"a b a c", u"e f \u2028 d"], + [u"a b a z", u"y f g d k l m"]) + bleu = bleu_hook.bleu_wrapper(ref_filename, hyp_filename) + actual_bleu = 0.2638 + self.assertAllClose(bleu, actual_bleu, atol=1e-03) + + +if __name__ == "__main__": tf.test.main() diff --git a/tensor2tensor/utils/checkpoint_compatibility_test.py b/tensor2tensor/utils/checkpoint_compatibility_test.py new file mode 100644 index 000000000..9a3e1d3eb --- /dev/null +++ b/tensor2tensor/utils/checkpoint_compatibility_test.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test for checkpoint compatibility.""" +# The checkpoint in test_data/transformer_test_ckpt is generated with the OSS +# release. 
+# t2t-trainer \ +# --model=transformer \ +# --hparams_set=transformer_test \ +# --problem=translate_ende_wmt8k \ +# --data_dir=~/t2t/data \ +# --output_dir=/tmp/t2t_train \ +# --train_steps=1 \ +# --eval_steps=1 + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np + +from six.moves import range +from tensor2tensor import models # pylint: disable=unused-import +from tensor2tensor import problems # pylint: disable=unused-import +from tensor2tensor.utils import data_reader +from tensor2tensor.utils import trainer_lib + +import tensorflow.compat.v1 as tf + + +def get_data_dir(): + pkg = os.path.abspath(__file__) + pkg, _ = os.path.split(pkg) + pkg, _ = os.path.split(pkg) + return os.path.join(pkg, "test_data") + + +_DATA_DIR = get_data_dir() +_CKPT_DIR = os.path.join(_DATA_DIR, "transformer_test_ckpt") + + +class CheckpointCompatibilityTest(tf.test.TestCase): + BATCH_SIZE = 3 + + def testCompatibility(self): + model = "transformer" + hp_set = "transformer_test" + problem_name = "translate_ende_wmt8k" + + hp = trainer_lib.create_hparams( + hp_set, data_dir=_DATA_DIR, problem_name=problem_name) + run_config = trainer_lib.create_run_config(model, model_dir=_CKPT_DIR) + estimator = trainer_lib.create_estimator(model, hp, run_config) + + for prediction in estimator.predict(self.input_fn): + self.assertEqual(prediction["outputs"].dtype, np.int32) + + def input_fn(self): + types = {"inputs": tf.int32} + shapes = {"inputs": tf.TensorShape([None])} + dataset = tf.data.Dataset.from_generator(self.input_generator, types, + shapes) + dataset = dataset.padded_batch(self.BATCH_SIZE, shapes) + dataset = dataset.map(data_reader.standardize_shapes) + features = dataset.make_one_shot_iterator().get_next() + return features + + def input_generator(self): + for _ in range(self.BATCH_SIZE): + vals = np.random.randint( + 1, 100, size=np.random.randint(20), dtype=np.int32) + yield {"inputs": vals} + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/utils/cloud_mlengine.py b/tensor2tensor/utils/cloud_mlengine.py new file mode 100644 index 000000000..337ffa249 --- /dev/null +++ b/tensor2tensor/utils/cloud_mlengine.py @@ -0,0 +1,354 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Launch on GCP's ML Engine.""" + +import datetime +import os +import pprint +import shutil +import subprocess as sp +import sys +import tempfile + +from googleapiclient import discovery +from oauth2client.client import GoogleCredentials + +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.layers import common_hparams +from tensor2tensor.utils import registry +from tensor2tensor.utils import usr_dir as usr_dir_lib +import tensorflow.compat.v1 as tf + +FLAGS = tf.flags.FLAGS + +CONSOLE_URL = "https://console.cloud.google.com/mlengine/jobs/" +RUNTIME_VERSION = "1.14" +LIST_VM = "gcloud compute instances list" +DEFAULT_PROJECT = "gcloud config get-value project" +DEFAULT_REGION = "gcloud config get-value compute/region" + + +def shell_output(cmd_, **kwargs): + return text_encoder.to_unicode(sp.check_output(format_cmd(cmd_, **kwargs))) + + +def shell_run(cmd_, **kwargs): + return sp.check_call(format_cmd(cmd_, **kwargs)) + + +def format_cmd(cmd_, **kwargs): + return cmd_.format(**kwargs).strip().split() + + +def default_region(): + return shell_output(DEFAULT_REGION).strip() + + +def default_project(): + return shell_output(DEFAULT_PROJECT).strip() + + +def get_setup_file(name, packages=None): + if not packages: + packages = [] + return """ +from setuptools import find_packages +from setuptools import setup +setup( + name="{name}", + version="0.1", + packages=find_packages(), + install_requires={pypi_packages} +) +""".format(name=name, pypi_packages=str(list(packages))) + + +def job_dir(): + # The flag --job-dir is parsed differently before and after switching to absl + return getattr(FLAGS, "job-dir", "") or getattr(FLAGS, "job_dir", "") + + +def get_requirements(usr_dir): + requirements_file = os.path.join(usr_dir, "requirements.txt") + if not tf.gfile.Exists(requirements_file): + return [] + with tf.gfile.Open(requirements_file) as f: + pkg_list = f.readlines() + return [pkg.strip() for pkg in pkg_list if "tensor2tensor" not in pkg] + + +def flags_as_args(): + """Convert FLAGS to list of args suitable for passing on cmd line.""" + if hasattr(FLAGS, "flag_values_dict"): + args_dict = FLAGS.flag_values_dict() + else: + args_dict = dict(FLAGS.__dict__["__flags"]) + del args_dict["cloud_mlengine"] + # Configured later + del args_dict["t2t_usr_dir"] + args_dict.pop("h", None) + args_dict.pop("helpfull", None) + args_dict.pop("helpshort", None) + args_dict.pop("help", None) + args = [] + for name, val in args_dict.items(): + if val is None: + continue + if name.startswith("autotune"): + continue + args.extend(["--%s=%s" % (name, str(val))]) + return args + + +def get_default_master_type(num_gpus=1): + """Returns master_type for trainingInput.""" + gpus_to_master_map = { + 0: "standard", + 1: "standard_p100", + 4: "complex_model_m_p100", + 8: "complex_model_l_gpu", + } + if num_gpus not in gpus_to_master_map: + raise ValueError("Num gpus must be in %s" % + str(sorted(list(gpus_to_master_map.keys())))) + return gpus_to_master_map[num_gpus] + + +def configure_job(): + """Construct jobSpec for ML Engine job.""" + # See documentation: + # https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#traininginput + training_input = { + "pythonModule": "tensor2tensor.bin.t2t_trainer", + "args": flags_as_args(), + "region": text_encoder.native_to_unicode(default_region()), + "runtimeVersion": RUNTIME_VERSION, + "pythonVersion": "3.5" if sys.version_info.major == 3 else "2.7", + "jobDir": FLAGS.output_dir, + "scaleTier": "CUSTOM", + "masterType": 
FLAGS.cloud_mlengine_master_type or get_default_master_type( + num_gpus=FLAGS.worker_gpu) + } + if FLAGS.use_tpu: + training_input["masterType"] = (FLAGS.cloud_mlengine_master_type or + "standard") + training_input["workerType"] = "cloud_tpu" + training_input["workerCount"] = 1 + if FLAGS.hparams_range: + tf.logging.info("Configuring hyperparameter tuning.") + training_input["hyperparameters"] = configure_autotune( + FLAGS.hparams_range, + FLAGS.autotune_objective, + FLAGS.autotune_maximize, + FLAGS.autotune_max_trials, + FLAGS.autotune_parallel_trials, + ) + + timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S") + job_spec = { + "jobId": "%s_%s_t2t_%s" % (FLAGS.model, FLAGS.problem, timestamp), + "labels": { + "model": FLAGS.model, + "problem": FLAGS.problem, + "hparams": FLAGS.hparams_set + }, + "trainingInput": training_input, + } + return job_spec + + +def launch_job(job_spec): + """Launch job on ML Engine.""" + project_id = "projects/{}".format( + text_encoder.native_to_unicode(default_project())) + credentials = GoogleCredentials.get_application_default() + cloudml = discovery.build("ml", "v1", credentials=credentials, + cache_discovery=False) + request = cloudml.projects().jobs().create(body=job_spec, parent=project_id) + request.execute() + + +def _tar_and_copy(src_dir, target_dir): + """Tar and gzip src_dir and copy to GCS target_dir.""" + src_dir = src_dir.rstrip("/") + target_dir = target_dir.rstrip("/") + tmp_dir = tempfile.gettempdir().rstrip("/") + src_base = os.path.basename(src_dir) + shell_run( + "tar --exclude=.git -zcf {tmp_dir}/{src_base}.tar.gz -C {src_dir} .", + src_dir=src_dir, + src_base=src_base, + tmp_dir=tmp_dir) + final_destination = "%s/%s.tar.gz" % (target_dir, src_base) + shell_run( + ("gsutil cp {tmp_dir}/{src_base}.tar.gz " + "{final_destination}"), + tmp_dir=tmp_dir, + src_base=src_base, + final_destination=final_destination) + return final_destination + + +def tar_and_copy_t2t(train_dir): + """Tar Tensor2Tensor and cp to train_dir.""" + tf.logging.info("Tarring and pushing local Tensor2Tensor package.") + + output = text_encoder.native_to_unicode(shell_output( + "pip show tensor2tensor")).split("\n") + assert output[1].startswith("Version") + assert output[7].startswith("Location") + t2t_version = output[1].split(":")[1].strip() + t2t_dir = output[7].split(":")[1].strip() + + # A local installation cloned from GitHub will have a setup.py file and a docs + # folder + is_local_t2t = all([ + tf.gfile.Exists(os.path.join(t2t_dir, fname)) + for fname in ["setup.py", "docs/cloud_mlengine.md"] + ]) + + if is_local_t2t: + tf.logging.info("Found local T2T installation. Tarring directory %s", + t2t_dir) + else: + # PyPI installation + # Create a folder with just a setup.py file pointing to the right version + tf.logging.info("Found PyPI T2T installation. 
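For reference, this is roughly the request body that `configure_job()` above assembles before `launch_job()` posts it to the ML Engine API; every value here is hypothetical and stands in for the corresponding flag:

```python
job_spec = {
    "jobId": "transformer_translate_ende_wmt32k_t2t_20180101_120000",
    "labels": {
        "model": "transformer",
        "problem": "translate_ende_wmt32k",
        "hparams": "transformer_base",
    },
    "trainingInput": {
        "pythonModule": "tensor2tensor.bin.t2t_trainer",
        "args": ["--model=transformer", "--output_dir=gs://my-bucket/train"],
        "region": "us-central1",
        "runtimeVersion": "1.14",
        "pythonVersion": "3.5",
        "jobDir": "gs://my-bucket/train",
        "scaleTier": "CUSTOM",
        "masterType": "standard_p100",  # default for --worker_gpu=1
    },
}
```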
Launching tensor2tensor==%s", + t2t_version) + t2t_dir = os.path.join(tempfile.gettempdir(), "tensor2tensor_tmp") + shutil.rmtree(t2t_dir, ignore_errors=True) + os.mkdir(t2t_dir) + setup_fname = os.path.join(t2t_dir, "setup.py") + setup_file_str = get_setup_file( + name="DummyT2TPackage", + packages=["tensor2tensor==%s" % t2t_version] + ) + with tf.gfile.Open(setup_fname, "w") as f: + f.write(setup_file_str) + t2t_tar = _tar_and_copy(t2t_dir, train_dir) + return t2t_tar + + +def tar_and_copy_usr_dir(usr_dir, train_dir): + """Package, tar, and copy usr_dir to GCS train_dir.""" + tf.logging.info("Tarring and pushing t2t_usr_dir.") + usr_dir = os.path.abspath(os.path.expanduser(usr_dir)) + # Copy usr dir to a temp location + top_dir = os.path.join(tempfile.gettempdir(), "t2t_usr_container") + tmp_usr_dir = os.path.join(top_dir, usr_dir_lib.INTERNAL_USR_DIR_PACKAGE) + shutil.rmtree(top_dir, ignore_errors=True) + shutil.copytree(usr_dir, tmp_usr_dir) + # Insert setup.py if one does not exist + top_setup_fname = os.path.join(top_dir, "setup.py") + setup_file_str = get_setup_file( + name="DummyUsrDirPackage", + packages=get_requirements(usr_dir) + ) + with tf.gfile.Open(top_setup_fname, "w") as f: + f.write(setup_file_str) + usr_tar = _tar_and_copy(top_dir, train_dir) + return usr_tar + + +def autotune_paramspecs(hparams_range): + rhp = common_hparams.RangedHParams() + registry.ranged_hparams(hparams_range)(rhp) + return rhp.to_parameter_specs(name_prefix="hp_") + + +def configure_autotune(hparams_range, + objective, + maximize=True, + max_trials=10, + parallel_trials=1): + return { + "goal": "MAXIMIZE" if maximize else "MINIMIZE", + "params": autotune_paramspecs(hparams_range), + "maxTrials": max_trials, + "maxParallelTrials": parallel_trials, + "hyperparameterMetricTag": objective, + } + + +def configure_trainer_package(job_spec, t2t_tar): + assert t2t_tar.startswith("gs://") + job_spec["trainingInput"]["packageUris"] = [t2t_tar] + + +def configure_usr_dir(job_spec, usr_tar): + assert usr_tar.startswith("gs://") + job_spec["trainingInput"]["packageUris"].append(usr_tar) + usr_args = ["--t2t_usr_dir", usr_dir_lib.INTERNAL_USR_DIR_PACKAGE] + job_spec["trainingInput"]["args"].extend(usr_args) + + +def validate_flags(): + """Validates flags are set to acceptable values for CloudML Engine runs.""" + assert not job_dir() + assert FLAGS.output_dir.startswith("gs://") + assert FLAGS.data_dir.startswith("gs://") + assert FLAGS.worker_replicas <= 1 + assert FLAGS.ps_replicas <= 0 + if FLAGS.hparams_range: + assert FLAGS.autotune_objective + if FLAGS.worker_gpu: + assert FLAGS.worker_gpu in [1, 4, 8] + if FLAGS.cloud_mlengine_master_type: + if FLAGS.worker_gpu: + if FLAGS.worker_gpu == 1: + assert FLAGS.cloud_mlengine_master_type in ["standard_gpu", + "standard_p100", + "standard_v100"] + elif FLAGS.worker_gpu == 4: + assert FLAGS.cloud_mlengine_master_type in ["complex_model_m_gpu", + "complex_model_m_p100", + "complex_model_m_v100"] + else: + assert FLAGS.cloud_mlengine_master_type in ["complex_model_l_gpu", + "complex_model_l_v100"] + else: + assert FLAGS.cloud_mlengine_master_type in ["standard", "large_model", + "complex_model_s", + "complex_model_m", + "complex_model_l"] + + +def confirm(): + out = input("Confirm (Y/n)? 
> ") + return out == "Y" + + +def launch(): + """Launch t2t_trainer on Cloud ML Engine.""" + validate_flags() + job_spec = configure_job() + job_name = job_spec["jobId"] + tf.logging.info("Launching job %s with ML Engine spec:\n%s", job_name, + pprint.pformat(job_spec)) + assert confirm() + train_dir = FLAGS.output_dir + t2t_tar = tar_and_copy_t2t(train_dir) + configure_trainer_package(job_spec, t2t_tar) + if FLAGS.t2t_usr_dir: + usr_tar = tar_and_copy_usr_dir(FLAGS.t2t_usr_dir, train_dir) + configure_usr_dir(job_spec, usr_tar) + launch_job(job_spec) + tf.logging.info("Launched %s. See console to track: %s.", job_name, + CONSOLE_URL) + tf.logging.info("Interact with the training job from the command line:") + tf.logging.info("Abort job: gcloud ml-engine jobs cancel %s", job_name) + tf.logging.info("Stream logs: gcloud ml-engine jobs stream-logs %s", job_name) + tf.logging.info("Open tensorboard: tensorboard --logdir %s", train_dir) diff --git a/tensor2tensor/utils/compute_video_metrics.py b/tensor2tensor/utils/compute_video_metrics.py new file mode 100644 index 000000000..4e1765d6d --- /dev/null +++ b/tensor2tensor/utils/compute_video_metrics.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Computes and saves the metrics for video prediction and generation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from six.moves import range +from tensor2tensor.bin import t2t_decoder +from tensor2tensor.utils import video_metrics +import tensorflow.compat.v1 as tf + + +FLAGS = tf.flags.FLAGS + + +def main(_): + hparams = t2t_decoder.create_hparams() + problem = hparams.problem + frame_shape = [problem.frame_height, + problem.frame_width, + problem.num_channels] + decode_hp = t2t_decoder.create_decode_hparams() + + output_dirs = [ + os.path.join(FLAGS.output_dir, "decode_%05d" % decode_id) + for decode_id in range(decode_hp.num_decodes) + ] + + video_metrics.compute_and_save_video_metrics( + output_dirs, + FLAGS.problem, + hparams.video_num_target_frames, + frame_shape) + + +if __name__ == "__main__": + tf.app.run(main) diff --git a/tensor2tensor/utils/contrib.py b/tensor2tensor/utils/contrib.py new file mode 100644 index 000000000..23441aeab --- /dev/null +++ b/tensor2tensor/utils/contrib.py @@ -0,0 +1,221 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Wrappers around tf.contrib to dynamically import contrib packages. + +This makes sure that libraries depending on T2T and TF2, do not crash at import. +""" + +from __future__ import absolute_import +from __future__ import division # Not necessary in a Python 3-only module +from __future__ import print_function # Not necessary in a Python 3-only module + +from absl import logging +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +# Check if we have contrib available +try: + from tensorflow.contrib import slim as tf_slim # pylint: disable=g-import-not-at-top + is_tf2 = False +except: # pylint: disable=bare-except + # tf.contrib, including slim and certain optimizers are not available in TF2 + # Some features are now available in separate packages. We shim support for + # these as needed. + import tensorflow_addons as tfa # pylint: disable=g-import-not-at-top + import tf_slim # pylint: disable=g-import-not-at-top + is_tf2 = True + + +def err_if_tf2(msg='err'): + if is_tf2: + if msg == 'err': + msg = 'contrib is unavailable in tf2.' + raise ImportError(msg) + else: + msg = 'contrib is unavailable in tf2.' + logging.info(msg) + + +class DummyModule(object): + + def __init__(self, **kw): + for k, v in kw.items(): + setattr(self, k, v) + + +def slim(): + return tf_slim + + +def util(): + err_if_tf2() + from tensorflow.contrib import util as contrib_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_util + + +def tfe(): + err_if_tf2(msg='warn') + from tensorflow.contrib.eager.python import tfe as contrib_eager # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_eager + + +def deprecated(reason, date): + del reason + del date + def decorator(fn): + return fn + return decorator + + +def framework(msg='err'): + """Return framework module or dummy version.""" + del msg + if is_tf2: + return DummyModule( + arg_scope=None, + get_name_scope=lambda: tf.get_default_graph().get_name_scope(), + name_scope=tf.name_scope, + deprecated=deprecated, + nest=tf.nest, + argsort=tf.argsort) + + from tensorflow.contrib import framework as contrib_framework # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_framework + + +def nn(): + err_if_tf2(msg='err') + from tensorflow.contrib import nn as contrib_nn # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_nn + + +def layers(): + """Return layers module or dummy version.""" + try: + from tensorflow.contrib import layers as contrib_layers # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_layers + except: # pylint: disable=bare-except + return DummyModule( + OPTIMIZER_CLS_NAMES={}, optimize_loss=tf_slim.optimize_loss) + + +def rnn(): + err_if_tf2(msg='err') + from tensorflow.contrib import rnn as contrib_rnn # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_rnn + + +def seq2seq(): + err_if_tf2(msg='err') + from tensorflow.contrib import seq2seq as contrib_seq2seq # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_seq2seq + + +def tpu(): + err_if_tf2(msg='err') + from tensorflow.contrib import tpu as contrib_tpu # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_tpu + + +def training(): + err_if_tf2(msg='err') + from tensorflow.contrib import training as contrib_training # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_training + + +def 
summary(): + err_if_tf2(msg='err') + from tensorflow.contrib import summary as contrib_summary # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_summary + + +def metrics(): + err_if_tf2(msg='err') + from tensorflow.contrib import metrics as contrib_metrics # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_metrics + + +def opt(): + if not is_tf2: + from tensorflow.contrib import opt as contrib_opt # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_opt + return DummyModule( + LazyAdam=tfa.optimizers.LazyAdam, + LazyAdamOptimizer=tfa.optimizers.LazyAdam, + ) + + +def mixed_precision(): + err_if_tf2(msg='err') + from tensorflow.contrib import mixed_precision as contrib_mixed_precision # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_mixed_precision + + +def cluster_resolver(): + err_if_tf2(msg='err') + from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_cluster_resolver + + +def distribute(): + err_if_tf2(msg='err') + from tensorflow.contrib import distribute as contrib_distribute # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_distribute + + +def replace_monitors_with_hooks(monitors_or_hooks, estimator): + """Stub for missing function.""" + del estimator + monitors_or_hooks = monitors_or_hooks or [] + hooks = [ + m for m in monitors_or_hooks if isinstance(m, tf_estimator.SessionRunHook) + ] + deprecated_monitors = [ + m for m in monitors_or_hooks + if not isinstance(m, tf_estimator.SessionRunHook) + ] + assert not deprecated_monitors + return hooks + + +def learn(): + """Return tf.contrib.learn module or dummy version.""" + if not is_tf2: + from tensorflow.contrib import learn as contrib_learn # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_learn + return DummyModule( + RunConfig=tf_estimator.RunConfig, + monitors=DummyModule( + replace_monitors_with_hooks=replace_monitors_with_hooks), + ) + + +def tf_prof(): + err_if_tf2(msg='err') + from tensorflow.contrib import tfprof as contrib_tfprof # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_tfprof + + +def eager(): + err_if_tf2(msg='err') + from tensorflow.contrib.eager.python import tfe as contrib_eager # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_eager + + +def image(): + err_if_tf2(msg='err') + from tensorflow.contrib import image as contrib_image # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top + return contrib_image diff --git a/tensor2tensor/utils/data_reader.py b/tensor2tensor/utils/data_reader.py index 0080ecaa6..c23e71083 100644 --- a/tensor2tensor/utils/data_reader.py +++ b/tensor2tensor/utils/data_reader.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
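Call sites go through these wrappers instead of importing `tf.contrib` directly, so under TF2 the `ImportError` only fires if a contrib-only feature is actually used. A small usage sketch:

```python
from tensor2tensor.utils import contrib

slim = contrib.slim()        # tf_slim under TF2, tf.contrib.slim otherwise
layers = contrib.layers()    # or a DummyModule wrapping tf_slim.optimize_loss
adam = contrib.opt().LazyAdamOptimizer(0.001)  # tensorflow_addons LazyAdam on TF2

if not contrib.is_tf2:
  tpu = contrib.tpu()        # contrib-only; would raise ImportError under TF2
```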
@@ -17,330 +18,558 @@ from __future__ import division from __future__ import print_function -import math -import os - -# Dependency imports +import functools +import multiprocessing +import random import six -from six.moves import zip # pylint: disable=redefined-builtin - -from tensor2tensor.data_generators import problem_hparams -from tensor2tensor.models import common_layers +from six.moves import range # pylint: disable=redefined-builtin -import tensorflow as tf +from tensor2tensor.utils import contrib +from tensor2tensor.utils import mlperf_log +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator -def examples_queue(data_sources, - data_fields_to_features, - training, - capacity=32, - data_items_to_decoders=None, - data_items_to_decode=None): - """Contruct a queue of training or evaluation examples. - This function will create a reader from files given by data_sources, - then enqueue the tf.Examples from these files, shuffling if training - is true, and finally parse these tf.Examples to tensors. +def cast_ints_to_int32(features): + f = {} + for k, v in sorted(six.iteritems(features)): + if v.dtype in [tf.int64, tf.uint8]: + v = tf.to_int32(v) + f[k] = v + return f - The dictionary data_fields_to_features for an image dataset can be this: - - data_fields_to_features = { - 'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''), - 'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'), - 'image/class/label': tf.FixedLenFeature( - [1], tf.int64, default_value=tf.zeros([1], dtype=tf.int64)), - } - and for a simple algorithmic dataset with variable-length data it is this: +def example_length(example): + length = 0 + # Length of the example is the maximum length of the feature lengths + for _, v in sorted(six.iteritems(example)): + # For images the sequence length is the size of the spatial dimensions. + feature_length = tf.shape(v)[0] + if len(v.get_shape()) > 2: + feature_length = tf.shape(v)[0] * tf.shape(v)[1] + length = tf.maximum(length, feature_length) + return length - data_fields_to_features = { - 'inputs': tf.VarLenFeature(tf.int64), - 'targets': tf.VarLenFeature(tf.int64), - } - The data_items_to_decoders dictionary argument can be left as None if there - is no decoding to be performed. But, e.g. for images, it should be set so that - the images are decoded from the features, e.g., like this for MNIST: - - data_items_to_decoders = { - 'image': tfexample_decoder.Image( - image_key = 'image/encoded', - format_key = 'image/format', - shape=[28, 28], - channels=1), - 'label': tfexample_decoder.Tensor('image/class/label'), - } +def example_valid_size(example, min_length, max_length): + length = example_length(example) + return tf.logical_and( + length >= min_length, + length <= max_length, + ) - These arguments are compatible with the use of tf.contrib.slim.data module, - see there for more documentation. - Args: - data_sources: a list or tuple of sources from which the data will be read, - for example [/path/to/train@128, /path/to/train2*, /tmp/.../train3*] - data_fields_to_features: a dictionary from data fields in the data sources - to features, such as tf.VarLenFeature(tf.int64), see above for examples. - training: a Boolean, whether to read for training or evaluation. - capacity: integer, queue capacity; set to 2 * max_batch_size or more. 
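The helpers above replace the old queue-based pipeline with `tf.data`-friendly building blocks; length filtering, for example, becomes an ordinary `Dataset.filter`. A minimal sketch with made-up example lengths:

```python
import functools
import tensorflow.compat.v1 as tf
from tensor2tensor.utils import data_reader

def gen():
  for n in [3, 50, 7]:
    yield {"inputs": [1] * n}

dataset = tf.data.Dataset.from_generator(
    gen, output_types={"inputs": tf.int32},
    output_shapes={"inputs": tf.TensorShape([None])})
# Keep only examples whose longest feature is between 1 and 10 positions.
dataset = dataset.filter(
    functools.partial(data_reader.example_valid_size,
                      min_length=1, max_length=10))
```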
- data_items_to_decoders: a dictionary mapping data items (that will be - in the returned result) to decoders that will decode them using features - defined in data_fields_to_features; see above for examples. By default - (if this is None), we grab the tensor from every feature. - data_items_to_decode: a subset of data items that will be decoded; - by default (if this is None), we decode all items. +def padded_batch(dataset, batch_size, padded_shapes=None): + padded_shapes = padded_shapes or dict( + [(name, [None] * len(shape)) + for name, shape in dataset.output_shapes.items()]) + return dataset.padded_batch(batch_size, padded_shapes) - Returns: - A dictionary mapping each data_field to a corresponding 1D int64 tensor - read from the created queue. - - Raises: - ValueError: if no files are found with the provided data_prefix or no data - fields were provided. - """ - with tf.name_scope("examples_queue"): - # Read serialized examples using slim parallel_reader. - num_epochs = None if training else 1 - _, example_serialized = tf.contrib.slim.parallel_reader.parallel_read( - data_sources, - tf.TFRecordReader, - num_epochs=num_epochs, - shuffle=training, - capacity=2 * capacity, - min_after_dequeue=capacity, - num_readers=4 if training else 1) - - if data_items_to_decoders is None: - data_items_to_decoders = { - field: tf.contrib.slim.tfexample_decoder.Tensor(field) - for field in data_fields_to_features - } - - decoder = tf.contrib.slim.tfexample_decoder.TFExampleDecoder( - data_fields_to_features, data_items_to_decoders) - - if data_items_to_decode is None: - data_items_to_decode = list(data_items_to_decoders) - - decoded = decoder.decode(example_serialized, items=data_items_to_decode) - return { - field: tensor - for (field, tensor) in zip(data_items_to_decode, decoded) - } - - -def input_pipeline(data_file_pattern, capacity, mode): - """Input pipeline, returns a dictionary of tensors from queues.""" - # Read from image TFRecords if the file has "image" in its name. - if data_file_pattern and "image" in data_file_pattern: - data_fields = { - "image/encoded": tf.FixedLenFeature((), tf.string), - "image/format": tf.FixedLenFeature((), tf.string), - "image/class/label": tf.VarLenFeature(tf.int64) - } - data_items_to_decoders = { - "inputs": - tf.contrib.slim.tfexample_decoder.Image( - image_key="image/encoded", - format_key="image/format", - channels=1 if "mnist" in data_file_pattern else 3), - "targets": - tf.contrib.slim.tfexample_decoder.Tensor("image/class/label"), - } - elif data_file_pattern and "audio" in data_file_pattern: - data_type = tf.int64 if "timit" in data_file_pattern else tf.float32 - data_fields = { - "inputs": tf.VarLenFeature(data_type), - "audio/sample_count": tf.FixedLenFeature((), tf.int64), - "audio/sample_width": tf.FixedLenFeature((), tf.int64), - "targets": tf.VarLenFeature(tf.int64), - } - data_items_to_decoders = None - else: - data_fields = { - "inputs": tf.VarLenFeature(tf.int64), - "targets": tf.VarLenFeature(tf.int64) - } - data_items_to_decoders = None - - # Create placeholders for input, rather than reading data from disk. - if data_file_pattern is None: - feature_map = {} - for (field, tp) in data_fields: - if field != "targets": - feature_map[field] = tf.placeholder( - dtype=tp, shape=[None] * 4, name=field) - return feature_map - - # Now the non-trivial case construction. 
- examples = examples_queue( - [data_file_pattern], - data_fields, - training=(mode == tf.contrib.learn.ModeKeys.TRAIN), - capacity=capacity, - data_items_to_decoders=data_items_to_decoders) - - if "image" in data_file_pattern: - # Small single-example pre-processing for images. - examples["inputs"] = tf.cast(examples["inputs"], tf.int64) - if ("image_imagenet" in data_file_pattern or - "image_mscoco" in data_file_pattern): - # For imagnet/coco, resize images to 299x299 as is standard. - def resize(img): - return tf.to_int64(tf.image.resize_images(img, [299, 299])) - - def preprocess(img): - img = tf.image.resize_images(img, [360, 360]) - img = common_layers.image_augmentation(tf.to_float(img) / 255.) - return tf.to_int64(img * 255.) - - inputs = examples["inputs"] - if mode == tf.contrib.learn.ModeKeys.TRAIN: - examples["inputs"] = tf.cond( # Preprocess 80% of the time. - tf.less(tf.random_uniform([]), 0.8), - lambda img=inputs: preprocess(img), - lambda img=inputs: resize(img)) - else: - examples["inputs"] = tf.to_int64(resize(inputs)) - elif "audio" in data_file_pattern: - # Reshape audio to proper shape - sample_count = tf.to_int32(examples.pop("audio/sample_count")) - sample_width = tf.to_int32(examples.pop("audio/sample_width")) - channel_count = 1 - examples["inputs"] = tf.reshape(examples["inputs"], - [sample_count, sample_width, channel_count]) - if "wsj" in data_file_pattern: - examples["inputs"] = tf.bitcast(examples["inputs"], tf.int32) - elif "a2q_20161229" in data_file_pattern: - # we forgot the EOS when we preprocessed this data. - examples["targets"] = tf.concat([examples["targets"], [1]], 0) - - # We do not want int64s as they do are not supported on GPUs. - return {k: tf.to_int32(v) for (k, v) in six.iteritems(examples)} - - -def batch_examples(examples, batching_scheme): - """Given a queue of examples, create batches of examples with similar lengths. - - We assume that examples is a dictionary with string keys and tensor values, - possibly coming from a queue, e.g., constructed by examples_queue above. - Each tensor in examples is assumed to be 1D. We will put tensors of similar - length into batches togeter. We return a dictionary with the same keys as - examples, and with values being batches of size batch_size. If elements have - different lengths, they are padded with 0s. This function is based on - tf.contrib.training.bucket_by_sequence_length so see there for details. - - For example, if examples is a queue containing [1, 2, 3] and [4], then - this function with batch_size=2 will return a batch [[1, 2, 3], [4, 0, 0]]. - - Args: - examples: a dictionary with string keys and 1D tensor values. - batching_scheme: a dictionary containing - "boundaries": a list of integers for the boundaries that will be - used for bucketing; see tf.contrib.training.bucket_by_sequence_length - for more details. - "batch_sizes": a list of batch sizes corresponding to the buckets - "max_length": an integer. We drop sequences which are longer. - Returns: - A dictionary with the same keys as examples and with values being batches - of examples padded with 0s, i.e., [batch_size x length] tensors. - """ - with tf.name_scope("batch_examples"): - # The queue to bucket on will be chosen based on maximum length. - max_length = 0 - for v in examples.values(): - # For images the sequence length is the size of the spatial dimensions. 
- sequence_length = (tf.shape(v)[0] if len(v.get_shape()) < 3 else - tf.shape(v)[0] * tf.shape(v)[1]) - max_length = tf.maximum(max_length, sequence_length) - (_, outputs) = tf.contrib.training.bucket_by_sequence_length( - max_length, - examples, - batching_scheme["batch_sizes"], - [b + 1 for b in batching_scheme["boundaries"]], - capacity=2, # Number of full batches to store, we don't need many. - bucket_capacities=[2 * b for b in batching_scheme["batch_sizes"]], - dynamic_pad=True, - keep_input=(max_length <= batching_scheme["max_length"])) - return outputs - - -def bucket_boundaries(max_length, min_length=8, mantissa_bits=2): +def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1): """A default set of length-bucket boundaries.""" + assert length_bucket_step > 1.0 x = min_length boundaries = [] while x < max_length: boundaries.append(x) - x += 2**max(0, int(math.log(x, 2)) - mantissa_bits) + x = max(x + 1, int(x * length_bucket_step)) return boundaries -def hparams_to_batching_scheme(hparams, - drop_long_sequences=False, - shard_multiplier=1, - length_multiplier=1): +def batching_scheme(batch_size, + max_length, + min_length_bucket, + length_bucket_step, + drop_long_sequences=False, + shard_multiplier=1, + length_multiplier=1, + min_length=0): """A batching scheme based on model hyperparameters. - Every batch containins a number of sequences divisible by `shard_multiplier`. - - If `drop_long_sequences` is True, then sequences longer than - `hparams.batch_size` are dropped. This prevents generating batches with - more than the usual number of tokens, which can cause out-of-memory errors. + Every batch contains a number of sequences divisible by `shard_multiplier`. Args: - hparams: a hyperparameters. - drop_long_sequences: a boolean. + batch_size: int, total number of tokens in a batch. + max_length: int, sequences longer than this will be skipped. Defaults to + batch_size. + min_length_bucket: int + length_bucket_step: float greater than 1.0 + drop_long_sequences: bool, if True, then sequences longer than + `max_length` are dropped. This prevents generating batches with + more than the usual number of tokens, which can cause out-of-memory + errors. shard_multiplier: an integer increasing the batch_size to suit splitting across datashards. length_multiplier: an integer multiplier that is used to increase the batch sizes and sequence length tolerance. + min_length: int, sequences shorter than this will be skipped. 
Returns: - a dictionary + A dictionary with parameters that can be passed to input_pipeline: + * boundaries: list of bucket boundaries + * batch_sizes: list of batch sizes for each length bucket + * max_length: int, maximum length of an example + + Raises: + ValueError: If min_length > max_length """ - max_length = hparams.max_length or hparams.batch_size - boundaries = bucket_boundaries( - max_length, mantissa_bits=hparams.batching_mantissa_bits) + max_length = max_length or batch_size + if max_length < min_length: + raise ValueError("max_length must be greater or equal to min_length") + + boundaries = _bucket_boundaries(max_length, min_length_bucket, + length_bucket_step) + boundaries = [boundary * length_multiplier for boundary in boundaries] + max_length *= length_multiplier + batch_sizes = [ - max(1, hparams.batch_size // length) - for length in boundaries + [max_length] + max(1, batch_size // length) for length in boundaries + [max_length] ] - batch_sizes = [b * shard_multiplier for b in batch_sizes] - max_length *= length_multiplier - boundaries = [boundary * length_multiplier for boundary in boundaries] - return { + max_batch_size = max(batch_sizes) + # Since the Datasets API only allows a single constant for window_size, + # and it needs divide all bucket_batch_sizes, we pick a highly-composite + # window size and then round down all batch sizes to divisors of that window + # size, so that a window can always be divided evenly into batches. + # TODO(noam): remove this when Dataset API improves. + highly_composite_numbers = [ + 1, 2, 4, 6, 12, 24, 36, 48, 60, 120, 180, 240, 360, 720, 840, 1260, 1680, + 2520, 5040, 7560, 10080, 15120, 20160, 25200, 27720, 45360, 50400, 55440, + 83160, 110880, 166320, 221760, 277200, 332640, 498960, 554400, 665280, + 720720, 1081080, 1441440, 2162160, 2882880, 3603600, 4324320, 6486480, + 7207200, 8648640, 10810800, 14414400, 17297280, 21621600, 32432400, + 36756720, 43243200, 61261200, 73513440, 110270160 + ] + window_size = max( + [i for i in highly_composite_numbers if i <= 3 * max_batch_size]) + divisors = [i for i in range(1, window_size + 1) if window_size % i == 0] + batch_sizes = [max([d for d in divisors if d <= bs]) for bs in batch_sizes] + window_size *= shard_multiplier + batch_sizes = [bs * shard_multiplier for bs in batch_sizes] + # The Datasets API splits one window into multiple batches, which + # produces runs of many consecutive batches of the same size. This + # is bad for training. To solve this, we will shuffle the batches + # using a queue which must be several times as large as the maximum + # number of batches per window. + max_batches_per_window = window_size // min(batch_sizes) + shuffle_queue_size = max_batches_per_window * 3 + + ret = { "boundaries": boundaries, "batch_sizes": batch_sizes, - "max_length": (max_length if drop_long_sequences else 10**9) + "min_length": min_length, + "max_length": (max_length if drop_long_sequences else 10**9), + "shuffle_queue_size": shuffle_queue_size, } + return ret -def constant_batching_scheme(constant_batch_size_in_sequences): - """A batching scheme with constant batch size. 
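To make the bucketing concrete, a small illustrative call (the numbers follow directly from the arithmetic above):

```python
from tensor2tensor.utils import data_reader

scheme = data_reader.batching_scheme(
    batch_size=4096, max_length=256, min_length_bucket=8,
    length_bucket_step=1.1, drop_long_sequences=True)

print(scheme["boundaries"][:5])   # [8, 9, 10, 11, 12] - roughly geometric growth
# Each bucket's batch size starts near batch_size // boundary and is rounded
# down to a divisor of the shared window size so windows split evenly.
print(scheme["batch_sizes"][:5])
print(scheme["max_length"])       # 256, because drop_long_sequences=True
```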
+def hparams_to_batching_scheme(hparams, + drop_long_sequences=False, + shard_multiplier=1, + length_multiplier=1): + """Wrapper around _batching_scheme with hparams.""" + return batching_scheme( + batch_size=hparams.batch_size, + min_length=hparams.min_length, + max_length=hparams.max_length, + min_length_bucket=hparams.min_length_bucket, + length_bucket_step=hparams.length_bucket_step, + drop_long_sequences=drop_long_sequences, + shard_multiplier=shard_multiplier, + length_multiplier=length_multiplier) + + +class DummyQueueRunner(object): + """Can stand-in for a QueueRunner but does nothing.""" + + def __init__(self): + pass + + def create_threads(self, sess, coord=None, daemon=False, start=False): + del sess, coord, daemon, start + return [] + + +def pad_for_tpu(shapes_dict, hparams, max_length): + """Pads unknown features' dimensions for TPU.""" + padded_shapes = {} + + def get_filler(specified_max_length): + if not specified_max_length: + return max_length + return min(specified_max_length, max_length) + + inputs_none_filler = get_filler(hparams.max_input_seq_length) + targets_none_filler = get_filler(hparams.max_target_seq_length) + + def pad_one_shape(shape, none_filler): + return [ + (dim if dim is not None else none_filler) for dim in shape.as_list() + ] + + for key, shape in six.iteritems(shapes_dict): + if key == "inputs": + padded_shapes[key] = pad_one_shape(shape, inputs_none_filler) + elif key == "targets": + padded_shapes[key] = pad_one_shape(shape, targets_none_filler) + else: + padded_shapes[key] = pad_one_shape(shape, max_length) + return padded_shapes + + +def cpu_count(): + """Return the number of available cores.""" + num_available_cores = multiprocessing.cpu_count() + return num_available_cores + + +def _summarize_features(features, num_shards=1): + with tf.name_scope("input_stats"): + for (k, v) in six.iteritems(features): + if isinstance(v, tf.Tensor) and v.get_shape().ndims > 1: + tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards) + tf.summary.scalar("%s_length" % k, tf.shape(v)[1]) + nonpadding = tf.to_float(tf.not_equal(v, 0)) + nonpadding_tokens = tf.reduce_sum(nonpadding) + tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens) + tf.summary.scalar("%s_nonpadding_fraction" % k, + tf.reduce_mean(nonpadding)) + + +def standardize_shapes(features, batch_size=None): + """Set the right shapes for the features.""" + for fname in ["inputs", "targets"]: + if fname not in features: + continue + f = features[fname] + while len(f.get_shape()) < 4: + f = tf.expand_dims(f, axis=-1) + features[fname] = f + + if batch_size: + # Ensure batch size is set on all features + for _, t in six.iteritems(features): + shape = t.get_shape().as_list() + shape[0] = batch_size + t.set_shape(t.get_shape().merge_with(shape)) + # Assert shapes are fully known + t.get_shape().assert_is_fully_defined() + + return features + + +def _are_shapes_fully_defined(shapes_dict): + for shape in shapes_dict.values(): + if not shape.is_fully_defined(): + return False + return True + + +def _file_num_records_cached(filename): + """Return the number of TFRecords in a file.""" + # Cache the result, as this is expensive to compute + if filename in _file_num_records_cache: + return _file_num_records_cache[filename] + ret = 0 + for _ in tf.python_io.tf_record_iterator(filename): + ret += 1 + _file_num_records_cache[filename] = ret + return ret + + +_file_num_records_cache = {} + + +def skip_random_fraction(dataset, data_file): + # Skip a random fraction at the beginning of the stream. 
The skip is + # essential for synchronous highly-parallel training to avoid multiple + # replicas reading the same data in lock-step. + num_skip = random.randint(0, _file_num_records_cached(data_file)) + return dataset.skip(num_skip) + + +def pad_batch(features, batch_multiple): + """Pad batch dim of features to nearest multiple of batch_multiple.""" + feature = list(features.items())[0][1] + batch_size = tf.shape(feature)[0] + mod = batch_size % batch_multiple + has_mod = tf.cast(tf.cast(mod, tf.bool), tf.int32) + batch_padding = batch_multiple * has_mod - mod + + padded_features = {} + for k, feature in features.items(): + rank = len(feature.shape) + paddings = [[0, 0] for _ in range(rank)] + paddings[0][1] = batch_padding + padded_feature = tf.pad(feature, paddings) + padded_features[k] = padded_feature + return padded_features + + +# TODO(lukaszkaiser): refactor the API to not be just a list of self params +# but make sense for other uses too. +def input_fn(dataset, + filepattern, + skip_random_fraction_when_training, + batch_size_means_tokens_param, + batch_size_multiplier, + max_length, + mode, + hparams, + data_dir=None, + params=None, + config=None, + force_repeat=False, + prevent_repeat=False): + """Builds input pipeline for problem. Args: - constant_batch_size_in_sequences: an integer + dataset: the dataset to make input function from. + filepattern: the pattern of files to read from. + skip_random_fraction_when_training: whether to skip randomly when training. + batch_size_means_tokens_param: whether batch size should mean tokens. + batch_size_multiplier: how to multiply batch size when bucketing. + max_length: maximum length, + mode: tf.estimator.ModeKeys + hparams: HParams, model hparams + data_dir: str, data directory; if None, will use hparams.data_dir + params: dict, may include "batch_size" + config: RunConfig; should have the data_parallelism attribute if not using + TPU + force_repeat: bool, whether to repeat the data even if not training + prevent_repeat: bool, whether to not repeat when in training mode. + Overrides force_repeat. 
Returns: - a dictionary + (features_dict, Tensor targets) """ - boundaries = bucket_boundaries(1024) - batch_sizes = [constant_batch_size_in_sequences] * (1 + len(boundaries)) - return { - "boundaries": boundaries, - "batch_sizes": batch_sizes, - "max_length": 10**9 - } + is_training = mode == tf_estimator.ModeKeys.TRAIN + if config and config.use_tpu: + num_threads = 64 + else: + num_threads = cpu_count() if is_training else 1 + + if config and hasattr(config, + "data_parallelism") and config.data_parallelism: + num_shards = config.data_parallelism.n + else: + num_shards = 1 + + mlperf_log.transformer_print( + key=mlperf_log.INPUT_MAX_LENGTH, value=max_length) + + def tpu_valid_size(example): + return example_valid_size(example, hparams.min_length, max_length) + def gpu_valid_size(example): + drop_long_sequences = is_training or hparams.eval_drop_long_sequences + max_validate_length = max_length if drop_long_sequences else 10**9 + return example_valid_size(example, hparams.min_length, max_validate_length) -def get_datasets(problems, data_dir, mode): - """Return the location of a dataset for a given mode.""" - datasets = [] - for problem in problems.split("-"): - problem, _, _ = problem_hparams.parse_problem_name(problem) - path = os.path.join(data_dir, problem) - if mode == tf.contrib.learn.ModeKeys.TRAIN: - datasets.append("%s-train*" % path) + def define_shapes(example): + batch_size = config and config.use_tpu and params["batch_size"] + return standardize_shapes(example, batch_size=batch_size) + + # Read and preprocess + data_dir = data_dir or (hasattr(hparams, "data_dir") and hparams.data_dir) + + if (force_repeat or is_training) and not prevent_repeat: + # Repeat and skip a random number of records + dataset = dataset.repeat() + + if is_training and skip_random_fraction_when_training: + data_files = contrib.slim().parallel_reader.get_data_files(filepattern) + # In continuous_train_and_eval when switching between train and + # eval, this input_fn method gets called multiple times and it + # would give you the exact same samples from the last call + # (because the Graph seed is set). So this skip gives you some + # shuffling. + dataset = skip_random_fraction(dataset, data_files[0]) + + dataset = dataset.map(cast_ints_to_int32, num_parallel_calls=num_threads) + + if batch_size_means_tokens_param: + batch_size_means_tokens = True + else: + if _are_shapes_fully_defined(dataset.output_shapes): + batch_size_means_tokens = False + else: + tf.logging.warning( + "Shapes are not fully defined. Assuming batch_size means tokens.") + batch_size_means_tokens = True + + # Batching + if not batch_size_means_tokens: + # Batch size means examples per datashard. + if config and config.use_tpu: + # on TPU, we use params["batch_size"], which specifies the number of + # examples across all datashards + batch_size = params["batch_size"] + dataset = dataset.batch(batch_size, drop_remainder=True) else: - datasets.append("%s-dev*" % path) - return datasets + batch_size = hparams.batch_size * num_shards + dataset = dataset.batch(batch_size) + else: + # batch_size means tokens per datashard + if config and config.use_tpu: + dataset = dataset.filter(tpu_valid_size) + padded_shapes = pad_for_tpu(dataset.output_shapes, hparams, max_length) + # on TPU, we use params["batch_size"], which specifies the number of + # examples across all datashards + batch_size = params["batch_size"] + if hparams.pad_batch: + tf.logging.warn( + "Padding the batch to ensure that remainder eval batches are " + "processed. 
This may lead to incorrect metrics for " + "non-zero-padded features, e.g. images. Use a smaller batch " + "size that has no remainder in that case.") + dataset = dataset.padded_batch( + batch_size, padded_shapes, drop_remainder=False) + dataset = dataset.map( + functools.partial(pad_batch, batch_multiple=batch_size), + num_parallel_calls=num_threads) + else: + dataset = dataset.padded_batch( + batch_size, padded_shapes, drop_remainder=True) + else: + # On GPU, bucket by length + dataset = dataset.filter(gpu_valid_size) + cur_batching_scheme = hparams_to_batching_scheme( + hparams, + shard_multiplier=num_shards, + length_multiplier=batch_size_multiplier) + if hparams.use_fixed_batch_size: + # Here batch_size really means examples per datashard. + cur_batching_scheme["batch_sizes"] = [hparams.batch_size] + cur_batching_scheme["boundaries"] = [] + dataset = dataset.apply( + tf.data.experimental.bucket_by_sequence_length( + example_length, cur_batching_scheme["boundaries"], + cur_batching_scheme["batch_sizes"])) + + if not is_training: + batch_multiple = num_shards + if hparams.use_fixed_batch_size: + # Make sure the last batch has the same fixed size as the rest. + batch_multiple *= hparams.batch_size + if batch_multiple > 1: + tf.logging.warn( + "Padding the batch to ensure that remainder eval batches have " + "a batch size divisible by the number of data shards. This may " + "lead to incorrect metrics for non-zero-padded features, e.g. " + "images. Use a single datashard (i.e. 1 GPU) in that case.") + dataset = dataset.map( + functools.partial(pad_batch, batch_multiple=batch_multiple), + num_parallel_calls=num_threads) + + dataset = dataset.map(define_shapes, num_parallel_calls=num_threads) + + # Add shuffling for training batches. This is necessary along with record + # level shuffling in the dataset generation. Record shuffling will shuffle + # the examples. However, in some cases, it's possible that the shuffle + # buffer size for record shuffling is smaller than the batch size. In such + # cases, adding batch shuffling ensures that the data is in random order + # during training + if (is_training and hasattr(hparams, "batch_shuffle_size") and + hparams.batch_shuffle_size): + dataset = dataset.shuffle(hparams.batch_shuffle_size) + + # Split batches into chunks if targets are too long. + # The new "chunk_number" feature is 0 for the first chunk and goes up then. + # Chunks are reversed so the 0th chunk comes first, then the 1st and so on, + # so models can attend to them in the order they arrive. The last chunk is + # usually the one containing the end of the target sentence (EOS). 
+ chunk_length = hparams.get("split_targets_chunk_length", 0) + max_chunks = hparams.get("split_targets_max_chunks", 100) + if chunk_length > 0: + def is_nonzero_chunk(example): + """A chunk is zero if all targets are 0s.""" + return tf.less(0, tf.reduce_sum(tf.abs(example["targets"]))) + + def split_on_length(example): + """Split a batch of ditcs on length.""" + x = example["targets"] + # TODO(kitaev): This code breaks if chunk_length * max_chunks < batch_size + length_diff = chunk_length * max_chunks - tf.shape(x)[1] + padded_x = tf.pad(x, [(0, 0), (0, length_diff), (0, 0), (0, 0)]) + chunks = [padded_x[:, i*chunk_length:(i+1)*chunk_length, :, :] + for i in range(max_chunks - 1)] + chunks.append(padded_x[:, (max_chunks - 1)*chunk_length:, :, :]) + new_example = {} + # Setting chunk_number to be tf.range(max_chunks) is incompatible with TPU + new_example["chunk_number"] = tf.concat([ + tf.expand_dims(tf.ones_like(c) * n, axis=0) + for n, c in enumerate(chunks) + ], + axis=0) + new_example["targets"] = tf.concat( + [tf.expand_dims(c, axis=0) for c in chunks], axis=0) + for k in example: + if k != "targets": + assert k != "chunk_number", ( + "Chunking code expects the chunk_number feature name to be " + "available" + ) + new_example[k] = tf.concat( + [tf.expand_dims(example[k], axis=0) for _ in range(max_chunks)], + axis=0) + return tf.data.Dataset.from_tensor_slices(new_example) + + dataset = dataset.flat_map(split_on_length) + dataset = dataset.filter(is_nonzero_chunk) + + # The chunking data pipeline thus far creates batches of examples where all + # of the examples have the same chunk number. This can lead to periodic + # fluctuations in the loss; for example, when all examples in the batch have + # chunk number 0 the loss may be higher than midway through a sequence. + # Enabling split_targets_strided_training adjusts the data so that each + # batch includes examples at various points within a sequence. + if is_training and hparams.split_targets_strided_training: + # TODO(kitaev): make sure that shape inference works on GPU, not just TPU. + inferred_batch_size = dataset.output_shapes["targets"].as_list()[0] + if inferred_batch_size is None: + raise ValueError( + "Strided training is only implemented when the batch size can be " + "inferred statically, for example when training on TPU." 
+ ) + chunk_stride = inferred_batch_size * max( + 1, max_chunks // inferred_batch_size) + 1 + + def collapse_nested_datasets(example): + """Converts a dataset of datasets to a dataset of tensor features.""" + new_example = {} + for k, v in example.items(): + v = tf.data.experimental.get_single_element( + v.batch(inferred_batch_size, drop_remainder=True)) + new_example[k] = v + return tf.data.Dataset.from_tensor_slices(new_example) + + dataset = dataset.unbatch() + dataset = dataset.window(inferred_batch_size, inferred_batch_size, + chunk_stride) + dataset = dataset.flat_map(collapse_nested_datasets) + dataset = dataset.batch(inferred_batch_size, drop_remainder=True) + + def prepare_for_output(example): + if not config or not config.use_tpu: + _summarize_features(example, num_shards) + if mode == tf_estimator.ModeKeys.PREDICT: + example["infer_targets"] = example.pop("targets") + return example + else: + return example, example[hparams.get( + key="labels_feature_name", default="targets")] + + dataset = dataset.map(prepare_for_output, num_parallel_calls=num_threads) + dataset = dataset.prefetch(2) + + if mode == tf_estimator.ModeKeys.PREDICT: + # This is because of a bug in the Estimator that short-circuits prediction + # if it doesn't see a QueueRunner. DummyQueueRunner implements the + # minimal expected interface but does nothing. + tf.add_to_collection(tf.GraphKeys.QUEUE_RUNNERS, DummyQueueRunner()) + + return dataset diff --git a/tensor2tensor/utils/data_reader_test.py b/tensor2tensor/utils/data_reader_test.py index 883a3673a..ec93f6ac1 100644 --- a/tensor2tensor/utils/data_reader_test.py +++ b/tensor2tensor/utils/data_reader_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,127 +21,196 @@ import os import tempfile - -# Dependency imports - import numpy as np -from six.moves import xrange # pylint: disable=redefined-builtin +from six.moves import range # pylint: disable=redefined-builtin from tensor2tensor.data_generators import generator_utils +from tensor2tensor.data_generators import problem as problem_mod +from tensor2tensor.layers import modalities from tensor2tensor.utils import data_reader +from tensor2tensor.utils import registry -import tensorflow as tf +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator -class DataReaderTest(tf.test.TestCase): +@registry.register_problem +class TestProblem(problem_mod.Problem): - def testExamplesQueue(self): - tf.set_random_seed(1) - tmp_dir = self.get_temp_dir() - (_, tmp_file_path) = tempfile.mkstemp(dir=tmp_dir) - tmp_file_name = os.path.basename(tmp_file_path) - - # Generate a file with 100 examples. 
- def test_generator(): - for i in xrange(100): - yield {"inputs": [i], "targets": [i], "floats": [i + 0.5]} - - generator_utils.generate_files(test_generator(), tmp_file_name, tmp_dir) - self.assertTrue(tf.gfile.Exists(tmp_file_path + "-00000-of-00001")) - - examples_train = data_reader.examples_queue( - [tmp_file_path + "*"], { - "inputs": tf.VarLenFeature(tf.int64), - "targets": tf.VarLenFeature(tf.int64) - }, - training=True) - examples_eval = data_reader.examples_queue( - [tmp_file_path + "*"], { - "inputs": tf.VarLenFeature(tf.int64), - "targets": tf.VarLenFeature(tf.int64), - "floats": tf.VarLenFeature(tf.float32) - }, - training=False) - with tf.train.MonitoredSession() as session: - # Evaluation data comes in the same order as in the file, check 10. - for i in xrange(10): - examples = session.run(examples_eval) - self.assertEqual(len(examples["inputs"]), 1) - self.assertEqual(len(examples["targets"]), 1) - self.assertEqual(examples["inputs"][0], i) - self.assertEqual(examples["targets"][0], i) - self.assertEqual(examples["floats"][0], i + 0.5) - # Training data is shuffled. - is_shuffled = False - for i in xrange(10): - examples = session.run(examples_train) - self.assertEqual(len(examples["inputs"]), 1) - self.assertEqual(len(examples["targets"]), 1) - self.assertEqual(examples["inputs"][0], examples["targets"][0]) - if examples["inputs"][0] != i: - is_shuffled = True - self.assertTrue(is_shuffled) - - # Clean up. - os.remove(tmp_file_path + "-00000-of-00001") - os.remove(tmp_file_path) - - def testBatchExamples(self): - tf.set_random_seed(1) - tmp_dir = self.get_temp_dir() - (_, tmp_file_path) = tempfile.mkstemp(dir=tmp_dir) - tmp_file_name = os.path.basename(tmp_file_path) + def generator(self, data_dir, tmp_dir, is_training): + del data_dir, tmp_dir, is_training + for i in range(30): + yield {"inputs": [i] * (i + 1), "targets": [i], "floats": [i + 0.5]} - # Generate a file with 100 examples, n-th example of length n + 1. - def test_generator(): - for i in xrange(100): - yield {"inputs": [i + 1 for _ in xrange(i + 1)], "targets": [i + 1]} + def generate_data(self, data_dir, tmp_dir, task_id=-1): + train_paths = self.training_filepaths(data_dir, 1, shuffled=True) + dev_paths = self.dev_filepaths(data_dir, 1, shuffled=True) + generator_utils.generate_files( + self.generator(data_dir, tmp_dir, True), train_paths) + generator_utils.generate_files( + self.generator(data_dir, tmp_dir, False), dev_paths) - generator_utils.generate_files(test_generator(), tmp_file_name, tmp_dir) - self.assertTrue(tf.gfile.Exists(tmp_file_path + "-00000-of-00001")) + def hparams(self, defaults, model_hparams): + hp = defaults + hp.modality = {"inputs": modalities.ModalityType.SYMBOL, + "targets": modalities.ModalityType.SYMBOL} + hp.vocab_size = {"inputs": 30, + "targets": 30} - examples_train = data_reader.examples_queue([tmp_file_path + "*"], { - "inputs": tf.VarLenFeature(tf.int64), - "targets": tf.VarLenFeature(tf.int64) - }, True) - batch_train = data_reader.batch_examples(examples_train, 4) - examples_eval = data_reader.examples_queue([tmp_file_path + "*"], { + def example_reading_spec(self): + data_fields = { "inputs": tf.VarLenFeature(tf.int64), - "targets": tf.VarLenFeature(tf.int64) - }, False) - batch_eval = data_reader.batch_examples(examples_eval, 2) - session, coord = tf.Session(), tf.train.Coordinator() - with session.as_default(): - tf.train.start_queue_runners(coord=coord) - - # Evaluation data comes in the same order as in the file. 
- # The first batch will be inputs=[[1, 0], [2, 2]], targets=[[1], [2]]. - examples = session.run(batch_eval) - self.assertAllClose(examples["inputs"], np.array([[1, 0], [2, 2]])) - self.assertAllClose(examples["targets"], np.array([[1], [2]])) - # Check the second batch too. - examples = session.run(batch_eval) - self.assertAllClose(examples["inputs"], - np.array([[3, 3, 3, 0], [4, 4, 4, 4]])) - self.assertAllClose(examples["targets"], np.array([[3], [4]])) - - # Training data is shuffled but shouldn't have too many pads. - for _ in xrange(10): - examples = session.run(batch_train) - inputs = examples["inputs"] - # Only 3 out of 4 examples in a batch have padding zeros at all. - pad_per_example = (inputs.size - np.count_nonzero(inputs)) // 3 - # Default bucketing is in steps of 8 until 64 and 32 later. - if int(max(examples["targets"])) < 64: - self.assertLess(pad_per_example, 8) - else: - self.assertLess(pad_per_example, 32) - - # Clean up. - coord.request_stop() - coord.join() - os.remove(tmp_file_path + "-00000-of-00001") - os.remove(tmp_file_path) + "targets": tf.VarLenFeature(tf.int64), + "floats": tf.VarLenFeature(tf.float32), + } + data_items_to_decoders = None + return (data_fields, data_items_to_decoders) + + def preprocess_example(self, example, unused_mode, unused_hparams): + example["new_field"] = tf.constant([42.42]) + return example + + +def generate_test_data(problem, tmp_dir): + problem.generate_data(tmp_dir, tmp_dir) + return [problem.filepattern(tmp_dir, tf_estimator.ModeKeys.TRAIN)] + + +class DataReaderTest(tf.test.TestCase): + + @classmethod + def setUpClass(cls): + tf.set_random_seed(1) + cls.problem = registry.problem("test_problem") + cls.data_dir = tempfile.gettempdir() + cls.filepatterns = generate_test_data(cls.problem, cls.data_dir) + + @classmethod + def tearDownClass(cls): + # Clean up files + for fp in cls.filepatterns: + files = tf.gfile.Glob(fp) + for f in files: + os.remove(f) + + def testBasicExampleReading(self): + dataset = self.problem.dataset( + tf_estimator.ModeKeys.TRAIN, + data_dir=self.data_dir, + shuffle_files=False) + examples = dataset.make_one_shot_iterator().get_next() + with tf.train.MonitoredSession() as sess: + # Check that there are multiple examples that have the right fields of the + # right type (lists of int/float). 
+ for _ in range(10): + ex_val = sess.run(examples) + inputs, targets, floats = (ex_val["inputs"], ex_val["targets"], + ex_val["floats"]) + self.assertEqual(np.int64, inputs.dtype) + self.assertEqual(np.int64, targets.dtype) + self.assertEqual(np.float32, floats.dtype) + for field in [inputs, targets, floats]: + self.assertGreater(len(field), 0) + + def testPreprocess(self): + dataset = self.problem.dataset( + tf_estimator.ModeKeys.TRAIN, + data_dir=self.data_dir, + shuffle_files=False) + examples = dataset.make_one_shot_iterator().get_next() + with tf.train.MonitoredSession() as sess: + ex_val = sess.run(examples) + # problem.preprocess_example has been run + self.assertAllClose([42.42], ex_val["new_field"]) + + def testLengthFilter(self): + max_len = 15 + dataset = self.problem.dataset( + tf_estimator.ModeKeys.TRAIN, + data_dir=self.data_dir, + shuffle_files=False) + dataset = dataset.filter( + lambda ex: data_reader.example_valid_size(ex, 0, max_len)) + examples = dataset.make_one_shot_iterator().get_next() + with tf.train.MonitoredSession() as sess: + ex_lens = [] + for _ in range(max_len): + ex_lens.append(len(sess.run(examples)["inputs"])) + + self.assertAllEqual(list(range(1, max_len + 1)), sorted(ex_lens)) + + def testBatchingSchemeMaxLength(self): + scheme = data_reader.batching_scheme( + batch_size=20, + max_length=None, + min_length_bucket=8, + length_bucket_step=1.1, + drop_long_sequences=False) + self.assertGreater(scheme["max_length"], 10000) + + scheme = data_reader.batching_scheme( + batch_size=20, + max_length=None, + min_length_bucket=8, + length_bucket_step=1.1, + drop_long_sequences=True) + self.assertEqual(scheme["max_length"], 20) + + scheme = data_reader.batching_scheme( + batch_size=20, + max_length=15, + min_length_bucket=8, + length_bucket_step=1.1, + drop_long_sequences=True) + self.assertEqual(scheme["max_length"], 15) + + scheme = data_reader.batching_scheme( + batch_size=20, + max_length=15, + min_length_bucket=8, + length_bucket_step=1.1, + drop_long_sequences=False) + self.assertGreater(scheme["max_length"], 10000) + + def testBatchingSchemeBuckets(self): + scheme = data_reader.batching_scheme( + batch_size=128, + max_length=0, + min_length_bucket=8, + length_bucket_step=1.1) + boundaries, batch_sizes = scheme["boundaries"], scheme["batch_sizes"] + self.assertEqual(len(boundaries), len(batch_sizes) - 1) + expected_boundaries = [ + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 22, 24, 26, 28, 30, + 33, 36, 39, 42, 46, 50, 55, 60, 66, 72, 79, 86, 94, 103, 113, 124 + ] + self.assertEqual(expected_boundaries, boundaries) + expected_batch_sizes = [ + 16, 12, 12, 8, 8, 8, 8, 8, 8, 6, 6, 6, 6, 4, 4, 4, 4, 4, 3, 3, 3, 3, 2, + 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1 + ] + self.assertEqual(expected_batch_sizes, batch_sizes) + + scheme = data_reader.batching_scheme( + batch_size=128, + max_length=0, + min_length_bucket=8, + length_bucket_step=1.1, + shard_multiplier=2) + boundaries, batch_sizes = scheme["boundaries"], scheme["batch_sizes"] + self.assertAllEqual([bs * 2 for bs in expected_batch_sizes], batch_sizes) + self.assertEqual(expected_boundaries, boundaries) + + scheme = data_reader.batching_scheme( + batch_size=128, + max_length=0, + min_length_bucket=8, + length_bucket_step=1.1, + length_multiplier=2) + boundaries, batch_sizes = scheme["boundaries"], scheme["batch_sizes"] + self.assertAllEqual([b * 2 for b in expected_boundaries], boundaries) + self.assertEqual([max(1, bs // 2) + for bs in expected_batch_sizes], batch_sizes) if __name__ == "__main__": diff 
--git a/tensor2tensor/utils/decoding.py b/tensor2tensor/utils/decoding.py new file mode 100644 index 000000000..383451012 --- /dev/null +++ b/tensor2tensor/utils/decoding.py @@ -0,0 +1,1028 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Decoding utilities.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import operator +import os +import re +import string +import time + +import numpy as np +import six + +from six.moves import input # pylint: disable=redefined-builtin + +from tensor2tensor.data_generators import problem as problem_lib +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators import text_problems +from tensor2tensor.utils import contrib +from tensor2tensor.utils import hparam +from tensor2tensor.utils import mlperf_log +from tensor2tensor.utils import registry +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +FLAGS = tf.flags.FLAGS + +# Number of samples to draw for an image input (in such cases as captioning) +IMAGE_DECODE_LENGTH = 100 + + +def decode_hparams(overrides=""): + """Hyperparameters for decoding.""" + hp = hparam.HParams( + save_images=False, + log_results=True, + extra_length=100, + min_length_ratio=0.0, + batch_size=0, + beam_size=4, + alpha=0.6, + eos_penalty=0.0, + block_size=0, + guess_and_check_top_k=0, + guess_and_check_epsilon=-1, + insertion_parallel=False, + return_beams=False, + write_beam_scores=False, + max_input_size=-1, + identity_output=False, + num_samples=-1, # Number of examples to decode. + delimiter="\n", + decode_to_file="", # str. Prefix for filename to write decodings to. + decode_reference="", # str. Filename to read references from. + decode_in_memory=False, + # How much decode should wait for the next checkpoint + decode_timeout_mins=240, + summaries_log_dir="decode", # Directory to write hook summaries. + shards=1, # How many shards of data to decode (treating 1 as None). + shard_id=0, # Which shard are we decoding if more than 1 above. + shards_start_offset=0, # Number of the first shard to decode. + shard_google_format=False, # If True use Google shard naming format. + num_decodes=1, # Number of times to go over the dataset. + force_decode_length=False, + display_decoded_images=False, + # Multi-problem decoding task id. + multiproblem_task_id=-1, + # Used for video decoding. + frames_per_second=10, + skip_eos_postprocess=False, + # Creates a blue/red border covering border_percent of the frame. + border_percent=2, + # Maximum number of videos displayed. + # number of videos displayed = max_display_outputs * max_display_decodes + max_display_outputs=10, + max_display_decodes=5, + # Used in computation of VGG feature based video metrics. + # Set this to be the path to a trained VGG ckpt to output + # useful metrics. + vgg_ckpt_path="", + # Used for MLPerf compliance logging. 
+ mlperf_decode_step=0.0, + mlperf_threshold=25.0, + mlperf_success=False, + # A comma-delimited list of additional infer() outputs to be exported. + export_extra_infer_outputs="") + hp.parse(overrides) + return hp + + +def log_decode_results(inputs, + outputs, + problem_name, + prediction_idx, + inputs_vocab, + targets_vocab, + targets=None, + save_images=False, + output_dir=None, + identity_output=False, + log_results=True, + skip_eos_postprocess=False): + """Log inference results.""" + + # TODO(lukaszkaiser) refactor this into feature_encoder + is_video = "video" in problem_name or "gym" in problem_name + if is_video: + def fix_and_save_video(vid, prefix): + save_path_template = os.path.join( + output_dir, + "%s_%s_%05d_{:05d}.png" % (problem_name, prefix, prediction_idx)) + # this is only required for predictions + if vid.shape[-1] == 1: + vid = np.squeeze(vid, axis=-1) + save_video(vid, save_path_template) + tf.logging.info("Saving video: {}".format(prediction_idx)) + fix_and_save_video(inputs, "inputs") + fix_and_save_video(outputs, "outputs") + fix_and_save_video(targets, "targets") + + is_image = "image" in problem_name + is_text2class = isinstance(registry.problem(problem_name), + text_problems.Text2ClassProblem) + skip_eos_postprocess = is_image or is_text2class or skip_eos_postprocess + + decoded_inputs = None + if is_image and save_images: + save_path = os.path.join( + output_dir, "%s_prediction_%d.jpg" % (problem_name, prediction_idx)) + show_and_save_image(inputs / 255., save_path) + elif inputs is not None and inputs_vocab: + if identity_output: + decoded_inputs = " ".join(map(str, inputs.flatten())) + else: + decoded_inputs = inputs_vocab.decode(_save_until_eos( + inputs, skip_eos_postprocess)) + + if log_results and not is_video: + tf.logging.info("Inference results INPUT: %s" % decoded_inputs) + + decoded_targets = None + decoded_outputs = None + if identity_output: + decoded_outputs = " ".join(map(str, outputs.flatten())) + if targets is not None: + decoded_targets = " ".join(map(str, targets.flatten())) + else: + decoded_outputs = targets_vocab.decode(_save_until_eos( + outputs, skip_eos_postprocess)) + if targets is not None and log_results: + decoded_targets = targets_vocab.decode(_save_until_eos( + targets, skip_eos_postprocess)) + if log_results and not is_video: + tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs) + if targets is not None and log_results and not is_video: + tf.logging.info("Inference results TARGET: %s" % decoded_targets) + return decoded_inputs, decoded_outputs, decoded_targets + + +def decode_from_dataset(estimator, + problem_name, + hparams, + decode_hp, + decode_to_file=None, + dataset_split=None, + checkpoint_path=None): + """Perform decoding from dataset.""" + tf.logging.info("Performing local inference from dataset for %s.", + str(problem_name)) + # We assume that worker_id corresponds to shard number. + shard = decode_hp.shard_id if decode_hp.shards > 1 else None + + # Setup output directory for any artifacts that may be written out. 
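# Usage sketch (illustrative only): the decode hyperparameters defined above
# are built from the defaults plus a comma-separated override string parsed by
# HParams.parse(); the override names here are fields defined above.
from tensor2tensor.utils import decoding

decode_hp = decoding.decode_hparams("beam_size=2,alpha=0.8,num_samples=100")
assert decode_hp.beam_size == 2
assert decode_hp.alpha == 0.8
# When decoding from a dataset, each of the num_decodes passes writes its
# artifacts to a separate "decode_%05d" subdirectory of estimator.model_dir,
# unless decode_in_memory is set.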
+ output_dir = os.path.join(estimator.model_dir, "decode") + tf.gfile.MakeDirs(output_dir) + + # If decode_hp.batch_size is specified, use a fixed batch size + if decode_hp.batch_size: + hparams.batch_size = decode_hp.batch_size + hparams.use_fixed_batch_size = True + + dataset_kwargs = { + "shard": shard, + "dataset_split": dataset_split, + "max_records": decode_hp.num_samples + } + + # Build the inference input function + problem = hparams.problem + infer_input_fn = problem.make_estimator_input_fn( + tf_estimator.ModeKeys.PREDICT, hparams, dataset_kwargs=dataset_kwargs) + + predictions, output_dirs = [], [] + for decode_id in range(decode_hp.num_decodes): + tf.logging.info("Decoding {}".format(decode_id)) + + # Create decode directory if not in-memory decoding. + if not decode_hp.decode_in_memory: + output_dir = os.path.join(estimator.model_dir, "decode_%05d" % decode_id) + tf.gfile.MakeDirs(output_dir) + output_dirs.append(output_dir) + + result = decode_once(estimator, + problem_name, + hparams, + infer_input_fn, + decode_hp, + decode_to_file, + output_dir, + log_results=decode_hp.log_results, + checkpoint_path=checkpoint_path) + + if decode_hp.decode_in_memory: + output_dirs = [output_dir] + predictions.append(result) + + if decode_hp.decode_to_file: + decode_hp.decode_to_file = _decode_filename( + decode_hp.decode_to_file, problem_name, decode_hp) + + run_postdecode_hooks(DecodeHookArgs( + estimator=estimator, + problem=problem, + output_dirs=output_dirs, + hparams=hparams, + decode_hparams=decode_hp, + predictions=predictions + ), dataset_split) + return predictions + + +def decode_once(estimator, + problem_name, + hparams, + infer_input_fn, + decode_hp, + decode_to_file, + output_dir, + log_results=True, + checkpoint_path=None): + """Decodes once. + + Args: + estimator: tf.estimator.Estimator instance. Used to generate encoded + predictions. + problem_name: str. Name of problem. + hparams: HParams instance. HParams for model training. + infer_input_fn: zero-arg function. Input function for estimator. + decode_hp: HParams instance. See decode_hparams() above. + decode_to_file: str. Prefix for filenames. Used to generated filenames to + which decoded predictions are written. + output_dir: str. Output directory. Only used for writing images. + log_results: bool. If False, return encoded predictions without any + further processing. + checkpoint_path: str. Path to load model checkpoint from. If unspecified, + Estimator's default is used. + + Returns: + If decode_hp.decode_in_memory is True: + List of dicts, one per example. Values are either numpy arrays or decoded + strings. + If decode_hp.decode_in_memory is False: + An empty list. 
+ """ + + # Get the predictions as an iterable + predictions = estimator.predict(infer_input_fn, + checkpoint_path=checkpoint_path) + + if not log_results: + return list(predictions) + + # Prepare output file writers if decode_to_file passed + decode_to_file = decode_to_file or decode_hp.decode_to_file + if decode_to_file: + output_filepath = _decode_filename(decode_to_file, problem_name, decode_hp) + parts = output_filepath.split(".") + parts[-1] = "targets" + target_filepath = ".".join(parts) + parts[-1] = "inputs" + input_filepath = ".".join(parts) + + output_file = tf.gfile.Open(output_filepath, "w") + target_file = tf.gfile.Open(target_filepath, "w") + input_file = tf.gfile.Open(input_filepath, "w") + + problem_hparams = hparams.problem_hparams + # Inputs vocabulary is set to targets if there are no inputs in the problem, + # e.g., for language models where the inputs are just a prefix of targets. + has_input = "inputs" in problem_hparams.vocabulary + inputs_vocab_key = "inputs" if has_input else "targets" + inputs_vocab = problem_hparams.vocabulary[inputs_vocab_key] + targets_vocab = problem_hparams.vocabulary["targets"] + + num_eval_samples = 0 + + # all_outputs[i][j] = (input: str, output: str, target: str). Input, + # decoded output, and target strings for example i, beam rank j. + all_outputs = [] + for num_predictions, prediction in enumerate(predictions): + num_eval_samples += 1 + num_predictions += 1 + inputs = prediction.get("inputs") + targets = prediction.get("targets") + outputs = prediction.get("outputs") + + # Log predictions + decoded_outputs = [] # [(str, str, str)]. See all_outputs above. + if decode_hp.decode_in_memory: + all_outputs.append(decoded_outputs) + decoded_scores = [] + + if decode_hp.return_beams: + output_beams = np.split(outputs, decode_hp.beam_size, axis=0) + scores = None + if "scores" in prediction: + scores = np.split(prediction["scores"], decode_hp.beam_size, axis=0) + for i, beam in enumerate(output_beams): + tf.logging.info("BEAM %d:" % i) + score = scores and scores[i] + decoded = log_decode_results( + inputs, + beam, + problem_name, + num_predictions, + inputs_vocab, + targets_vocab, + save_images=decode_hp.save_images, + output_dir=output_dir, + identity_output=decode_hp.identity_output, + targets=targets, + log_results=log_results) + decoded_outputs.append(decoded) + if decode_hp.write_beam_scores: + decoded_scores.append(score) + else: + decoded = log_decode_results( + inputs, + outputs, + problem_name, + num_predictions, + inputs_vocab, + targets_vocab, + save_images=decode_hp.save_images, + output_dir=output_dir, + identity_output=decode_hp.identity_output, + targets=targets, + log_results=log_results, + skip_eos_postprocess=decode_hp.skip_eos_postprocess) + decoded_outputs.append(decoded) + + # Write out predictions if decode_to_file passed + if decode_to_file: + for i, (d_input, d_output, d_target) in enumerate(decoded_outputs): + # Skip if all padding + if d_input and re.match("^({})+$".format(text_encoder.PAD), d_input): + continue + beam_score_str = "" + if decode_hp.write_beam_scores: + beam_score_str = "\t%.2f" % decoded_scores[i] + output_file.write(str(d_output) + beam_score_str + decode_hp.delimiter) + target_file.write(str(d_target) + decode_hp.delimiter) + input_file.write(str(d_input) + decode_hp.delimiter) + + if (decode_hp.num_samples >= 0 and + num_predictions >= decode_hp.num_samples): + break + + mlperf_log.transformer_print(key=mlperf_log.EVAL_SIZE, + value=num_eval_samples, + hparams=hparams) + + if decode_to_file: + 
output_file.close() + target_file.close() + input_file.close() + + return all_outputs + + +def decode_from_file(estimator, + filename, + hparams, + decode_hp, + decode_to_file=None, + checkpoint_path=None): + """Compute predictions on entries in filename and write them out.""" + if not decode_hp.batch_size: + decode_hp.batch_size = 32 + tf.logging.info( + "decode_hp.batch_size not specified; default=%d" % decode_hp.batch_size) + + # Inputs vocabulary is set to targets if there are no inputs in the problem, + # e.g., for language models where the inputs are just a prefix of targets. + p_hp = hparams.problem_hparams + has_input = "inputs" in p_hp.vocabulary + inputs_vocab_key = "inputs" if has_input else "targets" + inputs_vocab = p_hp.vocabulary[inputs_vocab_key] + targets_vocab = p_hp.vocabulary["targets"] + problem_name = FLAGS.problem + filename = _add_shard_to_filename(filename, decode_hp) + tf.logging.info("Performing decoding from file (%s)." % filename) + if has_input: + sorted_inputs, sorted_keys = _get_sorted_inputs( + filename, decode_hp.delimiter) + else: + sorted_inputs = _get_language_modeling_inputs( + filename, decode_hp.delimiter, repeat=decode_hp.num_decodes) + sorted_keys = range(len(sorted_inputs)) + num_sentences = len(sorted_inputs) + num_decode_batches = (num_sentences - 1) // decode_hp.batch_size + 1 + + if estimator.config.use_tpu: + length = getattr(hparams, "length", 0) or hparams.max_length + batch_ids = [] + for line in sorted_inputs: + if has_input: + ids = inputs_vocab.encode(line.strip()) + [1] + else: + ids = targets_vocab.encode(line) + if len(ids) < length: + ids.extend([0] * (length - len(ids))) + else: + ids = ids[:length] + batch_ids.append(ids) + np_ids = np.array(batch_ids, dtype=np.int32) + def input_fn(params): + batch_size = params["batch_size"] + dataset = tf.data.Dataset.from_tensor_slices({"inputs": np_ids}) + dataset = dataset.map( + lambda ex: {"inputs": tf.reshape(ex["inputs"], (length, 1, 1))}) + dataset = dataset.batch(batch_size) + return dataset + else: + def input_fn(): + input_gen = _decode_batch_input_fn( + num_decode_batches, sorted_inputs, + inputs_vocab, decode_hp.batch_size, + decode_hp.max_input_size, + task_id=decode_hp.multiproblem_task_id, has_input=has_input) + gen_fn = make_input_fn_from_generator(input_gen) + example = gen_fn() + return _decode_input_tensor_to_features_dict(example, hparams, decode_hp) + decodes = [] + result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path) + + start_time = time.time() + total_time_per_step = 0 + total_cnt = 0 + + def timer(gen): + while True: + try: + start_time = time.time() + item = next(gen) + elapsed_time = time.time() - start_time + yield elapsed_time, item + except StopIteration: + break + + for elapsed_time, result in timer(result_iter): + if decode_hp.return_beams: + beam_decodes = [] + beam_scores = [] + output_beams = np.split(result["outputs"], decode_hp.beam_size, axis=0) + scores = None + if "scores" in result: + if np.isscalar(result["scores"]): + result["scores"] = result["scores"].reshape(1) + scores = np.split(result["scores"], decode_hp.beam_size, axis=0) + for k, beam in enumerate(output_beams): + tf.logging.info("BEAM %d:" % k) + score = scores and scores[k] + _, decoded_outputs, _ = log_decode_results( + result["inputs"], + beam, + problem_name, + None, + inputs_vocab, + targets_vocab, + log_results=decode_hp.log_results, + skip_eos_postprocess=decode_hp.skip_eos_postprocess) + beam_decodes.append(decoded_outputs) + if decode_hp.write_beam_scores: + 
beam_scores.append(score) + if decode_hp.write_beam_scores: + decodes.append("\t".join([ + "\t".join([d, "%.2f" % s]) + for d, s in zip(beam_decodes, beam_scores) + ])) + else: + decodes.append("\t".join(beam_decodes)) + else: + _, decoded_outputs, _ = log_decode_results( + result["inputs"], + result["outputs"], + problem_name, + None, + inputs_vocab, + targets_vocab, + log_results=decode_hp.log_results, + skip_eos_postprocess=decode_hp.skip_eos_postprocess) + decodes.append(decoded_outputs) + total_time_per_step += elapsed_time + total_cnt += result["outputs"].shape[-1] + duration = time.time() - start_time + tf.logging.info("Elapsed Time: %5.5f" % duration) + tf.logging.info("Averaged Single Token Generation Time: %5.7f " + "(time %5.7f count %d)" % + (total_time_per_step / total_cnt, + total_time_per_step, total_cnt)) + if decode_hp.batch_size == 1: + tf.logging.info("Inference time %.4f seconds " + "(Latency = %.4f ms/setences)" % + (duration, 1000.0*duration/num_sentences)) + else: + tf.logging.info("Inference time %.4f seconds " + "(Throughput = %.4f sentences/second)" % + (duration, num_sentences/duration)) + + # If decode_to_file was provided use it as the output filename without change + # (except for adding shard_id if using more shards for decoding). + # Otherwise, use the input filename plus model, hp, problem, beam, alpha. + decode_filename = decode_to_file if decode_to_file else filename + if not decode_to_file: + decode_filename = _decode_filename(decode_filename, problem_name, decode_hp) + else: + decode_filename = _add_shard_to_filename(decode_filename, decode_hp) + tf.logging.info("Writing decodes into %s" % decode_filename) + outfile = tf.gfile.Open(decode_filename, "w") + for index in range(len(sorted_inputs)): + outfile.write("%s%s" % (decodes[sorted_keys[index]], decode_hp.delimiter)) + outfile.flush() + outfile.close() + + output_dir = os.path.join(estimator.model_dir, "decode") + tf.gfile.MakeDirs(output_dir) + + run_postdecode_hooks(DecodeHookArgs( + estimator=estimator, + problem=hparams.problem, + output_dirs=[output_dir], + hparams=hparams, + decode_hparams=decode_hp, + predictions=list(result_iter) + ), None) + + +def _add_shard_to_filename(filename, decode_hp): + if decode_hp.shards > 1: + shard_id = decode_hp.shard_id + decode_hp.shards_start_offset + if decode_hp.shard_google_format: + filename = filename + "-{0:05d}-of-{1:05d}".format(shard_id, + decode_hp.shards) + else: + filename = filename + ("%.3d" % shard_id) + return filename + + +def _decode_filename(base_filename, problem_name, decode_hp): + """Generates decode filename. + + Args: + base_filename: A string, base of the decode filename. + problem_name: A string, name of the problem. + decode_hp: HParams for decoding. + + Returns: + A string, produced decode filename. 
+ """ + if decode_hp.shards > 1: + base_filename = _add_shard_to_filename(base_filename, decode_hp) + if ("beam{beam}.alpha{alpha}.decodes".format( + beam=str(decode_hp.beam_size), alpha=str(decode_hp.alpha)) + in base_filename): + return base_filename + else: + return ( + "{base}.{model}.{hp}.{problem}.beam{beam}.alpha{alpha}.decodes".format( + base=base_filename, + model=FLAGS.model, + hp=FLAGS.hparams_set, + problem=problem_name, + beam=str(decode_hp.beam_size), + alpha=str(decode_hp.alpha))) + + +def make_input_fn_from_generator(gen): + """Use py_func to yield elements from the given generator.""" + first_ex = six.next(gen) + flattened = contrib.framework().nest.flatten(first_ex) + types = [t.dtype for t in flattened] + shapes = [[None] * len(t.shape) for t in flattened] + first_ex_list = [first_ex] + + def py_func(): + if first_ex_list: + example = first_ex_list.pop() + else: + example = six.next(gen) + return contrib.framework().nest.flatten(example) + + def input_fn(): + flat_example = tf.py_func(py_func, [], types) + _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)] + example = contrib.framework().nest.pack_sequence_as(first_ex, flat_example) + return example + + return input_fn + + +def decode_interactively(estimator, hparams, decode_hp, checkpoint_path=None): + """Interactive decoding.""" + + is_image = "image" in hparams.problem.name + is_text2class = isinstance(hparams.problem, + text_problems.Text2ClassProblem) + skip_eos_postprocess = ( + is_image or is_text2class or decode_hp.skip_eos_postprocess) + + def input_fn(): + gen_fn = make_input_fn_from_generator( + _interactive_input_fn(hparams, decode_hp)) + example = gen_fn() + example = _interactive_input_tensor_to_features_dict(example, hparams) + return example + + result_iter = estimator.predict(input_fn, checkpoint_path=checkpoint_path) + for result in result_iter: + targets_vocab = hparams.problem_hparams.vocabulary["targets"] + + if decode_hp.return_beams: + beams = np.split(result["outputs"], decode_hp.beam_size, axis=0) + scores = None + if "scores" in result: + if np.isscalar(result["scores"]): + result["scores"] = result["scores"].reshape(1) + scores = np.split(result["scores"], decode_hp.beam_size, axis=0) + for k, beam in enumerate(beams): + tf.logging.info("BEAM %d:" % k) + beam_string = targets_vocab.decode(_save_until_eos( + beam, skip_eos_postprocess)) + if scores is not None: + tf.logging.info("\"%s\"\tScore:%f" % (beam_string, scores[k])) + else: + tf.logging.info("\"%s\"" % beam_string) + else: + if decode_hp.identity_output: + tf.logging.info(" ".join(map(str, result["outputs"].flatten()))) + else: + tf.logging.info( + targets_vocab.decode(_save_until_eos( + result["outputs"], skip_eos_postprocess))) + + +def _decode_batch_input_fn(num_decode_batches, sorted_inputs, vocabulary, + batch_size, max_input_size, + task_id=-1, has_input=True): + """Generator to produce batches of inputs.""" + tf.logging.info(" batch %d" % num_decode_batches) + for b in range(num_decode_batches): + tf.logging.info("Decoding batch %d" % b) + batch_length = 0 + batch_inputs = [] + for inputs in sorted_inputs[b * batch_size:(b + 1) * batch_size]: + input_ids = vocabulary.encode(inputs) + if max_input_size > 0: + # Subtract 1 for the EOS_ID. + input_ids = input_ids[:max_input_size - 1] + if has_input or task_id > -1: # Do not append EOS for pure LM tasks. 
+ final_id = text_encoder.EOS_ID if task_id < 0 else task_id + input_ids.append(final_id) + batch_inputs.append(input_ids) + if len(input_ids) > batch_length: + batch_length = len(input_ids) + final_batch_inputs = [] + for input_ids in batch_inputs: + assert len(input_ids) <= batch_length + x = input_ids + [0] * (batch_length - len(input_ids)) + final_batch_inputs.append(x) + + yield { + "inputs": np.array(final_batch_inputs).astype(np.int32), + } + + +def _interactive_input_fn(hparams, decode_hp): + """Generator that reads from the terminal and yields "interactive inputs". + + Due to temporary limitations in tf.learn, if we don't want to reload the + whole graph, then we are stuck encoding all of the input as one fixed-size + numpy array. + + We yield int32 arrays with shape [const_array_size]. The format is: + [num_samples, decode_length, len(input ids), , ] + + Args: + hparams: model hparams + decode_hp: decode hparams + Yields: + numpy arrays + + Raises: + Exception: when `input_type` is invalid. + """ + num_samples = decode_hp.num_samples if decode_hp.num_samples > 0 else 1 + decode_length = decode_hp.extra_length + input_type = "text" + p_hparams = hparams.problem_hparams + has_input = "inputs" in p_hparams.modality + vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"] + # This should be longer than the longest input. + const_array_size = 10000 + # Import readline if available for command line editing and recall. + try: + import readline # pylint: disable=g-import-not-at-top,unused-variable + except ImportError: + pass + while True: + prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n" + " it= ('text' or 'image' or 'label', default: " + "text)\n" + " ns= (changes number of samples, default: 1)\n" + " dl= (changes decode length, default: 100)\n" + " <%s> (decode)\n" + " q (quit)\n" + ">" % (num_samples, decode_length, + "source_string" if has_input else "target_prefix")) + input_string = input(prompt) + if input_string == "q": + return + elif input_string[:3] == "ns=": + num_samples = int(input_string[3:]) + elif input_string[:3] == "dl=": + decode_length = int(input_string[3:]) + elif input_string[:3] == "it=": + input_type = input_string[3:] + else: + if input_type == "text": + input_ids = vocabulary.encode(input_string) + if has_input: + input_ids.append(text_encoder.EOS_ID) + x = [num_samples, decode_length, len(input_ids)] + input_ids + assert len(x) < const_array_size + x += [0] * (const_array_size - len(x)) + features = { + "inputs": np.array(x).astype(np.int32), + } + elif input_type == "image": + input_path = input_string + img = vocabulary.encode(input_path) + features = { + "inputs": img.astype(np.int32), + } + elif input_type == "label": + input_ids = [int(input_string)] + x = [num_samples, decode_length, len(input_ids)] + input_ids + features = { + "inputs": np.array(x).astype(np.int32), + } + else: + raise Exception("Unsupported input type.") + for k, v in six.iteritems( + problem_lib.problem_hparams_to_features(p_hparams)): + features[k] = np.array(v).astype(np.int32) + yield features + + +def save_video(video, save_path_template): + """Save frames of the videos into files.""" + try: + from PIL import Image # pylint: disable=g-import-not-at-top + except ImportError as e: + tf.logging.warning( + "Showing and saving an image requires PIL library to be " + "installed: %s", e) + raise NotImplementedError("Image display and save not implemented.") + + for i, frame in enumerate(video): + save_path = save_path_template.format(i) + with 
tf.gfile.Open(save_path, "wb") as sp: + Image.fromarray(np.uint8(frame)).save(sp) + + +def show_and_save_image(img, save_path): + """Shows an image using matplotlib and saves it.""" + try: + import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top + except ImportError as e: + tf.logging.warning( + "Showing and saving an image requires matplotlib to be " + "installed: %s", e) + raise NotImplementedError("Image display and save not implemented.") + plt.imshow(img) + with tf.gfile.Open(save_path, "wb") as sp: + plt.savefig(sp) + + +def _get_language_modeling_inputs(filename, + delimiter="\n", + repeat=1, + append_space_to_final_punctionation=True): + """Read a file of partial texts to continue. + + The purpose of append_space_to_final_punctionation is that SubwordTokenizer + groups punctuation and the ensuing space in the same token. Adding a space + causes the token to be completed. + + Args: + filename: a string + delimiter: a string + repeat: an integer - we repeat the entire file that many times. + append_space_to_final_punctionation: a boolean + + Returns: + a list of strings + """ + with tf.gfile.Open(filename) as f: + text = f.read() + inputs = text.split(delimiter) + if not inputs[-1]: + inputs.pop() + inputs *= repeat + if append_space_to_final_punctionation: + inputs = [ + s + " " if s and s[-1] in string.punctuation else s for s in inputs] + return inputs + + +def _get_sorted_inputs(filename, delimiter="\n"): + """Returning inputs sorted according to decreasing length. + + This causes inputs of similar lengths to be processed in the same batch, + facilitating early stopping for short sequences. + + Longer sequences are sorted first so that if you're going to get OOMs, + you'll see it in the first batch. + + Args: + filename: path to file with inputs, 1 per line. + delimiter: str, delimits records in the file. + + Returns: + a sorted list of inputs + + """ + tf.logging.info("Getting sorted inputs") + with tf.gfile.Open(filename) as f: + text = f.read() + records = text.split(delimiter) + inputs = [record.strip() for record in records] + # Strip the last empty line. + if not inputs[-1]: + inputs.pop() + input_lens = [(i, -len(line.split())) for i, line in enumerate(inputs)] + sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1)) + # We'll need the keys to rearrange the inputs back into their original order + sorted_keys = {} + sorted_inputs = [] + for i, (index, _) in enumerate(sorted_input_lens): + sorted_inputs.append(inputs[index]) + sorted_keys[index] = i + return sorted_inputs, sorted_keys + + +def _save_until_eos(ids, skip=False): + """Strips everything after the first token, which is normally 1.""" + ids = ids.flatten() + if skip: + return ids + try: + index = list(ids).index(text_encoder.EOS_ID) + return ids[0:index] + except ValueError: + # No EOS_ID: return the array as-is. + return ids + + +def _interactive_input_tensor_to_features_dict(feature_map, hparams): + """Convert the interactive input format (see above) to a dictionary. + + Args: + feature_map: dict with inputs. + hparams: model hyperparameters + + Returns: + a features dictionary, as expected by the decoder. + """ + inputs = tf.convert_to_tensor(feature_map["inputs"]) + input_is_image = False if len(inputs.get_shape()) < 3 else True + + x = inputs + if input_is_image: + x = tf.image.resize_images(x, [299, 299]) + x = tf.reshape(x, [1, 299, 299, -1]) + x = tf.to_int32(x) + else: + # Remove the batch dimension. 
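# Pure-Python sketch (illustrative only) of how _get_sorted_inputs() pairs
# with decode_from_file() above: inputs are decoded longest-first, and
# sorted_keys maps every original line index to its position in the sorted
# order, so the decodes can be written back out in the input file's order.
lines = ["a b", "c d e f", "g"]
order = sorted(range(len(lines)), key=lambda i: -len(lines[i].split()))
sorted_inputs = [lines[i] for i in order]              # ["c d e f", "a b", "g"]
sorted_keys = {orig: pos for pos, orig in enumerate(order)}
decodes = ["D(%s)" % s for s in sorted_inputs]         # stand-in for model output
restored = [decodes[sorted_keys[i]] for i in range(len(lines))]
# restored == ["D(a b)", "D(c d e f)", "D(g)"], i.e. the original order.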
+ num_samples = x[0] + length = x[2] + x = tf.slice(x, [3], tf.to_int32([length])) + x = tf.reshape(x, [1, -1, 1, 1]) + # Transform into a batch of size num_samples to get that many random + # decodes. + x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1])) + + p_hparams = hparams.problem_hparams + input_space_id = tf.constant(p_hparams.input_space_id) + target_space_id = tf.constant(p_hparams.target_space_id) + + features = {} + features["input_space_id"] = input_space_id + features["target_space_id"] = target_space_id + features["decode_length"] = ( + IMAGE_DECODE_LENGTH if input_is_image else inputs[1]) + features["inputs"] = x + # Save inputs to "partial_targets" when prepending inputs to targets. Also + # keep "inputs" as some models crash if they don't exist. + if getattr(hparams, "prepend_mode", "none") != "none": + shape = tf.shape(x) + partial_targets = tf.reshape(x, [shape[0], shape[1]]) + partial_targets = tf.pad(partial_targets, [[0, 0], [0, 1]]) + features["partial_targets"] = partial_targets + return features + + +def _decode_input_tensor_to_features_dict(feature_map, hparams, decode_hp): + """Convert the interactive input format (see above) to a dictionary. + + Args: + feature_map: dict with inputs. + hparams: model hyperparameters + decode_hp: decode hyperparameters + + Returns: + a features dictionary, as expected by the decoder. + """ + inputs = tf.convert_to_tensor(feature_map["inputs"]) + input_is_image = False + + x = inputs + p_hparams = hparams.problem_hparams + # Add a third empty dimension + x = tf.expand_dims(x, axis=[2]) + x = tf.to_int32(x) + input_space_id = tf.constant(p_hparams.input_space_id) + target_space_id = tf.constant(p_hparams.target_space_id) + + features = {} + features["input_space_id"] = input_space_id + features["target_space_id"] = target_space_id + features["decode_length"] = ( + IMAGE_DECODE_LENGTH if input_is_image else + tf.constant(decode_hp.extra_length)) + features["inputs"] = x + # Save inputs to "partial_targets" when prepending inputs to targets. Also + # keep "inputs" as some models crash if they don't exist. 
+ if getattr(hparams, "prepend_mode", "none") != "none": + shape = tf.shape(x) + partial_targets = tf.reshape(x, [shape[0], shape[1]]) + partial_targets = tf.pad(partial_targets, [[0, 0], [0, 1]]) + features["partial_targets"] = partial_targets + return features + + +def get_step_from_ckpt_path(path): + return int(os.path.basename(path).split("-")[-1]) + + +def latest_checkpoint_step(ckpt_dir): + ckpt = tf.train.get_checkpoint_state(ckpt_dir) + if not ckpt: + return None + path = ckpt.model_checkpoint_path + return get_step_from_ckpt_path(path) + + +class DecodeHookArgs(collections.namedtuple( + "DecodeHookArgs", + ["estimator", "problem", "output_dirs", "hparams", + "decode_hparams", "predictions"])): + pass + + +def run_postdecode_hooks(decode_hook_args, dataset_split): + """Run hooks after decodes have run.""" + hooks = decode_hook_args.problem.decode_hooks + if not hooks: + return + global_step = latest_checkpoint_step(decode_hook_args.estimator.model_dir) + if global_step is None: + tf.logging.info( + "Skipping decode hooks because no checkpoint yet available.") + return + tf.logging.info("Running decode hooks.") + parent_dir = os.path.join(decode_hook_args.output_dirs[0], os.pardir) + child_dir = decode_hook_args.decode_hparams.summaries_log_dir + if dataset_split is not None: + child_dir += "_{}".format(dataset_split) + final_dir = os.path.join(parent_dir, child_dir) + summary_writer = tf.summary.FileWriter(final_dir) + + for hook in hooks: + # Isolate each hook in case it creates TF ops + with tf.Graph().as_default(): + summaries = hook(decode_hook_args) + if summaries: + summary = tf.Summary(value=list(summaries)) + summary_writer.add_summary(summary, global_step) + summary_writer.close() + tf.logging.info("Decode hooks done.") diff --git a/tensor2tensor/utils/devices.py b/tensor2tensor/utils/devices.py new file mode 100644 index 000000000..6c869984d --- /dev/null +++ b/tensor2tensor/utils/devices.py @@ -0,0 +1,177 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Device placement and data parallelism.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.utils import expert_utils as eu +import tensorflow.compat.v1 as tf +from tensorflow.python.util import tf_inspect as inspect + + +def data_parallelism_from_flags(daisy_chain_variables=True, all_workers=False): + """Over which devices do we split each training batch. + + In old-fashioned async mode, we split the batch over all GPUs on the + current worker. + + In sync mode, we split the batch over all the parameter server GPUs. + + This function returns an expert_utils.Parallelism object, which can be used + to build the model. It is configured in a way that any variables created + by `tf.get_variable` will be assigned to the parameter servers and shared + between datashards. + + Args: + daisy_chain_variables: whether to copy variables in a daisy chain on GPUs. 
+ all_workers: whether the devices are all async workers or just this one. + + Returns: + a expert_utils.Parallelism. + """ + dp_arg_names = inspect.getargspec(data_parallelism).args + + blacklist = ["daisy_chain_variables", "all_workers"] + + kwargs = {} + for arg in dp_arg_names: + if arg in blacklist: + continue + kwargs[arg] = getattr(tf.flags.FLAGS, arg) + + return data_parallelism( + daisy_chain_variables=daisy_chain_variables, + all_workers=all_workers, + **kwargs) + + +def data_parallelism(daisy_chain_variables=True, + all_workers=False, + ps_replicas=0, + ps_job="/job:ps", + ps_gpu=0, + schedule="continuous_train_and_eval", + sync=False, + worker_gpu=1, + worker_replicas=1, + worker_id=0, + gpu_order="", + worker_job="/job:localhost", + no_data_parallelism=False): + """See data_parallelism_from_flags.""" + tf.logging.info("schedule=%s" % schedule) + tf.logging.info("worker_gpu=%s" % worker_gpu) + tf.logging.info("sync=%s" % sync) + def _ps_replicas(all_workers=False): + if all_workers: + return list(range(ps_replicas)) + # Worker K will be using replicas {0,...n-1} + K*n if we have n replicas. + num_replicas = ps_replicas // worker_replicas + return [d + worker_id * num_replicas for d in range(num_replicas)] + + def _gpu_order(num_gpus): + if gpu_order: + ret = [int(s) for s in gpu_order.split(" ")] + if len(ret) == num_gpus: + return ret + return list(range(num_gpus)) + + def _ps_gpus(all_workers=False): + ps_gpus = [] + for d in _ps_replicas(all_workers=all_workers): + ps_gpus.extend([(d, gpu) for gpu in _gpu_order(ps_gpu)]) + return ps_gpus + + def ps_devices(all_workers=False): + """List of ps devices (where to put the experts). + + Args: + all_workers: whether the list is for all async workers or just this one. + + Returns: + a list of device names + """ + if ps_replicas > 0: + if ps_gpu > 0: + return [ + ps_job + "/task:%d/GPU:%d" % (d, gpu) + for (d, gpu) in _ps_gpus(all_workers=all_workers) + ] + else: + return [ + ps_job + "/task:%d" % d + for d in _ps_replicas(all_workers=all_workers) + ] + else: + if worker_gpu > 0: + return ["gpu:%d" % d for d in _gpu_order(worker_gpu)] + else: + return [""] + + def _replica_device_setter(worker_device): + if ps_replicas == 0: + return worker_device + return tf.train.replica_device_setter( + worker_device=worker_device, + ps_tasks=ps_replicas, + ps_device=ps_job + "/GPU:0" if ps_gpu > 0 else ps_job) + + is_single_machine = ps_replicas == 0 and worker_replicas == 1 + + if no_data_parallelism: + datashard_devices = [""] + caching_devices = None + elif is_single_machine: + tf.logging.warn( + "Schedule=%s. Assuming that training is running on a single machine.", + schedule) + datashard_devices = ["gpu:%d" % d for d in _gpu_order(worker_gpu)] + if worker_gpu < 1: + datashard_devices += ["cpu:0"] + caching_devices = None + elif sync and ps_replicas > 0: + # compute on ps + datashard_devices = [ + _replica_device_setter(d) for d in ps_devices(all_workers=all_workers) + ] + if ps_gpu > 0 and ps_replicas > 1: + caching_devices = [ + ps_job + "/task:%d/cpu:0" % d + for (d, _) in _ps_gpus(all_workers=all_workers) + ] + else: + caching_devices = None + else: + # compute on worker - this is either a single-worker setup or asynchronous + # with parameter servers. 
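+    # Illustrative note (editorial): with worker_gpu=4 and gpu_order unset,
+    # this branch yields datashard devices GPU:0..GPU:3 on worker_job, each
+    # wrapped by _replica_device_setter so variables still land on the
+    # parameter servers when ps_replicas > 0.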
+ if worker_gpu > 1: + datashard_devices = [ + _replica_device_setter(worker_job + "/GPU:%d" % d) + for d in _gpu_order(worker_gpu) + ] + caching_devices = None + else: + datashard_devices = [_replica_device_setter(worker_job)] + caching_devices = None + tf.logging.info("datashard_devices: %s", datashard_devices) + tf.logging.info("caching_devices: %s", caching_devices) + tf.logging.info("ps_devices: %s", ps_devices(all_workers=all_workers)) + return eu.Parallelism( + datashard_devices, + caching_devices=caching_devices, + daisy_chain_variables=daisy_chain_variables, + ps_devices=ps_devices(all_workers=all_workers)) diff --git a/tensor2tensor/utils/diet.py b/tensor2tensor/utils/diet.py new file mode 100644 index 000000000..67bd94afc --- /dev/null +++ b/tensor2tensor/utils/diet.py @@ -0,0 +1,363 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Diet variables are much more memory-efficient than regular variables. + +Using diet variables, we can reduce memory overhead per parameter from +16 bytes to 2 bytes, allowing for up to 4B parameters per GPU. + +Functions that build subgraphs with variables can be made to use diet variables +by using the fn_with_diet_vars decorator. +""" + +from collections import defaultdict +import copy +import math + +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import hparam +import tensorflow.compat.v1 as tf + + +def diet_adam_optimizer_params(): + """Default hyperparameters for a DietAdamOptimizer. + + Returns: + a hyperparameters object. + """ + return hparam.HParams( + quantize=True, # use 16-bit fixed-point + quantization_scale=10.0 / tf.int16.max, + optimizer="DietAdam", + learning_rate=1.0, + learning_rate_warmup_steps=2000, + learning_rate_decay_scheme="noam", # "noam" or "none" + epsilon=1e-10, + beta1=0.0, # we can save memory if beta1=0 + beta2=0.98, + factored_second_moment_accumulator=True, # this saves memory + ) + + +def diet_expert(x, hidden_size, params): + """A two-layer feed-forward network with relu activation on hidden layer. + + Uses diet variables. + Recomputes hidden layer on backprop to save activation memory. + + Args: + x: a Tensor with shape [batch, io_size] + hidden_size: an integer + params: a diet variable HParams object. 
+ + Returns: + a Tensor with shape [batch, io_size] + """ + + @fn_with_diet_vars(params) + def diet_expert_internal(x): + dim = x.get_shape().as_list()[-1] + h = tf.layers.dense(x, hidden_size, activation=tf.nn.relu, use_bias=False) + y = tf.layers.dense(h, dim, use_bias=False) + y *= tf.rsqrt(tf.to_float(dim * hidden_size)) + return y + + return diet_expert_internal(x) + + +class DietVariableOptimizer(object): + """Base class for Diet variable optimizers.""" + + def __init__(self, params): + self._params = params + self._global_step = tf.train.get_or_create_global_step() + + @property + def params(self): + return self._params + + @property + def global_step(self): + return self._global_step + + def create_slots(self, var): + raise NotImplementedError() + + def update_variable(self, var, grad_var): + raise NotImplementedError() + + +class DietAdamOptimizer(DietVariableOptimizer): + """A memory efficient optimizer for memory-efficient variables. + + We employ the following techniques: + - 16-bit fixed-point quantization + - inline updates during backprop, instead of through the optimizer. This + keeps the gradients from staying around in memory. + - momentum is optional - saves a slot if it is off (beta1=0.0). + - "factored second-moment accumulator" + (keep row-wise and col-wise averages instead of full accumulator) + - tighter control over operation ordering to make sure that only a small + portion of the decompressed variables and of the variable gradients + are resident in memory at any given time. + + All together these techniques reduce the memory footprint per parameter to + a little over 2 bytes, allowing for roughly 4B parameters per GPU. This is + roughly an 8x improvement over the naive version. + + Usage: + + Diet variables should be created with the + DietAdamOptimizer.get_variable() method. The resulting variables + have extra fields pointing to the optimizer and to the accumulator + slots. + + The variable is kept in quantized form, so you need to call + var.optimizer.dequantize(var) to get the value. + + The variables are created with trainable=False, so that they will + not be optimized by an ordinary optimizer. Instead, the user is + responsible for making sure that var.optimizer.update(var, grad) is + called during backprop. The reason for this inline update is to + avoid keeping around the gradients for all variables at once. This + is done with the clever use of defuns and control dependencies. See + diet_expert() for an example of how all of this is done. + + To facilitate fixed-point quantization and to make it easier to + choose a learning rate, all variables are initialized with unit + normal initialization. If you want smaller values, downscale on the + outside. 
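+  Worked example (editorial, not in the original docstring): with the default
+  quantization_scale = 10.0 / 32767, a weight value of 0.1 is stored as the
+  int16 value 327 or 328 (stochastic rounding of 0.1 / scale = 327.67),
+  bit-cast into a float16 slot; dequantizing multiplies back by the scale,
+  giving roughly 0.0998 or 0.1001.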
+ """ + + def create_slots(self, var): + """Create the factorized Adam accumulators for diet variables.""" + params = self.params + shape = var.get_shape().as_list() + + if not hasattr(params, "slots"): + params.slots = defaultdict(dict) + + name = var.op.name + slots = params.slots[name] + + if params.factored_second_moment_accumulator and len(shape) == 2: + slots["adam_vr"] = tf.get_variable( + name + "_adam_vr", [shape[0], 1], + trainable=False, + initializer=tf.zeros_initializer()) + slots["adam_vc"] = tf.get_variable( + name + "_adam_vc", [1, shape[1]], + trainable=False, + initializer=tf.zeros_initializer()) + else: + slots["adam_v"] = tf.get_variable( + name + "_adam_v", + shape, + trainable=False, + initializer=tf.zeros_initializer()) + if params.beta1 != 0.0: + slots["adam_m"] = tf.get_variable( + name + "_adam_m", + shape, + trainable=False, + initializer=tf.zeros_initializer()) + + def update_variable(self, var, grad_var): + """Update the variable and its slots.""" + params = self.params + global_step = tf.to_float(self.global_step) + 1 + + # compute learning rate + lrate = params.learning_rate + if params.learning_rate_decay_scheme == "noam": + lrate *= tf.minimum(global_step * params.learning_rate_warmup_steps**-1.5, + global_step**-0.5) + else: + assert params.learning_rate_decay_scheme == "none" + lrate *= tf.minimum(global_step / params.learning_rate_warmup_steps, 1.0) + + # compute adjustment due to second moment + slots = params.slots[var.op.name] + grad_squared = tf.square(grad_var) + beta2_pow = tf.pow(params.beta2, global_step) + if params.factored_second_moment_accumulator and len(var.shape) == 2: + vr_update = tf.assign(slots["adam_vr"], slots["adam_vr"] * params.beta2 + + tf.reduce_mean(grad_squared, 1, keepdims=True) * + (1.0 - params.beta2)) + vc_update = tf.assign(slots["adam_vc"], slots["adam_vc"] * params.beta2 + + tf.reduce_mean(grad_squared, 0, keepdims=True) * + (1.0 - params.beta2)) + with tf.control_dependencies([vr_update, vc_update]): + vr = tf.sqrt(slots["adam_vr"] / (1.0 - beta2_pow)) + params.epsilon + vc = tf.sqrt(slots["adam_vc"] / (1.0 - beta2_pow)) + params.epsilon + vc /= tf.reduce_mean(vc) + denom = vr * vc + else: + v_update = tf.assign(slots["adam_v"], + slots["adam_v"] * params.beta2 + grad_squared * + (1.0 - params.beta2)) + with tf.control_dependencies([v_update]): + denom = tf.sqrt(slots["adam_v"] / (1.0 - beta2_pow)) + params.epsilon + + # compute momentum if applicable + if params.beta1 != 0.0: + m_update = tf.assign(slots["adam_m"], + slots["adam_m"] * params.beta1 + grad_var * + (1.0 - params.beta1)) + with tf.control_dependencies([m_update]): + grad_var = slots["adam_m"] + + # update var + subtrahend = lrate * grad_var / denom + new_val = _quantize(_dequantize(var, params) - subtrahend, params) + return tf.assign(var, new_val) + + +def _create_diet_optimizer(params): + if params.optimizer == "DietAdam": + return DietAdamOptimizer(params) + else: + raise ValueError("Unrecognized diet optimizer") + + +def _quantize(x, params, randomize=True): + """Quantize x according to params, optionally randomizing the rounding.""" + if not params.quantize: + return x + + if not randomize: + return tf.bitcast( + tf.cast(x / params.quantization_scale, tf.int16), tf.float16) + + abs_x = tf.abs(x) + sign_x = tf.sign(x) + y = abs_x / params.quantization_scale + y = tf.floor(y + tf.random_uniform(common_layers.shape_list(x))) + y = tf.minimum(y, tf.int16.max) * sign_x + q = tf.bitcast(tf.cast(y, tf.int16), tf.float16) + return q + + +def _dequantize(q, 
params): + """Dequantize q according to params.""" + if not params.quantize: + return q + return tf.to_float(tf.bitcast(q, tf.int16)) * params.quantization_scale + + +def make_diet_var_getter(params): + """Create a custom variable getter for diet variables according to params.""" + + def diet_var_initializer(shape, dtype, partition_info=None): + """Initializer for a diet variable.""" + del dtype + del partition_info + + with common_layers.fn_device_dependency("diet_init") as out_deps: + float_range = math.sqrt(3) + ret = tf.random_uniform(shape, -float_range, float_range) + if params.quantize: + ret = _quantize(ret, params, randomize=False) + out_deps.append(ret) + return ret + + def diet_var_getter(getter, **kwargs): + """Get diet variable and return it dequantized.""" + if params.quantize: + kwargs["dtype"] = tf.float16 + kwargs["initializer"] = diet_var_initializer + kwargs["trainable"] = False + + base_var = getter(**kwargs) + + dequantized = _dequantize(base_var, params) + + if not hasattr(params, "dequantized"): + params.dequantized = defaultdict(list) + params.dequantized[base_var.name].append(dequantized) + + return dequantized + + return diet_var_getter + + +def _fn_with_diet_vars(fn, args, params): + """Call function with args; use diet variables according to params.""" + + vs_ctr = [] + + def grad_fn(inputs, variables, outputs, output_grads): + """Custom gradient function.""" + del outputs # recomputing below + with common_layers.fn_device_dependency("diet_grad", + output_grads[0].device) as out_dep: + with tf.variable_scope(vs_ctr[0], reuse=True): + outputs = fn(*inputs) + + variables = [common_layers.underlying_variable_ref(v) for v in variables] + dequantized_variables = [ + params.dequantized[v.name][-1] for v in variables + ] + + grads = tf.gradients(outputs, inputs + dequantized_variables, + output_grads) + grad_inputs = grads[:len(inputs)] + grad_variables = grads[len(inputs):] + + opt = _create_diet_optimizer(params) + + # Apply grad_variables here + var_updates = [] + for v, dv in zip(variables, grad_variables): + with tf.variable_scope(vs_ctr[0].name): + opt.create_slots(v) + update_op = opt.update_variable(v, dv) + var_updates.append(update_op) + + with tf.control_dependencies(var_updates): + grad_inputs = [tf.identity(dx) for dx in grad_inputs] + + out_dep.append(grad_inputs) + + return grad_inputs, [None] * len(variables) + + @common_layers.fn_with_custom_grad(grad_fn, use_global_vars=True) + def forward(*inputs): + with tf.variable_scope( + None, default_name="diet", + custom_getter=make_diet_var_getter(params)) as vs: + vs_ctr.append(vs) + outputs = fn(*inputs) + return outputs + + with common_layers.fn_device_dependency("diet_forward", + args[0].device) as out_dep: + outputs = forward(*args) + out_dep.append(outputs) + return outputs + + +def fn_with_diet_vars(params): + """Decorator for graph-building function to use diet variables.""" + params = copy.copy(params) + + def dec(fn): + + def wrapped(*args): + return _fn_with_diet_vars(fn, args, params) + + return wrapped + + return dec diff --git a/tensor2tensor/utils/diet_test.py b/tensor2tensor/utils/diet_test.py new file mode 100644 index 000000000..98df97fd9 --- /dev/null +++ b/tensor2tensor/utils/diet_test.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for common layers.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from tensor2tensor.utils import diet + +import tensorflow.compat.v1 as tf + + +class DietVarTest(tf.test.TestCase): + + def testDiet(self): + + params = diet.diet_adam_optimizer_params() + + @diet.fn_with_diet_vars(params) + def model_fn(x): + y = tf.layers.dense(x, 10, use_bias=False) + return y + + @diet.fn_with_diet_vars(params) + def model_fn2(x): + y = tf.layers.dense(x, 10, use_bias=False) + return y + + x = tf.random_uniform((10, 10)) + y = model_fn(x) + 10. + y = model_fn2(y) + 10. + grads = tf.gradients(y, [x]) + with tf.control_dependencies(grads): + incr_step = tf.assign_add(tf.train.get_or_create_global_step(), 1) + + train_op = tf.group(incr_step, *grads) + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + orig_vals = sess.run(tf.global_variables()) + for _ in range(10): + sess.run(train_op) + new_vals = sess.run(tf.global_variables()) + + different = [] + for old, new in zip(orig_vals, new_vals): + try: + self.assertAllClose(old, new) + except AssertionError: + different.append(True) + self.assertEqual(len(different), len(tf.global_variables())) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/utils/expert_utils.py b/tensor2tensor/utils/expert_utils.py index 8d3d1d50c..469ba5362 100644 --- a/tensor2tensor/utils/expert_utils.py +++ b/tensor2tensor/utils/expert_utils.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,159 +15,76 @@ """Utilities for creating Sparsely-Gated Mixture-of-Experts Layers. -See the most recent draft of our ICLR paper: -https://openreview.net/pdf?id=B1ckMDqlg +See "Outrageously Large Neural Networks" +https://arxiv.org/abs/1701.06538 """ from __future__ import absolute_import from __future__ import division from __future__ import print_function +import functools import math - -# Dependency imports - import six -from six.moves import xrange # pylint: disable=redefined-builtin +from six.moves import range # pylint: disable=redefined-builtin from six.moves import zip # pylint: disable=redefined-builtin -import tensorflow as tf -from tensorflow.python.framework import function +from tensor2tensor.layers import common_layers +from tensor2tensor.layers.vq_discrete import DiscreteBottleneck +import tensorflow.compat.v1 as tf -def NoisyTopKGatingParams(): - """Hyperparams defining NoisyTopK Gating Network. +DEFAULT_DEV_STRING = "existing_device" - Returns: - a tf.contrib.training.HParams object - """ - return tf.contrib.training.HParams( - gating_class=NoisyTopKGating, - num_experts=16, # The number of experts - k=2, # 'The number of experts to use per example - input_size=None, # size of input to MoE. 
Set by MoE class - dtype=tf.float32, # floating point data type - initializer=tf.zeros_initializer(), # initializer for weight matrices - noisy_gating=True, # Add tunable noise (necessary for load-balancing) - noise_epsilon=1e-2, # Added to noise stddev for numerical stability - ) +def add_scope(scope=None, scope_fn=None): + """Return a decorator which add a TF name/variable scope to a function. -def FeedForwardExpertParams(): - """Hyperparameters defining feed-forward expert networks. - - Returns: - a tf.contrib.training.HParams object - """ - return tf.contrib.training.HParams( - # The class that implements the expert network - expert_class=FeedForwardExpert, - input_size=None, # Size of input to MoE. Set by MoE class. - # List of hidden layer sizes, or None for no hidden layers. - # The length of this list determines the number of hidden layers - hidden_layer_sizes=None, - output_size=None, # Size of output from MoE. Set by MoE class. - dtype=tf.float32, # Floating point data type) - # Activation function applied at each hidden layer) - hidden_activation=tf.nn.relu, - initializer=None, # Optional initializer for weight matrices.) - # If autoscale=True, At each hidden/output layer, multiply by - # rsqrt(prev_layer_size / input_size). This scaling happens - # before application of hidden_activation) - autoscale=True,) - - -def _SetInputOutputSizes(hp, input_size, output_size): - """Fill in the input_size and output_size hyperparameters. - - This is used by LocalMixtureOfExperts and DistributedMixtureOfExperts to - fill in the input_size and output_size on the gating parameters and expert - parameters so that the user does not have to set them in multiple places. + Note that the function returned by the decorator accept an additional 'name' + parameter, which can overwrite the name scope given when the function is + created. Args: - hp: a hyperparameters - input_size: an integer - output_size: an integer + scope (str): name of the scope. If None, the function name is used. + scope_fn (fct): Either tf.name_scope or tf.variable_scope + + Returns: + fct: the add_scope decorator """ - if hp.input_size is None: - hp.input_size = input_size - else: - assert hp.input_size == input_size - if output_size is not None: - if hp.output_size is None: - hp.output_size = output_size - else: - assert hp.output_size == output_size + def decorator(f): + @functools.wraps(f) + def decorated(*args, **kwargs): + name = kwargs.pop("name", None) # Python 2 hack for keyword only args + with scope_fn(name or scope or f.__name__): + return f(*args, **kwargs) -class FeedForwardExpert(object): - """An object representing a feed forward network (used as an expert). - """ + return decorated - def __init__(self, hp, name): - """Creates a FeedForwardExpert. + return decorator - Args: - hp: hyperparameters. Call FeedForwardExpertParams() to create these. - name: a string. - """ - self._hp = hp - hidden_layer_sizes = hp.hidden_layer_sizes or [] - num_layers = 1 + len(hidden_layer_sizes) - layer_sizes = [hp.input_size] + hidden_layer_sizes + [hp.output_size] - self._layer_sizes = layer_sizes - self._w = [] - for layer in range(num_layers): - shape = layer_sizes[layer:layer + 2] - self._w.append( - tf.get_variable('%s_layer_%d' % (name, layer), shape, hp.dtype, - hp.initializer)) - - def Eval(self, x): - """Evaluate the FeedForwardExpert on the given input. 
- Args: - x: a `Tensor` of shape `[batch_size, hp.input_size]` +def add_var_scope(scope=None): + return add_scope(scope, scope_fn=tf.variable_scope) - Returns: - a `Tensor` of shape `[batch_size, hp.output_size]` - """ - hp = self._hp - num_layers = len(self._w) - for i in xrange(num_layers): - x = tf.matmul(x, self._w[i]) - if hp.autoscale and self._layer_sizes[i] != hp.input_size: - x *= (self._layer_sizes[i] / hp.input_size)**-0.5 - if i + 1 < num_layers and hp.hidden_activation: - x = hp.hidden_activation(x) - return x - @property - def vars(self): - return self._w +def add_name_scope(scope=None): + return add_scope(scope, scope_fn=tf.name_scope) -@function.Defun( - python_grad_func=lambda x, dy: tf.convert_to_tensor(dy), - shape_func=lambda op: [op.inputs[0].get_shape()]) -def ConvertGradientToTensor(x): - """Identity operation whose gradient is converted to a `Tensor`. +def _add_variable_proxy_methods(var, proxy_tensor): + """Proxy methods of underlying variable. - Currently, the gradient to `tf.concat` is particularly expensive to - compute if dy is an `IndexedSlices` (a lack of GPU implementation - forces the gradient operation onto CPU). This situation occurs when - the output of the `tf.concat` is eventually passed to `tf.gather`. - It is sometimes faster to convert the gradient to a `Tensor`, so as - to get the cheaper gradient for `tf.concat`. To do this, replace - `tf.concat(x)` with `ConvertGradientToTensor(tf.concat(x))`. + This enables our custom getters to still work with, e.g., batch norm. Args: - x: A `Tensor`. - - Returns: - The input `Tensor`. + var: Variable to proxy + proxy_tensor: Tensor that is identity of var """ - return x + proxy_tensor.read_value = lambda: tf.identity(proxy_tensor) + proxy_tensor.assign_sub = var.assign_sub + proxy_tensor.assign = var.assign + proxy_tensor.initialized_value = var.initialized_value class Parallelism(object): @@ -176,7 +94,7 @@ class Parallelism(object): e = [] f = [] - for i in xrange(len(devices)): + for i in range(len(devices)): with tf.device(devices[i]): e_, f_ = func(a[i], b[i], c) e.append(e_) @@ -189,13 +107,14 @@ class Parallelism(object): def __init__(self, device_names_or_functions, - reuse=None, + reuse=True, caching_devices=None, - daisy_chain_variables=False): + daisy_chain_variables=False, + ps_devices=None): """Create a Parallelism. Args: - device_names_or_functions: A list of of length n, containing device names + device_names_or_functions: A list of length n, containing device names or device functions (see `tf.device`) reuse: True or None. Whether to reuse variables created in the first replica in the subsequent replicas. @@ -203,6 +122,7 @@ def __init__(self, names. daisy_chain_variables: a boolean - if true, then copies variables in a daisy chain between devices. + ps_devices: list, list of devices for experts. Returns: a Parallelism. @@ -211,8 +131,9 @@ def __init__(self, self._devices = device_names_or_functions self._n = len(device_names_or_functions) self._reuse = reuse - self._caching_devices = self._MaybeRepeat(caching_devices) + self._caching_devices = self._maybe_repeat(caching_devices) self._daisy_chain_variables = daisy_chain_variables + self._ps_devices = ps_devices or [""] def __call__(self, fn, *args, **kwargs): """A parallel set of function calls (using the specified devices). @@ -230,24 +151,26 @@ def __call__(self, fn, *args, **kwargs): """ # Construct lists or args and kwargs for each function. 
if args: - my_args = TransposeListOfLists([self._MaybeRepeat(arg) for arg in args]) + my_args = transpose_list_of_lists( + [self._maybe_repeat(arg) for arg in args]) else: - my_args = [[] for _ in xrange(self.n)] - my_kwargs = [{} for _ in xrange(self.n)] + my_args = [[] for _ in range(self.n)] + my_kwargs = [{} for _ in range(self.n)] for k, v in six.iteritems(kwargs): - vals = self._MaybeRepeat(v) - for i in xrange(self.n): + vals = self._maybe_repeat(v) + for i in range(self.n): my_kwargs[i][k] = vals[i] # Construct lists of functions. - fns = self._MaybeRepeat(fn) + fns = self._maybe_repeat(fn) # Now make the parallel call. outputs = [] cache = {} - for i in xrange(self.n): + tensor_to_var = {} + for i in range(self.n): - def DaisyChainGetter(getter, name, *args, **kwargs): + def daisy_chain_getter(getter, name, *args, **kwargs): """Get a variable and cache in a daisy chain.""" device_var_key = (self._devices[i], name) if device_var_key in cache: @@ -255,10 +178,16 @@ def DaisyChainGetter(getter, name, *args, **kwargs): return cache[device_var_key] if name in cache: # if we have it on a different device, copy it from the last device - v = tf.identity(cache[name]) + last_device_v = cache[name] + var = tensor_to_var[last_device_v] + v = tf.identity(last_device_v) else: var = getter(name, *args, **kwargs) - v = tf.identity(var._ref()) # pylint: disable=protected-access + v = var.read_value() + + # keep track of the original variable + tensor_to_var[v] = var + _add_variable_proxy_methods(tensor_to_var[v], v) # update the cache cache[name] = v cache[device_var_key] = v @@ -267,30 +196,40 @@ def DaisyChainGetter(getter, name, *args, **kwargs): # Variable scope will not reset caching_device on reused variables, # so we make a custom getter that uses identity to cache the variable. # pylint: disable=cell-var-from-loop - def CachingGetter(getter, name, *args, **kwargs): - v = getter(name, *args, **kwargs) + def caching_getter(getter, name, *args, **kwargs): + """Cache variables on device.""" key = (self._caching_devices[i], name) if key in cache: return cache[key] + + v = getter(name, *args, **kwargs) with tf.device(self._caching_devices[i]): - ret = tf.identity(v._ref()) # pylint: disable=protected-access + ret = v.read_value() + _add_variable_proxy_methods(v, ret) cache[key] = ret return ret if self._daisy_chain_variables: - custom_getter = DaisyChainGetter - elif self._caching_devices: - custom_getter = CachingGetter + custom_getter = daisy_chain_getter + elif self._caching_devices[i]: + custom_getter = caching_getter else: custom_getter = None # pylint: enable=cell-var-from-loop - with tf.name_scope('parallel_%d' % i): + with tf.name_scope("parallel_%d" % i): with tf.variable_scope( - tf.get_variable_scope(), + tf.get_variable_scope() if self._reuse else "parallel_%d" % i, reuse=True if i > 0 and self._reuse else None, caching_device=self._caching_devices[i], custom_getter=custom_getter): - with tf.device(self._devices[i]): + # TODO(noam, epot, avaswani) + # Allows for passing no device in case you want to default to the + # existing device. This is needed when we put all experts on a single + # device, for example in local_moe. 
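+        # Editorial note: DEFAULT_DEV_STRING is the "existing_device" sentinel
+        # defined at module level; passing it as a device name skips the
+        # tf.device() wrapper below, so ops inherit whatever device is already
+        # active.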
+ if self._devices[i] != DEFAULT_DEV_STRING: + with tf.device(self._devices[i]): + outputs.append(fns[i](*my_args[i], **my_kwargs[i])) + else: outputs.append(fns[i](*my_args[i], **my_kwargs[i])) if isinstance(outputs[0], tuple): outputs = list(zip(*outputs)) @@ -305,7 +244,11 @@ def n(self): def devices(self): return self._devices - def _MaybeRepeat(self, x): + @property + def ps_devices(self): + return self._ps_devices + + def _maybe_repeat(self, x): """Utility function for processing arguments that are singletons or lists. Args: @@ -321,25 +264,7 @@ def _MaybeRepeat(self, x): return [x] * self.n -def Parallel(device_names_or_functions, fn, *args): - """Deprecated interface. - - Use `Parallelism(device_names_or_functions)(fn, *args)` instead. - - Args: - device_names_or_functions: A list of length n. - fn: a function or a list of n functions. - *args: additional args. Each arg should either be not a list, or a list - of length n. - - Returns: - either a single list of length n (if fn does not return a tuple), or a - tuple of lists of length n (if fn returns a tuple). - """ - return Parallelism(device_names_or_functions)(fn, *args) - - -def _RowwiseUnsortedSegmentSum(values, indices, n): +def _rowwise_unsorted_segment_sum(values, indices, n): """UnsortedSegmentSum on each row. Args: @@ -356,7 +281,7 @@ def _RowwiseUnsortedSegmentSum(values, indices, n): return tf.reshape(ret_flat, [batch, n]) -def _NormalDistributionCDF(x, stddev): +def _normal_distribution_cdf(x, stddev): """Evaluates the CDF of the normal distribution. Normal distribution with mean 0 and standard deviation stddev, @@ -375,7 +300,8 @@ def _NormalDistributionCDF(x, stddev): return 0.5 * (1.0 + tf.erf(x / (math.sqrt(2) * stddev + 1e-20))) -def _ProbInTopK(clean_values, noisy_values, noise_stddev, noisy_top_values, k): +def _prob_in_top_k( + clean_values, noisy_values, noise_stddev, noisy_top_values, k): """Helper function to NoisyTopKGating. Computes the probability that value is in top k, given different random noise. @@ -392,7 +318,7 @@ def _ProbInTopK(clean_values, noisy_values, noise_stddev, noisy_top_values, k): normally distributed noise with standard deviation noise_stddev. noise_stddev: a `Tensor` of shape [batch, n], or None noisy_top_values: a `Tensor` of shape [batch, m]. - 'values' Output of tf.top_k(noisy_top_values, m). m >= k+1 + "values" Output of tf.top_k(noisy_top_values, m). m >= k+1 k: an integer. Returns: @@ -414,15 +340,15 @@ def _ProbInTopK(clean_values, noisy_values, noise_stddev, noisy_top_values, k): threshold_if_out = tf.expand_dims( tf.gather(top_values_flat, threshold_positions_if_out), 1) # is each value currently in the top k. - prob_if_in = _NormalDistributionCDF(clean_values - threshold_if_in, - noise_stddev) - prob_if_out = _NormalDistributionCDF(clean_values - threshold_if_out, - noise_stddev) + prob_if_in = _normal_distribution_cdf(clean_values - threshold_if_in, + noise_stddev) + prob_if_out = _normal_distribution_cdf(clean_values - threshold_if_out, + noise_stddev) prob = tf.where(is_in, prob_if_in, prob_if_out) return prob -def CVSquared(x): +def cv_squared(x): """The squared coefficient of variation of a sample. Useful as a loss to encourage a positive distribution to be more uniform. 
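(Editorial aside, not part of the patch: a minimal NumPy sketch of how
cv_squared behaves as a load-balancing loss, using the same definition
variance / mean^2; the TF version above differs only in its epsilon handling.)

    import numpy as np

    def cv_squared_np(x, epsilon=1e-10):
      # Squared coefficient of variation: variance divided by squared mean.
      x = np.asarray(x, dtype=np.float32)
      return x.var() / (x.mean() ** 2 + epsilon)

    # Per-expert example counts: balanced load gives 0, skewed load is penalized.
    print(cv_squared_np([25, 25, 25, 25]))  # -> 0.0
    print(cv_squared_np([97, 1, 1, 1]))     # -> ~2.76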
@@ -438,37 +364,11 @@ def CVSquared(x): epsilon = 1e-10 float_size = tf.to_float(tf.size(x)) + epsilon mean = tf.reduce_sum(x) / float_size - variance = tf.reduce_sum(tf.square(x - mean)) / float_size + variance = tf.reduce_sum(tf.squared_difference(x, mean)) / float_size return variance / (tf.square(mean) + epsilon) -def MaxOverload(load): - """The load of the hardest-hit device relative to average. - - This is useful for monitoring the performance of MoEs. - - The load of an expert is the number of examples assigned to that expert. - The load of a device is the sum of the loads of all experts on that device. - - The input to this function is generally the 'load' output of - DistributedMixtureOfExperts.Eval(), which is either a 1d or 2d `Tensor` of - per-expert loads. In either case, the fist dimension corresponds to devices. - - This function sums over all dimensions other than dimension zero, then - computes the ratio of the maxmium value to the mean value. - - Args: - load: a 1d or 2d `Tensor`. - - Returns: - a `Scalar`. - """ - per_device_load = tf.reduce_sum(tf.reshape(load, [tf.shape(load)[0], -1]), 1) - return (tf.reduce_max(per_device_load) / - (tf.reduce_mean(per_device_load) + 1e-10)) - - -def _GatesToLoad(gates): +def _gates_to_load(gates): """Compute the true load per expert, given the gates. The load is the number of examples for which the corresponding gate is >0. @@ -481,11 +381,37 @@ def _GatesToLoad(gates): return tf.reduce_sum(tf.to_float(gates > 0), 0) -def _MyTopK(x, k): +def update_hparams_for_vq_gating(hparams): + """VQ Gating hparams.""" + hparams.add_hparam("z_size", 4) + hparams.add_hparam("noise_dev", 0.5) + # Bottleneck kinds supported: dense, vae, dvq. + hparams.add_hparam("bottleneck_kind", "dvq") + hparams.add_hparam("num_blocks", 1) + hparams.add_hparam("num_residuals", 1) + # Reshape method for DVQ: slice, project + hparams.add_hparam("beta", 0.25) + hparams.add_hparam("epsilon", 1e-5) + hparams.add_hparam("decay", 0.999) + hparams.add_hparam("ema", False) # default is false until ema is implemented + hparams.add_hparam("random_top_k", 1) + hparams.add_hparam("soft_em", False) + hparams.add_hparam("num_samples", 10) + hparams.add_hparam("gating_type", "vq") + hparams.add_hparam("use_scales", int(True)) + hparams.add_hparam("residual_centroids", int(False)) + + +def _my_top_k(x, k): """GPU-compatible version of top-k that works for very small constant k. Calls argmax repeatedly. + tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense, + seems not to be, so if we use tf.nn.top_k, then both the top_k and its + gradient go on cpu. Once this is not an issue, this function becomes + obsolete and should be replaced by tf.nn.top_k. + Args: x: a 2d Tensor. k: a small integer. @@ -499,7 +425,7 @@ def _MyTopK(x, k): values = [] indices = [] depth = tf.shape(x)[1] - for i in xrange(k): + for i in range(k): values.append(tf.reduce_max(x, 1)) argmax = tf.argmax(x, 1) indices.append(argmax) @@ -508,379 +434,324 @@ def _MyTopK(x, k): return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1)) -class NoisyTopKGating(object): - """Noisy top-k gating network. +def vq_gating(x, + num_experts, + k, + bneck, + hparams=None, + name="vq_gating"): + """VQ gating. - See paper: https://arxiv.org/abs/1701.06538. 
+ Args: + x: input Tensor with shape [batch_size, input_size] + num_experts: an integer + k: an integer - number of experts per example + bneck: a bottleneck object + hparams: optional hparams + name: an optional string + + Returns: + gates: a Tensor with shape [batch_size, num_experts] + load: a Tensor with shape [num_experts] """ + with tf.variable_scope(name, reuse=tf.AUTO_REUSE): + + if hparams.use_scales: + scales = tf.get_variable( + "scales", [num_experts], + tf.float32, + initializer=tf.ones_initializer()) + scales = tf.nn.softmax(scales) + hparams.scales = scales + input_size = x.get_shape().as_list()[-1] + batch_size = common_layers.shape_list(x)[0] + + if k > 1: + # first project into two dense layers, chop and discretize, and gate + # TODO(avaswani): Maybe scale the embeddings flowing out of the experts. + # We might want to do this to match the computation being done by topk + x = tf.layers.dense(x, input_size * k) + # x goes from [batch_size, input_size*k] to [batch_size*k, input_size] + x = tf.reshape(x, [batch_size * k, input_size]) + inputs = tf.expand_dims(x, axis=1) + inputs = tf.expand_dims(inputs, axis=1) + # VQ hparams + hparams.z_size = int(math.log(num_experts, 2)) + hparams.hidden_size = input_size + hparams.top_k = k + d = bneck.discrete_bottleneck(inputs) + centroids = None + exp_discrete = d["discrete"] + embed_lookup = d["embed"] + extra_loss = d["loss"] + if hparams.residual_centroids: + centroids = embed_lookup(exp_discrete) # gives the centroids + top_k_indices = tf.squeeze(exp_discrete, axis=1) + tf.summary.histogram("discrete_counts", top_k_indices) + # if k > 1, then we need to reshape top_k_indices from [batch_size*k, 1] + # to [batch_size, k] + if k > 1: + top_k_indices = tf.reshape(top_k_indices, [batch_size, k]) + # get the top k gates + top_k_gates = tf.ones([batch_size, k]) + # This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the + # positions corresponding to all but the top k experts per example. + gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices, + num_experts) + # Compute count per expert from the gates. + # gates has shape [batch_size, num_experts] + # count per expert has shape [num_experts, 1] + count_per_expert = tf.reduce_sum(gates, axis=0) + if hparams.use_scales: + scale_loss = tf.reduce_mean(tf.to_float(count_per_expert) * scales) + extra_loss += scale_loss + if common_layers.should_generate_summaries(): + tf.summary.histogram("vq_loss", extra_loss) + tf.summary.historgram("scale_loss", scale_loss) + return gates, extra_loss, centroids + + +def noisy_top_k_gating(x, + num_experts, + train, + k=2, + initializer=tf.zeros_initializer(), + noisy_gating=True, + noise_epsilon=1e-2, + name=None): + """Noisy top-k gating. - def __init__(self, hp, name): - """Create a NoisyTopKGating network. + See paper: https://arxiv.org/abs/1701.06538. - Args: - hp: a hyperparameters created by NoisyTopKGatingParams() - name: a string - """ - self._vars = [] - self._hp = hp - self._w_gate = tf.get_variable('%s_gate' % name, - [hp.input_size, - hp.num_experts], hp.dtype, hp.initializer) - self._vars.append(self._w_gate) - if hp.noisy_gating: - self._w_noise = tf.get_variable('%s_noise' % name, - [hp.input_size, hp.num_experts], hp.dtype, - hp.initializer) - self._vars.append(self._w_noise) - - def Eval(self, x, train=True, summaries=False): - """Compute noisy top-k gating. + Args: + x: input Tensor with shape [batch_size, input_size] + num_experts: an integer + train: a boolean - we only add noise at training time. 
+ k: an integer - number of experts per example + initializer: an initializer + noisy_gating: a boolean + noise_epsilon: a float + name: an optional string - Args: - x: a `Tensor` of shape `[batch_size, input_size]`. - train: a boolean `Scalar`. Setting this to false turns off noise. - summaries: a boolean. Whether to add summaries. - Returns: - gates: a `Tensor` of shape `[batch_size, n]` - load: a `Tensor` of shape `[n]`. - If we are using noise, this is a smooth approximation of the load, - and you can define a loss in terms of it to help with load-balancing. - """ - with tf.variable_scope('NoisyTopKGating'): - hp = self._hp - clean_logits = tf.matmul(x, self._w_gate) - if hp.noisy_gating: - raw_noise_stddev = tf.matmul(x, self._w_noise) - noise_stddev = ((tf.nn.softplus(raw_noise_stddev) + hp.noise_epsilon) * - (tf.to_float(train))) - noisy_logits = clean_logits + ( - tf.random_normal(tf.shape(clean_logits)) * noise_stddev) - logits = noisy_logits - if summaries: - tf.summary.histogram('noisy_logits', noisy_logits) - tf.summary.histogram('noise_stddev', noise_stddev) - else: - logits = clean_logits - top_logits, top_indices = _MyTopK(logits, min(hp.k + 1, hp.num_experts)) - top_k_logits = tf.slice(top_logits, [0, 0], [-1, hp.k]) - top_k_indices = tf.slice(top_indices, [0, 0], [-1, hp.k]) - top_k_gates = tf.nn.softmax(top_k_logits) - # This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the - # positions corresponding to all but the top k experts per example. - gates = _RowwiseUnsortedSegmentSum(top_k_gates, top_k_indices, - hp.num_experts) - if hp.noisy_gating and hp.k < hp.num_experts: - load = tf.reduce_sum( - _ProbInTopK(clean_logits, noisy_logits, noise_stddev, top_logits, - hp.k), 0) - else: - load = _GatesToLoad(gates) - if summaries: - tf.summary.histogram('importance', tf.reduce_sum(gates, 0)) - tf.summary.histogram('load', load) - return gates, load + Returns: + gates: a Tensor with shape [batch_size, num_experts] + load: a Tensor with shape [num_experts] + """ + with tf.variable_scope(name, default_name="noisy_top_k_gating"): + input_size = x.get_shape().as_list()[-1] + w_gate = tf.get_variable( + "w_gate", [input_size, num_experts], tf.float32, initializer) + if noisy_gating: + w_noise = tf.get_variable("w_noise", + [input_size, num_experts], tf.float32, + initializer) + clean_logits = tf.matmul(x, w_gate) + if noisy_gating: + raw_noise_stddev = tf.matmul(x, w_noise) + noise_stddev = ((tf.nn.softplus(raw_noise_stddev) + noise_epsilon) * + (tf.to_float(train))) + noisy_logits = clean_logits + ( + tf.random_normal(tf.shape(clean_logits)) * noise_stddev) + logits = noisy_logits + if common_layers.should_generate_summaries(): + tf.summary.histogram("noisy_logits", noisy_logits) + tf.summary.histogram("noise_stddev", noise_stddev) + else: + logits = clean_logits + top_logits, top_indices = _my_top_k(logits, min(k + 1, num_experts)) + # top k logits has shape [batch, k] + top_k_logits = tf.slice(top_logits, [0, 0], [-1, k]) + top_k_indices = tf.slice(top_indices, [0, 0], [-1, k]) + top_k_gates = tf.nn.softmax(top_k_logits) + # This will be a `Tensor` of shape `[batch_size, n]`, with zeros in the + # positions corresponding to all but the top k experts per example. 
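+    # Illustrative example (editorial): with num_experts=4,
+    # top_k_indices=[[0, 2]] and top_k_gates=[[0.6, 0.4]], the resulting
+    # gates row is [0.6, 0.0, 0.4, 0.0].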
+ gates = _rowwise_unsorted_segment_sum(top_k_gates, top_k_indices, + num_experts) + if noisy_gating and k < num_experts: + load = tf.reduce_sum( + _prob_in_top_k(clean_logits, noisy_logits, noise_stddev, top_logits, + k), 0) + else: + load = _gates_to_load(gates) + if common_layers.should_generate_summaries(): + tf.summary.histogram("importance", tf.reduce_sum(gates, 0)) + tf.summary.histogram("load", load) + return gates, load + + +class PadRemover(object): + """Helper to remove padding from a tensor before sending to the experts. + + The padding is computed for one reference tensor containing the padding mask + and then can be applied to any other tensor of shape [dim_origin,...]. + + Ex: + input = [ + [tok1, tok2], + [tok3, tok4], + [0, 0], + [0, 0], + [tok5, tok6], + [0, 0], + ] + output = [ + [tok1, tok2], + [tok3, tok4], + [tok5, tok6], + ] + """ - @property - def vars(self): - return self._vars + def __init__(self, pad_mask): + """Compute and store the location of the padding. + Args: + pad_mask (tf.Tensor): Reference padding tensor of shape + [batch_size,length] or [dim_origin] (dim_origin=batch_size*length) + containing non-zeros positive values to indicate padding location. + """ + self.nonpad_ids = None + self.dim_origin = None -class LocalMixtureOfExperts(object): - """A MoE on a single device. - """ + with tf.name_scope("pad_reduce/get_ids"): + pad_mask = tf.reshape(pad_mask, [-1]) # Flatten the batch + # nonpad_ids contains coordinates of zeros rows (as pad_mask is + # float32, checking zero equality is done with |x| < epsilon, with + # epsilon=1e-9 as standard, here pad_mask only contains positive values + # so tf.abs would be redundant) + self.nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9)) + self.dim_origin = tf.shape(pad_mask)[:1] - def __init__(self, gating_hp, expert_hp, input_size, output_size, name): - """Create a LocalMixtureOfExperts. + def remove(self, x): + """Remove padding from the given tensor. Args: - gating_hp: hyperparameters for the gating network. - e.g. NoisyTopKGatingParams() - expert_hp: hyperparameters for the expert networks. - e.g. FeedForwardExpertParams() - input_size: an integer. - output_size: an integer. - name: a string. + x (tf.Tensor): of shape [dim_origin,...] + + Returns: + a tensor of shape [dim_compressed,...] with dim_compressed <= dim_origin """ - self._name = name - _SetInputOutputSizes(gating_hp, input_size, None) - _SetInputOutputSizes(expert_hp, input_size, output_size) - self._gating_hp = gating_hp - self._gating = gating_hp.gating_class(gating_hp, name + '_gating') - self._expert_hp = expert_hp - self._experts = [ - expert_hp.expert_class(expert_hp, name + '_%d' % i) - for i in xrange(gating_hp.num_experts) - ] - - def Eval(self, - x, - train=True, - per_example_multiplier=None, - summaries=False, - identifiers=None): - """Evaluate mixture of experts. - - We provide a convenient debugging tool for determining the set of examples - that we passed to each expert. The caller may provide a `Tensor` of - "identifiers", of any type whose first dimension matches the number of - input examples. The function will then return a list - "expert_to_identifiers", with one `Tensor` for each expert containing the - identifiers for all examples assigned to that expert. A parallel list of - `Tensor`s, "expert_to_gates", is also returned, containing the - corresponding gate values. 
+ with tf.name_scope("pad_reduce/remove"): + x_shape = x.get_shape().as_list() + x = tf.gather_nd( + x, + indices=self.nonpad_ids, + ) + if not tf.executing_eagerly(): + # This is a hack but for some reason, gather_nd return a tensor of + # undefined shape, so the shape is set up manually + x.set_shape([None] + x_shape[1:]) + return x + + def restore(self, x): + """Add padding back to the given tensor. Args: - x: a `Tensor` of shape `[batch_size, input_size]` - train: a boolean Scalar. Are we in training mode? - per_example_multiplier: an optional `Tensor` of shape `[batch_size]` which - gets multiplied into the gate values. If this LocalMixtureOfExperts - represents one secondary MoE in a hierarchical MoE, then we pass in - in the gate values from the primary gating function here. This causes - the computed values (`y`, `importance` and `expert_to_gates`) to also - reflect the primary gate values. - summaries: an boolean. Enable summaries. - identifiers: an optional `Tensor` whose first dimension is equal to - batch_size. + x (tf.Tensor): of shape [dim_compressed,...] Returns: - y: a `Tensor` of shape `[batch_size, output_size]`. Output of the MoE. - importance: a `Tensor` of shape `[n]`. Batchwise sum of gates. - load: a `Tensor` of shape `[n]`. Smooth estimator of the number of - examples passed to each expert. This is useful for load-balancing, - as any gradient on this `Tensor` will back-propagate to the gating - network. - expert_to_identifiers: if `identifiers` was passed in, a list of - length `num_experts`. Each element is a `Tensor` whose shape matches - that of `identifiers` in all but the first dimension. Contains the - slices of `identifiers` corresponding to the batch elements that were - dispatched to that expert. - expert_to_gates: A list of length `num_experts`. Each element contains - a 1-dimensional tensor + a tensor of shape [dim_origin,...] with dim_compressed >= dim_origin. The + dim is restored from the original reference tensor """ - gating_hp = self._gating_hp - gates, load = self._gating.Eval(x, train, summaries) - if per_example_multiplier is not None: - gates *= tf.expand_dims(per_example_multiplier, 1) - dispatcher = SparseDispatcher(gating_hp.num_experts, gates) - expert_input = dispatcher.Dispatch(x) - expert_output = [ - self._experts[i].Eval(expert_input[i]) - for i in xrange(gating_hp.num_experts) - ] - y = dispatcher.Combine(expert_output) - if identifiers is not None: - expert_to_identifiers = dispatcher.Dispatch(identifiers) - else: - expert_to_identifiers = None - return (y, tf.reduce_sum(gates, 0), load, expert_to_identifiers, - dispatcher.ExpertToGates()) - - @property - def vars(self): - ret = [] - for x in self._experts: - ret.extend(x.vars) - ret.extend(self._gating.vars) - return ret + with tf.name_scope("pad_reduce/restore"): + x = tf.scatter_nd( + indices=self.nonpad_ids, + updates=x, + shape=tf.concat([self.dim_origin, tf.shape(x)[1:]], axis=0), + ) + return x -class DistributedMixtureOfExperts(object): - """Distributed (optionally Hierarchical) Mixture of Experts. +@add_name_scope("map_ids") +def map_ids(x, indices, map_fn): + """Apply a function to each coordinate ids of a multidimensional tensor. - This class implements the scheme described in our paper. - See link at the top of this file. + This allows to process each sequence of a batch independently. This is + similar to tf.map_fn but with tensor where the batch dim has been flatten. - The model is trained synchronously using one large TF graph using - multiple devices. 
+ Warning: The indices ids have to be contiguous and ordered in memory as the + output vector for each of the ids are simply concatenated after being + processed. + Ex: if your indices are [0,2,2,1,2,0], the output will contains the processed + rows in the following order: [0,0,1,2,2,2] - The conventional (non-MoE) layers use data-parallelism, with each device - processing a subset of the training batch. We call these datashards. + Args: + x (Tensor): The tensor to be dispatched of shape [length,...] + indices (Tensor): A int32 tensor of size [length, 1] containing the batch + coordinate of x + map_fn (fct): Function called for every ids of the original tensor. Take + as input a tensor of same rank than x and from shape [length_id,...] with + length_id <= length. Isn't called if length_id == 0 - The MoE layer (this object) uses model parallelism. Each expert is assigned - to a particular device, which hosts the expert parameters and performs the - expert computation for all examples assigned to that expert. In the case - of a hierarchical MoE, each second-level MoE is assigned to a device. + Returns: + a tensor of same shape as x, where each elements has been processed """ + indices = tf.reshape(indices, [-1]) + + t_i = tf.constant(0) + # batch_coordinates start at 0 + t_batch_size = tf.reduce_max(indices) + 1 + + # ta_stack_out will store the intermediate results for each individual id + # As alternative to tf.TensorArray, scatter_update could potentially be used + # but that would require an additional mutable tensor. + ta_stack_out = tf.TensorArray( + x.dtype, + size=t_batch_size, + ) - def __init__(self, primary_gating_hp, secondary_gating_hp, expert_hp, - input_size, output_size, expert_devices, name): - """Create a DistributedMixtureOfExperts. + # Then we iterate over each sequence individually and compute the + # transformation for each id + while_condition = lambda t_i, *args: tf.less(t_i, t_batch_size) + def body(t_i, ta_stack_out): + """Loop body.""" + # Gather the ids + current_ids = tf.to_int32(tf.where(tf.equal(indices, t_i))) + t_row = tf.gather_nd(x, indices=current_ids) - If `secondary_gating_hp` is `None`, then this is a flat MoE with - `primary_gating_hp.num_experts` experts. Otherwise, this is a hierarchical - MoE with `primary_gating_hp.num_experts` groups of - `secondary_gating_hp.num_experts` experts. + # TODO(epot): Should not call map_fn if t_row size is 0 - The assignemnt of experts (or groups of experts) to devices is by - round-robin. So to make equal use of all the devices, one should set - `primary_gating_hp.num_experts` to the number of devices or a multiple - thereof. + # Apply transformation to each id + # Restore batch_dim=1 as most function expect [batch_dim, length, ...] as + # input + t_row = tf.expand_dims(t_row, axis=0) + t_row = map_fn(t_row) + t_row = tf.squeeze(t_row, axis=0) # Squeeze for concatenation + ta_stack_out = ta_stack_out.write(t_i, t_row) - Args: - primary_gating_hp: hyperparameters for the primary gating network. - e.g. NoisyTopKGatingParams(). - secondary_gating_hp: hyperparameters for the secondary gating network. - e.g. NoisyTopKGatingParams(). None indicates a flat MoE. - expert_hp: hyperparameters for the expert networks. - e.g. FeedForwardExpertParams() - input_size: an integer. - output_size: an integer. - expert_devices: a list of device strings. The devices to be used for - the experts. - name: a string. 
- """ - self._name = name - # fill in the missing values in the hyperparameters - _SetInputOutputSizes(primary_gating_hp, input_size, None) - _SetInputOutputSizes(expert_hp, input_size, output_size) - self._is_hierarchical = secondary_gating_hp is not None - self._primary_gating_hp = primary_gating_hp - self._primary_gating = primary_gating_hp.gating_class( - primary_gating_hp, name + '_primary_gating') - n1 = self._primary_gating_hp.num_experts - # round robin assignment of experts to devices. - expert_devices = [ - expert_devices[i % len(expert_devices)] for i in xrange(n1) - ] - self._expert_devices = expert_devices - self._all_vars = [] - self._all_vars.extend(self._primary_gating.vars) - if self._is_hierarchical: - # hierarchical MoE - self._secondary_moe = [] - for i in xrange(n1): - with tf.device(expert_devices[i]): - secondary_moe = LocalMixtureOfExperts(secondary_gating_hp, expert_hp, - input_size, output_size, - '%s_secondary_%d' % (name, i)) - self._secondary_moe.append(secondary_moe) - self._all_vars.extend(secondary_moe.vars) - else: - # flat MoE - self._experts = [] - for i in xrange(n1): - with tf.device(expert_devices[i]): - expert = expert_hp.expert_class(expert_hp, name + '_%d' % i) - self._experts.append(expert) - self._all_vars.extend(expert.vars) - - def Eval(self, - datashard_devices, - xs, - train=True, - summaries=False, - identifiers=None, - shadow_xs=None): - """Evaluate MoE on given inputs. - - This class is designed for the case where the rest of the model is using - data parallelism. We receive an array of input `Tensor`s, one per - datashard, and we produce a list of output Tensors, one per datashard. - - We provide a convenient debugging tool for determining the set of examples - that we passed to each expert. The caller may provide a `Tensor` of - "identifiers", of any type whose first dimension matches the number of - input examples. The function will then return a list - "expert_to_identifiers", with one `Tensor` for each expert containing the - identifiers for all examples assigned to that expert. A parallel list of - `Tensor`s, "expert_to_gates", is also returned, containing the - corresponding gate values. + return [tf.add(t_i, 1), ta_stack_out] # ++i - Args: - datashard_devices: a `list` of device strings of length `num_datashards`. - Which devices to use for the output tensors. - xs: A `list` of `Tensor`s of length `num_datashards`. Each has shape - `[batch_size[d], input_size]. - train: a boolean `Scalar`. When train=`True`, noise is added to the - gating function. - summaries: a boolean. Whether to write summaries. - identifiers: an optional list of tensors. - Each tensor has shape [, extra_dims] - shadow_xs: Optional `list` of `Tensor`s of length `num_datashards`. Each - has shape `[batch_size[d], input_size]. Shadow_xs is useful if you want - to dispatch a transformed version of xs to the experts, but you want - untransformed xs for the gating network. + # Run the loop, equivalent to: + # stack_out = [] + # while i < batch_size: + # stack_out.expand(map_fn(x[indices==i])) + _, ta_stack_out = tf.while_loop(while_condition, body, [t_i, ta_stack_out]) - Returns: - ys: the output (a list of one tensor per datashard). Each has shape - `[batch_size[d], output_size]. - importance: a `Tensor` of shape `[n]` for a flat MoE or `[n1, n2]` for a - hierarchical MoE. Batchwise sum of gates. - load: a `Tensor` of shape `[n]` for a flat MoE or `[n1, n2]` for a - hierarchical MoE. Smooth estimator of the number of - examples passed to each expert. 
This is useful for load-balancing, - as any gradient on this `Tensor` will back-propagate to the gating - network. - expert_to_identifiers: if `identifiers` was passed in, a list of - length `num_experts`. Each element is a `Tensor` whose shape matches - that of `identifiers` in all but the first dimension. Contains the - slices of `identifiers` corresponding to the batch elements that were - dispatched to that expert. - expert_to_gates: a list of one tensor per expert. - Each tensor has shape [] - - """ - n1 = self._primary_gating_hp.num_experts - epsilon = 1e-10 - assert len(datashard_devices) == len(xs) - num_datashards = len(xs) - expert_devices = self._expert_devices - has_identifiers = identifiers is not None - # pylint: disable=unbalanced-tuple-unpacking - primary_gates, primary_smooth_load = Parallel( - datashard_devices, self._primary_gating.Eval, xs, train, - [summaries] + [False] * (num_datashards - 1)) - primary_importance = tf.add_n( - Parallel(datashard_devices, tf.reduce_sum, primary_gates, 0)) - primary_smooth_load = tf.add_n(primary_smooth_load) - primary_true_load = tf.add_n( - Parallel(datashard_devices, _GatesToLoad, primary_gates)) - primary_dispatcher = DistributedSparseDispatcher( - datashard_devices, expert_devices, primary_gates) - - if shadow_xs is None: - secondary_input = primary_dispatcher.Dispatch(xs) - else: - secondary_input = primary_dispatcher.Dispatch(shadow_xs) - - primary_expert_to_identifiers = (primary_dispatcher.Dispatch(identifiers) - if has_identifiers else None) - primary_expert_to_gates = primary_dispatcher.ExpertToGates() - if not self._is_hierarchical: - # one-level distributed mixture of experts - secondary_output = Parallel(expert_devices, lambda a, b: a.Eval(b), - self._experts, secondary_input) - ys = primary_dispatcher.Combine(secondary_output) - return (ys, primary_importance, primary_smooth_load, - primary_expert_to_identifiers, primary_expert_to_gates) - # two-level hierarchical MoE - (secondary_output, secondary_importance, secondary_load, - secondary_expert_to_identifiers, secondary_expert_to_gates) = (Parallel( - expert_devices, [m.Eval for m in self._secondary_moe], secondary_input, - train, primary_expert_to_gates, [summaries] + [False] * (n1 - 1), - primary_expert_to_identifiers)) - # pylint: enable=unbalanced-tuple-unpacking - ys = primary_dispatcher.Combine(secondary_output, multiply_by_gates=False) - importance = tf.stack(secondary_importance) - load = tf.stack(secondary_load) * tf.expand_dims(primary_smooth_load / ( - primary_true_load + epsilon), 1) - expert_to_identifiers = [] - if identifiers is not None: - for el in secondary_expert_to_identifiers: - expert_to_identifiers.extend(el) - expert_to_gates = [] - for el in secondary_expert_to_gates: - expert_to_gates.extend(el) - return (ys, importance, load, expert_to_identifiers, expert_to_gates) - - @property - def vars(self): - return self._all_vars + # Merge all results + return ta_stack_out.concat() class SparseDispatcher(object): """Helper for implementing a mixture of experts. + The purpose of this class is to create input minibatches for the + experts and to combine the results of the experts to form a unified + output tensor. + + There are two functions: + dispatch - take an input Tensor and create input Tensors for each expert. + combine - take output Tensors from each expert and form a combined output + Tensor. Outputs from different experts for the same batch element are + summed together, weighted by the provided "gates". 
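For readers new to this gating convention, here is a small NumPy illustration (not library code) of how a `gates` matrix assigns batch elements to experts; the nonzero entries are also the weights that `combine` uses when summing expert outputs.

```python
import numpy as np

gates = np.array([[0.7, 0.3, 0.0],   # element 0 -> experts 0 and 1
                  [0.0, 0.0, 1.0],   # element 1 -> expert 2
                  [0.5, 0.0, 0.5]])  # element 2 -> experts 0 and 2

# Per-expert mini-batches, conceptually what dispatch() produces:
expert_batches = [np.nonzero(gates[:, e])[0] for e in range(gates.shape[1])]
# expert_batches == [array([0, 2]), array([0]), array([1, 2])]

# combine() then computes output[b] = sum_e gates[b, e] * expert_e(x[b]).
```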
+ + The class is initialized with a "gates" Tensor, which specifies which + batch elements go to which experts, and the weights to use when combining + the outputs. Batch element b is sent to expert e iff gates[b, e] != 0. + + The inputs and outputs are all two-dimensional [batch, depth]. + Caller is responsible for collapsing additional dimensions prior to + calling this class and reshaping the output to the original shape. + See common_layers.reshape_like(). + Example use: gates: a float32 `Tensor` with shape `[batch_size, num_experts]` @@ -888,9 +759,9 @@ class SparseDispatcher(object): experts: a list of length `num_experts` containing sub-networks. dispatcher = SparseDispatcher(num_experts, gates) - expert_inputs = dispatcher.Dispatch(inputs) + expert_inputs = dispatcher.dispatch(inputs) expert_outputs = [experts[i](expert_inputs[i]) for i in range(num_experts)] - outputs = dispatcher.Combine(expert_outputs) + outputs = dispatcher.combine(expert_outputs) The preceding code sets the output for a particular example b to: output[b] = Sum_i(gates[b, i] * experts[i](inputs[b])) @@ -919,22 +790,24 @@ def __init__(self, num_experts, gates): tf.reshape(self._gates, [-1]), self._batch_index * num_experts + self._expert_index) - def Dispatch(self, inp): + @add_name_scope() + def dispatch(self, inp): """Create one input Tensor for each expert. The `Tensor` for a expert `i` contains the slices of `inp` corresponding to the batch elements `b` where `gates[b, i] > 0`. Args: - inp: a `Tensor` of shape '[batch_size, ]` + inp: a `Tensor` of shape "[batch_size, ]` Returns: a list of `num_experts` `Tensor`s with shapes `[expert_batch_size_i, ]`. """ inp = tf.gather(inp, self._batch_index) - return tf.split(inp, self._part_sizes_tensor, 0) + return tf.split(inp, self._part_sizes_tensor, 0, num=self._num_experts) - def Combine(self, expert_out, multiply_by_gates=True): + @add_name_scope() + def combine(self, expert_out, multiply_by_gates=True): """Sum together the expert output, weighted by the gates. The slice corresponding to a particular batch element `b` is computed @@ -950,22 +823,34 @@ def Combine(self, expert_out, multiply_by_gates=True): Returns: a `Tensor` with shape `[batch_size, ]`. """ - # see comments on ConvertGradientToTensor - stitched = ConvertGradientToTensor(tf.concat(expert_out, 0)) + # see comments on convert_gradient_to_tensor + stitched = common_layers.convert_gradient_to_tensor( + tf.concat(expert_out, 0)) if multiply_by_gates: stitched *= tf.expand_dims(self._nonzero_gates, 1) combined = tf.unsorted_segment_sum(stitched, self._batch_index, tf.shape(self._gates)[0]) return combined - def ExpertToGates(self): + def expert_to_gates(self): """Gate values corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s with type `tf.float32` and shapes `[expert_batch_size_i]` """ - return tf.split(self._nonzero_gates, self._part_sizes_tensor, 0) + return tf.split( + self._nonzero_gates, self._part_sizes_tensor, 0, num=self._num_experts) + + def expert_to_batch_indices(self): + """Batch indices corresponding to the examples in the per-expert `Tensor`s. + + Returns: + a list of `num_experts` one-dimensional `Tensor`s with type `tf.int64` + and shapes `[expert_batch_size_i]` + """ + return tf.split( + self._batch_index, self._part_sizes_tensor, 0, num=self._num_experts) @property def part_sizes(self): @@ -976,36 +861,33 @@ class DistributedSparseDispatcher(object): """A distributed version of SparseDispatcher. 
Instead of one batch of input examples, we simultaneously process - num_datashards batches of input examples. The per-expert `Tensor`s contain - a combination of examples from the different datashards. + a list of num_datashards batches of input examples. The per-expert + `Tensor`s contain a combination of examples from the different datashards. Each datashard is associated with a particular device and each expert is associated with a particular device. All per-datashard and per-expert `Tensor`s are created on those devices. There is no single-device bottleneck. """ - def __init__(self, datashard_devices, expert_devices, gates): + def __init__(self, data_parallelism, expert_parallelism, gates): """Create a DistributedSparseDispatcher. Args: - datashard_devices: a list of num_datashards device strings. - expert_devices: a list of num_experts device strings. - gates: a list of num_datashards `Tensor`s of shapes + data_parallelism: a Parallelism object. + expert_parallelism: a Parallelism object. + gates: a list of datashard_parallelism.n `Tensor`s of shapes `[batch_size[d], num_experts]`. Returns: a DistributedSparseDispatcher """ self._gates = gates - self._num_experts = len(expert_devices) - assert len(gates) == len(datashard_devices) - self._num_datashards = len(gates) - self._datashard_devices = datashard_devices - self._expert_devices = expert_devices - self._dispatchers = Parallel(self._datashard_devices, SparseDispatcher, - self._num_experts, gates) - - def Dispatch(self, inp): + self._dp = data_parallelism + self._ep = expert_parallelism + assert len(gates) == self._dp.n + self._dispatchers = self._dp(SparseDispatcher, self._ep.n, gates) + + def dispatch(self, inp): """Create one input Tensor for each expert. Args: @@ -1015,16 +897,14 @@ def Dispatch(self, inp): a list of `num_experts` `Tensor`s with shapes `[num_examples[i], ]`. """ - dispatched = Parallel(self._datashard_devices, lambda a, b: a.Dispatch(b), - self._dispatchers, inp) - ret = Parallel(self._expert_devices, tf.concat, - TransposeListOfLists(dispatched), 0) + dispatched = self._dp(lambda a, b: a.dispatch(b), self._dispatchers, inp) + ret = self._ep(tf.concat, transpose_list_of_lists(dispatched), 0) if ret[0].dtype == tf.float32: - # see comments on ConvertGradientToTensor - ret = Parallel(self._expert_devices, ConvertGradientToTensor, ret) + # see comments on common_layers.convert_gradient_to_tensor + ret = self._ep(common_layers.convert_gradient_to_tensor, ret) return ret - def Combine(self, expert_out, multiply_by_gates=True): + def combine(self, expert_out, multiply_by_gates=True): """Sum together the expert output, multiplied by the corresponding gates. Args: @@ -1037,40 +917,31 @@ def Combine(self, expert_out, multiply_by_gates=True): `[batch_size[d], ]`. 
""" expert_part_sizes = tf.unstack( - tf.stack([ - self._dispatchers[d].part_sizes - for d in xrange(self._num_datashards) - ]), - num=self._num_experts, + tf.stack([d.part_sizes for d in self._dispatchers]), + num=self._ep.n, axis=1) # list of lists of shape [num_experts][num_datashards] - expert_output_parts = Parallel(self._expert_devices, tf.split, expert_out, - expert_part_sizes) - expert_output_parts_t = TransposeListOfLists(expert_output_parts) - ret = [] - for d in xrange(self._num_datashards): - with tf.device(self._datashard_devices[d]): - ret.append(self._dispatchers[d].Combine( - # see comments on ConvertGradientToTensor - ConvertGradientToTensor(tf.concat(expert_output_parts_t[d], 0)), - multiply_by_gates=multiply_by_gates)) - return ret - - def ExpertToGates(self): + expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes) + expert_output_parts_t = transpose_list_of_lists(expert_output_parts) + def my_combine(dispatcher, parts): + return dispatcher.combine( + common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)), + multiply_by_gates=multiply_by_gates) + return self._dp(my_combine, self._dispatchers, expert_output_parts_t) + + def expert_to_gates(self): """Gate values corresponding to the examples in the per-expert `Tensor`s. Returns: a list of `num_experts` one-dimensional `Tensor`s of type `tf.float32`. """ - return Parallel(self._expert_devices, tf.concat, - TransposeListOfLists( - Parallel(self._datashard_devices, [ - self._dispatchers[d].ExpertToGates - for d in xrange(self._num_datashards) - ])), 0) + return self._ep( + tf.concat, + transpose_list_of_lists( + self._dp(lambda d: d.expert_to_gates(), self._dispatchers)), 0) -def TransposeListOfLists(lol): +def transpose_list_of_lists(lol): """Transpose a list of equally-sized python lists. Args: @@ -1078,207 +949,589 @@ def TransposeListOfLists(lol): Returns: a list of lists """ - assert lol, 'cannot pass the empty list' + assert lol, "cannot pass the empty list" return [list(x) for x in zip(*lol)] -class DistributedSingleDispatcher(object): - """Dispatches to experts according to gates. +def ffn_expert_fn(input_size, + hidden_sizes, + output_size, + hidden_activation=tf.nn.relu): + """Returns a function that creates a feed-forward network. - Each example goes to one expert. + Use this function to create the expert_fn argument to distributed_moe. - Unlike SparseDispatcher, the gates are one-dimensional `Tensor`s of integer - expert ids. There are no weights. + Args: + input_size: an integer + hidden_sizes: a list of integers + output_size: an integer + hidden_activation: a unary function. + + Returns: + a unary function """ + def my_fn(x): + layer_sizes = [input_size] + hidden_sizes + [output_size] + for i in range(1 + len(hidden_sizes)): + w = tf.get_variable("w_%d" % i, layer_sizes[i:i+2], tf.float32) + x = tf.matmul(x, w) + if i < len(hidden_sizes): + x = hidden_activation(x) + if layer_sizes[i] != input_size: + x *= (layer_sizes[i] / float(input_size))**-0.5 + return x + return my_fn + - def __init__(self, data_parallelism, model_parallelism, gates): - """Constructs a Dispatcher. 
+def flatten_all_but_last(a): + """Flatten all dimensions of a except the last.""" + ret = tf.reshape(a, [-1, tf.shape(a)[-1]]) + if not tf.executing_eagerly(): + ret.set_shape([None] + a.get_shape().as_list()[-1:]) + return ret + + +def local_moe(x, + train, + expert_fn, + num_experts, + k=1, + loss_coef=1e-2, + hparams=None, + pass_x=True, + pass_gates=False, + additional_dispatch_params=None, + name=None): + """Call a local mixture of experts. + + Args: + x: a tensors with shape [... , input_size] + train: a boolean scalar. + expert_fn: a function. + num_experts: an integer - number of experts + k: an integer - how many experts to use for each batch element + loss_coef: a scalar - multiplier on load-balancing losses + hparams: optional hparams for vq gating + pass_x: a boolean. If true, x will also be dispatched to the experts. + pass_gates: a boolean. If true, gates will be passed to experts. Might be + necessary when dealing with sparse encoder-encoder decoder attention + additional_dispatch_params: The extra tensors that need to be sent to each + expert. Examples include batch batch coordinates (see + common_attention.local_expert_attention) + name: a string + + Returns: + y: a tensor. Has the same shape as x, except for the last dimension, + which is output_size. + extra_training_loss: a scalar. This should be added into the overall + training loss of the model. The backpropagation of this loss + encourages all experts to be approximately equally used across a batch. + """ + bneck = DiscreteBottleneck(hparams) + with tf.variable_scope(name, default_name="local_moe"): + centroids = None + x_flat = flatten_all_but_last(x) + if hparams.gating_type == "topk": + tf.logging.info("Using noisy top_k with k = {}".format(k)) + # The gates indicate which batch elements go to which tensors. + # load is a measure of approximately how many examples go to each expert + gates, load = noisy_top_k_gating( + x_flat, + num_experts, + train, + k, + initializer=tf.zeros_initializer(), + noisy_gating=True, + noise_epsilon=1e-2) + importance = tf.reduce_sum(gates, 0) + loss = (cv_squared(importance) + cv_squared(load)) + else: + assert hparams.gating_type == "vq" + tf.logging.info("Using VQ gating") + gates, loss, centroids = vq_gating( + x_flat, num_experts, k, bneck, hparams=hparams) + loss *= loss_coef + # Shuffle data between datashards and experts. + dispatcher = SparseDispatcher(num_experts, gates) + # Set up expert_fn arguments + expert_kwargs = {} + if pass_x: + expert_kwargs["x"] = dispatcher.dispatch(x_flat) + if pass_gates: + expert_kwargs["gates"] = dispatcher.expert_to_gates() + for key, val in six.iteritems(additional_dispatch_params or {}): + val = flatten_all_but_last(val) + expert_kwargs[key] = dispatcher.dispatch(val) + + ep = Parallelism([DEFAULT_DEV_STRING] * num_experts, reuse=None) + expert_outputs = ep(expert_fn, **expert_kwargs) + + y_flat = dispatcher.combine(expert_outputs) + if centroids is not None: + centroids = tf.squeeze(centroids, axis=[1, 2]) + y_flat += centroids + y = common_layers.reshape_like(y_flat, x) + return y, loss + + +class TruncatingDispatcher(object): + """Helper for implementing a mixture of experts. + + A TruncatingDispatcher is useful when you need to deal with + fixed-sized Tensors. As opposed to a SparseDispatcher, which + produces batches of different sizes for the different experts, the + TruncatingDispatcher always produces batches of the same given size, + and the results are returned stacked in one big tensor. 
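A small NumPy sketch (illustrative only) of the fixed-capacity behaviour this class implements: once an expert has received `expert_capacity` positions, any later request for that expert is dropped, as described below. For simplicity a single sequence is shown, so axis 0 plays the role of the length dimension.

```python
import numpy as np

requests = np.array([[1, 0],     # position 0 asks for expert 0
                     [1, 0],     # position 1 asks for expert 0
                     [1, 0],     # position 2 asks for expert 0 (overflow)
                     [0, 1]])    # position 3 asks for expert 1
expert_capacity = 2

# Exclusive cumulative sum = position of each request within its expert.
position_in_expert = np.cumsum(requests, axis=0) - requests
kept = requests * (position_in_expert < expert_capacity)
# kept[:, 0] == [1, 1, 0, 0]: the third request for expert 0 is dropped.
```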
+ + In the case where an expert is over-capacity, the last items that + should have gone to that expert are dropped. + + Confusingly, the inputs to a TruncatingDispatcher have both a + "batch" and a "length" dimension. Not only does each expert receive + the same total number of examples, it also receives the same number + of examples for each element of "batch". This behavior is necessary + for applications such as grouped attention, where we have a batch of + sequences, and we want each sequence to be divided evenly among + experts. For simpler applications like mixture-of-experts, you can + reshape the input so that the "batch" dimension is 1, and only the + "length" dimension is used. + """ + + @add_name_scope("truncating_dispatcher") + def __init__(self, requests, expert_capacity): + """Create a TruncatingDispatcher. Args: - data_parallelism: a Parallelism object. - model_parallelism: a Parallelism object. - gates: a list of 1d integer `Tensor`s, one per datashard. - Says which expert to use for each batch element. + requests: a boolean `Tensor` of shape `[batch, length, num_experts]`. + Alternatively, a float or int Tensor containing zeros and ones. + expert_capacity: a Scalar - maximum number of examples per expert per + batch element. Returns: - a DistributedSingleDispatcher + a TruncatingDispatcher """ - gates = data_parallelism(tf.to_int32, gates) - self._gates = gates - self._data_parallelism = data_parallelism - self._model_parallelism = model_parallelism + self._requests = tf.to_float(requests) + self._expert_capacity = expert_capacity + expert_capacity_f = tf.to_float(expert_capacity) + self._batch, self._length, self._num_experts = tf.unstack( + tf.shape(self._requests), num=3) + + # [batch, length, num_experts] + position_in_expert = tf.cumsum(self._requests, axis=1, exclusive=True) + # [batch, length, num_experts] + self._gates = self._requests * tf.to_float( + tf.less(position_in_expert, expert_capacity_f)) + batch_index = tf.reshape( + tf.to_float(tf.range(self._batch)), [self._batch, 1, 1]) + length_index = tf.reshape( + tf.to_float(tf.range(self._length)), [1, self._length, 1]) + expert_index = tf.reshape( + tf.to_float(tf.range(self._num_experts)), [1, 1, self._num_experts]) + # position in a Tensor with shape [batch * num_experts * expert_capacity] + flat_position = ( + position_in_expert + + batch_index * (tf.to_float(self._num_experts) * expert_capacity_f) + + expert_index * expert_capacity_f) + # Tensor of shape [batch * num_experts * expert_capacity]. + # each element is an integer in [0, length) + self._indices = tf.unsorted_segment_sum( + data=tf.reshape((length_index + 1.0) * self._gates, [-1]), + segment_ids=tf.to_int32(tf.reshape(flat_position, [-1])), + num_segments=self._batch * self._num_experts * expert_capacity) + self._indices = tf.reshape( + self._indices, + [self._batch, self._num_experts, expert_capacity]) + # Tensors of shape [batch, num_experts, expert_capacity]. + # each element is 0.0 or 1.0 + self._nonpadding = tf.minimum(self._indices, 1.0) + # each element is an integer in [0, length) + self._indices = tf.nn.relu(self._indices - 1.0) + # self._flat_indices is [batch, num_experts, expert_capacity], with values + # in [0, batch * length) + self._flat_indices = tf.to_int32( + self._indices + + (tf.reshape(tf.to_float(tf.range(self._batch)), [-1, 1, 1]) + * tf.to_float(self._length))) + self._indices = tf.to_int32(self._indices) + + @add_name_scope("truncating_dispatcher_dispatch") + def dispatch(self, inp): + """Send the inputs to the experts. 
- # Compute the sizes number of examples going from each datashard to each - # expert. - def _PartSizes(gates): - return tf.unsorted_segment_sum( - tf.ones_like(gates), gates, model_parallelism.n) + Args: + inp: a `Tensor` of shape "[batch, length, depth]` + Returns: + a tensor with shape [batch, num_experts, expert_capacity, depth] + """ + inp = tf.reshape(inp, [self._batch * self._length, -1]) + # [batch, num_experts, expert_capacity, depth] + ret = tf.gather(inp, self._flat_indices) + return ret - part_sizes_by_datashard = data_parallelism(_PartSizes, gates) - self._part_sizes_by_expert = tf.unstack( - tf.stack(part_sizes_by_datashard), num=model_parallelism.n, axis=1) + @add_name_scope("truncating_dispatcher_combine") + def combine(self, x): + """Return the output from the experts. - # These indices will be used to combine the output on the datashards. - def _StitchIndices(gates): - return tf.dynamic_partition( - tf.range(tf.size(gates)), gates, model_parallelism.n) + When one example goes to multiple experts, the outputs are summed. - self._stitch_indices = data_parallelism(_StitchIndices, gates) + Args: + x: a Tensor with shape [batch, num_experts, expert_capacity, depth] - def Dispatch(self, d_tensors): - """Reshuffles input `Tensor`s to produce output `Tensor`s. + Returns: + a `Tensor` with shape `[batch, length, depth] + """ + depth = tf.shape(x)[-1] + x *= tf.expand_dims(self._nonpadding, -1) + ret = tf.unsorted_segment_sum( + x, self._flat_indices, num_segments=self._batch * self._length) + ret = tf.reshape(ret, [self._batch, self._length, depth]) + return ret - The dimensions of all input and output `Tensor`s match, except for - dimension 0. In dimension 0, the input `Tensor`s match the corresponding - `gates` `Tensor`s which were passed to the constructor. + def nonpadding(self): + """Which elements of a dispatched Tensor are not padding. - Args: - d_tensors: a list of `Tensor`s, one per datashard. + Returns: + a Zero/One float tensor with shape [batch, num_experts, expert_capacity]. + """ + return self._nonpadding + + def gates(self): + """A Tensor indicating which examples go to which experts. Returns: - a list of `Tensor`s, one per expert. + A float32 Tensor with shape [batch, length, num_experts], where each value + is 0.0 or 1.0. + """ + return self._gates + def length_coordinate(self): + """Length coordinate of dispatched tensor. + + Returns: + a tensor with shape [batch, num_experts, expert_capacity] containing + integers in the range [0, length) """ - parts = self._data_parallelism(tf.dynamic_partition, d_tensors, self._gates, - self._model_parallelism.n) - parts_by_expert = TransposeListOfLists(parts) - x_tensors = self._model_parallelism(tf.concat, parts_by_expert, 0) - return x_tensors + return self._indices - def Combine(self, x_tensors): - """Reshuffles per-expert `Tensor`s to produce per-datashard `Tensor`s. - Dispatch must have been called at least once first. +def local_moe_tpu(inputs, + hidden_size, + output_size, + num_experts, + loss_coef=1e-3, + overhead=1.0): + """Local mixture of experts that works well on TPU. - The dimensions of all input and output `Tensor`s match, except for - dimension 0. In dimension 0, the input `Tensor`s match the corresponding - outputs of `Dispatch`, and the output `Tensor`s match the corresponding - `gates` `Tensor`s which were passed to the constructor. + See https://arxiv.org/abs/1701.06538 - Args: - x_tensors: a list of `Tensor`s, one per expert. 
+ There are num_experts expert networks, each containing a relu-activated + hidden layer of size hidden_size, followed by an output projection. - Returns: - a list of `Tensor`s, one per datashard. - """ - parts = self._model_parallelism(tf.split, x_tensors, - self._part_sizes_by_expert) - d_tensors = self._data_parallelism(tf.dynamic_stitch, self._stitch_indices, - TransposeListOfLists(parts)) - return d_tensors + The number of parameters is thus: + num_experts * (input_size * hidden_size + hidden_size * output_size) + + The input is 3d: [batch, length, depth], consisting of the representations + of all positions in a batch of sequences. + + Each position of each sequence is sent to 0-2 experts. The expert + choices and the combination weights are determined by a learned gating + function. + + This function returns a small auxiliary loss that should be added to the + training loss of the model. This loss helps to balance expert usage. + Without the loss, it is very likely that a few experts will be trained and + the rest will starve. + Several hacks are necessary to get around current TPU limitations: -def ParallelEmbeddingLookup(params, ids, data_parallelism): - """Mod-sharded embedding lookup with multiple datashards. + - To ensure static shapes, we enforce (by truncation/padding) + that each sequence send the same number of elements to each expert. - TODO(noam): does this work when vocab_size is not a multiple of `num_shards`? + It would make more sense to enforce this equality over the entire batch, + as opposed to on individual sequences. This would allow more freedom + for individual sequences to be unbalanced. Unfortunately, that would + slow down our hacked-up gather-by-matmul implementation. + + TODO(noam): There is no real reason for a single sequence to be the unit + of equal allocation. Reshaping the inputs would allow us to pick a + different unit of equal allocation. + + TODO(noam): Factor this code better. We want to be able to substitute + different code for the experts themselves. We also want to integrate this + gating/dispatching logic into multi-device mixtures-of-experts. Args: - params: A list of `num_shards` `Tensors`, each with shapes - `[vocab_size / num_params, depth]`. - ids: A list of `num_datashards` one-dimensional ineger `Tensors`, - with shapes `[batch_size[i]]` - data_parallelism: A Parallelism object. + inputs: a Tensor with shape [batch, length, depth] + hidden_size: an integer + output_size: an integer + num_experts: an integer + loss_coef: a float scalar + overhead: multiplicative factor of how much spare capacity to assign Returns: - a list of `num_datashards` `Tensors`, each with shape - `[batch_size[i], depth]`. 
+ outputs: a Tensor with shape [batch, length, output_size] + loss: a scalar """ - param_devices = [x.device for x in params] - model_parallelism = Parallelism(param_devices) - num_shards = len(param_devices) - # pylint: disable=unbalanced-tuple-unpacking - ids, unique_idx = data_parallelism(tf.unique, ids) - # pylint: enable=unbalanced-tuple-unpacking - gates = data_parallelism(tf.mod, ids, num_shards) - ids_div = data_parallelism(tf.div, ids, num_shards) - dispatcher = DistributedSingleDispatcher(data_parallelism, model_parallelism, - gates) - x_ids_div = dispatcher.Dispatch(ids_div) - params = model_parallelism(ConvertGradientToTensor, params) - x_emb = model_parallelism(tf.gather, params, x_ids_div) - r_emb = dispatcher.Combine(x_emb) - r_emb = data_parallelism(tf.gather, r_emb, unique_idx) - return r_emb - - -def SampledSoftmaxLoss(features, sampler, num_classes, target_classes, - target_params, sampled_classes, sampled_params): - """Loss for training softmax classifiers on large label vocabulary. - - This function assumes that we have already chosen the sampled classes and - fetched the parameters for the target classes and the sampled classes. + batch, length, input_size = common_layers.shape_list(inputs)[:] + # Each sequence sends expert_capacity positions to each expert. + if isinstance(length, int): + expert_capacity = min( + length, int((length * 2 * overhead) / num_experts)) + else: + expert_capacity = tf.minimum( + length, tf.to_int32( + tf.to_float(length) * 2 * overhead / num_experts)) + expert_capacity_f = tf.to_float(expert_capacity) + + # This is the learned gating function. + gates = tf.nn.softmax( + tf.to_float(common_layers.dense(inputs, num_experts, name="logits"))) + + # Find the top expert for each position. + gate_1, index_1 = common_layers.top_1_tpu(gates) + # [batch, length, num_experts] + mask_1 = tf.one_hot(index_1, num_experts) + # [batch, length, num_experts] + # This is the position within the expert's mini-batch for this sequence + position_in_expert_1 = common_layers.cumsum( + mask_1, axis=1, exclusive=True) * mask_1 + # Remove the elements that don't fit. + mask_1 *= tf.to_float(tf.less(position_in_expert_1, expert_capacity_f)) + # [batch, 1, num_experts] + # How many examples in this sequence go to this expert + mask_1_count = tf.reduce_sum(mask_1, axis=1, keepdims=True) + # [batch, length] - mostly ones, but zeros where something didn't fit + mask_1_flat = tf.reduce_sum(mask_1, axis=2) + position_in_expert_1 = tf.reduce_sum(position_in_expert_1, axis=2) + # Weight assigned to first expert. + gate_1 *= mask_1_flat + + # Pick a second-place expert for each position. + # We first mask out the experts that we expect to be over-capacity + space_remaining = expert_capacity_f - mask_1_count + use_rate = (mask_1_count + 1.0) / tf.to_float(length) + # At what point in the sequence do we expect the expert to be full. + expected_exhaustion_pos = space_remaining / use_rate + # A Tensor with shape [batch, length, num_experts] representing a boolean + # - whether we expect that the expert will already be full. + expected_exhausted = tf.to_float(tf.greater( + tf.reshape(tf.to_float(tf.range(length)), [1, length, 1]), + expected_exhaustion_pos)) + masked_gates = gates - mask_1 - expected_exhausted + # This section is similar to the section above. 
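+  # Since `gates` holds softmax outputs in [0, 1], subtracting mask_1 and
+  # expected_exhausted pushes the first-choice expert and any expert expected
+  # to be full to non-positive values, so the top_1_tpu call below avoids
+  # picking them again.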
+ gate_2, index_2 = common_layers.top_1_tpu(masked_gates) + # [batch, length, num_experts] + mask_2 = tf.one_hot(index_2, num_experts) + position_in_expert_2 = ( + common_layers.cumsum(mask_2, axis=1, exclusive=True) + mask_1_count) + position_in_expert_2 *= mask_2 + mask_2 *= tf.to_float(tf.less(position_in_expert_2, expert_capacity_f)) + mask_2_count = tf.reduce_sum(mask_2, axis=1, keepdims=True) + mask_2_flat = tf.reduce_sum(mask_2, axis=2) + position_in_expert_2 = tf.reduce_sum(position_in_expert_2, axis=2) + gate_2 *= mask_2_flat + + # What fraction didn't fit - show summaries + miss_rate_1 = 1.0 - tf.reduce_sum(mask_1_count) / tf.to_float(batch * length) + miss_rate_2 = 1.0 - tf.reduce_sum(mask_2_count) / tf.to_float(batch * length) + tf.summary.scalar("miss_rate_1", miss_rate_1) + tf.summary.scalar("miss_rate_2", miss_rate_2) + + # renormalize the two gate values to add up to 1 + denom = gate_1 + gate_2 + 1e-9 + gate_1 /= denom + gate_2 /= denom + + # inputs: [batch, length, input_size] + # forward_assignment: [batch, length, num_experts * expert_capacity] + # expert_inputs: [batch, num_experts * expert_capacity, input_size] + + segment_ids_forward_1 = ( + (index_1 * expert_capacity) + + tf.to_int32(position_in_expert_1) + + tf.to_int32(1.0 - mask_1_flat) * (num_experts * expert_capacity)) + + segment_ids_forward_2 = ( + (index_2 * expert_capacity) + + tf.to_int32(position_in_expert_2) + + tf.to_int32(1.0 - mask_2_flat) * (num_experts * expert_capacity)) + + # Gather and scatter are painfully slow on TPU. + # We will use one_hot and matmul instead. + + # [batch, length, num_experts * expert_capacity] + one_hot_1 = tf.one_hot( + segment_ids_forward_1, num_experts * expert_capacity, dtype=inputs.dtype) + one_hot_2 = tf.one_hot( + segment_ids_forward_2, num_experts * expert_capacity, dtype=inputs.dtype) + + forward_assignment = (one_hot_1 + one_hot_2) + + # [batch, num_experts * expert_capacity, input_size] + expert_inputs = tf.matmul(forward_assignment, inputs, transpose_a=True) + + # [batch, num_experts, expert_capacity, input_size] + expert_inputs = tf.reshape( + expert_inputs, [batch, num_experts, expert_capacity, input_size]) + # [num_experts, batch, expert_capacity, input_size] + expert_inputs = tf.transpose(expert_inputs, [1, 0, 2, 3]) + + # [num_experts, batch * expert_capacity, input_size] + expert_inputs = tf.reshape( + expert_inputs, [num_experts, batch * expert_capacity, input_size]) + + # Now feed the expert inputs through the experts. + h = common_layers.batch_dense( + expert_inputs, hidden_size, activation=tf.nn.relu, name="x0") + expert_output = common_layers.batch_dense(h, output_size, name="x1") + expert_output = tf.reshape( + expert_output, [num_experts, batch, expert_capacity, output_size]) + + # [batch, num_experts, expert_capacity, output_size] + expert_output = tf.transpose(expert_output, [1, 0, 2, 3]) + expert_output = tf.reshape( + expert_output, [batch, num_experts * expert_capacity, output_size]) + + # Again, use matmul instead of unsorted_segment_sum. This time, we need + # to multiply by the combination weights gate_1 and gate_2. 
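+  # Concretely, the matmul below computes, for every position l:
+  #   output[b, l, :] = sum_c backward_assigmnent[b, l, c] * expert_output[b, c, :]
+  # i.e. a weighted sum of the (at most two) expert outputs assigned to that
+  # position, using the renormalized gate values as weights.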
+ + # expert_output: [batch, num_experts * expert_capacity, output_size] + # backward_assigmnent: [batch, length, num_experts * expert_capacity] + # output: [batch, length, output_size] + backward_assigmnent = ( + one_hot_1 * tf.cast(tf.expand_dims(gate_1, 2), inputs.dtype) + + one_hot_2 * tf.cast(tf.expand_dims(gate_2, 2), inputs.dtype)) + output = tf.matmul(backward_assigmnent, expert_output) + + # Compute a loss equal to the coefficient ov variation of the + # total gate value per expert per sequence. + # This loss causes the experts to be used about equally used per sequence. + importance = tf.reduce_sum(gates * (mask_1 + mask_2), 1) + loss = loss_coef * cv_squared(importance) + return output, loss + + +def reduce_by_device(parallelism, data, reduce_fn): + """Reduces data per device. + + This can be useful, for example, if we want to all-reduce n tensors on k $targets_file.tok +perl $mosesdecoder/scripts/tokenizer/tokenizer.perl -l en < $decodes_file > $decodes_file.tok + +# Get rouge scores +python get_rouge.py --decodes_filename $decodes_file.tok --targets_filename $targets_file.tok diff --git a/tensor2tensor/utils/get_ende_bleu.sh b/tensor2tensor/utils/get_ende_bleu.sh new file mode 100755 index 000000000..e48fad36d --- /dev/null +++ b/tensor2tensor/utils/get_ende_bleu.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +mosesdecoder=~/mosesdecoder +tok_gold_targets=newstest2013.tok.de + +decodes_file=$1 + +# Replace unicode. +perl $mosesdecoder/scripts/tokenizer/replace-unicode-punctuation.perl -l de < $decodes_file > $decodes_file.n + +# Tokenize. +perl $mosesdecoder/scripts/tokenizer/tokenizer.perl -l de < $decodes_file.n > $decodes_file.tok + +# Put compounds in ATAT format (comparable to papers like GNMT, ConvS2S). +# See https://nlp.stanford.edu/projects/nmt/ : +# 'Also, for historical reasons, we split compound words, e.g., +# "rich-text format" --> rich ##AT##-##AT## text format."' +perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' < $tok_gold_targets > $tok_gold_targets.atat +perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' < $decodes_file.tok > $decodes_file.tok.atat + +# Get BLEU. +perl $mosesdecoder/scripts/generic/multi-bleu.perl $tok_gold_targets.atat < $decodes_file.tok.atat diff --git a/tensor2tensor/utils/get_rouge.py b/tensor2tensor/utils/get_rouge.py new file mode 100644 index 000000000..a042f6aed --- /dev/null +++ b/tensor2tensor/utils/get_rouge.py @@ -0,0 +1,89 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Computing rouge scores using pyrouge.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging +import os +import shutil +from tempfile import mkdtemp +import pyrouge +import tensorflow.compat.v1 as tf + +FLAGS = tf.flags.FLAGS + +tf.flags.DEFINE_string("decodes_filename", None, + "File containing model generated summaries tokenized") +tf.flags.DEFINE_string("targets_filename", None, + "File containing model target summaries tokenized") + + +def write_to_file(filename, data): + data = ".\n".join(data.split(". ")) + with open(filename, "w") as fp: + fp.write(data) + + +def prep_data(decode_dir, target_dir): + with open(FLAGS.decodes_filename, "rb") as fdecodes: + with open(FLAGS.targets_filename, "rb") as ftargets: + for i, (d, t) in enumerate(zip(fdecodes, ftargets)): + write_to_file(os.path.join(decode_dir, "rouge.%06d.txt" % (i+1)), d) + write_to_file(os.path.join(target_dir, "rouge.A.%06d.txt" % (i+1)), t) + if (i+1 % 1000) == 0: + tf.logging.info("Written %d examples to file" % i) + + +def main(_): + rouge = pyrouge.Rouge155() + rouge.log.setLevel(logging.ERROR) + rouge.system_filename_pattern = "rouge.(\\d+).txt" + rouge.model_filename_pattern = "rouge.[A-Z].#ID#.txt" + + tf.logging.set_verbosity(tf.logging.INFO) + + tmpdir = mkdtemp() + tf.logging.info("tmpdir: %s" % tmpdir) + # system = decodes/predictions + system_dir = os.path.join(tmpdir, "system") + # model = targets/gold + model_dir = os.path.join(tmpdir, "model") + os.mkdir(system_dir) + os.mkdir(model_dir) + + rouge.system_dir = system_dir + rouge.model_dir = model_dir + + prep_data(rouge.system_dir, rouge.model_dir) + + rouge_scores = rouge.convert_and_evaluate() + rouge_scores = rouge.output_to_dict(rouge_scores) + for prefix in ["rouge_1", "rouge_2", "rouge_l"]: + for suffix in ["f_score", "precision", "recall"]: + key = "_".join([prefix, suffix]) + tf.logging.info("%s: %.4f" % (key, rouge_scores[key])) + + # clean up after pyrouge + shutil.rmtree(tmpdir) + shutil.rmtree(rouge._config_dir) # pylint: disable=protected-access + shutil.rmtree(os.path.split(rouge._system_dir)[0]) # pylint: disable=protected-access + + +if __name__ == "__main__": + tf.app.run() diff --git a/tensor2tensor/utils/hparam.py b/tensor2tensor/utils/hparam.py new file mode 100644 index 000000000..b45634399 --- /dev/null +++ b/tensor2tensor/utils/hparam.py @@ -0,0 +1,651 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Forked with minor changes from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/training/python/training/hparam.py pylint: disable=line-too-long +"""Hyperparameter values.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json +import numbers +import re +import six + +# Define the regular expression for parsing a single clause of the input +# (delimited by commas). 
A legal clause looks like: +# []? = +# where is either a single token or [] enclosed list of tokens. +# For example: "var[1] = a" or "x = [1,2,3]" +PARAM_RE = re.compile(r""" + (?P[a-zA-Z][\w\.]*) # variable name: "var" or "x" + (\[\s*(?P\d+)\s*\])? # (optional) index: "1" or None + \s*=\s* + ((?P[^,\[]*) # single value: "a" or None + | + \[(?P[^\]]*)\]) # list of values: None or "1,2,3" + ($|,\s*)""", re.VERBOSE) + + +def _parse_fail(name, var_type, value, values): + """Helper function for raising a value error for bad assignment.""" + raise ValueError( + 'Could not parse hparam \'%s\' of type \'%s\' with value \'%s\' in %s' % + (name, var_type.__name__, value, values)) + + +def _reuse_fail(name, values): + """Helper function for raising a value error for reuse of name.""" + raise ValueError('Multiple assignments to variable \'%s\' in %s' % (name, + values)) + + +def _process_scalar_value(name, parse_fn, var_type, m_dict, values, + results_dictionary): + """Update results_dictionary with a scalar value. + + Used to update the results_dictionary to be returned by parse_values when + encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".) + + Mutates results_dictionary. + + Args: + name: Name of variable in assignment ("s" or "arr"). + parse_fn: Function for parsing the actual value. + var_type: Type of named variable. + m_dict: Dictionary constructed from regex parsing. + m_dict['val']: RHS value (scalar) + m_dict['index']: List index value (or None) + values: Full expression being parsed + results_dictionary: The dictionary being updated for return by the parsing + function. + + Raises: + ValueError: If the name has already been used. + """ + try: + parsed_value = parse_fn(m_dict['val']) + except ValueError: + _parse_fail(name, var_type, m_dict['val'], values) + + # If no index is provided + if not m_dict['index']: + if name in results_dictionary: + _reuse_fail(name, values) + results_dictionary[name] = parsed_value + else: + if name in results_dictionary: + # The name has already been used as a scalar, then it + # will be in this dictionary and map to a non-dictionary. + if not isinstance(results_dictionary.get(name), dict): + _reuse_fail(name, values) + else: + results_dictionary[name] = {} + + index = int(m_dict['index']) + # Make sure the index position hasn't already been assigned a value. + if index in results_dictionary[name]: + _reuse_fail('{}[{}]'.format(name, index), values) + results_dictionary[name][index] = parsed_value + + +def _process_list_value(name, parse_fn, var_type, m_dict, values, + results_dictionary): + """Update results_dictionary from a list of values. + + Used to update results_dictionary to be returned by parse_values when + encountering a clause with a list RHS (e.g. "arr=[1,2,3]".) + + Mutates results_dictionary. + + Args: + name: Name of variable in assignment ("arr"). + parse_fn: Function for parsing individual values. + var_type: Type of named variable. + m_dict: Dictionary constructed from regex parsing. + m_dict['val']: RHS value (scalar) + values: Full expression being parsed + results_dictionary: The dictionary being updated for return by the parsing + function. + + Raises: + ValueError: If the name has an index or the values cannot be parsed. 
+ """ + if m_dict['index'] is not None: + raise ValueError('Assignment of a list to a list index.') + elements = filter(None, re.split('[ ,]', m_dict['vals'])) + # Make sure the name hasn't already been assigned a value + if name in results_dictionary: + raise _reuse_fail(name, values) + try: + results_dictionary[name] = [parse_fn(e) for e in elements] + except ValueError: + _parse_fail(name, var_type, m_dict['vals'], values) + + +def _cast_to_type_if_compatible(name, param_type, value): + """Cast hparam to the provided type, if compatible. + + Args: + name: Name of the hparam to be cast. + param_type: The type of the hparam. + value: The value to be cast, if compatible. + + Returns: + The result of casting `value` to `param_type`. + + Raises: + ValueError: If the type of `value` is not compatible with param_type. + * If `param_type` is a string type, but `value` is not. + * If `param_type` is a boolean, but `value` is not, or vice versa. + * If `param_type` is an integer type, but `value` is not. + * If `param_type` is a float type, but `value` is not a numeric type. + """ + fail_msg = ( + "Could not cast hparam '%s' of type '%s' from value %r" % + (name, param_type, value)) + + # Some callers use None, for which we can't do any casting/checking. :( + if issubclass(param_type, type(None)): + return value + + # Avoid converting a non-string type to a string. + if (issubclass(param_type, (six.string_types, six.binary_type)) and + not isinstance(value, (six.string_types, six.binary_type))): + raise ValueError(fail_msg) + + # Avoid converting a number or string type to a boolean or vice versa. + if issubclass(param_type, bool) != isinstance(value, bool): + raise ValueError(fail_msg) + + # Avoid converting float to an integer (the reverse is fine). + if (issubclass(param_type, numbers.Integral) and + not isinstance(value, numbers.Integral)): + raise ValueError(fail_msg) + + # Avoid converting a non-numeric type to a numeric type. + if (issubclass(param_type, numbers.Number) and + not isinstance(value, numbers.Number)): + raise ValueError(fail_msg) + + return param_type(value) + + +def parse_values(values, type_map, ignore_unknown=False): + """Parses hyperparameter values from a string into a python map. + + `values` is a string containing comma-separated `name=value` pairs. + For each pair, the value of the hyperparameter named `name` is set to + `value`. + + If a hyperparameter name appears multiple times in `values`, a ValueError + is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2'). + + If a hyperparameter name in both an index assignment and scalar assignment, + a ValueError is raised. (e.g. 'a=[1,2,3],a[0] = 1'). + + The hyperparameter name may contain '.' symbols, which will result in an + attribute name that is only accessible through the getattr and setattr + functions. (And must be first explicit added through add_hparam.) + + WARNING: Use of '.' in your variable names is allowed, but is not well + supported and not recommended. + + The `value` in `name=value` must follows the syntax according to the + type of the parameter: + + * Scalar integer: A Python-parsable integer point value. E.g.: 1, + 100, -12. + * Scalar float: A Python-parsable floating point value. E.g.: 1.0, + -.54e89. + * Boolean: Either true or false. + * Scalar string: A non-empty sequence of characters, excluding comma, + spaces, and square brackets. E.g.: foo, bar_1. + * List: A comma separated list of scalar values of the parameter type + enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low]. 
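+
+  For example, given type_map = {'lr': float, 'layers': int, 'label': str},
+  parse_values('lr=0.1,layers=[2,4,8],label=high', type_map) returns
+  {'lr': 0.1, 'layers': [2, 4, 8], 'label': 'high'}.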
+ + When index assignment is used, the corresponding type_map key should be the + list name. E.g. for "arr[1]=0" the type_map must have the key "arr" (not + "arr[1]"). + + Args: + values: String. Comma separated list of `name=value` pairs where + 'value' must follow the syntax described above. + type_map: A dictionary mapping hyperparameter names to types. Note every + parameter name in values must be a key in type_map. The values must + conform to the types indicated, where a value V is said to conform to a + type T if either V has type T, or V is a list of elements of type T. + Hence, for a multidimensional parameter 'x' taking float values, + 'x=[0.1,0.2]' will parse successfully if type_map['x'] = float. + ignore_unknown: Bool. Whether values that are missing a type in type_map + should be ignored. If set to True, a ValueError will not be raised for + unknown hyperparameter type. + + Returns: + A python map mapping each name to either: + * A scalar value. + * A list of scalar values. + * A dictionary mapping index numbers to scalar values. + (e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}") + + Raises: + ValueError: If there is a problem with input. + * If `values` cannot be parsed. + * If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]'). + * If the same rvalue is assigned two different values (e.g. 'a=1,a=2', + 'a[1]=1,a[1]=2', or 'a=1,a=[1]') + """ + results_dictionary = {} + pos = 0 + while pos < len(values): + m = PARAM_RE.match(values, pos) + if not m: + raise ValueError('Malformed hyperparameter value: %s' % values[pos:]) + # Check that there is a comma between parameters and move past it. + pos = m.end() + # Parse the values. + m_dict = m.groupdict() + name = m_dict['name'] + if name not in type_map: + if ignore_unknown: + continue + raise ValueError('Unknown hyperparameter type for %s' % name) + type_ = type_map[name] + + # Set up correct parsing function (depending on whether type_ is a bool) + if type_ == bool: + + def parse_bool(value): + if value in ['true', 'True']: + return True + elif value in ['false', 'False']: + return False + else: + try: + return bool(int(value)) + except ValueError: + _parse_fail(name, type_, value, values) + + parse = parse_bool + else: + parse = type_ + + # If a singe value is provided + if m_dict['val'] is not None: + _process_scalar_value(name, parse, type_, m_dict, values, + results_dictionary) + + # If the assigned value is a list: + elif m_dict['vals'] is not None: + _process_list_value(name, parse, type_, m_dict, values, + results_dictionary) + + else: # Not assigned a list or value + _parse_fail(name, type_, '', values) + + return results_dictionary + + +class HParams(object): + """Class to hold a set of hyperparameters as name-value pairs. + + A `HParams` object holds hyperparameters used to build and train a model, + such as the number of hidden units in a neural net layer or the learning rate + to use when training. + + You first create a `HParams` object by specifying the names and values of the + hyperparameters. + + To make them easily accessible the parameter names are added as direct + attributes of the class. 
A typical usage is as follows: + + ```python + # Create a HParams object specifying names and values of the model + # hyperparameters: + hparams = HParams(learning_rate=0.1, num_hidden_units=100) + + # The hyperparameter are available as attributes of the HParams object: + hparams.learning_rate ==> 0.1 + hparams.num_hidden_units ==> 100 + ``` + + Hyperparameters have type, which is inferred from the type of their value + passed at construction type. The currently supported types are: integer, + float, boolean, string, and list of integer, float, boolean, or string. + + You can override hyperparameter values by calling the + [`parse()`](#HParams.parse) method, passing a string of comma separated + `name=value` pairs. This is intended to make it possible to override + any hyperparameter values from a single command-line flag to which + the user passes 'hyper-param=value' pairs. It avoids having to define + one flag for each hyperparameter. + + The syntax expected for each value depends on the type of the parameter. + See `parse()` for a description of the syntax. + + Example: + + ```python + # Define a command line flag to pass name=value pairs. + # For example using argparse: + import argparse + parser = argparse.ArgumentParser(description='Train my model.') + parser.add_argument('--hparams', type=str, + help='Comma separated list of "name=value" pairs.') + args = parser.parse_args() + ... + def my_program(): + # Create a HParams object specifying the names and values of the + # model hyperparameters: + hparams = tf.HParams(learning_rate=0.1, num_hidden_units=100, + activations=['relu', 'tanh']) + + # Override hyperparameters values by parsing the command line + hparams.parse(args.hparams) + + # If the user passed `--hparams=learning_rate=0.3` on the command line + # then 'hparams' has the following attributes: + hparams.learning_rate ==> 0.3 + hparams.num_hidden_units ==> 100 + hparams.activations ==> ['relu', 'tanh'] + + # If the hyperparameters are in json format use parse_json: + hparams.parse_json('{"learning_rate": 0.3, "activations": "relu"}') + ``` + """ + + _HAS_DYNAMIC_ATTRIBUTES = True # Required for pytype checks. + + def __init__(self, model_structure=None, **kwargs): + """Create an instance of `HParams` from keyword arguments. + + The keyword arguments specify name-values pairs for the hyperparameters. + The parameter types are inferred from the type of the values passed. + + The parameter names are added as attributes of `HParams` object, so they + can be accessed directly with the dot notation `hparams._name_`. + + Example: + + ```python + # Define 3 hyperparameters: 'learning_rate' is a float parameter, + # 'num_hidden_units' an integer parameter, and 'activation' a string + # parameter. + hparams = tf.HParams( + learning_rate=0.1, num_hidden_units=100, activation='relu') + + hparams.activation ==> 'relu' + ``` + + Note that a few names are reserved and cannot be used as hyperparameter + names. If you use one of the reserved name the constructor raises a + `ValueError`. + + Args: + model_structure: An instance of ModelStructure, defining the feature + crosses to be used in the Trial. + **kwargs: Key-value pairs where the key is the hyperparameter name and + the value is the value for the parameter. + + Raises: + ValueError: If both `hparam_def` and initialization values are provided, + or if one of the arguments is invalid. + + """ + # Register the hyperparameters and their type in _hparam_types. + # This simplifies the implementation of parse(). 
+ # _hparam_types maps the parameter name to a tuple (type, bool). + # The type value is the type of the parameter for scalar hyperparameters, + # or the type of the list elements for multidimensional hyperparameters. + # The bool value is True if the value is a list, False otherwise. + self._hparam_types = {} + self._model_structure = model_structure + for name, value in six.iteritems(kwargs): + self.add_hparam(name, value) + + def add_hparam(self, name, value): + """Adds {name, value} pair to hyperparameters. + + Args: + name: Name of the hyperparameter. + value: Value of the hyperparameter. Can be one of the following types: + int, float, string, int list, float list, or string list. + + Raises: + ValueError: if one of the arguments is invalid. + """ + # Keys in kwargs are unique, but 'name' could the name of a pre-existing + # attribute of this object. In that case we refuse to use it as a + # hyperparameter name. + if getattr(self, name, None) is not None: + raise ValueError('Hyperparameter name is reserved: %s' % name) + if isinstance(value, (list, tuple)): + if not value: + raise ValueError( + 'Multi-valued hyperparameters cannot be empty: %s' % name) + self._hparam_types[name] = (type(value[0]), True) + else: + self._hparam_types[name] = (type(value), False) + setattr(self, name, value) + + def set_hparam(self, name, value): + """Set the value of an existing hyperparameter. + + This function verifies that the type of the value matches the type of the + existing hyperparameter. + + Args: + name: Name of the hyperparameter. + value: New value of the hyperparameter. + + Raises: + KeyError: If the hyperparameter doesn't exist. + ValueError: If there is a type mismatch. + """ + param_type, is_list = self._hparam_types[name] + if isinstance(value, list): + if not is_list: + raise ValueError( + 'Must not pass a list for single-valued parameter: %s' % name) + setattr(self, name, [ + _cast_to_type_if_compatible(name, param_type, v) for v in value]) + else: + if is_list: + raise ValueError( + 'Must pass a list for multi-valued parameter: %s.' % name) + setattr(self, name, _cast_to_type_if_compatible(name, param_type, value)) + + def del_hparam(self, name): + """Removes the hyperparameter with key 'name'. + + Does nothing if it isn't present. + + Args: + name: Name of the hyperparameter. + """ + if hasattr(self, name): + delattr(self, name) + del self._hparam_types[name] + + def parse(self, values): + """Override existing hyperparameter values, parsing new values from a string. + + See parse_values for more detail on the allowed format for values. + + Args: + values: String. Comma separated list of `name=value` pairs where 'value' + must follow the syntax described above. + + Returns: + The `HParams` instance. + + Raises: + ValueError: If `values` cannot be parsed or a hyperparameter in `values` + doesn't exist. + """ + type_map = {} + for name, t in self._hparam_types.items(): + param_type, _ = t + type_map[name] = param_type + + values_map = parse_values(values, type_map) + return self.override_from_dict(values_map) + + def override_from_dict(self, values_dict): + """Override existing hyperparameter values, parsing new values from a dictionary. + + Args: + values_dict: Dictionary of name:value pairs. + + Returns: + The `HParams` instance. + + Raises: + KeyError: If a hyperparameter in `values_dict` doesn't exist. + ValueError: If `values_dict` cannot be parsed. 
+ """ + for name, value in values_dict.items(): + self.set_hparam(name, value) + return self + + def set_model_structure(self, model_structure): + self._model_structure = model_structure + + def get_model_structure(self): + return self._model_structure + + def to_json(self, indent=None, separators=None, sort_keys=False): + """Serializes the hyperparameters into JSON. + + Args: + indent: If a non-negative integer, JSON array elements and object members + will be pretty-printed with that indent level. An indent level of 0, or + negative, will only insert newlines. `None` (the default) selects the + most compact representation. + separators: Optional `(item_separator, key_separator)` tuple. Default is + `(', ', ': ')`. + sort_keys: If `True`, the output dictionaries will be sorted by key. + + Returns: + A JSON string. + """ + def remove_callables(x): + """Omit callable elements from input with arbitrary nesting.""" + if isinstance(x, dict): + return {k: remove_callables(v) for k, v in six.iteritems(x) + if not callable(v)} + elif isinstance(x, list): + return [remove_callables(i) for i in x if not callable(i)] + return x + return json.dumps( + remove_callables(self.values()), + indent=indent, + separators=separators, + sort_keys=sort_keys) + + def parse_json(self, values_json): + """Override existing hyperparameter values, parsing new values from a json object. + + Args: + values_json: String containing a json object of name:value pairs. + + Returns: + The `HParams` instance. + + Raises: + KeyError: If a hyperparameter in `values_json` doesn't exist. + ValueError: If `values_json` cannot be parsed. + """ + values_map = json.loads(values_json) + return self.override_from_dict(values_map) + + def values(self): + """Return the hyperparameter values as a Python dictionary. + + Returns: + A dictionary with hyperparameter names as keys. The values are the + hyperparameter values. + """ + return {n: getattr(self, n) for n in self._hparam_types.keys()} + + def get(self, key, default=None): + """Returns the value of `key` if it exists, else `default`.""" + if key in self._hparam_types: + # Ensure that default is compatible with the parameter type. + if default is not None: + param_type, is_param_list = self._hparam_types[key] + type_str = 'list<%s>' % param_type if is_param_list else str(param_type) + fail_msg = ("Hparam '%s' of type '%s' is incompatible with " + 'default=%s' % (key, type_str, default)) + + is_default_list = isinstance(default, list) + if is_param_list != is_default_list: + raise ValueError(fail_msg) + + try: + if is_default_list: + for value in default: + _cast_to_type_if_compatible(key, param_type, value) + else: + _cast_to_type_if_compatible(key, param_type, default) + except ValueError as e: + raise ValueError('%s. %s' % (fail_msg, e)) + + return getattr(self, key) + + return default + + def __contains__(self, key): + return key in self._hparam_types + + def __str__(self): + return str(sorted(self.values().items())) + + def __repr__(self): + return '%s(%s)' % (type(self).__name__, self.__str__()) + + @staticmethod + def _get_kind_name(param_type, is_list): + """Returns the field name given parameter type and is_list. + + Args: + param_type: Data type of the hparam. + is_list: Whether this is a list. + + Returns: + A string representation of the field name. + + Raises: + ValueError: If parameter type is not recognized. 
+ """ + if issubclass(param_type, bool): + # This check must happen before issubclass(param_type, six.integer_types), + # since Python considers bool to be a subclass of int. + typename = 'bool' + elif issubclass(param_type, six.integer_types): + # Setting 'int' and 'long' types to be 'int64' to ensure the type is + # compatible with both Python2 and Python3. + typename = 'int64' + elif issubclass(param_type, (six.string_types, six.binary_type)): + # Setting 'string' and 'bytes' types to be 'bytes' to ensure the type is + # compatible with both Python2 and Python3. + typename = 'bytes' + elif issubclass(param_type, float): + typename = 'float' + else: + raise ValueError('Unsupported parameter type: %s' % str(param_type)) + + suffix = 'list' if is_list else 'value' + return '_'.join([typename, suffix]) diff --git a/tensor2tensor/utils/hparam_test.py b/tensor2tensor/utils/hparam_test.py new file mode 100644 index 000000000..51c07dba4 --- /dev/null +++ b/tensor2tensor/utils/hparam_test.py @@ -0,0 +1,518 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Forked with minor changes from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/training/python/training/hparam_test.py pylint: disable=line-too-long +"""Tests for hparam.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.utils import hparam + +import tensorflow.compat.v1 as tf + + +class HParamsTest(tf.test.TestCase): + + def testEmpty(self): + hparams = hparam.HParams() + self.assertDictEqual({}, hparams.values()) + hparams.parse('') + self.assertDictEqual({}, hparams.values()) + with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'): + hparams.parse('xyz=123') + + def testContains(self): + hparams = hparam.HParams(foo=1) + self.assertTrue('foo' in hparams) + self.assertFalse('bar' in hparams) + + def testSomeValues(self): + hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d='/a/b=c/d') + self.assertDictEqual( + {'aaa': 1, 'b': 2.0, 'c_c': 'relu6', 'd': '/a/b=c/d'}, + hparams.values()) + expected_str = ('[(\'aaa\', 1), (\'b\', 2.0), (\'c_c\', \'relu6\'), ' + '(\'d\', \'/a/b=c/d\')]') + self.assertEqual(expected_str, str(hparams.__str__())) + self.assertEqual(expected_str, str(hparams)) + self.assertEqual(1, hparams.aaa) + self.assertEqual(2.0, hparams.b) + self.assertEqual('relu6', hparams.c_c) + self.assertEqual('/a/b=c/d', hparams.d) + hparams.parse('aaa=12') + self.assertDictEqual({ + 'aaa': 12, + 'b': 2.0, + 'c_c': 'relu6', + 'd': '/a/b=c/d' + }, hparams.values()) + self.assertEqual(12, hparams.aaa) + self.assertEqual(2.0, hparams.b) + self.assertEqual('relu6', hparams.c_c) + self.assertEqual('/a/b=c/d', hparams.d) + hparams.parse('c_c=relu4, b=-2.0e10') + self.assertDictEqual({ + 'aaa': 12, + 'b': -2.0e10, + 'c_c': 'relu4', + 'd': '/a/b=c/d' + }, hparams.values()) + self.assertEqual(12, hparams.aaa) + self.assertEqual(-2.0e10, hparams.b) + 
self.assertEqual('relu4', hparams.c_c) + self.assertEqual('/a/b=c/d', hparams.d) + hparams.parse('c_c=,b=0,') + self.assertDictEqual({'aaa': 12, 'b': 0, 'c_c': '', 'd': '/a/b=c/d'}, + hparams.values()) + self.assertEqual(12, hparams.aaa) + self.assertEqual(0.0, hparams.b) + self.assertEqual('', hparams.c_c) + self.assertEqual('/a/b=c/d', hparams.d) + hparams.parse('c_c=2.3",b=+2,') + self.assertEqual(2.0, hparams.b) + self.assertEqual('2.3"', hparams.c_c) + hparams.parse('d=/a/b/c/d,aaa=11,') + self.assertEqual(11, hparams.aaa) + self.assertEqual(2.0, hparams.b) + self.assertEqual('2.3"', hparams.c_c) + self.assertEqual('/a/b/c/d', hparams.d) + hparams.parse('b=1.5,d=/a=b/c/d,aaa=10,') + self.assertEqual(10, hparams.aaa) + self.assertEqual(1.5, hparams.b) + self.assertEqual('2.3"', hparams.c_c) + self.assertEqual('/a=b/c/d', hparams.d) + with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'): + hparams.parse('x=123') + with self.assertRaisesRegexp(ValueError, 'Could not parse'): + hparams.parse('aaa=poipoi') + with self.assertRaisesRegexp(ValueError, 'Could not parse'): + hparams.parse('aaa=1.0') + with self.assertRaisesRegexp(ValueError, 'Could not parse'): + hparams.parse('b=12x') + with self.assertRaisesRegexp(ValueError, 'Could not parse'): + hparams.parse('b=relu') + with self.assertRaisesRegexp(ValueError, 'Must not pass a list'): + hparams.parse('aaa=[123]') + self.assertEqual(10, hparams.aaa) + self.assertEqual(1.5, hparams.b) + self.assertEqual('2.3"', hparams.c_c) + self.assertEqual('/a=b/c/d', hparams.d) + + def testWithPeriodInVariableName(self): + hparams = hparam.HParams() + hparams.add_hparam(name='a.b', value=0.0) + hparams.parse('a.b=1.0') + self.assertEqual(1.0, getattr(hparams, 'a.b')) + hparams.add_hparam(name='c.d', value=0.0) + with self.assertRaisesRegexp(ValueError, 'Could not parse'): + hparams.parse('c.d=abc') + hparams.add_hparam(name='e.f', value='') + hparams.parse('e.f=abc') + self.assertEqual('abc', getattr(hparams, 'e.f')) + hparams.add_hparam(name='d..', value=0.0) + hparams.parse('d..=10.0') + self.assertEqual(10.0, getattr(hparams, 'd..')) + + def testSetFromMap(self): + hparams = hparam.HParams(a=1, b=2.0, c='tanh') + hparams.override_from_dict({'a': -2, 'c': 'identity'}) + self.assertDictEqual({'a': -2, 'c': 'identity', 'b': 2.0}, hparams.values()) + + hparams = hparam.HParams(x=1, b=2.0, d=[0.5]) + hparams.override_from_dict({'d': [0.1, 0.2, 0.3]}) + self.assertDictEqual({'d': [0.1, 0.2, 0.3], 'x': 1, 'b': 2.0}, + hparams.values()) + + def testFunction(self): + def f(x): + return x + hparams = hparam.HParams(function=f) + self.assertEqual(hparams.function, f) + + json_str = hparams.to_json() + self.assertEqual(json_str, '{}') + + def testBoolParsing(self): + for value in 'true', 'false', 'True', 'False', '1', '0': + for initial in False, True: + hparams = hparam.HParams(use_gpu=initial) + hparams.parse('use_gpu=' + value) + self.assertEqual(hparams.use_gpu, value in ['True', 'true', '1']) + + def testBoolParsingFail(self): + hparams = hparam.HParams(use_gpu=True) + with self.assertRaisesRegexp(ValueError, r'Could not parse.*use_gpu'): + hparams.parse('use_gpu=yep') + + def testLists(self): + hparams = hparam.HParams(aaa=[1], b=[2.0, 3.0], c_c=['relu6']) + self.assertDictEqual({ + 'aaa': [1], + 'b': [2.0, 3.0], + 'c_c': ['relu6'] + }, hparams.values()) + self.assertEqual([1], hparams.aaa) + self.assertEqual([2.0, 3.0], hparams.b) + self.assertEqual(['relu6'], hparams.c_c) + hparams.parse('aaa=[12]') + self.assertEqual([12], hparams.aaa) + 
hparams.parse('aaa=[12,34,56]') + self.assertEqual([12, 34, 56], hparams.aaa) + hparams.parse('c_c=[relu4,relu12],b=[1.0]') + self.assertEqual(['relu4', 'relu12'], hparams.c_c) + self.assertEqual([1.0], hparams.b) + hparams.parse('c_c=[],aaa=[-34]') + self.assertEqual([-34], hparams.aaa) + self.assertEqual([], hparams.c_c) + hparams.parse('c_c=[_12,3\'4"],aaa=[+3]') + self.assertEqual([3], hparams.aaa) + self.assertEqual(['_12', '3\'4"'], hparams.c_c) + with self.assertRaisesRegexp(ValueError, 'Unknown hyperparameter'): + hparams.parse('x=[123]') + with self.assertRaisesRegexp(ValueError, 'Could not parse'): + hparams.parse('aaa=[poipoi]') + with self.assertRaisesRegexp(ValueError, 'Could not parse'): + hparams.parse('aaa=[1.0]') + with self.assertRaisesRegexp(ValueError, 'Could not parse'): + hparams.parse('b=[12x]') + with self.assertRaisesRegexp(ValueError, 'Could not parse'): + hparams.parse('b=[relu]') + with self.assertRaisesRegexp(ValueError, 'Must pass a list'): + hparams.parse('aaa=123') + + def testParseValuesWithIndexAssigment1(self): + """Assignment to an index position.""" + parse_dict = hparam.parse_values('arr[1]=10', {'arr': int}) + self.assertEqual(len(parse_dict), 1) + self.assertIsInstance(parse_dict['arr'], dict) + self.assertDictEqual(parse_dict['arr'], {1: 10}) + + def testParseValuesWithIndexAssigment1_IgnoreUnknown(self): + """Assignment to an index position.""" + parse_dict = hparam.parse_values( + 'arr[1]=10,b=5', {'arr': int}, ignore_unknown=True) + self.assertEqual(len(parse_dict), 1) + self.assertIsInstance(parse_dict['arr'], dict) + self.assertDictEqual(parse_dict['arr'], {1: 10}) + + def testParseValuesWithIndexAssigment2(self): + """Assignment to multiple index positions.""" + parse_dict = hparam.parse_values('arr[0]=10,arr[5]=20', {'arr': int}) + self.assertEqual(len(parse_dict), 1) + self.assertIsInstance(parse_dict['arr'], dict) + self.assertDictEqual(parse_dict['arr'], {0: 10, 5: 20}) + + def testParseValuesWithIndexAssigment2_IgnoreUnknown(self): + """Assignment to multiple index positions.""" + parse_dict = hparam.parse_values( + 'arr[0]=10,arr[5]=20,foo=bar', {'arr': int}, ignore_unknown=True) + self.assertEqual(len(parse_dict), 1) + self.assertIsInstance(parse_dict['arr'], dict) + self.assertDictEqual(parse_dict['arr'], {0: 10, 5: 20}) + + def testParseValuesWithIndexAssigment3(self): + """Assignment to index positions in multiple names.""" + parse_dict = hparam.parse_values('arr[0]=10,arr[1]=20,L[5]=100,L[10]=200', + {'arr': int, + 'L': int}) + self.assertEqual(len(parse_dict), 2) + self.assertIsInstance(parse_dict['arr'], dict) + self.assertDictEqual(parse_dict['arr'], {0: 10, 1: 20}) + self.assertIsInstance(parse_dict['L'], dict) + self.assertDictEqual(parse_dict['L'], {5: 100, 10: 200}) + + def testParseValuesWithIndexAssigment3_IgnoreUnknown(self): + """Assignment to index positions in multiple names.""" + parse_dict = hparam.parse_values( + 'arr[0]=10,C=5,arr[1]=20,B[0]=kkk,L[5]=100,L[10]=200', + {'arr': int, 'L': int}, ignore_unknown=True) + self.assertEqual(len(parse_dict), 2) + self.assertIsInstance(parse_dict['arr'], dict) + self.assertDictEqual(parse_dict['arr'], {0: 10, 1: 20}) + self.assertIsInstance(parse_dict['L'], dict) + self.assertDictEqual(parse_dict['L'], {5: 100, 10: 200}) + + def testParseValuesWithIndexAssigment4(self): + """Assignment of index positions and scalars.""" + parse_dict = hparam.parse_values('x=10,arr[1]=20,y=30', + {'x': int, + 'y': int, + 'arr': int}) + self.assertEqual(len(parse_dict), 3) + 
self.assertIsInstance(parse_dict['arr'], dict) + self.assertDictEqual(parse_dict['arr'], {1: 20}) + self.assertEqual(parse_dict['x'], 10) + self.assertEqual(parse_dict['y'], 30) + + def testParseValuesWithIndexAssigment4_IgnoreUnknown(self): + """Assignment of index positions and scalars.""" + parse_dict = hparam.parse_values( + 'x=10,foo[0]=bar,arr[1]=20,zzz=78,y=30', + {'x': int, 'y': int, 'arr': int}, ignore_unknown=True) + self.assertEqual(len(parse_dict), 3) + self.assertIsInstance(parse_dict['arr'], dict) + self.assertDictEqual(parse_dict['arr'], {1: 20}) + self.assertEqual(parse_dict['x'], 10) + self.assertEqual(parse_dict['y'], 30) + + def testParseValuesWithIndexAssigment5(self): + """Different variable types.""" + parse_dict = hparam.parse_values('a[0]=5,b[1]=true,c[2]=abc,d[3]=3.14', { + 'a': int, + 'b': bool, + 'c': str, + 'd': float + }) + self.assertEqual(set(parse_dict.keys()), {'a', 'b', 'c', 'd'}) + self.assertIsInstance(parse_dict['a'], dict) + self.assertDictEqual(parse_dict['a'], {0: 5}) + self.assertIsInstance(parse_dict['b'], dict) + self.assertDictEqual(parse_dict['b'], {1: True}) + self.assertIsInstance(parse_dict['c'], dict) + self.assertDictEqual(parse_dict['c'], {2: 'abc'}) + self.assertIsInstance(parse_dict['d'], dict) + self.assertDictEqual(parse_dict['d'], {3: 3.14}) + + def testParseValuesWithIndexAssigment5_IgnoreUnknown(self): + """Different variable types.""" + parse_dict = hparam.parse_values( + 'a[0]=5,cc=4,b[1]=true,c[2]=abc,mm=2,d[3]=3.14', + {'a': int, 'b': bool, 'c': str, 'd': float}, + ignore_unknown=True) + self.assertEqual(set(parse_dict.keys()), {'a', 'b', 'c', 'd'}) + self.assertIsInstance(parse_dict['a'], dict) + self.assertDictEqual(parse_dict['a'], {0: 5}) + self.assertIsInstance(parse_dict['b'], dict) + self.assertDictEqual(parse_dict['b'], {1: True}) + self.assertIsInstance(parse_dict['c'], dict) + self.assertDictEqual(parse_dict['c'], {2: 'abc'}) + self.assertIsInstance(parse_dict['d'], dict) + self.assertDictEqual(parse_dict['d'], {3: 3.14}) + + def testParseValuesWithBadIndexAssigment1(self): + """Reject assignment of list to variable type.""" + with self.assertRaisesRegexp(ValueError, + r'Assignment of a list to a list index.'): + hparam.parse_values('arr[1]=[1,2,3]', {'arr': int}) + + def testParseValuesWithBadIndexAssigment1_IgnoreUnknown(self): + """Reject assignment of list to variable type.""" + with self.assertRaisesRegexp(ValueError, + r'Assignment of a list to a list index.'): + hparam.parse_values( + 'arr[1]=[1,2,3],c=8', {'arr': int}, ignore_unknown=True) + + def testParseValuesWithBadIndexAssigment2(self): + """Reject if type missing.""" + with self.assertRaisesRegexp(ValueError, + r'Unknown hyperparameter type for arr'): + hparam.parse_values('arr[1]=5', {}) + + def testParseValuesWithBadIndexAssigment2_IgnoreUnknown(self): + """Ignore missing type.""" + hparam.parse_values('arr[1]=5', {}, ignore_unknown=True) + + def testParseValuesWithBadIndexAssigment3(self): + """Reject type of the form name[index].""" + with self.assertRaisesRegexp(ValueError, + 'Unknown hyperparameter type for arr'): + hparam.parse_values('arr[1]=1', {'arr[1]': int}) + + def testParseValuesWithBadIndexAssigment3_IgnoreUnknown(self): + """Ignore type of the form name[index].""" + hparam.parse_values('arr[1]=1', {'arr[1]': int}, ignore_unknown=True) + + def testWithReusedVariables(self): + with self.assertRaisesRegexp(ValueError, + 'Multiple assignments to variable \'x\''): + hparam.parse_values('x=1,x=1', {'x': int}) + + with 
self.assertRaisesRegexp(ValueError, + 'Multiple assignments to variable \'arr\''): + hparam.parse_values('arr=[100,200],arr[0]=10', {'arr': int}) + + with self.assertRaisesRegexp( + ValueError, r'Multiple assignments to variable \'arr\[0\]\''): + hparam.parse_values('arr[0]=10,arr[0]=20', {'arr': int}) + + with self.assertRaisesRegexp(ValueError, + 'Multiple assignments to variable \'arr\''): + hparam.parse_values('arr[0]=10,arr=[100]', {'arr': int}) + + def testJson(self): + hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True) + self.assertDictEqual({ + 'aaa': 1, + 'b': 2.0, + 'c_c': 'relu6', + 'd': True + }, hparams.values()) + self.assertEqual(1, hparams.aaa) + self.assertEqual(2.0, hparams.b) + self.assertEqual('relu6', hparams.c_c) + hparams.parse_json('{"aaa": 12, "b": 3.0, "c_c": "relu4", "d": false}') + self.assertDictEqual({ + 'aaa': 12, + 'b': 3.0, + 'c_c': 'relu4', + 'd': False + }, hparams.values()) + self.assertEqual(12, hparams.aaa) + self.assertEqual(3.0, hparams.b) + self.assertEqual('relu4', hparams.c_c) + + json_str = hparams.to_json() + hparams2 = hparam.HParams(aaa=10, b=20.0, c_c='hello', d=False) + hparams2.parse_json(json_str) + self.assertEqual(12, hparams2.aaa) + self.assertEqual(3.0, hparams2.b) + self.assertEqual('relu4', hparams2.c_c) + self.assertEqual(False, hparams2.d) + + hparams3 = hparam.HParams(aaa=123) + self.assertEqual('{"aaa": 123}', hparams3.to_json()) + self.assertEqual('{\n "aaa": 123\n}', hparams3.to_json(indent=2)) + self.assertEqual('{"aaa"=123}', hparams3.to_json(separators=(';', '='))) + + hparams4 = hparam.HParams(aaa=123, b='hello', c_c=False) + self.assertEqual( + '{"aaa": 123, "b": "hello", "c_c": false}', + hparams4.to_json(sort_keys=True)) + + def testSetHParam(self): + hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True) + self.assertDictEqual({ + 'aaa': 1, + 'b': 2.0, + 'c_c': 'relu6', + 'd': True + }, hparams.values()) + self.assertEqual(1, hparams.aaa) + self.assertEqual(2.0, hparams.b) + self.assertEqual('relu6', hparams.c_c) + + hparams.set_hparam('aaa', 12) + hparams.set_hparam('b', 3.0) + hparams.set_hparam('c_c', 'relu4') + hparams.set_hparam('d', False) + self.assertDictEqual({ + 'aaa': 12, + 'b': 3.0, + 'c_c': 'relu4', + 'd': False + }, hparams.values()) + self.assertEqual(12, hparams.aaa) + self.assertEqual(3.0, hparams.b) + self.assertEqual('relu4', hparams.c_c) + + def testSetHParamListNonListMismatch(self): + hparams = hparam.HParams(a=1, b=[2.0, 3.0]) + with self.assertRaisesRegexp(ValueError, r'Must not pass a list'): + hparams.set_hparam('a', [1.0]) + with self.assertRaisesRegexp(ValueError, r'Must pass a list'): + hparams.set_hparam('b', 1.0) + + def testSetHParamTypeMismatch(self): + hparams = hparam.HParams( + int_=1, str_='str', bool_=True, float_=1.1, list_int=[1, 2], none=None) + + with self.assertRaises(ValueError): + hparams.set_hparam('str_', 2.2) + + with self.assertRaises(ValueError): + hparams.set_hparam('int_', False) + + with self.assertRaises(ValueError): + hparams.set_hparam('bool_', 1) + + with self.assertRaises(ValueError): + hparams.set_hparam('int_', 2.2) + + with self.assertRaises(ValueError): + hparams.set_hparam('list_int', [2, 3.3]) + + with self.assertRaises(ValueError): + hparams.set_hparam('int_', '2') + + # Casting int to float is OK + hparams.set_hparam('float_', 1) + + # Getting stuck with NoneType :( + hparams.set_hparam('none', '1') + self.assertEqual('1', hparams.none) + + def testGet(self): + hparams = hparam.HParams(aaa=1, b=2.0, c_c='relu6', d=True, e=[5.0, 6.0]) + + # 
Existing parameters with default=None. + self.assertEqual(1, hparams.get('aaa')) + self.assertEqual(2.0, hparams.get('b')) + self.assertEqual('relu6', hparams.get('c_c')) + self.assertEqual(True, hparams.get('d')) + self.assertEqual([5.0, 6.0], hparams.get('e', None)) + + # Existing parameters with compatible defaults. + self.assertEqual(1, hparams.get('aaa', 2)) + self.assertEqual(2.0, hparams.get('b', 3.0)) + self.assertEqual(2.0, hparams.get('b', 3)) + self.assertEqual('relu6', hparams.get('c_c', 'default')) + self.assertEqual(True, hparams.get('d', True)) + self.assertEqual([5.0, 6.0], hparams.get('e', [1.0, 2.0, 3.0])) + self.assertEqual([5.0, 6.0], hparams.get('e', [1, 2, 3])) + + # Existing parameters with incompatible defaults. + with self.assertRaises(ValueError): + hparams.get('aaa', 2.0) + + with self.assertRaises(ValueError): + hparams.get('b', False) + + with self.assertRaises(ValueError): + hparams.get('c_c', [1, 2, 3]) + + with self.assertRaises(ValueError): + hparams.get('d', 'relu') + + with self.assertRaises(ValueError): + hparams.get('e', 123.0) + + with self.assertRaises(ValueError): + hparams.get('e', ['a', 'b', 'c']) + + # Nonexistent parameters. + self.assertEqual(None, hparams.get('unknown')) + self.assertEqual(123, hparams.get('unknown', 123)) + self.assertEqual([1, 2, 3], hparams.get('unknown', [1, 2, 3])) + + def testDel(self): + hparams = hparam.HParams(aaa=1, b=2.0) + + with self.assertRaises(ValueError): + hparams.set_hparam('aaa', 'will fail') + + with self.assertRaises(ValueError): + hparams.add_hparam('aaa', 'will fail') + + hparams.del_hparam('aaa') + hparams.add_hparam('aaa', 'will work') + self.assertEqual('will work', hparams.get('aaa')) + + hparams.set_hparam('aaa', 'still works') + self.assertEqual('still works', hparams.get('aaa')) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/utils/hparams_lib.py b/tensor2tensor/utils/hparams_lib.py new file mode 100644 index 000000000..14f60739b --- /dev/null +++ b/tensor2tensor/utils/hparams_lib.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""T2T HParams handling.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import json + +from tensor2tensor.data_generators import problem as problem_lib +from tensor2tensor.utils import hparam +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +def copy_hparams(hparams): + hp_vals = hparams.values() + new_hparams = hparam.HParams(**hp_vals) + other_attrs = ["problem", "problem_hparams"] + for attr in other_attrs: + attr_val = getattr(hparams, attr, None) + if attr_val is not None: + setattr(new_hparams, attr, attr_val) + return new_hparams + + +def create_hparams(hparams_set, + hparams_overrides_str="", + data_dir=None, + problem_name=None, + hparams_path=None): + """Create HParams with data_dir and problem hparams, if kwargs provided.""" + hparams = registry.hparams(hparams_set) + if hparams_path and tf.gfile.Exists(hparams_path): + hparams = create_hparams_from_json(hparams_path, hparams) + if data_dir: + hparams.add_hparam("data_dir", data_dir) + if hparams_overrides_str: + tf.logging.info("Overriding hparams in %s with %s", hparams_set, + hparams_overrides_str) + hparams = hparams.parse(hparams_overrides_str) + if problem_name: + add_problem_hparams(hparams, problem_name) + return hparams + + +def create_hparams_from_json(json_path, hparams=None): + """Loading hparams from json; can also start from hparams if specified.""" + tf.logging.info("Loading hparams from existing json %s" % json_path) + with tf.gfile.Open(json_path, "r") as f: + hparams_values = json.load(f) + # Prevent certain keys from overwriting the passed-in hparams. + # TODO(trandustin): Remove this hack after registries are available to avoid + # saving them as functions. + if hparams: + hparams_values.pop("bottom", None) + hparams_values.pop("loss", None) + hparams_values.pop("name", None) + hparams_values.pop("top", None) + hparams_values.pop("weights_fn", None) + new_hparams = hparam.HParams(**hparams_values) + # Some keys are in new_hparams but not hparams, so we need to be more + # careful than simply using parse_json() from HParams + if hparams: # hparams specified, so update values from json + for key in sorted(new_hparams.values().keys()): + if hasattr(hparams, key): # Overlapped keys + value = getattr(hparams, key) + new_value = getattr(new_hparams, key) + if value != new_value: # Different values + tf.logging.info("Overwrite key %s: %s -> %s" % ( + key, value, new_value)) + setattr(hparams, key, new_value) + else: + hparams = new_hparams + + return hparams + + +def add_problem_hparams(hparams, problem_name_or_instance): + """Add problem hparams for the problems.""" + if isinstance(problem_name_or_instance, problem_lib.Problem): + problem = problem_name_or_instance + else: + problem = registry.problem(problem_name_or_instance) + p_hparams = problem.get_hparams(hparams) + hparams.problem = problem + hparams.problem_hparams = p_hparams diff --git a/tensor2tensor/utils/hparams_lib_test.py b/tensor2tensor/utils/hparams_lib_test.py new file mode 100644 index 000000000..0ac0864b0 --- /dev/null +++ b/tensor2tensor/utils/hparams_lib_test.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for trainer_lib.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from tensor2tensor.utils import hparams_lib + +import tensorflow.compat.v1 as tf + + +class HparamsLibTest(tf.test.TestCase): + + def testCreateHparamsFromJson(self): + # Get json_path + pkg = os.path.abspath(__file__) + pkg, _ = os.path.split(pkg) + pkg, _ = os.path.split(pkg) + json_path = os.path.join( + pkg, "test_data", "transformer_test_ckpt", "hparams.json") + + # Create hparams + hparams = hparams_lib.create_hparams_from_json(json_path) + self.assertEqual(75, len(hparams.values())) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/utils/learning_rate.py b/tensor2tensor/utils/learning_rate.py new file mode 100644 index 000000000..a3c700468 --- /dev/null +++ b/tensor2tensor/utils/learning_rate.py @@ -0,0 +1,215 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Optimization.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np + +from tensor2tensor.utils import mlperf_log +import tensorflow.compat.v1 as tf + + +def learning_rate_factor(name, step_num, hparams): + """Compute the designated learning rate factor from hparams.""" + if name == "constant": + tf.logging.info("Base learning rate: %f", hparams.learning_rate_constant) + return hparams.learning_rate_constant + elif name == "linear_warmup": + return tf.minimum(1.0, step_num / hparams.learning_rate_warmup_steps) + elif name == "linear_decay": + ret = (hparams.train_steps - step_num) / hparams.learning_rate_decay_steps + return tf.minimum(1.0, tf.maximum(0.0, ret)) + elif name == "cosdecay": # openai gpt + in_warmup = tf.cast(step_num <= hparams.learning_rate_warmup_steps, + dtype=tf.float32) + ret = 0.5 * (1 + tf.cos( + np.pi * step_num / hparams.learning_rate_decay_steps)) + # if in warmup stage return 1 else return the decayed value + return in_warmup * 1 + (1 - in_warmup) * ret + elif name == "single_cycle_cos_decay": + # Cosine decay to zero with a single cycle. This is different from + # "cosdecay" because it starts at 1 when the warmup steps end. 
+ x = tf.maximum(step_num, hparams.learning_rate_warmup_steps) + step = x - hparams.learning_rate_warmup_steps + if hparams.train_steps <= hparams.learning_rate_warmup_steps: + raise ValueError("single_cycle_cos_decay cannot be used unless " + "hparams.train_steps > " + "hparams.learning_rate_warmup_steps") + return tf.math.cos( + step * np.pi / + (hparams.train_steps - hparams.learning_rate_warmup_steps)) / 2.0 + 0.5 + elif name == "multi_cycle_cos_decay": + # Cosine decay with a variable number of cycles. This is different from + # "cosdecay" because it starts at 1 when the warmup steps end. Use + # hparams.learning_rate_decay_steps to determine the number of cycles. + x = tf.maximum(step_num, hparams.learning_rate_warmup_steps) + step = x - hparams.learning_rate_warmup_steps + return tf.math.cos( + step * np.pi / hparams.learning_rate_decay_steps) / 2.0 + 0.5 + elif name == "rsqrt_decay": + return tf.rsqrt(tf.maximum(step_num, hparams.learning_rate_warmup_steps)) + elif name == "rsqrt_normalized_decay": + scale = tf.sqrt(tf.to_float(hparams.learning_rate_warmup_steps)) + return scale * tf.rsqrt(tf.maximum( + step_num, hparams.learning_rate_warmup_steps)) + elif name == "exp_decay": + decay_steps = hparams.learning_rate_decay_steps + warmup_steps = hparams.learning_rate_warmup_steps + p = (step_num - warmup_steps) / decay_steps + p = tf.maximum(p, 0.) + if hparams.learning_rate_decay_staircase: + p = tf.floor(p) + return tf.pow(hparams.learning_rate_decay_rate, p) + elif name == "rsqrt_hidden_size": + return hparams.hidden_size ** -0.5 + elif name == "legacy": + return legacy_learning_rate_schedule(hparams) + else: + raise ValueError("unknown learning rate factor %s" % name) + + +def learning_rate_schedule(hparams): + """Learning rate schedule based on hparams.""" + mlperf_log.transformer_print(key=mlperf_log.OPT_LR, deferred=True) + mlperf_log.transformer_print( + key=mlperf_log.OPT_LR_WARMUP_STEPS, + value=hparams.learning_rate_warmup_steps) + step_num = _global_step(hparams) + schedule_string = hparams.learning_rate_schedule + names = schedule_string.split("*") + names = [name.strip() for name in names if name.strip()] + ret = tf.constant(1.0) + for name in names: + ret *= learning_rate_factor(name, step_num, hparams) + return ret + + +def legacy_learning_rate_schedule(hparams): + """Backwards-compatible learning-rate schedule.""" + step_num = _global_step(hparams) + warmup_steps = tf.to_float(hparams.learning_rate_warmup_steps) + if hparams.learning_rate_decay_scheme == "noam": + ret = 5000.0 * hparams.hidden_size**-0.5 * tf.minimum( + (step_num + 1) * warmup_steps**-1.5, (step_num + 1)**-0.5) + else: + warmup_steps = hparams.learning_rate_warmup_steps + warmup = _learning_rate_warmup(warmup_steps, hparams=hparams) + decay = _learning_rate_decay(hparams, warmup_steps) + ret = tf.where(step_num < warmup_steps, warmup, decay) + optimizer_correction = 0.002 if "adam" in hparams.optimizer else 1.0 + tf.logging.info("Base learning rate: %f", hparams.learning_rate) + return ret * optimizer_correction * hparams.learning_rate + + +def _global_step(hparams): + """Adjust global step if a multi-step optimizer is used.""" + step = tf.to_float(tf.train.get_or_create_global_step()) + multiplier = hparams.optimizer_multistep_accumulate_steps + if not multiplier: + return step + + tf.logging.info("Dividing global step by %d for multi-step optimizer." 
+ % multiplier) + return step / tf.to_float(multiplier) + + +def _legacy_sqrt_decay(step): + """Decay like 1 / sqrt(step), multiplied by 500 to normalize.""" + return 500.0 / tf.sqrt(tf.maximum(step, 1.0)) + + +def _piecewise_learning_rate(step, boundaries, values): + """Scale learning rate according to the given schedule. + + Multipliers are not cumulative. + + Args: + step: global step + boundaries: List of steps to transition on. + values: Multiplier to apply at each boundary transition. + + Returns: + Scaled value for the learning rate. + """ + values = [1.0] + values + boundaries = [float(x) for x in boundaries] + return tf.train.piecewise_constant( + step, boundaries, values, name="piecewise_lr") + + +def _learning_rate_decay(hparams, warmup_steps=0): + """Learning rate decay multiplier.""" + scheme = hparams.learning_rate_decay_scheme + warmup_steps = tf.to_float(warmup_steps) + global_step = _global_step(hparams) + + if not scheme or scheme == "none": + return tf.constant(1.) + + tf.logging.info("Applying learning rate decay: %s.", scheme) + + if scheme == "exp": + decay_steps = hparams.learning_rate_decay_steps + p = (global_step - warmup_steps) / decay_steps + if hparams.learning_rate_decay_staircase: + p = tf.floor(p) + return tf.pow(hparams.learning_rate_decay_rate, p) + + if scheme == "piecewise": + return _piecewise_learning_rate(global_step, + hparams.learning_rate_boundaries, + hparams.learning_rate_multiples) + + if scheme == "cosine": + cycle_steps = hparams.learning_rate_cosine_cycle_steps + cycle_position = global_step % (2 * cycle_steps) + cycle_position = cycle_steps - tf.abs(cycle_steps - cycle_position) + return 0.5 * (1 + tf.cos(np.pi * cycle_position / cycle_steps)) + + if scheme == "cyclelinear10x": + # Cycle the rate linearly by 10x every warmup_steps, up and down. + cycle_steps = warmup_steps + cycle_position = global_step % (2 * cycle_steps) + cycle_position = tf.to_float( # Normalize to the interval [-1, 1]. + cycle_position - cycle_steps) / float(cycle_steps) + cycle_position = 1.0 - tf.abs(cycle_position) # 0 to 1 and back to 0. + return (cycle_position + 0.1) * 3.0 # 10x difference each cycle (0.3-3). + + if scheme == "sqrt": + return _legacy_sqrt_decay(global_step - warmup_steps) + + raise ValueError("Unrecognized learning rate decay scheme: %s" % + hparams.learning_rate_decay_scheme) + + +def _learning_rate_warmup(warmup_steps, warmup_schedule="exp", hparams=None): + """Learning rate warmup multiplier.""" + if not warmup_steps: + return tf.constant(1.) + + tf.logging.info("Applying %s learning rate warmup for %d steps", + warmup_schedule, warmup_steps) + + warmup_steps = tf.to_float(warmup_steps) + global_step = _global_step(hparams) + + if warmup_schedule == "exp": + return tf.exp(tf.log(0.01) / warmup_steps)**(warmup_steps - global_step) + else: + assert warmup_schedule == "linear" + start = tf.constant(0.35) + return ((tf.constant(1.) - start) / warmup_steps) * global_step + start diff --git a/tensor2tensor/utils/metrics.py b/tensor2tensor/utils/metrics.py index 4dc952a08..bfacc7184 100644 --- a/tensor2tensor/utils/metrics.py +++ b/tensor2tensor/utils/metrics.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
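A minimal sketch of how `learning_rate_schedule` above composes factors: the schedule string is split on `*` and the named factors are multiplied. The schedule string and hparam values below are assumptions chosen for illustration, and the plain-Python mirror only shows the arithmetic, not the TF graph version.

```
import math

def lr_factor(name, step, constant=2.0, warmup_steps=16000):
  # Mirrors a few branches of learning_rate_factor above, in plain Python.
  if name == "constant":
    return constant
  if name == "linear_warmup":
    return min(1.0, step / warmup_steps)
  if name == "rsqrt_decay":
    return 1.0 / math.sqrt(max(step, warmup_steps))
  raise ValueError("unknown factor %s" % name)

schedule = "constant*linear_warmup*rsqrt_decay"  # example schedule string
lr = 1.0
for name in schedule.split("*"):
  lr *= lr_factor(name, step=8000)
print(lr)  # ~0.0079 at step 8000
```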
@@ -17,19 +18,120 @@ from __future__ import division from __future__ import print_function -import functools - -# Dependency imports - +import numpy as np import six -from tensor2tensor.models import common_layers +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import modalities from tensor2tensor.utils import bleu_hook +from tensor2tensor.utils import contrib +from tensor2tensor.utils import rouge +from tensor2tensor.utils import sari_hook + +import tensorflow.compat.v1 as tf +from tensorflow.python.util import tf_inspect as inspect + + +class Metrics(object): + """Available evaluation metrics.""" + # Entries here should match the keys in METRICS_FNS below + ACC = "accuracy" + ACC_TOP5 = "accuracy_top5" + ACC_PER_SEQ = "accuracy_per_sequence" + ACC_MULTILABEL_MATCH3 = "accuracy_multilabel_match3" + NEG_LOG_PERPLEXITY = "neg_log_perplexity" + MASKED_NEG_LOG_PERPLEXITY = "masked_neg_log_perplexity" + APPROX_BLEU = "approx_bleu_score" + APPROX_SARI = "approx_sari_score" + RMSE = "rmse" + UNPADDED_MSE = "unpadded_mse" + LOG_POISSON = "log_poisson" + PEARSON = "pearson" + R2 = "r_squared" + ROUGE_2_F = "rouge_2_fscore" + ROUGE_L_F = "rouge_L_fscore" + EDIT_DISTANCE = "edit_distance" + PREFIX_ACCURACY = "prefix_accuracy" + WORD_ERROR_RATE = "word_error_rate" + SET_PRECISION = "set_precision" + SET_RECALL = "set_recall" + SOFTMAX_CROSS_ENTROPY_ONE_HOT = "softmax_cross_entropy_one_hot" + SIGMOID_ACCURACY_ONE_HOT = "sigmoid_accuracy_one_hot" + SIGMOID_ACCURACY = "sigmoid_accuracy" + SIGMOID_RECALL_ONE_HOT = "sigmoid_recall_one_hot" + SIGMOID_PRECISION_ONE_HOT = "sigmoid_precision_one_hot" + SIGMOID_CROSS_ENTROPY_ONE_HOT = "sigmoid_cross_entropy_one_hot" + TWO_CLASS_ACCURACY = "two_class_accuracy" + TWO_CLASS_LOG_LIKELIHOOD = "two_class_log_likelihood" + ROC_AUC = "roc_auc" + IMAGE_SUMMARY = "image_summary" + DMOL_PERPLEXITY = "disc_mol_neg_log_perplexity" + ABS_ERR = "mean_absolute_error" + IMAGE_RMSE = "image_rmse" + + +def image_rmse(predictions, labels, weights_fn=common_layers.weights_all): + """RMSE but will argmax if last dim is not 1.""" + if common_layers.shape_list(predictions)[-1] == 1: + predictions = tf.squeeze(predictions, axis=[-1]) + else: + predictions = tf.argmax(predictions, axis=-1) + return padded_rmse(predictions, labels, weights_fn) + + +def padded_rmse(predictions, labels, weights_fn=common_layers.weights_all): + predictions = tf.to_float(predictions) + labels = tf.to_float(labels) + predictions, labels = common_layers.pad_with_zeros(predictions, labels) + weights = weights_fn(labels) + error = tf.pow(predictions - labels, 2) + error_sqrt = tf.sqrt(tf.reduce_mean(error * weights)) + return error_sqrt, tf.reduce_sum(weights) + + +def unpadded_mse(predictions, labels, weights_fn=common_layers.weights_all): + predictions = tf.to_float(predictions) + labels = tf.to_float(labels) + weights = weights_fn(labels) + error = tf.pow(predictions - labels, 2) + mean_error = tf.reduce_mean(error * weights) + return mean_error, tf.reduce_sum(weights) + + +def abs_error(predictions, labels, weights_fn=None): + """Computes mean(abs(preds-target)).""" + del weights_fn # Unused + targets = tf.squeeze(labels, axis=[2, 3]) + batch_abs_error = tf.abs(predictions - targets) + den = tf.ones(tf.shape(batch_abs_error), dtype=tf.float32) + return (batch_abs_error, den) -import tensorflow as tf -flags = tf.flags -FLAGS = flags.FLAGS +def padded_log_poisson(predictions, + labels, + weights_fn=common_layers.weights_all): + # Expects predictions to already be transformed into 
log space + predictions, labels = common_layers.pad_with_zeros(predictions, labels) + targets = labels + weights = weights_fn(targets) + + lp_loss = tf.nn.log_poisson_loss(targets, predictions, compute_full_loss=True) + return tf.reduce_sum(lp_loss * weights), tf.reduce_sum(weights) + + +def padded_variance_explained(predictions, + labels, + weights_fn=common_layers.weights_all): + """Explained variance, also known as R^2.""" + predictions, labels = common_layers.pad_with_zeros(predictions, labels) + targets = labels + weights = weights_fn(targets) + + y_bar = tf.reduce_mean(weights * targets) + tot_ss = tf.reduce_sum(weights * tf.pow(targets - y_bar, 2)) + res_ss = tf.reduce_sum(weights * tf.pow(targets - predictions, 2)) + r2 = 1. - res_ss / tot_ss + return r2, tf.reduce_sum(weights) def padded_accuracy_topk(predictions, @@ -38,11 +140,14 @@ def padded_accuracy_topk(predictions, weights_fn=common_layers.weights_nonzero): """Percentage of times that top-k predictions matches labels on non-0s.""" with tf.variable_scope("padded_accuracy_topk", values=[predictions, labels]): - padded_labels = common_layers.pad_with_zeros(predictions, labels) + padded_predictions, padded_labels = common_layers.pad_with_zeros( + predictions, labels) weights = weights_fn(padded_labels) - effective_k = tf.minimum(k, tf.shape(predictions)[-1]) - _, outputs = tf.nn.top_k(predictions, k=effective_k) + effective_k = tf.minimum(k, + common_layers.shape_list(padded_predictions)[-1]) + _, outputs = tf.nn.top_k(padded_predictions, k=effective_k) outputs = tf.to_int32(outputs) + padded_labels = tf.to_int32(padded_labels) padded_labels = tf.expand_dims(padded_labels, axis=-1) padded_labels += tf.zeros_like(outputs) # Pad to same shape. same = tf.to_float(tf.equal(outputs, padded_labels)) @@ -56,21 +161,169 @@ def padded_accuracy_top5(predictions, return padded_accuracy_topk(predictions, labels, 5, weights_fn) +def rounding_sequence_accuracy(predictions, + labels, + weights_fn=common_layers.weights_nonzero): + """Sequence accuracy for L1/L2 losses: round down the predictions to ints.""" + outputs = tf.squeeze(tf.to_int32(predictions), axis=-1) + weights = weights_fn(labels) + labels = tf.to_int32(labels) + not_correct = tf.to_float(tf.not_equal(outputs, labels)) * weights + axis = list(range(1, len(outputs.get_shape()))) + correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis)) + return correct_seq, tf.constant(1.0) + + +def two_class_accuracy(predictions, labels, weights_fn=None): + """Accuracy for two class classification with 0/1 labels.""" + with tf.variable_scope("two_class_accuracy", values=[predictions, labels]): + del weights_fn + hard_predictions = tf.to_int32(tf.math.round(tf.squeeze(predictions))) + int_labels = tf.to_int32(labels) + _, accuracy = tf.metrics.accuracy(labels=int_labels, + predictions=hard_predictions) + return accuracy, tf.constant(1.0) + + +def two_class_log_likelihood(predictions, labels, weights_fn=None): + """Log-likelihood for two class classification with 0/1 labels. + + Args: + predictions: A float valued tensor of shape [`batch_size`]. Each + component should be between 0 and 1. + labels: An int valued tensor of shape [`batch_size`]. Each component + should either be 0 or 1. + weights_fn: unused. + + Returns: + A pair, with the average log likelihood in the first component. + """ + del weights_fn + float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64) + batch_probs = tf.stack([1. 
- float_predictions, float_predictions], axis=-1) + int_labels = tf.cast(tf.squeeze(labels), dtype=tf.int32) + onehot_targets = tf.cast(tf.one_hot(int_labels, 2), dtype=tf.float64) + chosen_probs = tf.einsum( + "ij,ij->i", batch_probs, onehot_targets, name="chosen_probs") + avg_log_likelihood = tf.reduce_mean(tf.log(chosen_probs)) + return avg_log_likelihood, tf.constant(1.0) + + def padded_sequence_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): """Percentage of times that predictions matches labels everywhere (non-0).""" + # If the last dimension is 1 then we're using L1/L2 loss. + if common_layers.shape_list(predictions)[-1] == 1: + return rounding_sequence_accuracy( + predictions, labels, weights_fn=weights_fn) with tf.variable_scope( "padded_sequence_accuracy", values=[predictions, labels]): - padded_labels = common_layers.pad_with_zeros(predictions, labels) + padded_predictions, padded_labels = common_layers.pad_with_zeros( + predictions, labels) weights = weights_fn(padded_labels) - outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) + + # Flatten, keeping batch dim (and num_classes dim for predictions) + # TPU argmax can only deal with a limited number of dimensions + predictions_shape = common_layers.shape_list(padded_predictions) + batch_size = predictions_shape[0] + num_classes = predictions_shape[-1] + flat_size = common_layers.list_product( + common_layers.shape_list(padded_labels)[1:]) + padded_predictions = tf.reshape( + padded_predictions, + [batch_size, common_layers.list_product(predictions_shape[1:-1]), + num_classes]) + padded_labels = tf.reshape(padded_labels, [batch_size, flat_size]) + weights = tf.reshape(weights, [batch_size, flat_size]) + + outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1)) + padded_labels = tf.to_int32(padded_labels) not_correct = tf.to_float(tf.not_equal(outputs, padded_labels)) * weights axis = list(range(1, len(outputs.get_shape()))) correct_seq = 1.0 - tf.minimum(1.0, tf.reduce_sum(not_correct, axis=axis)) return correct_seq, tf.constant(1.0) +def prefix_accuracy(predictions, + labels, + weights_fn=common_layers.weights_nonzero): + """Average # of correct tokens at start of sequences, ignoring padding 0s. + + See section 4.3 of Learning to Transduce with Unbounded Memory, + Grefenstette et al., 2015. + + Args: + predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and + type tf.float32 representing the logits, 0-padded. + labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32 + representing the labels of same length as logits and 0-padded. + weights_fn: ignored. The weights returned are the total length of the ground + truth labels, excluding 0-paddings. + + Returns: + (prefix accuracy, 1.0) + + Raises: + ValueError: if weights_fn is not common_layers.weights_nonzero. 
+ """ + if weights_fn is not common_layers.weights_nonzero: + raise ValueError("Only weights_nonzero can be used for this metric.") + + predictions = tf.to_int32(tf.squeeze(tf.argmax(predictions, axis=-1), axis=2)) + labels = tf.squeeze(labels, axis=(2, 3)) + seq_len = tf.reduce_sum( + tf.cast(tf.not_equal(labels, tf.constant(0)), dtype=tf.float32), axis=1) + matching_elements = tf.equal(labels, predictions) + prefix_len = tf.reduce_sum( + tf.cumprod(tf.cast(matching_elements, tf.float32), axis=1), axis=1) + return tf.reduce_mean(prefix_len / seq_len), tf.constant(1.0) + + +def sequence_edit_distance(predictions, + labels, + weights_fn=common_layers.weights_nonzero): + """Average edit distance, ignoring padding 0s. + + The score returned is the edit distance divided by the total length of + reference truth and the weight returned is the total length of the truth. + + Args: + predictions: Tensor of shape [`batch_size`, `length`, 1, `num_classes`] and + type tf.float32 representing the logits, 0-padded. + labels: Tensor of shape [`batch_size`, `length`, 1, 1] and type tf.int32 + representing the labels of same length as logits and 0-padded. + weights_fn: ignored. The weights returned are the total length of the ground + truth labels, excluding 0-paddings. + + Returns: + (edit distance / reference length, reference length) + + Raises: + ValueError: if weights_fn is not common_layers.weights_nonzero. + """ + if weights_fn is not common_layers.weights_nonzero: + raise ValueError("Only weights_nonzero can be used for this metric.") + + with tf.variable_scope("edit_distance", values=[predictions, labels]): + # Transform logits into sequence classes by taking max at every step. + predictions = tf.to_int32( + tf.squeeze(tf.argmax(predictions, axis=-1), axis=(2, 3))) + nonzero_idx = tf.where(tf.not_equal(predictions, 0)) + sparse_outputs = tf.SparseTensor(nonzero_idx, + tf.gather_nd(predictions, nonzero_idx), + tf.shape(predictions, out_type=tf.int64)) + labels = tf.squeeze(labels, axis=(2, 3)) + nonzero_idx = tf.where(tf.not_equal(labels, 0)) + label_sparse_outputs = tf.SparseTensor(nonzero_idx, + tf.gather_nd(labels, nonzero_idx), + tf.shape(labels, out_type=tf.int64)) + distance = tf.reduce_sum( + tf.edit_distance(sparse_outputs, label_sparse_outputs, normalize=False)) + reference_length = tf.to_float(common_layers.shape_list(nonzero_idx)[0]) + return distance / reference_length, reference_length + + def padded_neg_log_perplexity(predictions, labels, weights_fn=common_layers.weights_nonzero): @@ -80,76 +333,590 @@ def padded_neg_log_perplexity(predictions, return (-num, den) +def padded_neg_log_perplexity_with_masking( + predictions, + labels, + features, + weights_fn=None): + """Average log-perplexity with custom targets_mask.""" + del weights_fn + if "targets_mask" not in features: + raise ValueError("masked_neg_log_perplexity requires targets_mask feature") + + # Features are 4 dimensional, so we need to reshape the targets_mask to match + # the shape of the labels. A lot of models rely on these features being 4D, + # so it's best to update the shape of the mask. 
+ extended_targets_mask_shape = common_layers.shape_list( + features["targets_mask"]) + extended_targets_mask_shape.extend([1, 1]) + features["targets_mask"] = tf.reshape(features["targets_mask"], + shape=extended_targets_mask_shape) + + mask_fn = lambda labels: features["targets_mask"] + return padded_neg_log_perplexity(predictions, labels, mask_fn) + + +def dmol_neg_log_perplexity(predictions, + labels, + weights_fn=None): + """Average log-perplexity excluding padding 0s. No smoothing.""" + del weights_fn # Unused + num, den = common_layers.dml_loss( + predictions, labels, reduce_sum=False) + return (-num, den) + + +def rounding_accuracy(predictions, + labels, + weights_fn=common_layers.weights_nonzero): + """Rounding accuracy for L1/L2 losses: round down the predictions to ints.""" + outputs = tf.squeeze(tf.to_int32(predictions)) + labels = tf.squeeze(labels) + weights = weights_fn(labels) + labels = tf.to_int32(labels) + return tf.to_float(tf.equal(outputs, labels)), weights + + def padded_accuracy(predictions, labels, weights_fn=common_layers.weights_nonzero): """Percentage of times that predictions matches labels on non-0s.""" + # If the last dimension is 1 then we're using L1/L2 loss. + if common_layers.shape_list(predictions)[-1] == 1: + return rounding_accuracy(predictions, labels, weights_fn=weights_fn) with tf.variable_scope("padded_accuracy", values=[predictions, labels]): - padded_labels = common_layers.pad_with_zeros(predictions, labels) + padded_predictions, padded_labels = common_layers.pad_with_zeros( + predictions, labels) weights = weights_fn(padded_labels) - outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) + outputs = tf.to_int32(tf.argmax(padded_predictions, axis=-1)) + padded_labels = tf.to_int32(padded_labels) return tf.to_float(tf.equal(outputs, padded_labels)), weights -def create_evaluation_metrics(problems): +def multilabel_accuracy_matchk(predictions, + labels, + k, + weights_fn=common_layers.weights_nonzero): + """Used to evaluate the VQA accuracy. + + Let n be the times that predictions appear in labels, then final score + is min(n/k, 1). + Refer to https://arxiv.org/pdf/1505.00468.pdf. + + Args: + predictions: A tensor with shape [batch_size, 1, 1, 1, vocab_size]. + labels: A tensor with shape [batch_size, length, 1, 1]. + k: A tensor constant. + weights_fn: weight function. + Returns: + scores: min(n/k, 1). + weights: returns all ones. + + """ + predictions = tf.to_int32(tf.argmax(predictions, axis=-1)) + scores = tf.to_float(tf.equal(predictions, labels)) + # those label == 0 do not count + weights = weights_fn(labels) + scores *= weights + scores = tf.reduce_sum(scores, axis=[1, 2, 3]) + scores = tf.minimum(scores / tf.to_float(k), 1) + # every sample count + weights = tf.ones(tf.shape(scores), dtype=tf.float32) + + return scores, weights + + +def multilabel_accuracy_match3(predictions, labels, + weights_fn=common_layers.weights_nonzero): + return multilabel_accuracy_matchk(predictions, labels, 3, weights_fn) + + +def set_precision(predictions, labels, + weights_fn=common_layers.weights_nonzero): + """Precision of set predictions. + + Args: + predictions : A Tensor of scores of shape [batch, nlabels]. + labels: A Tensor of int32s giving true set elements, + of shape [batch, seq_length]. + weights_fn: A function to weight the elements. + + Returns: + hits: A Tensor of shape [batch, nlabels]. + weights: A Tensor of shape [batch, nlabels]. 
+ """ + with tf.variable_scope("set_precision", values=[predictions, labels]): + labels = tf.squeeze(labels, [2, 3]) + weights = weights_fn(labels) + labels = tf.one_hot(labels, predictions.shape[-1]) + labels = tf.reduce_max(labels, axis=1) + labels = tf.cast(labels, tf.bool) + return tf.to_float(tf.equal(labels, predictions)), weights + + +def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero): + """Recall of set predictions. + + Args: + predictions : A Tensor of scores of shape [batch, nlabels]. + labels: A Tensor of int32s giving true set elements, + of shape [batch, seq_length]. + weights_fn: A function to weight the elements. + + Returns: + hits: A Tensor of shape [batch, nlabels]. + weights: A Tensor of shape [batch, nlabels]. + """ + with tf.variable_scope("set_recall", values=[predictions, labels]): + labels = tf.squeeze(labels, [2, 3]) + weights = weights_fn(labels) + labels = tf.one_hot(labels, predictions.shape[-1]) + labels = tf.reduce_max(labels, axis=1) + labels = tf.cast(labels, tf.bool) + return tf.to_float(tf.equal(labels, predictions)), weights + + +def image_summary(predictions, targets, hparams): + """Reshapes predictions and passes it to tensorboard. + + Args: + predictions : The predicted image (logits). + targets : The ground truth. + hparams: model hparams. + + Returns: + summary_proto: containing the summary images. + weights: A Tensor of zeros of the same shape as predictions. + """ + del hparams + results = tf.cast(tf.argmax(predictions, axis=-1), tf.uint8) + gold = tf.cast(targets, tf.uint8) + summary1 = tf.summary.image("prediction", results, max_outputs=2) + summary2 = tf.summary.image("data", gold, max_outputs=2) + summary = tf.summary.merge([summary1, summary2]) + return summary, tf.zeros_like(predictions) + + +def softmax_cross_entropy_one_hot(logits, labels, weights_fn=None): + """Calculate softmax cross entropy given one-hot labels and logits. + + Args: + logits: Tensor of size [batch-size, o=1, p=1, num-classes] + labels: Tensor of size [batch-size, o=1, p=1, num-classes] + weights_fn: Function that takes in labels and weighs examples (unused) + Returns: + cross-entropy (scalar), weights + """ + with tf.variable_scope("softmax_cross_entropy_one_hot", + values=[logits, labels]): + del weights_fn + cross_entropy = tf.losses.softmax_cross_entropy( + onehot_labels=labels, logits=logits) + return cross_entropy, tf.constant(1.0) + + +def sigmoid_accuracy_one_hot(logits, labels, weights_fn=None): + """Calculate accuracy for a set, given one-hot labels and logits. + + Args: + logits: Tensor of size [batch-size, o=1, p=1, num-classes] + labels: Tensor of size [batch-size, o=1, p=1, num-classes] + weights_fn: Function that takes in labels and weighs examples (unused) + Returns: + accuracy (scalar), weights + """ + with tf.variable_scope("sigmoid_accuracy_one_hot", values=[logits, labels]): + del weights_fn + predictions = tf.nn.sigmoid(logits) + labels = tf.argmax(labels, -1) + predictions = tf.argmax(predictions, -1) + _, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions) + return accuracy, tf.constant(1.0) + + +def sigmoid_accuracy(logits, labels, weights_fn=None): + """Calculate accuracy for a set, given integer labels and logits. 
+ + Args: + logits: Tensor of size [batch-size, o=1, p=1, num-classes] + labels: Tensor of size [batch-size, o=1, p=1] + weights_fn: Function that takes in labels and weighs examples (unused) + Returns: + accuracy (scalar), weights + """ + with tf.variable_scope("sigmoid_accuracy", values=[logits, labels]): + del weights_fn + predictions = tf.nn.sigmoid(logits) + predictions = tf.argmax(predictions, -1) + _, accuracy = tf.metrics.accuracy(labels=labels, predictions=predictions) + return accuracy, tf.constant(1.0) + + +def sigmoid_precision_one_hot(logits, labels, weights_fn=None): + """Calculate precision for a set, given one-hot labels and logits. + + Predictions are converted to one-hot, + as predictions[example][arg-max(example)] = 1 + + Args: + logits: Tensor of size [batch-size, o=1, p=1, num-classes] + labels: Tensor of size [batch-size, o=1, p=1, num-classes] + weights_fn: Function that takes in labels and weighs examples (unused) + Returns: + precision (scalar), weights + """ + with tf.variable_scope("sigmoid_precision_one_hot", values=[logits, labels]): + del weights_fn + num_classes = logits.shape[-1] + predictions = tf.nn.sigmoid(logits) + predictions = tf.argmax(predictions, -1) + predictions = tf.one_hot(predictions, num_classes) + _, precision = tf.metrics.precision(labels=labels, predictions=predictions) + return precision, tf.constant(1.0) + + +def sigmoid_recall_one_hot(logits, labels, weights_fn=None): + """Calculate recall for a set, given one-hot labels and logits. + + Predictions are converted to one-hot, + as predictions[example][arg-max(example)] = 1 + + Args: + logits: Tensor of size [batch-size, o=1, p=1, num-classes] + labels: Tensor of size [batch-size, o=1, p=1, num-classes] + weights_fn: Function that takes in labels and weighs examples (unused) + Returns: + recall (scalar), weights + """ + with tf.variable_scope("sigmoid_recall_one_hot", values=[logits, labels]): + del weights_fn + num_classes = logits.shape[-1] + predictions = tf.nn.sigmoid(logits) + predictions = tf.argmax(predictions, -1) + predictions = tf.one_hot(predictions, num_classes) + _, recall = tf.metrics.recall(labels=labels, predictions=predictions) + return recall, tf.constant(1.0) + + +def sigmoid_cross_entropy_one_hot(logits, labels, weights_fn=None): + """Calculate sigmoid cross entropy for one-hot lanels and logits. + + Args: + logits: Tensor of size [batch-size, o=1, p=1, num-classes] + labels: Tensor of size [batch-size, o=1, p=1, num-classes] + weights_fn: Function that takes in labels and weighs examples (unused) + Returns: + cross_entropy (scalar), weights + """ + with tf.variable_scope("sigmoid_cross_entropy_one_hot", + values=[logits, labels]): + del weights_fn + cross_entropy = tf.losses.sigmoid_cross_entropy( + multi_class_labels=labels, logits=logits) + return cross_entropy, tf.constant(1.0) + + +def roc_auc(logits, labels, weights_fn=None): + """Calculate ROC AUC. + + Requires binary classes. + + Args: + logits: Tensor of size [batch_size, 1, 1, num_classes] + labels: Tensor of size [batch_size, 1, 1, num_classes] + weights_fn: Function that takes in labels and weighs examples (unused) + Returns: + ROC AUC (scalar), weights + """ + del weights_fn + with tf.variable_scope("roc_auc", values=[logits, labels]): + predictions = tf.argmax(logits, axis=-1) + _, auc = tf.metrics.auc(labels, predictions, curve="ROC") + return auc, tf.constant(1.0) + + +def create_evaluation_metrics(problems, model_hparams): """Creates the evaluation metrics for the model. 
   Args:
-    problems: List of strings containing the name of the problems.
+    problems: List of Problem instances.
+    model_hparams: a set of hparams.

   Returns:
-    A dictionary with keys that are strings naming the evaluation
-    metrics and values that are functions taking arguments of
-    (predictions, targets), returning a tuple of a tensor of the
-    metric's value together with an op to update the metric's value.
+    dict<metric name, metric function>. The metric functions have signature
+      (Tensor predictions, features) -> (metric Tensor, update op), where features
+      is a dict with keys {targets}.
+
+  Raises:
+    ValueError: if the metrics specified by a problem are not recognized (i.e.
+      are not defined in the Metrics enum).
   """
+  def reduce_dimensions(predictions, labels):
+    """Reduce dimensions for high-dimensional predictions and labels."""
+    # We will treat first dimensions as batch. One example is video frames.
+    if len(predictions.get_shape()) > 5:
+      predictions_shape = common_layers.shape_list(predictions)
+      predictions = tf.reshape(
+          predictions, [predictions_shape[0], predictions_shape[1], -1,
+                        predictions_shape[-1]])
+      labels_shape = common_layers.shape_list(labels)
+      labels = tf.reshape(
+          labels, [labels_shape[0], labels_shape[1], -1])
+    return predictions, labels

-  def append_metric_fns(metric_tup, eval_metrics):
-    """Append problem-specific and global metrics to eval_metrics."""
-    metric_name, metric_function = metric_tup
-    def fn(predictions, labels, weights, idx, weights_fn):
-      # The 'weights' argument represents problem-choice here,
-      # we need to keep this name because MetricSpecs checks it.
-      problem_choice = weights
-      (scores, weights) = tf.cond(
-          tf.equal(idx, problem_choice),  # pylint: disable=cell-var-from-loop
-          lambda: metric_function(predictions, labels, weights_fn=weights_fn),
-          lambda: (tf.constant(0.0), tf.constant(0.0)))
-      # The tf.metrics.mean function assures correct aggregation.
-      return tf.metrics.mean(scores, weights)
+  def make_problem_specific_metric_fn(metric_fn, weights_fn):
+    """Create a metric fn."""

-    for i, problem in enumerate(problems):
-      name = "metrics-%s/%s" % (problem, metric_name)
-      weights_fn = (common_layers.weights_concatenated
-                    if "concat" in problem else common_layers.weights_nonzero)
-      eval_metrics[name] = functools.partial(fn, idx=i, weights_fn=weights_fn)
+    def problem_metric_fn(predictions, features, labels):
+      """Metric fn."""
+      # Send along the entire features dict if the metric fn has the kwarg
+      # "features".
+ kwargs = {} + args, _, keywords, _ = inspect.getargspec(metric_fn) + if ("features" in args) or keywords: + kwargs["features"] = features - def global_fn(predictions, labels, weights): - (scores, weights) = metric_function(predictions, labels) + predictions, labels = reduce_dimensions(predictions, labels) + + scores, weights = metric_fn(predictions, labels, + weights_fn=weights_fn, **kwargs) return tf.metrics.mean(scores, weights) - eval_metrics["metrics/%s" % metric_name] = global_fn + return problem_metric_fn + + def make_image_wrapped_metric_fn(metric_fn): + """Metric fn without tf.metrics.mean.""" + + def image_wrapped_metric_fn(predictions, + features, + labels, + weights_fn=common_layers.weights_all): + del weights_fn + del features + predictions, labels = reduce_dimensions(predictions, labels) + return metric_fn(predictions, labels, model_hparams) + + return image_wrapped_metric_fn + + def weights_fn_for_mp(problem_task_id): + return lambda x: common_layers.weights_multi_problem(x, problem_task_id) + + eval_metrics = {} + for problem_instance in problems: + problem_name = problem_instance.name + if problem_instance.was_reversed: + problem_name += "_rev" + metrics = problem_instance.eval_metric_fns(model_hparams) + if hasattr(model_hparams.problem, "task_list"): + metrics = model_hparams.problem.eval_metric_fns(model_hparams) - eval_metrics = dict() + tm = problem_instance.get_hparams(model_hparams).modality["targets"] + if not isinstance(tm, dict): + tm = {"targets": tm} - # Metrics are functions that take predictions and labels and return - # a tensor of metrics and a tensor of weights. - # The results are passed to tf.metrics.mean to accumulate properly. - metrics_list = [("accuracy", padded_accuracy), ("accuracy_top5", - padded_accuracy_top5), - ("accuracy_per_sequence", padded_sequence_accuracy), - ("neg_log_perplexity", padded_neg_log_perplexity)] + for target_name, modality in six.iteritems(tm): + weights_fn = model_hparams.weights_fn.get( + "targets", + modalities.get_weights_fn(modality)) + if hasattr(model_hparams.problem, "task_list"): + ptid = problem_instance.task_id # pylint: disable=cell-var-from-loop + weights_fn = weights_fn_for_mp(ptid) - # TODO(nikip): Extend this to support use of custom metrics for problems. 
- for problem in problems: - if "wmt" in problem: - metrics_list.append(("bleu_score", bleu_hook.padded_bleu_score)) + for metric, metric_fn in six.iteritems(metrics): + overload_eval_metric_name = getattr( + model_hparams, "overload_eval_metric_name", None) + if len(problems) == 1 and overload_eval_metric_name: + metric_name = "metrics-%s/%s/%s" % ( + overload_eval_metric_name, target_name, metric) + else: + metric_name = "metrics-%s/%s/%s" % (problem_name, target_name, metric) + if metric == Metrics.IMAGE_SUMMARY: + eval_metrics[metric_name] = make_image_wrapped_metric_fn(metric_fn) + else: + eval_metrics[metric_name] = make_problem_specific_metric_fn( + metric_fn, weights_fn) - for metric in metrics_list: - append_metric_fns(metric, eval_metrics) + return eval_metrics + + +def create_eager_metrics_for_problem(problem, model_hparams): + """See create_eager_metrics.""" + metric_fns = problem.eval_metric_fns(model_hparams) + problem_hparams = problem.get_hparams(model_hparams) + target_modality = problem_hparams.modality["targets"] + weights_fn = model_hparams.weights_fn.get( + "targets", + modalities.get_weights_fn(target_modality)) + return create_eager_metrics_internal(metric_fns, weights_fn=weights_fn) + + +def create_eager_metrics(metric_names, weights_fn=common_layers.weights_all): + """Create metrics accumulators and averager for Eager mode. + + Args: + metric_names: list from Metrics enum + weights_fn: function that takes labels and returns a weights mask. Defaults + to weights of all 1, i.e. common_layers.weights_all. Use + common_layers.weights_nonzero if labels have 0-padding. + + Returns: + (accum_fn(predictions, targets) => None, + result_fn() => dict + """ + metric_fns = dict( + [(name, METRICS_FNS[name]) for name in metric_names]) + return create_eager_metrics_internal(metric_fns, weights_fn) + + +def create_eager_metrics_internal(metric_fns, + weights_fn=common_layers.weights_all): + """Create metrics accumulators and averager for Eager mode. + + Args: + metric_fns: dict + weights_fn: function that takes labels and returns a weights mask. Defaults + to weights of all 1, i.e. common_layers.weights_all. Use + common_layers.weights_nonzero if labels have 0-padding. + + Returns: + (accum_fn(predictions, targets) => None, + result_fn() => dict + """ + + from tensorflow.contrib.eager.python import tfe # pylint: disable=g-import-not-at-top + + tfe_metrics = {} + + for name in metric_fns: + tfe_metrics[name] = tfe.metrics.Mean(name=name) + + def metric_accum(predictions, targets): + for name, metric_fn in metric_fns.items(): + val, weight = metric_fn(predictions, targets, + weights_fn=weights_fn) + tfe_metrics[name](np.squeeze(val), np.squeeze(weight)) + + def metric_means(): + avgs = {} + for name in metric_fns: + avgs[name] = tfe_metrics[name].result().numpy() + return avgs + + return metric_accum, metric_means + + +def word_error_rate(raw_predictions, + labels, + lookup=None, + weights_fn=common_layers.weights_nonzero): + """Calculate word error rate. + + Args: + raw_predictions: The raw predictions. + labels: The actual labels. + lookup: A tf.constant mapping indices to output tokens. + weights_fn: Weighting function. + + Returns: + The word error rate. 
+  """
+
+  def from_tokens(raw, lookup_):
+    gathered = tf.gather(lookup_, tf.cast(raw, tf.int32))
+    joined = tf.regex_replace(tf.reduce_join(gathered, axis=1), b"<EOS>.*", b"")
+    cleaned = tf.regex_replace(joined, b"_", b" ")
+    tokens = tf.string_split(cleaned, " ")
+    return tokens
+
+  def from_characters(raw, lookup_):
+    """Convert ascii+2 encoded codes to string-tokens."""
+    corrected = tf.bitcast(
+        tf.clip_by_value(tf.subtract(raw, 2), 0, 255), tf.uint8)
+
+    gathered = tf.gather(lookup_, tf.cast(corrected, tf.int32))[:, :, 0]
+    joined = tf.reduce_join(gathered, axis=1)
+    cleaned = tf.regex_replace(joined, b"\0", b"")
+    tokens = tf.string_split(cleaned, " ")
+    return tokens
+
+  if lookup is None:
+    lookup = tf.constant([chr(i) for i in range(256)])
+    convert_fn = from_characters
+  else:
+    convert_fn = from_tokens
+
+  if weights_fn is not common_layers.weights_nonzero:
+    raise ValueError("Only weights_nonzero can be used for this metric.")
+
+  with tf.variable_scope("word_error_rate", values=[raw_predictions, labels]):
+
+    raw_predictions = tf.squeeze(
+        tf.argmax(raw_predictions, axis=-1), axis=(2, 3))
+    labels = tf.squeeze(labels, axis=(2, 3))
+
+    reference = convert_fn(labels, lookup)
+    predictions = convert_fn(raw_predictions, lookup)
+
+    distance = tf.reduce_sum(
+        tf.edit_distance(predictions, reference, normalize=False))
+    reference_length = tf.cast(
+        tf.size(reference.values, out_type=tf.int32), dtype=tf.float32)
+
+    return distance / reference_length, reference_length
+
+
+def pearson_correlation_coefficient(predictions, labels, weights_fn=None):
+  """Calculate the Pearson correlation coefficient.
+
+  Args:
+    predictions: The raw predictions.
+    labels: The actual labels.
+    weights_fn: Weighting function.
+
+  Returns:
+    The Pearson correlation coefficient.
+  """
+  del weights_fn
+  _, pearson = contrib.metrics().streaming_pearson_correlation(
+      predictions, labels)
+  return pearson, tf.constant(1.0)

-  return {
-      k: tf.contrib.learn.MetricSpec(
-          v, prediction_key="predictions", weight_key="problem_choice")
-      for (k, v) in six.iteritems(eval_metrics)
-  }

+# Metrics are functions that take predictions and labels and return
+# a tensor of metrics and a tensor of weights.
+# If the function has "features" as an argument, it will receive the whole
+# features dict as well.
+# The results are passed to tf.metrics.mean to accumulate properly.
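The comment above states the contract shared by every entry in METRICS_FNS below: a metric receives predictions and labels (and, optionally, the features dict) and returns a (scores, weights) pair that create_evaluation_metrics later averages with tf.metrics.mean. As a minimal sketch of that contract only, not part of this change, with the hypothetical name example_abs_diff, assuming predictions and labels already share the same [batch, length] shape, and relying only on names this module already imports (tf, common_layers), such a metric could look like:

def example_abs_diff(predictions, labels,
                     weights_fn=common_layers.weights_nonzero):
  """Hypothetical metric: per-position absolute error of argmax'd predictions."""
  # predictions: [batch, length, 1, 1, vocab_size] logits.
  # labels:      [batch, length, 1, 1] integer ids, with id 0 used as padding.
  with tf.variable_scope("example_abs_diff", values=[predictions, labels]):
    outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
    weights = weights_fn(labels)  # weights_nonzero -> 0.0 on padding positions
    scores = tf.to_float(tf.abs(outputs - tf.to_int32(labels)))
    return scores, weights  # averaged later via tf.metrics.mean(scores, weights)

A Problem could return such a function from eval_metric_fns(), or it could be registered in METRICS_FNS under a new Metrics entry; either way, create_evaluation_metrics wraps it with the appropriate weights_fn and tf.metrics.mean, as shown earlier in this file.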
+METRICS_FNS = {
+    Metrics.ACC: padded_accuracy,
+    Metrics.ACC_TOP5: padded_accuracy_top5,
+    Metrics.ACC_PER_SEQ: padded_sequence_accuracy,
+    Metrics.ACC_MULTILABEL_MATCH3: multilabel_accuracy_match3,
+    Metrics.NEG_LOG_PERPLEXITY: padded_neg_log_perplexity,
+    Metrics.MASKED_NEG_LOG_PERPLEXITY: padded_neg_log_perplexity_with_masking,
+    Metrics.APPROX_BLEU: bleu_hook.bleu_score,
+    Metrics.APPROX_SARI: sari_hook.sari_score,
+    Metrics.RMSE: padded_rmse,
+    Metrics.UNPADDED_MSE: unpadded_mse,
+    Metrics.LOG_POISSON: padded_log_poisson,
+    Metrics.PEARSON: pearson_correlation_coefficient,
+    Metrics.R2: padded_variance_explained,
+    Metrics.ROUGE_2_F: rouge.rouge_2_fscore,
+    Metrics.ROUGE_L_F: rouge.rouge_l_fscore,
+    Metrics.EDIT_DISTANCE: sequence_edit_distance,
+    Metrics.SOFTMAX_CROSS_ENTROPY_ONE_HOT: softmax_cross_entropy_one_hot,
+    Metrics.SIGMOID_ACCURACY: sigmoid_accuracy,
+    Metrics.SIGMOID_ACCURACY_ONE_HOT: sigmoid_accuracy_one_hot,
+    Metrics.SIGMOID_RECALL_ONE_HOT: sigmoid_recall_one_hot,
+    Metrics.SIGMOID_PRECISION_ONE_HOT: sigmoid_precision_one_hot,
+    Metrics.SIGMOID_CROSS_ENTROPY_ONE_HOT: sigmoid_cross_entropy_one_hot,
+    Metrics.SET_PRECISION: set_precision,
+    Metrics.SET_RECALL: set_recall,
+    Metrics.TWO_CLASS_ACCURACY: two_class_accuracy,
+    Metrics.TWO_CLASS_LOG_LIKELIHOOD: two_class_log_likelihood,
+    Metrics.ROC_AUC: roc_auc,
+    Metrics.IMAGE_SUMMARY: image_summary,
+    Metrics.DMOL_PERPLEXITY: dmol_neg_log_perplexity,
+    Metrics.ABS_ERR: abs_error,
+    Metrics.IMAGE_RMSE: image_rmse,
+    Metrics.WORD_ERROR_RATE: word_error_rate,
+}
diff --git a/tensor2tensor/utils/metrics_hook.py b/tensor2tensor/utils/metrics_hook.py
new file mode 100644
index 000000000..f5f006935
--- /dev/null
+++ b/tensor2tensor/utils/metrics_hook.py
@@ -0,0 +1,290 @@
+# coding=utf-8
+# Copyright 2023 The Tensor2Tensor Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Summary-based SessionRunHooks."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import tensorflow.compat.v1 as tf
+
+from tensorboard.backend.event_processing import event_accumulator
+from tensorboard.backend.event_processing import event_multiplexer
+
+
+class MetricsBasedHook(tf.train.SessionRunHook):
+  """Base class for hooks based on summary metrics.
+
+  Subclasses should override _process_metrics.
+
+  If _process_metrics returns True, calls run_context.request_stop().
+
+  This can be used to do something like "Stop after the loss has stopped
+  decreasing for 5000 steps."
+  """
+  _RUN_NAME = "run%d"
+
+  def __init__(self, events_dir, subdirs=None, tags=None, every_n_steps=1000):
+    """Construct MetricsBasedHook.
+
+    Args:
+      events_dir: str, top-level directory containing events files.
+      subdirs: list<str>, subdirectories of events_dir that also contain
+        events files. Use "" to specify the top-level directory. Defaults to
+        [""].
+      tags: list<str>, names of metrics to collect. Default will collect all
+        metrics.
+      every_n_steps: int, collect metrics every n steps.
+    """
+    self._events_dir = events_dir
+    self._subdirs = subdirs or [""]
+    self._tags = tags
+    self._every_n_steps = every_n_steps
+    self._start_step = None
+    self._event_multiplexer = self._init_multiplexer()
+
+  def _init_multiplexer(self):
+    dirs = [os.path.join(self._events_dir, subdir) for subdir in self._subdirs]
+    run_path_map = dict([(self._RUN_NAME % i, d) for i, d in enumerate(dirs)])
+    return event_multiplexer.EventMultiplexer(run_path_map)
+
+  def begin(self):
+    self._global_step_tensor = tf.train.get_global_step()
+    if self._global_step_tensor is None:
+      raise RuntimeError("Global step must be created to use MetricsBasedHook.")
+
+  def after_create_session(self, session, coord):
+    del coord
+    if self._start_step is None:
+      self._start_step = session.run(self._global_step_tensor)
+
+  def before_run(self, run_context):
+    del run_context
+    return tf.train.SessionRunArgs([self._global_step_tensor])
+
+  def after_run(self, run_context, run_values):
+    global_step = run_values.results[0]
+    if (global_step - self._start_step) % self._every_n_steps != 0:
+      return
+    metrics = self._collect_metrics()
+    self._after_run(run_context, run_values, global_step, metrics)
+
+  def _after_run(self, run_context, run_values, global_step, metrics):
+    del run_values
+    if self._process_metrics(global_step, metrics):
+      run_context.request_stop()
+
+  def _collect_metrics(self):
+    self._event_multiplexer.Reload()
+    subdir_data = {}
+    for i, subdir in enumerate(self._subdirs):
+      subdir_metrics = {}
+
+      accum = self._event_multiplexer.GetAccumulator(self._RUN_NAME % i)
+      for tag in accum.Tags()[event_accumulator.SCALARS]:
+        steps, vals = zip(*[
+            (event.step, event.value) for event in accum.Scalars(tag)])
+        subdir_metrics[tag] = (steps, vals)
+
+      subdir_data[subdir] = subdir_metrics
+    return subdir_data
+
+  def _process_metrics(self, global_step, metrics):
+    """Process the collected metrics.
+
+    Args:
+      global_step: int, the current global step value.
+      metrics: dict<str subdir, subdir_metrics>. The collected
+        metrics. subdir_metrics is a dict from tag name to tuple of lists. The
+        lists are a list of global steps and a list of values.
+        i.e. subdir_metrics:
+          `dict<str tag, tuple<list<int> global steps, list<float> values>>>`
+
+    Returns:
+      should_stop: bool. If True, will request that the session stops.
+    """
+    del global_step, metrics
+    return False
+
+
+class EarlyStoppingHook(MetricsBasedHook):
+  """EarlyStoppingHook will stop training when a given metric has plateaued."""
+
+  def __init__(self,
+               events_dir,
+               tag,
+               num_plateau_steps=1000,
+               plateau_delta=0.1,
+               plateau_decrease=True,
+               every_n_steps=1000):
+    """Create an EarlyStoppingHook.
+
+    This hook will stop training when the metric identified by tag has
+    plateaued. Plateaued is defined by the metric having stopped
+    increasing/decreasing (based on plateau_decrease) by plateau_delta for
+    num_plateau_steps.
+
+    Args:
+      events_dir: Directory with events files.
+      tag: Name of metric in TensorBoard.
+      num_plateau_steps: Number of steps over which to check the plateau.
+      plateau_delta: delta to define a "plateau".
+      plateau_decrease: whether to check decrease or increase in the metric.
+      every_n_steps: how often to run this hook.
+
+    Returns:
+      An instance of EarlyStoppingHook.
+ """ + super(EarlyStoppingHook, self).__init__( + events_dir=events_dir, tags=[tag], every_n_steps=every_n_steps) + self._num_plateau_steps = num_plateau_steps + self._plateau_delta = plateau_delta + self._plateau_decrease = plateau_decrease + + def _process_metrics(self, global_step, metrics): + if not metrics: + return None + + if not list(metrics.values())[0]: + return None + + # Metrics should have just a single subdir and a single tag + steps, vals = list(metrics.values())[0][self._tags[0]] + return has_metric_plateaued( + steps, + vals, + num_steps=self._num_plateau_steps, + delta=self._plateau_delta, + decrease=self._plateau_decrease) + + +class PlateauOpHook(MetricsBasedHook): + """Runs an op when a metric has plateaued.""" + + def __init__(self, + events_dir, + tag, + plateau_op, + num_plateau_steps=1000, + plateau_delta=0.1, + plateau_decrease=True, + every_n_steps=1000, + only_once=False): + """See EarlyStoppingHook for args. Runs plateau_op if plateaued.""" + super(PlateauOpHook, self).__init__( + events_dir=events_dir, tags=[tag], every_n_steps=every_n_steps) + self._num_plateau_steps = num_plateau_steps + self._plateau_delta = plateau_delta + self._plateau_decrease = plateau_decrease + self._plateau_op = plateau_op + self._only_once = only_once + self._should_run_op = False + self._ever_ran = False + self._last_metric_step_seen = 0 + + @property + def keep_alive(self): + if self._only_once and self._ever_ran: + return False + return True + + def before_run(self, run_context): + del run_context + + fetches = [self._global_step_tensor] + if self._should_run_op and self.keep_alive: + fetches.append(self._plateau_op) + self._should_run_op = False + self._ever_ran = True + + return tf.train.SessionRunArgs(fetches) + + def _after_run(self, run_context, run_values, global_step, metrics): + del run_context + del run_values + del global_step + + if not self.keep_alive: + return + + if not metrics: + return + + if not list(metrics.values())[0]: + return + + # There should be only a single subdir and a single tag + steps, vals = list(metrics.values())[0][self._tags[0]] + + if not steps: + return + + last_step = steps[-1] + if last_step == self._last_metric_step_seen: + return + self._last_metric_step_seen = last_step + + if has_metric_plateaued( + steps, + vals, + num_steps=self._num_plateau_steps, + delta=self._plateau_delta, + decrease=self._plateau_decrease): + self._should_run_op = True + + +def has_metric_plateaued(steps, values, num_steps=100, delta=0.1, + decrease=True): + """Check if metric has plateaued. + + A metric has plateaued if the value has not increased/decreased (depending on + `decrease`) by `delta` for at least `num_steps`. + + Args: + steps: list list of global steps for values. + values: list list of metric values. + num_steps: int, number of steps the metric has to have been plateaued for. + delta: float, how much the metric should have changed by over num_steps. + decrease: bool, whether to check if the metric has decreased by delta or + increased by delta. + + Returns: + bool, whether the metric has plateaued. 
+ """ + assert num_steps > 0 + if len(steps) < 2: + return False + + steps_at_least_num_steps_ago = [ + s for s in steps if s <= (steps[-1] - num_steps) + ] + if not steps_at_least_num_steps_ago: + # Not enough steps yet + return False + delta_step_idx = len(steps_at_least_num_steps_ago) - 1 + + start_val = values[delta_step_idx] + values_to_check = values[delta_step_idx:] + observed_deltas = [] + for val in values_to_check: + if decrease: + observed_delta = start_val - val + else: + observed_delta = val - start_val + observed_deltas.append(observed_delta) + + within_range = [obs < delta for obs in observed_deltas] + return all(within_range) diff --git a/tensor2tensor/utils/metrics_hook_test.py b/tensor2tensor/utils/metrics_hook_test.py new file mode 100644 index 000000000..4744ec118 --- /dev/null +++ b/tensor2tensor/utils/metrics_hook_test.py @@ -0,0 +1,195 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for metrics_hook.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import contextlib +import os +import shutil +from tensor2tensor.utils import metrics_hook + +import tensorflow.compat.v1 as tf + + +class DummyHook(metrics_hook.MetricsBasedHook): + + def _process_metrics(self, global_step, metrics): + if metrics: + assert "" in metrics + assert isinstance(metrics[""], dict) + if metrics[""]: + assert "global_step_1" in metrics[""] + self.test_metrics = metrics + if global_step >= 40: + return True + + +class MetricsHookTest(tf.test.TestCase): + + @classmethod + def setUpClass(cls): + cls.base_checkpoint_dir = tf.test.get_temp_dir() + shutil.rmtree(cls.base_checkpoint_dir, ignore_errors=True) + + def ckpt_dir(self, name): + return os.path.join(self.base_checkpoint_dir, name) + + @contextlib.contextmanager + def sess(self, hook, ckpt_dir): + with tf.train.MonitoredTrainingSession( + checkpoint_dir=ckpt_dir, + save_checkpoint_secs=0, + save_summaries_steps=10, + hooks=[hook]) as sess: + self._sess = sess + yield sess + + def flush(self): + self._sess._hooks[1]._summary_writer.flush() + + def testStop(self): + global_step = tf.train.create_global_step() + tf.summary.scalar("global_step", global_step) + incr_global_step = tf.assign_add(global_step, 1) + + ckpt_dir = self.ckpt_dir("stop") + dummy = DummyHook(ckpt_dir, every_n_steps=10) + with self.sess(dummy, ckpt_dir) as sess: + for _ in range(20): + sess.run(incr_global_step) + + # Summary files should now have 2 global step values in them + self.flush() + + # Run for 10 more so that the hook gets triggered again + for _ in range(10): + sess.run(incr_global_step) + + # Check that the metrics have actually been collected. 
+ self.assertTrue("" in dummy.test_metrics) + metrics = dummy.test_metrics[""] + self.assertTrue("global_step_1" in metrics) + steps, vals = metrics["global_step_1"] + self.assertTrue(len(steps) == len(vals)) + self.assertTrue(len(steps) >= 2) + + # Run for 10 more so that the hook triggers stoppage + for _ in range(10): + sess.run(incr_global_step) + + with self.assertRaisesRegexp(RuntimeError, "after should_stop requested"): + sess.run(incr_global_step) + + def testEarlyStoppingHook(self): + global_step = tf.train.create_global_step() + counter = tf.get_variable("count", initializer=0, dtype=tf.int32) + tf.summary.scalar("count", counter) + incr_global_step = tf.assign_add(global_step, 1) + incr_counter = tf.assign_add(counter, 1) + + # Stop if the global step has not gone up by more than 1 in 20 steps. + + ckpt_dir = self.ckpt_dir("early") + stop_hook = metrics_hook.EarlyStoppingHook( + ckpt_dir, + "count_1", + num_plateau_steps=20, + plateau_delta=1., + plateau_decrease=False, + every_n_steps=10) + with self.sess(stop_hook, ckpt_dir) as sess: + for _ in range(20): + sess.run((incr_global_step, incr_counter)) + + # Summary files should now have 2 values in them + self.flush() + + # Run for more steps so that the hook gets triggered and we verify that we + # don't stop. + for _ in range(30): + sess.run((incr_global_step, incr_counter)) + + self.flush() + + # Run without incrementing the counter + for _ in range(40): + sess.run(incr_global_step) + + # Metrics should be written such that now the counter has gone >20 steps + # without being incremented. + self.flush() + + # Check that we ask for stop + with self.assertRaisesRegexp(RuntimeError, "after should_stop requested"): + for _ in range(30): + sess.run(incr_global_step) + + def testPlateauOpHook(self): + global_step = tf.train.create_global_step() + counter = tf.get_variable("count", initializer=0, dtype=tf.int32) + indicator = tf.get_variable("indicator", initializer=0, dtype=tf.int32) + tf.summary.scalar("count", counter) + incr_global_step = tf.assign_add(global_step, 1) + incr_counter = tf.assign_add(counter, 1) + incr_indicator = tf.assign_add(indicator, 1) + + # Stop if the global step has not gone up by more than 1 in 20 steps. + + ckpt_dir = self.ckpt_dir("plateauop") + stop_hook = metrics_hook.PlateauOpHook( + ckpt_dir, + "count_1", + incr_indicator, + num_plateau_steps=20, + plateau_delta=1., + plateau_decrease=False, + every_n_steps=10) + with self.sess(stop_hook, ckpt_dir) as sess: + for _ in range(20): + sess.run((incr_global_step, incr_counter)) + + # Summary files should now have 2 values in them + self.flush() + + # Run for more steps so that the hook gets triggered and we verify that we + # don't stop. + for _ in range(30): + sess.run((incr_global_step, incr_counter)) + + self.flush() + + # Run without incrementing the counter + for _ in range(30): + sess.run(incr_global_step) + self.flush() + + self.assertTrue(sess.run(indicator) < 1) + + # Metrics should be written such that now the counter has gone >20 steps + # without being incremented. 
+ # Check that we run the incr_indicator op several times + for _ in range(3): + for _ in range(10): + sess.run(incr_global_step) + self.flush() + + self.assertTrue(sess.run(indicator) > 1) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/utils/metrics_test.py b/tensor2tensor/utils/metrics_test.py index 0472d4f21..1057b141f 100644 --- a/tensor2tensor/utils/metrics_test.py +++ b/tensor2tensor/utils/metrics_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,15 +18,13 @@ from __future__ import division from __future__ import print_function -# Dependency imports - import numpy as np from tensor2tensor.utils import metrics -import tensorflow as tf +import tensorflow.compat.v1 as tf -class CommonLayersTest(tf.test.TestCase): +class MetricsTest(tf.test.TestCase): def testAccuracyMetric(self): predictions = np.random.randint(1, 5, size=(12, 12, 12, 1)) @@ -57,6 +56,26 @@ def testAccuracyTopKMetric(self): self.assertAlmostEqual(actual1, expected) self.assertAlmostEqual(actual2, 1.0) + def testPrefixAccuracy(self): + vocab_size = 10 + predictions = tf.one_hot( + tf.constant([[[1], [2], [3], [4], [9], [6], [7], [8]], + [[1], [2], [3], [4], [5], [9], [7], [8]], + [[1], [2], [3], [4], [5], [9], [7], [0]]]), + vocab_size) + labels = tf.expand_dims( + tf.constant([[[1], [2], [3], [4], [5], [6], [7], [8]], + [[1], [2], [3], [4], [5], [6], [7], [8]], + [[1], [2], [3], [4], [5], [6], [7], [0]]]), + axis=-1) + expected_accuracy = np.average([4.0 / 8.0, + 5.0 / 8.0, + 5.0 / 7.0]) + accuracy, _ = metrics.prefix_accuracy(predictions, labels) + with self.test_session() as session: + accuracy_value = session.run(accuracy) + self.assertAlmostEqual(expected_accuracy, accuracy_value) + def testSequenceAccuracyMetric(self): predictions = np.random.randint(4, size=(12, 12, 12, 1)) targets = np.random.randint(4, size=(12, 12, 12, 1)) @@ -71,6 +90,133 @@ def testSequenceAccuracyMetric(self): actual = session.run(a) self.assertEqual(actual, expected) + def testTwoClassAccuracyMetric(self): + predictions = tf.constant([0.0, 0.2, 0.4, 0.6, 0.8, 1.0], dtype=tf.float32) + targets = tf.constant([0, 0, 1, 0, 1, 1], dtype=tf.int32) + expected = 2.0 / 3.0 + with self.test_session() as session: + accuracy, _ = metrics.two_class_accuracy(predictions, targets) + session.run(tf.global_variables_initializer()) + session.run(tf.local_variables_initializer()) + actual = session.run(accuracy) + self.assertAlmostEqual(actual, expected) + + def testTwoClassLogLikelihood(self): + predictions = np.array([0.0, 0.2, 0.4, 0.6, 0.8, 1.0]) + targets = np.array([0, 0, 1, 0, 1, 1]) + expected = (2.0 * np.log(0.8) + 2.0 * np.log(0.4)) / 6.0 + with self.test_session() as session: + avg_log_likelihood, _ = metrics.two_class_log_likelihood( + predictions, targets) + actual = session.run(avg_log_likelihood) + self.assertAlmostEqual(actual, expected) + + def testTwoClassLogLikelihoodVersusOldImplementation(self): + def alt_two_class_log_likelihood_impl(predictions, labels): + float_labels = tf.cast(labels, dtype=tf.float64) + float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64) + # likelihood should be just p for class 1, and 1 - p for class 0. + # signs is 1 for class 1, and -1 for class 0 + signs = 2 * float_labels - tf.ones_like(float_labels) + # constant_term is 1 for class 0, and 0 for class 1. 
+ constant_term = tf.ones_like(float_labels) - float_labels + likelihoods = constant_term + signs * float_predictions + log_likelihoods = tf.log(likelihoods) + avg_log_likelihood = tf.reduce_mean(log_likelihoods) + return avg_log_likelihood + predictions = np.random.rand(1, 10, 1) + targets = np.random.randint(2, size=10) + with self.test_session() as session: + new_log_likelihood, _ = metrics.two_class_log_likelihood( + predictions, targets) + alt_log_likelihood = alt_two_class_log_likelihood_impl( + predictions, targets) + new_impl, alt_impl = session.run([new_log_likelihood, alt_log_likelihood]) + self.assertAlmostEqual(new_impl, alt_impl) + + def testRMSEMetric(self): + predictions = np.full((10, 1), 1) # All 1's + targets = np.full((10, 1), 3) # All 3's + expected = np.sqrt(np.mean((predictions - targets)**2)) # RMSE = 2.0 + with self.test_session() as session: + rmse, _ = metrics.padded_rmse( + tf.constant(predictions, dtype=tf.int32), + tf.constant(targets, dtype=tf.int32)) + session.run(tf.global_variables_initializer()) + actual = session.run(rmse) + self.assertEqual(actual, expected) + + def testUnpaddedRMSEMetric(self): + predictions = np.full((10, 1), 1) # All 1's + targets = np.full((10, 1), 3) # All 3's + expected = np.mean((predictions - targets)**2) # MSE = 4.0 + with self.test_session() as session: + mse, _ = metrics.unpadded_mse( + tf.constant(predictions, dtype=tf.int32), + tf.constant(targets, dtype=tf.int32)) + session.run(tf.global_variables_initializer()) + actual = session.run(mse) + self.assertEqual(actual, expected) + + def testSequenceEditDistanceMetric(self): + predictions = np.array([[3, 4, 5, 1, 0, 0], + [2, 1, 3, 4, 0, 0], + [2, 1, 3, 4, 0, 0]]) + # Targets are just a bit different: + # - first sequence has a different prediction + # - second sequence has a different prediction and one extra step + # - third sequence is identical + targets = np.array([[5, 4, 5, 1, 0, 0], + [2, 5, 3, 4, 1, 0], + [2, 1, 3, 4, 0, 0]]) + # Reshape to match expected input format by metric fns. + predictions = np.reshape(predictions, [3, 6, 1, 1]) + targets = np.reshape(targets, [3, 6, 1, 1]) + with self.test_session() as session: + scores, weight = metrics.sequence_edit_distance( + tf.one_hot(predictions, depth=6, dtype=tf.float32), + tf.constant(targets, dtype=tf.int32)) + session.run(tf.global_variables_initializer()) + actual_scores, actual_weight = session.run([scores, weight]) + self.assertAlmostEqual(actual_scores, 3.0 / 13) + self.assertEqual(actual_weight, 13) + + def testWordErrorRateMetric(self): + + # Ensure availability of the WER metric function in the dictionary. + assert metrics.Metrics.WORD_ERROR_RATE in metrics.METRICS_FNS + + # Test if WER is computed correctly. 
+ ref = np.asarray([ + # a b c + [97, 34, 98, 34, 99], + [97, 34, 98, 34, 99], + [97, 34, 98, 34, 99], + [97, 34, 98, 34, 99], + ]) + + hyp = np.asarray([ + [97, 34, 98, 34, 99], # a b c + [97, 34, 98, 0, 0], # a b + [97, 34, 98, 34, 100], # a b d + [0, 0, 0, 0, 0] # empty + ]) + + labels = np.reshape(ref, ref.shape + (1, 1)) + predictions = np.zeros((len(ref), np.max([len(s) for s in hyp]), 1, 1, 256)) + + for i, sample in enumerate(hyp): + for j, idx in enumerate(sample): + predictions[i, j, 0, 0, idx] = 1 + + with self.test_session() as session: + actual_wer, unused_actual_ref_len = session.run( + metrics.word_error_rate(predictions, labels)) + + expected_wer = 0.417 + places = 3 + self.assertAlmostEqual(round(actual_wer, places), expected_wer, places) + def testNegativeLogPerplexity(self): predictions = np.random.randint(4, size=(12, 12, 12, 1)) targets = np.random.randint(4, size=(12, 12, 12, 1)) @@ -83,6 +229,206 @@ def testNegativeLogPerplexity(self): actual = session.run(a) self.assertEqual(actual.shape, ()) + def testNegativeLogPerplexityMasked(self): + predictions = np.random.randint(4, size=(12, 12, 12, 1)) + targets = np.random.randint(4, size=(12, 12, 12, 1)) + features = { + 'targets_mask': tf.to_float(tf.ones([12, 12])) + } + with self.test_session() as session: + scores, _ = metrics.padded_neg_log_perplexity_with_masking( + tf.one_hot(predictions, depth=4, dtype=tf.float32), + tf.constant(targets, dtype=tf.int32), + features) + a = tf.reduce_mean(scores) + session.run(tf.global_variables_initializer()) + actual = session.run(a) + self.assertEqual(actual.shape, ()) + + def testNegativeLogPerplexityMaskedAssert(self): + predictions = np.random.randint(4, size=(12, 12, 12, 1)) + targets = np.random.randint(4, size=(12, 12, 12, 1)) + features = {} + + with self.assertRaisesRegexp( + ValueError, + 'masked_neg_log_perplexity requires targets_mask feature'): + with self.test_session() as session: + scores, _ = metrics.padded_neg_log_perplexity_with_masking( + tf.one_hot(predictions, depth=4, dtype=tf.float32), + tf.constant(targets, dtype=tf.int32), + features) + a = tf.reduce_mean(scores) + session.run(tf.global_variables_initializer()) + _ = session.run(a) + + def testSigmoidAccuracyOneHot(self): + logits = np.array([ + [-1., 1.], + [1., -1.], + [-1., 1.], + [1., -1.] + ]) + labels = np.array([ + [0, 1], + [1, 0], + [1, 0], + [0, 1] + ]) + logits = np.expand_dims(np.expand_dims(logits, 1), 1) + labels = np.expand_dims(np.expand_dims(labels, 1), 1) + + with self.test_session() as session: + score, _ = metrics.sigmoid_accuracy_one_hot(logits, labels) + session.run(tf.global_variables_initializer()) + session.run(tf.local_variables_initializer()) + s = session.run(score) + self.assertEqual(s, 0.5) + + def testSigmoidAccuracy(self): + logits = np.array([ + [-1., 1.], + [1., -1.], + [-1., 1.], + [1., -1.] + ]) + labels = np.array([1, 0, 0, 1]) + + with self.test_session() as session: + score, _ = metrics.sigmoid_accuracy(logits, labels) + session.run(tf.global_variables_initializer()) + session.run(tf.local_variables_initializer()) + s = session.run(score) + self.assertEqual(s, 0.5) + + def testSigmoidPrecisionOneHot(self): + logits = np.array([ + [-1., 1.], + [1., -1.], + [1., -1.], + [1., -1.] 
+ ]) + labels = np.array([ + [0, 1], + [0, 1], + [0, 1], + [0, 1] + ]) + logits = np.expand_dims(np.expand_dims(logits, 1), 1) + labels = np.expand_dims(np.expand_dims(labels, 1), 1) + + with self.test_session() as session: + score, _ = metrics.sigmoid_precision_one_hot(logits, labels) + session.run(tf.global_variables_initializer()) + session.run(tf.local_variables_initializer()) + s = session.run(score) + self.assertEqual(s, 0.25) + + def testSigmoidRecallOneHot(self): + logits = np.array([ + [-1., 1.], + [1., -1.], + [1., -1.], + [1., -1.] + ]) + labels = np.array([ + [0, 1], + [0, 1], + [0, 1], + [0, 1] + ]) + logits = np.expand_dims(np.expand_dims(logits, 1), 1) + labels = np.expand_dims(np.expand_dims(labels, 1), 1) + + with self.test_session() as session: + score, _ = metrics.sigmoid_recall_one_hot(logits, labels) + session.run(tf.global_variables_initializer()) + session.run(tf.local_variables_initializer()) + s = session.run(score) + self.assertEqual(s, 0.25) + + def testSigmoidCrossEntropyOneHot(self): + logits = np.array([ + [-1., 1.], + [1., -1.], + [1., -1.], + [1., -1.] + ]) + labels = np.array([ + [0, 1], + [1, 0], + [0, 0], + [0, 1] + ]) + logits = np.expand_dims(np.expand_dims(logits, 1), 1) + labels = np.expand_dims(np.expand_dims(labels, 1), 1) + + with self.test_session() as session: + score, _ = metrics.sigmoid_cross_entropy_one_hot(logits, labels) + session.run(tf.global_variables_initializer()) + session.run(tf.local_variables_initializer()) + s = session.run(score) + self.assertAlmostEqual(s, 0.688, places=3) + + def testRocAuc(self): + logits = np.array([ + [-1., 1.], + [1., -1.], + [1., -1.], + [1., -1.] + ]) + labels = np.array([ + [1], + [0], + [1], + [0] + ]) + logits = np.expand_dims(np.expand_dims(logits, 1), 1) + labels = np.expand_dims(np.expand_dims(labels, 1), 1) + + with self.test_session() as session: + score, _ = metrics.roc_auc(logits, labels) + session.run(tf.global_variables_initializer()) + session.run(tf.local_variables_initializer()) + s = session.run(score) + self.assertAlmostEqual(s, 0.750, places=3) + + def testMultilabelMatch3(self): + predictions = np.random.randint(1, 5, size=(100, 1, 1, 1)) + targets = np.random.randint(1, 5, size=(100, 10, 1, 1)) + weights = np.random.randint(0, 2, size=(100, 1, 1, 1)) + targets *= weights + + predictions_repeat = np.repeat(predictions, 10, axis=1) + expected = (predictions_repeat == targets).astype(float) + expected = np.sum(expected, axis=(1, 2, 3)) + expected = np.minimum(expected / 3.0, 1.) 
+ expected = np.sum(expected * weights[:, 0, 0, 0]) / weights.shape[0] + with self.test_session() as session: + scores, weights_ = metrics.multilabel_accuracy_match3( + tf.one_hot(predictions, depth=5, dtype=tf.float32), + tf.constant(targets, dtype=tf.int32)) + a, a_op = tf.metrics.mean(scores, weights_) + session.run(tf.local_variables_initializer()) + session.run(tf.global_variables_initializer()) + _ = session.run(a_op) + actual = session.run(a) + self.assertAlmostEqual(actual, expected, places=6) + + def testPearsonCorrelationCoefficient(self): + predictions = np.random.rand(12, 1) + targets = np.random.rand(12, 1) + + expected = np.corrcoef(np.squeeze(predictions), np.squeeze(targets))[0][1] + with self.test_session() as session: + pearson, _ = metrics.pearson_correlation_coefficient( + tf.constant(predictions, dtype=tf.float32), + tf.constant(targets, dtype=tf.float32)) + session.run(tf.global_variables_initializer()) + session.run(tf.local_variables_initializer()) + actual = session.run(pearson) + self.assertAlmostEqual(actual, expected) + if __name__ == '__main__': tf.test.main() diff --git a/tensor2tensor/utils/misc_utils.py b/tensor2tensor/utils/misc_utils.py new file mode 100644 index 000000000..1e452b3fb --- /dev/null +++ b/tensor2tensor/utils/misc_utils.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Miscellaneous utilities.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import pprint +import re + +# Camel case to snake case utils +_first_cap_re = re.compile("(.)([A-Z][a-z0-9]+)") +_all_cap_re = re.compile("([a-z0-9])([A-Z])") + + +def camelcase_to_snakecase(name): + s1 = _first_cap_re.sub(r"\1_\2", name) + return _all_cap_re.sub(r"\1_\2", s1).lower() + + +def snakecase_to_camelcase(name): + return "".join([w[0].upper() + w[1:] for w in name.split("_")]) + + +def pprint_hparams(hparams): + """Represents hparams using its dictionary and calls pprint.pformat on it.""" + return "\n{}".format(pprint.pformat(hparams.values(), width=1)) diff --git a/tensor2tensor/utils/misc_utils_test.py b/tensor2tensor/utils/misc_utils_test.py new file mode 100644 index 000000000..c988384c3 --- /dev/null +++ b/tensor2tensor/utils/misc_utils_test.py @@ -0,0 +1,77 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for tensor2tensor.utils.misc_utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.utils import hparam +from tensor2tensor.utils import misc_utils +import tensorflow.compat.v1 as tf + + +class MiscUtilsTest(tf.test.TestCase): + + def test_camelcase_to_snakecase(self): + self.assertEqual("typical_camel_case", + misc_utils.camelcase_to_snakecase("TypicalCamelCase")) + self.assertEqual("numbers_fuse2gether", + misc_utils.camelcase_to_snakecase("NumbersFuse2gether")) + self.assertEqual("numbers_fuse2_gether", + misc_utils.camelcase_to_snakecase("NumbersFuse2Gether")) + self.assertEqual("lstm_seq2_seq", + misc_utils.camelcase_to_snakecase("LSTMSeq2Seq")) + self.assertEqual("starts_lower", + misc_utils.camelcase_to_snakecase("startsLower")) + self.assertEqual("starts_lower_caps", + misc_utils.camelcase_to_snakecase("startsLowerCAPS")) + self.assertEqual("caps_fuse_together", + misc_utils.camelcase_to_snakecase("CapsFUSETogether")) + self.assertEqual("startscap", + misc_utils.camelcase_to_snakecase("Startscap")) + self.assertEqual("s_tartscap", + misc_utils.camelcase_to_snakecase("STartscap")) + + def test_snakecase_to_camelcase(self): + self.assertEqual("TypicalCamelCase", + misc_utils.snakecase_to_camelcase("typical_camel_case")) + self.assertEqual("NumbersFuse2gether", + misc_utils.snakecase_to_camelcase("numbers_fuse2gether")) + self.assertEqual("NumbersFuse2Gether", + misc_utils.snakecase_to_camelcase("numbers_fuse2_gether")) + self.assertEqual("LstmSeq2Seq", + misc_utils.snakecase_to_camelcase("lstm_seq2_seq")) + + def test_pprint_hparams(self): + hparams = hparam.HParams( + int_=1, str_="str", bool_=True, float_=1.1, list_int=[1, 2], none=None) + + # pylint: disable=g-inconsistent-quotes + expected_string = r""" +{'bool_': True, + 'float_': 1.1, + 'int_': 1, + 'list_int': [1, + 2], + 'none': None, + 'str_': 'str'}""" + # pylint: enable=g-inconsistent-quotes + + self.assertEqual(expected_string, misc_utils.pprint_hparams(hparams)) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/utils/mlperf_log.py b/tensor2tensor/utils/mlperf_log.py new file mode 100644 index 000000000..0e89aabf9 --- /dev/null +++ b/tensor2tensor/utils/mlperf_log.py @@ -0,0 +1,180 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright 2018 MLBenchmark Group. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+"""Convenience function for logging compliance tags to stdout.
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import inspect
+import json
+import logging
+import os
+import re
+import sys
+import time
+import uuid
+
+# pylint: disable=wildcard-import,unused-wildcard-import
+from tensor2tensor.utils.mlperf_tags import *
+# pylint: enable=wildcard-import,unused-wildcard-import
+
+
+ROOT_DIR_GNMT = None
+
+# Set by imagenet_main.py
+ROOT_DIR_RESNET = None
+
+# Set by transformer_main.py and process_data.py
+ROOT_DIR_TRANSFORMER = None
+
+
+PATTERN = re.compile("[a-zA-Z0-9]+")
+
+LOG_FILE = os.getenv("COMPLIANCE_FILE")
+# create logger with 'mlperf_compliance'
+LOGGER = logging.getLogger("mlperf_compliance")
+LOGGER.setLevel(logging.DEBUG)
+
+_STREAM_HANDLER = logging.StreamHandler(stream=sys.stdout)
+_STREAM_HANDLER.setLevel(logging.INFO)
+LOGGER.addHandler(_STREAM_HANDLER)
+
+if LOG_FILE:
+  _FILE_HANDLER = logging.FileHandler(LOG_FILE)
+  _FILE_HANDLER.setLevel(logging.DEBUG)
+  LOGGER.addHandler(_FILE_HANDLER)
+else:
+  _STREAM_HANDLER.setLevel(logging.DEBUG)
+
+
+def get_mode(hparams):
+  """Returns whether we should do MLPerf logging."""
+  return "mlperf_mode" in hparams and hparams.mlperf_mode
+
+
+def get_caller(stack_index=2, root_dir=None):
+  # pylint: disable=g-doc-args
+  """Returns file.py:lineno of your caller.
+
+  A stack_index of 2 will provide
+    the caller of the function calling this function. Notice that stack_index
+    of 2 or more will fail if called from global scope.
+  """
+  caller = inspect.getframeinfo(inspect.stack()[stack_index][0])
+
+  # Trim the filenames for readability.
+  filename = caller.filename
+  if root_dir is not None:
+    filename = re.sub("^" + root_dir + "/", "", filename)
+  return "%s:%d" % (filename, caller.lineno)
+
+
+def _mlperf_print(key, value=None, benchmark=None, stack_offset=0,
+                  tag_set=None, deferred=False, root_dir=None,
+                  extra_print=False):
+  # pylint: disable=g-doc-args
+  # pylint: disable=g-doc-return-or-yield
+  """Prints out an MLPerf Log Line.
+
+  key: The MLPerf log key such as 'CLOCK' or 'QUALITY'. See the list of log keys
+    in the spec.
+  value: The value which contains no newlines.
+  benchmark: The short code for the benchmark being run, see the MLPerf log
+    spec.
+  stack_offset: Increase the value to go deeper into the stack to find the
+    callsite. For example, if this
+    is being called by a wrapper/helper you may want to set
+    stack_offset=1 to use the callsite
+    of the wrapper/helper itself.
+  tag_set: The set of tags in which key must belong.
+  deferred: The value is not presently known. In that case, a unique ID will
+    be assigned as the value of this call and will be returned. The
+    caller can then include said unique ID when the value is known
+    later.
+  root_dir: Directory prefix which will be trimmed when reporting calling file
+    for compliance logging.
+  extra_print: Print a blank line before logging to clear any text in the line.
+ + Example output: + :::MLP-1537375353 MINGO[17] (eval.py:42) QUALITY: 43.7 + """ + + return_value = None + + if (tag_set is None and not PATTERN.match(key)) or key not in tag_set: + raise ValueError("Invalid key for MLPerf print: " + str(key)) + + if value is not None and deferred: + raise ValueError("deferred is set to True, but a value was provided") + + if deferred: + return_value = str(uuid.uuid4()) + value = "DEFERRED: {}".format(return_value) + + if value is None: + tag = key + else: + str_json = json.dumps(value) + tag = "{key}: {value}".format(key=key, value=str_json) + + callsite = get_caller(2 + stack_offset, root_dir=root_dir) + now = time.time() + + message = ":::MLPv0.5.0 {benchmark} {secs:.9f} ({callsite}) {tag}".format( + secs=now, benchmark=benchmark, callsite=callsite, tag=tag) + + if extra_print: + print() # There could be prior text on a line + + if tag in STDOUT_TAG_SET: # pylint: disable=undefined-variable + LOGGER.info(message) + else: + LOGGER.debug(message) + + return return_value + + +TRANSFORMER_TAG_SET = set(TRANSFORMER_TAGS) # pylint: disable=undefined-variable + + +def transformer_print(key, value=None, stack_offset=2, deferred=False, + hparams=None): + if not hparams or not get_mode(hparams): + return + return _mlperf_print( + key=key, + value=value, + benchmark=TRANSFORMER, # pylint: disable=undefined-variable + stack_offset=stack_offset, + tag_set=TRANSFORMER_TAG_SET, + deferred=deferred, + root_dir=ROOT_DIR_TRANSFORMER) diff --git a/tensor2tensor/utils/mlperf_tags.py b/tensor2tensor/utils/mlperf_tags.py new file mode 100644 index 000000000..1e882ec47 --- /dev/null +++ b/tensor2tensor/utils/mlperf_tags.py @@ -0,0 +1,334 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright 2018 MLBenchmark Group. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Master list of MLPerf tags to be logged for benchmark submissions. 
+""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +# ============================================================================== +# == Benchmarks ================================================================ +# ============================================================================== + +# translation/ +TRANSFORMER = "transformer" +INPUT_MAX_LENGTH = "input_max_length" + +OPT_LR_WARMUP_STEPS = "opt_learning_rate_warmup_steps" + +MODEL_HP_INITIALIZER_GAIN = "model_hp_initializer_gain" +MODEL_HP_VOCAB_SIZE = "model_hp_vocab_size" +MODEL_HP_NUM_HIDDEN_LAYERS = "model_hp_hidden_layers" +MODEL_HP_EMBEDDING_SHARED_WEIGHTS = "model_hp_embedding_shared_weights" +MODEL_HP_ATTENTION_DENSE = "model_hp_attention_dense" +MODEL_HP_ATTENTION_DROPOUT = "model_hp_attention_dropout" +MODEL_HP_FFN_OUTPUT_DENSE = "model_hp_ffn_output_dense" +MODEL_HP_FFN_FILTER_DENSE = "model_hp_ffn_filter_dense" +MODEL_HP_RELU_DROPOUT = "model_hp_relu_dropout" +MODEL_HP_LAYER_POSTPROCESS_DROPOUT = "model_hp_layer_postprocess_dropout" +MODEL_HP_NORM = "model_hp_norm" +MODEL_HP_SEQ_BEAM_SEARCH = "model_hp_sequence_beam_search" + +# ============================================================================== +# == Tags ====================================================================== +# ============================================================================== +""" +Tags may be used by all models, a subset of models, or only one model. A +specification for which models require which tags can be found below the tag +definitions. +""" + +# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ +# All models: Tags which should appear in absolutely every MLPerf model. +# ////////////////////////////////////////////////////////////////////////////// + +# This tag signals to start the timer. Emission of this tag need not be (and +# generally will not be) the first part of a submission script. Rather, this +# tag must be emitted prior to performing any work which the MLPerf rules +# state must be timed. This tag is generally emitted directly before the first +# step which invokes random number generation or the first step which must be +# performed on the system under test. (Whichever comes first.) If clarification +# is needed, please file an issue under: +# https://github.com/mlperf/policies +RUN_START = "run_start" + +# This tag signals that a submission has reached the relevant stopping criteria, +# and has completed all tasks which are performed in the reference. The wall +# time for a submission will be computed as the difference between the time +# when this tag is emitted and the time whe the RUN_START is emitted. +RUN_STOP = "run_stop" + +# This tag should be emitted immediately before ending a run, and should be the +# last tag emitted. This tag should indicate the completion of untimed post +# processing work such as system specific cleanup. +RUN_FINAL = "run_final" + + +# Emit this tag in the place(s) where random seeds are set. +RUN_SET_RANDOM_SEED = "run_set_random_seed" + + +# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ +# Common Values: Constants which are expected to be reported across many models. +# These values are included for convenience. 
+# ////////////////////////////////////////////////////////////////////////////// +BCE = "binary_cross_entropy" +CCE = "categorical_cross_entropy" + +SGD = "stochastic_gradient_descent" + +# Some conventions distinguish between "vanilla" SGD and SGD with momentum +# (where vanilla SGD would be the specific case of momentum=0) +SGD_WITH_MOMENTUM = "stochastic_gradient_descent_with_momentum" + +ADAM = "adam" +LAZY_ADAM = "lazy_adam" + +TRUNCATED_NORMAL = "truncated_normal" + +RELU = "relu" + + +# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ +# Preprocessing: Tags for generic preprocessing steps +# ////////////////////////////////////////////////////////////////////////////// + +# The number of training examples in a single epoch +PREPROC_NUM_TRAIN_EXAMPLES = "preproc_num_train_examples" + +# The number of evaluation examples in a single epoch +PREPROC_NUM_EVAL_EXAMPLES = "preproc_num_eval_examples" + +# This tag is used to declare what part of code tokenizes the training data. +PREPROC_TOKENIZE_TRAINING = "preproc_tokenize_training" + +# This tag is used to declare what part of code tokenizes the evaluation data. +PREPROC_TOKENIZE_EVAL = "preproc_tokenize_eval" + +# The vocabulary size used for tokenization. +PREPROC_VOCAB_SIZE = "preproc_vocab_size" + + +# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ +# Input: Tags for the timed portion of the data input pipeline +# ////////////////////////////////////////////////////////////////////////////// + +# The number of examples in the training portion of the data pipeline. Generally +# this should match PREPROC_NUM_TRAIN_EXAMPLES. If it does not (for instance +# if certain examples are dropped in compliance with MLPerf rules), the +# call which declares this tag is a good place for a comment stating why the +# disparity is expected. +INPUT_SIZE = "input_size" + +# The size of a training minibatch size. If this value is variable, please emit +# "-1" and then log an implementation specific characterization of the batch +# size which is a reasonable analog to the reference. (For instance log that +# all but the last batch has size 64, and the last batch is a partial batch) +INPUT_BATCH_SIZE = "input_batch_size" + +# This tag indicates where the location of the code which defines the order in +# which training examples are traversed. It is not necessary to describe the +# method in the tag emission (though comments are always welcome). Rather, this +# should simply provide a good starting point to an interested party. +INPUT_ORDER = "input_order" + + +# -------------------------------------- +# -- Data Augmentation and Alteration -- +# -------------------------------------- + +# ResNet random cropping +INPUT_CENTRAL_CROP = "input_central_crop" + +INPUT_DISTORTED_CROP_MIN_OBJ_COV = "input_distorted_crop_min_object_covered" +INPUT_DISTORTED_CROP_RATIO_RANGE = "input_distorted_crop_aspect_ratio_range" +INPUT_DISTORTED_CROP_AREA_RANGE = "input_distorted_crop_area_range" +INPUT_DISTORTED_CROP_MAX_ATTEMPTS = "input_distorted_crop_max_attempts" + +INPUT_MEAN_SUBTRACTION = "input_mean_subtraction" + +# Random flip of an image for data augmentation +INPUT_RANDOM_FLIP = "input_random_flip" + +INPUT_RESIZE = "input_resize" +INPUT_RESIZE_ASPECT_PRESERVING = "input_resize_aspect_preserving" + + +# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ +# Opt: Tags for declaring optimizer specific information. 
Submissions should +# declare and log explicit values rather than relying on defaults. +# ////////////////////////////////////////////////////////////////////////////// + +# The name of the optimizer used. (SGD, Adam, etc.) +OPT_NAME = "opt_name" + +OPT_LR = "opt_learning_rate" +OPT_MOMENTUM = "opt_momentum" + +OPT_WEIGHT_DECAY = "opt_weight_decay" + +# beta1, beta2, and epsilon are optimizer hyperparameters associated with the +# Adam optimizer and its variants (e.g. LazyAdam). +OPT_HP_ADAM_BETA1 = "opt_hp_Adam_beta1" +OPT_HP_ADAM_BETA2 = "opt_hp_Adam_beta2" +OPT_HP_ADAM_EPSILON = "opt_hp_Adam_epsilon" + + +# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ +# Train: Tags for control flow during model training. +# ////////////////////////////////////////////////////////////////////////////// + +# This tag is emitted when a model first enters its training loop. This is not +# necessarily when it begins to apply gradients; rather, it should be placed at +# a location which logically partitions the submission code. +TRAIN_LOOP = "train_loop" + +# The current epoch as said epoch begins training. +TRAIN_EPOCH = "train_epoch" + +# This tag is used to indicate approximately where checkpoints are written. Some +# frameworks abstract away checkpoint saving; in such cases simply choose a +# logical place in the code which signals that the framework has been instructed +# to save checkpoints, along with an explanatory comment. +TRAIN_CHECKPOINT = "train_checkpoint" + + +# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ +# Eval: Tags for control flow during model evaluation. +# ////////////////////////////////////////////////////////////////////////////// + +# This tag should be emitted whenever the submission begins an evaluation pass +# for a given set of weights. +EVAL_START = "eval_start" + +# The number of examples on which evaluation is performed. +EVAL_SIZE = "eval_size" + +# The target quality at which the model may stop training. +EVAL_TARGET = "eval_target" + +# The observed accuracy of the model at a given epoch. +EVAL_ACCURACY = "eval_accuracy" + +# This tag should be emitted when the model has determined that it has met the +# target quality set by the reference. +EVAL_STOP = "eval_stop" + + +# \\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\ +# Model: Tags for logging topology specific information. +# ////////////////////////////////////////////////////////////////////////////// + +# The loss function (cross entropy, squared error, etc.) used by the model. For +# more exotic loss functions such as those encountered in object detection +# models, additional benchmark specific subcomponents should also be logged. +MODEL_HP_LOSS_FN = "model_hp_loss_fn" + +MODEL_HP_INITIAL_SHAPE = "model_hp_initial_shape" +MODEL_HP_FINAL_SHAPE = "model_hp_final_shape" + +MODEL_L2_REGULARIZATION = "model_l2_regularization" +MODEL_EXCLUDE_BN_FROM_L2 = "model_exclude_bn_from_l2" + +MODEL_HP_RELU = "model_hp_relu" +MODEL_HP_CONV2D_FIXED_PADDING = "model_hp_conv2d_fixed_padding" +MODEL_HP_BATCH_NORM = "model_hp_batch_norm" +MODEL_HP_DENSE = "model_hp_dense" + + +# ============================================================================== +# == Stdout tags =============================================================== +# ============================================================================== + +# These tags are always logged to stdout. The rest will be logged to a file if +# one is available. 
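To illustrate the emission protocol described by the comments above, a submission script would bracket its timed work with these tags roughly as follows. This sketch is not part of the patch: the emit_tag helper, the epoch count, and the accuracy values are hypothetical stand-ins (T2T's own emission helper is not shown in this hunk), and the module path assumes these constants live in tensor2tensor/utils/mlperf_log.py, as the import in optimize.py later in this patch suggests.

import json
import time

from tensor2tensor.utils import mlperf_log


def emit_tag(tag, value=None):
  # Hypothetical emitter: one timestamped JSON record per tag, on stdout.
  print(json.dumps({"tag": tag, "value": value, "time": time.time()}))


emit_tag(mlperf_log.RUN_SET_RANDOM_SEED, value=1234)  # wherever seeds are set
emit_tag(mlperf_log.RUN_START)   # before any work the MLPerf rules say is timed
emit_tag(mlperf_log.INPUT_BATCH_SIZE, value=4096)
emit_tag(mlperf_log.TRAIN_LOOP)  # entering the training loop
for epoch in range(3):           # hypothetical epoch count
  emit_tag(mlperf_log.TRAIN_EPOCH, value=epoch)
  # ... run the training steps for this epoch ...
  emit_tag(mlperf_log.EVAL_START)
  emit_tag(mlperf_log.EVAL_ACCURACY, value={"epoch": epoch, "value": 0.25})
  emit_tag(mlperf_log.EVAL_STOP)
emit_tag(mlperf_log.RUN_STOP)    # stopping criteria reached, timer stops
emit_tag(mlperf_log.RUN_FINAL)   # last tag, after untimed cleanup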
+STDOUT_TAG_SET = { + RUN_START, + RUN_STOP, + RUN_FINAL, + + TRAIN_LOOP, + TRAIN_EPOCH, + + EVAL_START, + EVAL_SIZE, + EVAL_TARGET, + EVAL_ACCURACY, + EVAL_STOP, +} + + +# ============================================================================== +# == Benchmark tag sets ======================================================== +# ============================================================================== +ALL_USED_TAGS = set() + +TRANSFORMER_TAGS = ( + RUN_START, + RUN_STOP, + RUN_FINAL, + RUN_SET_RANDOM_SEED, + PREPROC_NUM_TRAIN_EXAMPLES, + PREPROC_NUM_EVAL_EXAMPLES, + PREPROC_TOKENIZE_TRAINING, + PREPROC_TOKENIZE_EVAL, + PREPROC_VOCAB_SIZE, + INPUT_BATCH_SIZE, + INPUT_MAX_LENGTH, + INPUT_ORDER, + OPT_NAME, + OPT_LR, + OPT_LR_WARMUP_STEPS, + OPT_HP_ADAM_BETA1, + OPT_HP_ADAM_BETA2, + OPT_HP_ADAM_EPSILON, + TRAIN_LOOP, + TRAIN_EPOCH, + EVAL_START, + EVAL_SIZE, + EVAL_TARGET, + EVAL_ACCURACY, + EVAL_STOP, + MODEL_HP_INITIALIZER_GAIN, + MODEL_HP_VOCAB_SIZE, + MODEL_HP_NUM_HIDDEN_LAYERS, + MODEL_HP_EMBEDDING_SHARED_WEIGHTS, + MODEL_HP_ATTENTION_DENSE, + MODEL_HP_ATTENTION_DROPOUT, + MODEL_HP_FFN_OUTPUT_DENSE, + MODEL_HP_FFN_FILTER_DENSE, + MODEL_HP_RELU_DROPOUT, + MODEL_HP_LAYER_POSTPROCESS_DROPOUT, + MODEL_HP_NORM, + MODEL_HP_SEQ_BEAM_SEARCH, +) + +ALL_USED_TAGS.update(TRANSFORMER_TAGS) diff --git a/tensor2tensor/utils/modality.py b/tensor2tensor/utils/modality.py deleted file mode 100644 index e6b1c9994..000000000 --- a/tensor2tensor/utils/modality.py +++ /dev/null @@ -1,564 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Modalities define the bottom and top of the model (not the body).""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import re - -# Dependency imports - -from six.moves import xrange # pylint: disable=redefined-builtin - -from tensor2tensor.models import common_layers -from tensor2tensor.utils import expert_utils as eu -import tensorflow as tf - - -class Modality(object): - """Abstract Modality class for data transformations. - - An abstract class representing modalities for transforming data to a space - interpretable by sequence models. It has 3 functions: - * inputs_bottom: called on inputs entering the model. - * targets_bottom: called on targets entering the model (e.g., the decoder). - * targets_top : called on targets to generate predictions. - - For example, think about a modality for images. The inputs_bottom function - represents the part of the model applied to an incoming image, e.g., an entry - flow of a convolutional network. The targets_top function represents the top - part of a model that is generating images, e.g., a PixelCNN network. The final - function targets_bottom represents the auto-regressive part of the network. - It is applied to the already-generated part of an image, which is given to - the decoder to generate the next part. 
In some cases, e.g., for text, it is - the same as the inputs_bottom function, as that is the default we use. But, - e.g., for images, a different function might be needed to regress properly. - - All 3 functions have simple and sharded versions. A sub-class only needs - to implement the simple version, the default sharding will be used then. - """ - - def __init__(self, model_hparams): - self._model_hparams = model_hparams - - @property - def name(self): - camelcase_name = type(self).__name__ # DeCamelCase for TF readability. - return re.sub("([A-Z]+)", r"_\1", camelcase_name).lower()[1:] - - @property - def targets_dimensionality(self): - """Integer, the last dimension of the predictions (vocab size).""" - raise NotImplementedError("Abstract Method") - - @property - def _body_input_depth(self): - return self._model_hparams.hidden_size - - def inputs_bottom_simple(self, x): - """Transform one shard of input. - - Args: - x: An int32 Tensor with shape [batch, p0, p1, input_channels] - Returns: - A float32 Tensor with shape [batch, p0, p1, body_input_depth] - """ - raise NotImplementedError("Abstract Method") - - def inputs_bottom_sharded(self, xs, data_parallelism): - """Transform the inputs. - - Args: - xs: A list of num_datashards Tensors (one per shard) - each with shape [batch, p0, p1, depth] - data_parallelism: a expert_utils.Parallelism object - Returns: - shaded_body_input: A list of num_datashards Tensors, each with shape - [batch, p0, p1, body_input_depth]. - """ - return data_parallelism(self.inputs_bottom_simple, xs) - - def targets_bottom_simple(self, x): - """Transform one shard of targets. - - Args: - x: An int32 Tensor with shape [batch, p0, p1, target_channels] - Returns: - A float32 Tensor with shape [batch, p0, p1, body_input_depth] - """ - with tf.variable_scope("targets_bottom_simple"): - return self.inputs_bottom_simple(x) - - def targets_bottom_sharded(self, xs, data_parallelism): - """Transform the targets. - - Args: - xs: A list of num_datashards Tensors (one per shard) - each with shape [batch, p0, p1, target_channels] - data_parallelism: a expert_utils.Parallelism object - Returns: - shaded_body_input: A list of num_datashards Tensors, each with shape - [batch, p0, p1, body_input_depth]. - """ - return data_parallelism(self.targets_bottom_simple, xs) - - def targets_top_simple(self, body_output, targets): - """Transform one shard of output. - - Most classes will override this function. - - Args: - body_output: A Tensor with shape [batch, p0, p1, body_output_depth] - targets: A Tensor with shape [batch, p0, p1, targets_channels, - targets_dimensionality] - Returns: - A Tensor of class logits. - """ - raise NotImplementedError("Abstract Method") - - def targets_top_sharded(self, - sharded_body_output, - sharded_targets, - data_parallelism, - weights_fn=common_layers.weights_nonzero): - """Transform all shards of targets. - - Classes with cross-shard interaction will override this function. - - Args: - sharded_body_output: A list of Tensors. - sharded_targets: A list of Tensors. - data_parallelism: a expert_utils.Parallelism object. - weights_fn: function from targets to target weights. - Returns: - shaded_logits: A list of Tensors. - training_loss: a Scalar. 
- """ - sharded_logits = data_parallelism(self.targets_top_simple, - sharded_body_output, sharded_targets) - loss_num, loss_den = data_parallelism( - common_layers.padded_cross_entropy, - sharded_logits, - sharded_targets, - self._model_hparams.label_smoothing, - weights_fn=weights_fn) - loss = tf.add_n(loss_num) / tf.maximum(1.0, tf.add_n(loss_den)) - return sharded_logits, loss - - -class SymbolModality(Modality): - """Modality for sets of discrete symbols. - - Input: - Embedding. - - Output: - Linear transformation + softmax. - """ - - def __init__(self, model_hparams, vocab_size): - super(SymbolModality, self).__init__(model_hparams) - self._vocab_size = vocab_size - self._datashard_device_to_embedding = None - self._datashard_device_to_softmax_weights = None - - @property - def name(self): - return "symbol_modality_%d_%d" % (self._vocab_size, self._body_input_depth) - - @property - def targets_dimensionality(self): - return self._vocab_size - - def _get_weights(self): - """Create or get concatenated embedding or softmax variable. - - Returns: - a list of self._num_shards Tensors. - """ - num_shards = self._model_hparams.symbol_modality_num_shards - shards = [] - for i in xrange(num_shards): - shard_size = (self._vocab_size // num_shards) + ( - 1 if i < self._vocab_size % num_shards else 0) - var_name = "weights_%d" % i - shards.append( - tf.get_variable( - var_name, [shard_size, self._body_input_depth], - initializer=tf.random_normal_initializer( - 0.0, self._body_input_depth**-0.5))) - if num_shards == 1: - ret = shards[0] - else: - ret = tf.concat(shards, 0) - ret = eu.ConvertGradientToTensor(ret) - return ret - - def bottom_simple(self, x, name, reuse): - with tf.variable_scope(name, reuse=reuse): - # Squeeze out the channels dimension. - x = tf.squeeze(x, axis=3) - var = self._get_weights() - ret = tf.gather(var, x) - if self._model_hparams.multiply_embedding_mode == "sqrt_depth": - ret *= self._body_input_depth**0.5 - ret *= tf.expand_dims(tf.to_float(tf.not_equal(x, 0)), -1) - return ret - - def inputs_bottom_simple(self, x): - if self._model_hparams.shared_embedding_and_softmax_weights: - return self.bottom_simple(x, "shared", reuse=None) - else: - return self.bottom_simple(x, "input_emb", reuse=None) - - def targets_bottom_simple(self, x): - if self._model_hparams.shared_embedding_and_softmax_weights: - return self.bottom_simple(x, "shared", reuse=True) - else: - return self.bottom_simple(x, "target_emb", reuse=None) - - def targets_top_simple(self, body_output, targets): - """Generate logits. - - Args: - body_output: A Tensor with shape [batch, p0, p1, body_input_depth] - targets: A Tensor with shape [batch, p0, p1, 1] - Returns: - logits: A Tensor with shape [batch, p0, p1, ?, vocab_size]. 
- """ - if self._model_hparams.shared_embedding_and_softmax_weights: - scope_name = "shared" - reuse = True - else: - scope_name = "softmax" - reuse = False - with tf.variable_scope(scope_name, reuse=reuse): - var = self._get_weights() - shape = tf.shape(body_output)[:-1] - body_output = tf.reshape(body_output, [-1, self._body_input_depth]) - logits = tf.matmul(body_output, var, transpose_b=True) - logits = tf.reshape(logits, tf.concat([shape, [self._vocab_size]], 0)) - # insert a channels dimension - return tf.expand_dims(logits, 3) - - -class SmallImageModality(Modality): - """Performs strided conv compressions for small image data.""" - - def __init__(self, model_hparams): - super(SmallImageModality, self).__init__(model_hparams) - - @property - def targets_dimensionality(self): - return 256 - - def inputs_bottom_simple(self, inputs): - with tf.variable_scope(self.name): - inputs = common_layers.standardize_images(inputs) - # TODO(lukaszkaiser): summaries here don't work in multi-problem case yet. - # tf.summary.image("inputs", inputs, max_outputs=2) - if self._model_hparams.compress_steps > 0: - strides = (2, 2) - else: - strides = (1, 1) - return common_layers.conv_block( - inputs, - self._body_input_depth, [((1, 1), (3, 3))], - first_relu=False, - strides=strides, - padding="SAME", - force2d=True, - name="small_image_conv") - - def targets_bottom_simple(self, inputs): - with tf.variable_scope(self.name): - inputs = common_layers.standardize_images(inputs) - if self._model_hparams.compress_steps > 0: - kernel, strides = (2, 2), (2, 2) # Crucial to not leak! - else: - kernel, strides = (1, 1), (1, 1) - return common_layers.conv_block( - inputs, - self._body_input_depth, [((1, 1), kernel)], - first_relu=False, - strides=strides, - force2d=True, - name="small_image_conv") - - def targets_top_simple(self, body_output, targets): - with tf.variable_scope(self.name): - if self._model_hparams.compress_steps == 0: - targets_shape = tf.shape(targets) - channels = targets.shape.as_list()[-1] - outputs = tf.layers.dense(body_output, 256 * channels) - return tf.reshape(outputs, [ - targets_shape[0], targets_shape[1], targets_shape[2], 3, 256 - ]) - dilations_kernels = [((1, 1), (3, 1)), ((2, 1), (3, 1)), ((4, 1), (3, 1))] - return common_layers.decompress_seqcnn( - body_output, targets, 256, dilations_kernels, 2, is_2d=True) - - def targets_top_sharded(self, - sharded_body_output, - sharded_targets, - data_parallelism, - weights_fn=common_layers.weights_all): - # Call the default implementation, but weight 1.0 on 0s by default. - # (Since we're processing images and so have no padding and some pixel 0s.) - return super(SmallImageModality, self).targets_top_sharded( - sharded_body_output, - sharded_targets, - data_parallelism, - weights_fn=weights_fn) - - -class ImageModality(Modality): - """Performs embedding and strided conv compressions for large image data.""" - - def __init__(self, model_hparams): - super(ImageModality, self).__init__(model_hparams) - - @property - def targets_dimensionality(self): - return 256 - - def inputs_bottom_simple(self, inputs): - """Transform input from data space to model space. - - Perform the Xception "Entry flow", which consists of two convolutional - filter upscalings followed by three residually connected separable - convolution blocks. - - Args: - inputs: A Tensor with shape [batch, ...] - Returns: - body_input: A Tensor with shape [batch, ?, ?, body_input_depth]. 
- """ - with tf.variable_scope(self.name): - - def xnet_resblock(x, filters, res_relu, name): - with tf.variable_scope(name): - y = common_layers.separable_conv_block( - x, - filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], - first_relu=True, - padding="SAME", - force2d=True, - name="sep_conv_block") - y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2)) - return y + common_layers.conv_block( - x, - filters, [((1, 1), (1, 1))], - padding="SAME", - strides=(2, 2), - first_relu=res_relu, - force2d=True, - name="res_conv0") - - inputs = common_layers.standardize_images(inputs) - # TODO(lukaszkaiser): summaries here don't work in multi-problem case yet. - # tf.summary.image("inputs", inputs, max_outputs=2) - x = common_layers.conv_block( - inputs, - 32, [((1, 1), (3, 3))], - first_relu=False, - padding="SAME", - strides=(2, 2), - force2d=True, - name="conv0") - x = common_layers.conv_block( - x, 64, [((1, 1), (3, 3))], padding="SAME", force2d=True, name="conv1") - x = xnet_resblock(x, min(128, self._body_input_depth), True, "block0") - x = xnet_resblock(x, min(256, self._body_input_depth), False, "block1") - return xnet_resblock(x, self._body_input_depth, False, "block2") - - def targets_top_simple(self, body_output, _): - # TODO(lukaszkaiser): work on a better way to generate large images. - with tf.variable_scope(self.name): - decompressed_inputs = common_layers.deconv_stride2_multistep( - body_output, - self._model_hparams.compress_steps, - body_output.get_shape()[-1], - name="deconv") - return common_layers.conv( - decompressed_inputs, self._vocab_size, (1, 1), padding="SAME") - - -class AudioModality(Modality): - """Performs strided conv compressions for audio data.""" - - def __init__(self, model_hparams): - super(AudioModality, self).__init__(model_hparams) - - def inputs_bottom_simple(self, inputs): - """Transform input from data space to model space. - - Args: - inputs: A Tensor with shape [batch, ...] - Returns: - body_input: A Tensor with shape [batch, ?, ?, body_input_depth]. - """ - with tf.variable_scope(self.name): - # TODO(aidangomez): Will need to sort out a better audio pipeline - def xnet_resblock(x, filters, res_relu, name): - with tf.variable_scope(name): - # Typically audio samples are >100k samples in length and have a width - # of 2 or 4. Mono audio has a single channel while stereo has 2. - y = common_layers.separable_conv_block( - x, - filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], - first_relu=True, - padding="SAME", - force2d=True, - name="sep_conv_block") - y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 2)) - return y + common_layers.conv_block( - x, - filters, [((1, 1), (1, 1))], - padding="SAME", - strides=(2, 2), - first_relu=res_relu, - force2d=True, - name="res_conv0") - - x = tf.to_float(inputs) / 255. - x.set_shape([None, None, None, 1]) - for i in xrange(self._model_hparams.audio_compression): - x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i) - return xnet_resblock(x, self._body_input_depth, False, - "compress_block_final") - - -class AudioSpectralModality(Modality): - """Performs strided conv compressions for audio spectral data.""" - - def __init__(self, model_hparams): - super(AudioSpectralModality, self).__init__(model_hparams) - - def inputs_bottom_simple(self, inputs): - """Transform input from data space to model space. - - Args: - inputs: A Tensor with shape [batch, ...] - Returns: - body_input: A Tensor with shape [batch, ?, ?, body_input_depth]. 
- """ - with tf.variable_scope(self.name): - # TODO(aidangomez): Will need to sort out a better audio pipeline - def xnet_resblock(x, filters, res_relu, name): - with tf.variable_scope(name): - # We only stride along the length dimension to preserve the spectral - # bins (which are tiny in dimensionality relative to length) - y = common_layers.separable_conv_block( - x, - filters, [((1, 1), (3, 3)), ((1, 1), (3, 3))], - first_relu=True, - padding="SAME", - force2d=True, - name="sep_conv_block") - y = common_layers.pool(y, (3, 3), "MAX", "SAME", strides=(2, 1)) - return y + common_layers.conv_block( - x, - filters, [((1, 1), (1, 1))], - padding="SAME", - strides=(2, 1), - first_relu=res_relu, - force2d=True, - name="res_conv0") - - # Bitcast back from int32 - x = tf.bitcast(inputs, tf.float32) - x.set_shape([None, None, None, 1]) - for i in xrange(self._model_hparams.audio_compression): - x = xnet_resblock(x, 2**(i + 1), True, "compress_block_%d" % i) - return xnet_resblock(x, self._body_input_depth, False, - "compress_block_final") - - -class ClassLabelModality(Modality): - """Used for label data.""" - - def __init__(self, model_hparams, vocab_size, is2d=False): - super(ClassLabelModality, self).__init__(model_hparams) - self._vocab_size = vocab_size - self._is_2d = is2d - self._kernel = (3, 3) if is2d else (5, 1) - self._strides = (2, 2) if is2d else (4, 1) - self._padding = "SAME" if is2d else "LEFT" - - @property - def name(self): - return "class_label_modality_%d_%d" % (self._vocab_size, - self._body_input_depth) - - @property - def targets_dimensionality(self): - return self._vocab_size - - def inputs_bottom_simple(self, x): - with tf.variable_scope(self.name): - return common_layers.embedding( - x, - self._vocab_size, - self._body_input_depth, - multiplier=self._body_input_depth**0.5 if - self._model_hparams.multiply_embedding_mode == "sqrt_depth" else 1.0) - - def targets_bottom_simple(self, x): - with tf.variable_scope(self.name): - return tf.zeros([tf.shape(x)[0], 1, 1, self._body_input_depth]) - - def targets_top_simple(self, body_output, _): - """Transform inputs from model space to target space. - - Perform the Xception "Exit flow", consisting of a single residual block and - two separable convolutional upscalings followed by global spatial average - pooling. - - Args: - body_output: A Tensor with shape [batch, ?, ?, body_output_size]. - Returns: - a Tensors, each with shape [batch_size, ?, ?, vocab_size] - """ - with tf.variable_scope(self.name): - x = body_output - - # Assume input is a square with self._body_input_depth channels. - if self._is_2d: - length_float = tf.to_float(tf.shape(x)[1]) - spatial_dim_float = tf.sqrt(length_float) - spatial_dim = tf.to_int32(spatial_dim_float) - x = tf.reshape(x, [-1, spatial_dim, spatial_dim, - self._body_input_depth]) - x = common_layers.conv_block_downsample(x, self._kernel, self._strides, - self._padding) - x = tf.nn.relu(x) - x = tf.reduce_mean(x, axis=[1, 2], keep_dims=True) - res = common_layers.conv(x, self._vocab_size, (1, 1)) - return tf.expand_dims(res, 3) - - def targets_top_sharded(self, - sharded_body_output, - sharded_targets, - data_parallelism, - weights_fn=common_layers.weights_all): - # Call the default implementation, but weight 1.0 on 0s by default. - # (Since we're processing images and so have no padding and some labels 0.) 
- return super(ClassLabelModality, self).targets_top_sharded( - sharded_body_output, - sharded_targets, - data_parallelism, - weights_fn=weights_fn) diff --git a/tensor2tensor/utils/modality_test.py b/tensor2tensor/utils/modality_test.py deleted file mode 100644 index 0b22b4eff..000000000 --- a/tensor2tensor/utils/modality_test.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Tests for Modalities.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -import numpy as np - -from tensor2tensor.utils import expert_utils -from tensor2tensor.utils import modality - -import tensorflow as tf - - -class ModalityTest(tf.test.TestCase): - - def testSymbolModalityInputs(self): - batch_size = 10 - num_datashards = 5 - length = 5 - vocab_size = 5000 - hidden_size = 9 - model_hparams = tf.contrib.training.HParams( - symbol_modality_num_shards=4, - hidden_size=hidden_size, - multiply_embedding_mode="sqrt_depth", - shared_embedding_and_softmax_weights=0) - x = -1 + np.random.random_integers(vocab_size, size=( - batch_size, length, 1, 1)) - m = modality.SymbolModality(model_hparams, vocab_size) - data_parallelism = expert_utils.Parallelism( - ["/device:CPU:0"] * num_datashards, reuse=True) - with self.test_session() as session: - xs = tf.split(x, num_datashards) - sharded_output = m.inputs_bottom_sharded(xs, data_parallelism) - output = tf.concat(sharded_output, 0) - session.run(tf.global_variables_initializer()) - res = session.run(output) - self.assertEqual(res.shape, (batch_size, length, 1, hidden_size)) - - def testSymbolModalityTargets(self): - batch_size = 10 - num_datashards = 5 - length = 6 - height = 7 - hidden_size = 9 - vocab_size = 11 - model_hparams = tf.contrib.training.HParams( - symbol_modality_num_shards=4, - hidden_size=hidden_size, - label_smoothing=0.2, - shared_embedding_and_softmax_weights=0) - body_output = -1 + np.random.random_integers( - 100, size=(batch_size, length, height, hidden_size)) - targets = -1 + np.random.random_integers( - vocab_size, size=(batch_size, length, height, 1)) - m = modality.SymbolModality(model_hparams, vocab_size) - data_parallelism = expert_utils.Parallelism( - ["/device:CPU:0"] * num_datashards, reuse=True) - with self.test_session() as session: - sharded_body_output = tf.split(tf.to_float(body_output), num_datashards) - sharded_targets = tf.split(targets, num_datashards) - sharded_logits, train_loss = m.targets_top_sharded( - sharded_body_output, sharded_targets, data_parallelism) - logits = tf.concat(sharded_logits, 0) - session.run(tf.global_variables_initializer()) - res1, res2 = session.run((logits, train_loss)) - self.assertEqual(res1.shape, (batch_size, length, height, 1, vocab_size)) - self.assertEqual(res2.shape, ()) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensor2tensor/utils/mtf_model.py b/tensor2tensor/utils/mtf_model.py new file mode 100644 index 000000000..08dfbf979 --- 
/dev/null +++ b/tensor2tensor/utils/mtf_model.py @@ -0,0 +1,267 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Mesh-Tensorflow Model in tensor2tensor.""" + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import mesh_tensorflow as mtf + +import six +from tensor2tensor.utils import hparams_lib +from tensor2tensor.utils import learning_rate +from tensor2tensor.utils import metrics +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +from tensorflow.contrib.tpu.python.tpu import tpu_estimator + + +class MtfModel(t2t_model.T2TModel): + """Toy model to test mesh_tensorflow.""" + + @classmethod + def estimator_model_fn(cls, + hparams, + features, + labels, + mode, + config=None, + params=None, + decode_hparams=None, + use_tpu=False): + hparams = hparams_lib.copy_hparams(hparams) + hparams.use_tpu = use_tpu + # merge decode_hparams into hparams if present + if mode == tf_estimator.ModeKeys.PREDICT and decode_hparams is not None: + for k, v in six.iteritems(decode_hparams.values()): + if hasattr(hparams, k) and getattr(hparams, k) != v: + tf.logging.warning("Overriding hparams.%s with %s from decode_hparams" + % (k, v)) + setattr(hparams, k, v) + + # Instantiate model + data_parallelism = None + if not use_tpu and config: + data_parallelism = config.data_parallelism + model = cls( + hparams, + mode, + data_parallelism=data_parallelism, + decode_hparams=decode_hparams) + + global_step = tf.train.get_global_step() + + mesh_shape = mtf.convert_to_shape(hparams.mesh_shape) + layout_rules = mtf.convert_to_layout_rules(hparams.layout) + if use_tpu: + ctx = params["context"] + num_hosts = ctx.num_hosts + host_placement_fn = ctx.tpu_host_placement_function + device_list = [host_placement_fn(host_id=t) for t in range(num_hosts)] + # TODO(ylc): Better estimation of replica cache size? + replica_cache_size = 300 * 1000000 # 300M per replica + # Worker 0 caches all the TPU binaries. 
+ worker0_mem = replica_cache_size * ctx.num_replicas + devices_memeory_usage = [worker0_mem] + [0] * (num_hosts - 1) + var_placer = mtf.utils.BalancedVariablePlacer(device_list, + devices_memeory_usage) + mesh_devices = [""] * mesh_shape.size + mesh_impl = mtf.simd_mesh_impl.SimdMeshImpl( + mesh_shape, layout_rules, mesh_devices, ctx.device_assignment) + else: + var_placer = None + if data_parallelism is None or len(data_parallelism.ps_devices) == 1: + mesh_devices = [""] * mesh_shape.size + else: + assert len(data_parallelism.ps_devices) == mesh_shape.size + mesh_devices = data_parallelism.ps_devices + mesh_impl = mtf.placement_mesh_impl.PlacementMeshImpl( + mesh_shape, layout_rules, mesh_devices) + + graph = mtf.Graph() + mesh = mtf.Mesh(graph, "my_mesh", var_placer) + # PREDICT mode + if mode == tf_estimator.ModeKeys.PREDICT: + return model.estimator_spec_predict(features, mesh, mesh_impl, use_tpu) + + logits, loss = model.mtf_model_fn(features, mesh) + if use_tpu and logits is not None: + logits = mtf.anonymize(logits) + + # TRAIN mode + if mode == tf_estimator.ModeKeys.TRAIN: + var_grads = mtf.gradients( + [loss], [v.outputs[0] for v in graph.trainable_variables]) + lr = learning_rate.learning_rate_schedule(hparams) + tf.summary.scalar("learning_rate", lr) + mtf_lr = mtf.import_tf_tensor( + mesh, tf.convert_to_tensor(lr, dtype=tf.float32), mtf.Shape([])) + optimizer = mtf.optimize.make_optimizer(hparams, mtf_lr) + update_ops = optimizer.apply_grads(var_grads, graph.trainable_variables) + + lowering = mtf.Lowering(graph, {mesh: mesh_impl}) + + tf_loss = lowering.export_to_tf_tensor(loss) + tf_loss = tf.to_float(tf_loss) + if logits and mode != tf_estimator.ModeKeys.TRAIN: + tf_logits = lowering.export_to_tf_tensor(logits) + + if mode == tf_estimator.ModeKeys.TRAIN: + tf_update_ops = [lowering.lowered_operation(op) for op in update_ops] + tf_update_ops.append(tf.assign_add(global_step, 1)) + # tf.logging.info("tf_update_ops: {}".format(tf_update_ops)) + train_op = tf.group(tf_update_ops) + + with mtf.utils.outside_all_rewrites(): + # Copy master variables to slices. Must be called first. + restore_hook = mtf.MtfRestoreHook(lowering) + saver = tf.train.Saver( + tf.global_variables(), + sharded=True, + max_to_keep=10, + keep_checkpoint_every_n_hours=2, + defer_build=False, + save_relative_paths=True) + tf.add_to_collection(tf.GraphKeys.SAVERS, saver) + saver_listener = mtf.MtfCheckpointSaverListener(lowering) + saver_hook = tf.train.CheckpointSaverHook( + hparams.model_dir, + save_steps=1000, + saver=saver, + listeners=[saver_listener]) + + # EVAL mode + if mode == tf_estimator.ModeKeys.EVAL: + tf_logits = lowering.export_to_tf_tensor(logits) + return model.estimator_spec_eval(features, tf_logits, labels, tf_loss, + restore_hook, use_tpu) + + if use_tpu: + # TPU host call. 
Important: need to be called before remove_summaries() + if hparams.tpu_enable_host_call: + host_call = t2t_model.create_host_call(hparams.model_dir) + else: + host_call = None + + if hparams.warm_start_from: + + def scaffold_fn(): + t2t_model.initialize_from_ckpt( + ckpt_dir=hparams.warm_start_from, hparams=hparams) + return tf.train.Scaffold() + else: + scaffold_fn = None + + t2t_model.remove_summaries() + return tpu_estimator.TPUEstimatorSpec( + mode=tf_estimator.ModeKeys.TRAIN, + loss=tf_loss, + train_op=train_op, + host_call=host_call, + training_hooks=[restore_hook, saver_hook], + scaffold_fn=scaffold_fn) + else: + if hparams.warm_start_from: + t2t_model.initialize_from_ckpt( + ckpt_dir=hparams.warm_start_from, hparams=hparams) + return tf_estimator.EstimatorSpec( + tf_estimator.ModeKeys.TRAIN, loss=tf_loss, train_op=train_op, + training_chief_hooks=[restore_hook, saver_hook]) + + def estimator_spec_eval( + self, features, logits, labels, loss, restore_hook, use_tpu): + """Construct EstimatorSpec for EVAL mode.""" + hparams = self.hparams + problem = hparams.problem + if logits.get_shape().ndims == 3: + logits = tf.expand_dims(tf.expand_dims(logits, 2), 3) + + # Support for multiproblem + task_list = [problem] + if hasattr(problem, "task_list"): + task_list = problem.task_list + + eval_metrics_fns = metrics.create_evaluation_metrics(task_list, hparams) + + if use_tpu: + def metric_fn(tf_logits, labels): + with tf.device("cpu:0"), mtf.utils.outside_all_rewrites(): + eval_metrics = {} + for metric_name, metric_fn in six.iteritems(eval_metrics_fns): + if metric_name.split("/")[-1] not in t2t_model.TPU_METRIC_BLACKLIST: + eval_metrics[metric_name] = metric_fn( + tf_logits, None, tf.identity(labels)) + return eval_metrics + return tpu_estimator.TPUEstimatorSpec( + tf_estimator.ModeKeys.EVAL, + evaluation_hooks=[restore_hook], + loss=loss, + eval_metrics=(metric_fn, [logits, labels])) + else: + eval_metrics = {} + predictions = {"predictions": logits} + for metric_name, metric_fn in six.iteritems(eval_metrics_fns): + eval_metrics[metric_name] = metric_fn(logits, features, + features["targets"]) + + return tf_estimator.EstimatorSpec( + tf_estimator.ModeKeys.EVAL, + predictions=predictions, + eval_metric_ops=eval_metrics, + evaluation_hooks=[restore_hook], + loss=loss) + + def estimator_spec_predict(self, features, mesh, mesh_impl, use_tpu): + mtf_samples = mtf.anonymize(self.sample(features, mesh)) + lowering = mtf.Lowering(mesh.graph, {mesh: mesh_impl}) + outputs = lowering.export_to_tf_tensor(mtf_samples) + if self.has_input: + ndims = len(outputs.shape.as_list()) + actual_batch_size = tf.shape(features["inputs"])[0] + outputs = tf.slice( + outputs, [0] * ndims, [actual_batch_size] + [-1] * (ndims - 1)) + predictions = { + "outputs": outputs + } + if features.get("infer_targets") is not None: + predictions["infer_targets"] = features["infer_targets"] + + if features.get("inputs") is not None: + predictions["inputs"] = features["inputs"] + + if use_tpu: + t2t_model.remove_summaries() + return tpu_estimator.TPUEstimatorSpec( + mode=tf_estimator.ModeKeys.PREDICT, + predictions=predictions, + prediction_hooks=[mtf.MtfRestoreHook(lowering)]) + else: + return tf_estimator.EstimatorSpec( + tf_estimator.ModeKeys.PREDICT, + predictions=predictions, + prediction_hooks=[mtf.MtfRestoreHook(lowering)]) + + def sample(self, features, mesh): + """Sample from the model.""" + raise NotImplementedError("TODO(noam): write generic slow mtf sample.") + + def mtf_model_fn(self, features, mesh): + raise 
NotImplementedError("Not implemented") diff --git a/tensor2tensor/utils/multistep_optimizer.py b/tensor2tensor/utils/multistep_optimizer.py new file mode 100644 index 000000000..916455b8d --- /dev/null +++ b/tensor2tensor/utils/multistep_optimizer.py @@ -0,0 +1,137 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Multi-step optimizers simulating large batches. + +Optimizer variants which make it possible to use very large batch sizes with +limited GPU memory. Optimizers in this module accumulate the gradients for n +batches, and call the optimizer's update rule every n batches with the +accumulated gradients. + +See [Saunders et al., 2018](https://arxiv.org/abs/1805.00456) for details. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + + +class MultistepAdamOptimizer(tf.train.AdamOptimizer): + """Adam with SGD updates every n steps with accumulated gradients.""" + + def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8, + use_locking=False, name="Adam", n=1): + super(MultistepAdamOptimizer, self).__init__( + learning_rate=learning_rate, beta1=beta1, beta2=beta2, epsilon=epsilon, + use_locking=use_locking, name=name) + self._n = n # Call Adam optimizer every n batches with accumulated grads + self._n_t = None # n as tensor + + def _create_slots(self, var_list): + """Create slot variables for Adam with accumulated gradients.""" + super(MultistepAdamOptimizer, self)._create_slots(var_list) + first_var = min(var_list, key=lambda x: x.name) + self._create_non_slot_variable(initial_value=0 if self._n == 1 else 1, + name="iter", + colocate_with=first_var) + for v in var_list: + self._zeros_slot(v, "grad_acc", self._name) + + def _get_iter_variable(self): + graph = ( + None if tf.executing_eagerly() else tf.get_default_graph()) + return self._get_non_slot_variable("iter", graph=graph) + + def _prepare(self): + super(MultistepAdamOptimizer, self)._prepare() + self._n_t = tf.convert_to_tensor(self._n, name="n") + + def _apply_cond(self, apply_fn, grad, var, *args, **kwargs): + """Apply conditionally if counter is zero.""" + grad_acc = self.get_slot(var, "grad_acc") + + def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs): + total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype) + adam_op = apply_fn(total_grad, var, *args, **kwargs) + with tf.control_dependencies([adam_op]): + grad_acc_to_zero_op = grad_acc.assign(tf.zeros_like(grad_acc), + use_locking=self._use_locking) + return tf.group(adam_op, grad_acc_to_zero_op) + + def accumulate_gradient(grad_acc, grad): + assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking) + return tf.group(assign_op) # Strip return value + + return tf.cond( + tf.equal(self._get_iter_variable(), 0), + lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs), + lambda: accumulate_gradient(grad_acc, grad)) + + def _apply_dense(self, grad, 
var): + return self._apply_cond( + super(MultistepAdamOptimizer, self)._apply_dense, grad, var) + + def _resource_apply_dense(self, grad, var): + return self._apply_cond( + super(MultistepAdamOptimizer, self)._resource_apply_dense, grad, var) + + def _apply_sparse_shared(self, grad, var, indices, scatter_add): + return self._apply_cond( + super(MultistepAdamOptimizer, self)._apply_sparse_shared, grad, var, + indices, scatter_add) + + def _apply_sparse(self, grad, var): + # TODO(fstahlberg): Implement a sparse version + tf.logging.warning("MultistepAdamOptimizer does not support sparse updates") + dense_grad = tf.convert_to_tensor(grad) + return self._apply_cond( + super(MultistepAdamOptimizer, self)._apply_dense, dense_grad, var) + + def _resource_apply_sparse_duplicate_indices(self, grad, var, indices): + tf.logging.warning("MultistepAdamOptimizer does not support sparse updates") + # Note that conversion to a dense Tensor handles duplicate `indices` + # correctly (summing them). A real sparse implementation will probably want + # to override _resource_apply_sparse instead so it gets them de-duplicated + # automatically. + dense_grad = tf.convert_to_tensor( + tf.IndexedSlices(values=grad, indices=indices, + dense_shape=tf.shape(var))) + return self._apply_cond( + super(MultistepAdamOptimizer, self)._resource_apply_dense, + dense_grad, var) + + def _finish(self, update_ops, name_scope): + """Updates beta_power variables every n batches and incrs counter.""" + iter_ = self._get_iter_variable() + beta1_power, beta2_power = self._get_beta_accumulators() + with tf.control_dependencies(update_ops): + with tf.colocate_with(iter_): + + def update_beta_op(): + update_beta1 = beta1_power.assign( + beta1_power * self._beta1_t, + use_locking=self._use_locking) + update_beta2 = beta2_power.assign( + beta2_power * self._beta2_t, + use_locking=self._use_locking) + return tf.group(update_beta1, update_beta2) + maybe_update_beta = tf.cond( + tf.equal(iter_, 0), update_beta_op, tf.no_op) + with tf.control_dependencies([maybe_update_beta]): + update_iter = iter_.assign(tf.mod(iter_ + 1, self._n_t), + use_locking=self._use_locking) + return tf.group( + *update_ops + [update_iter, maybe_update_beta], name=name_scope) diff --git a/tensor2tensor/utils/multistep_optimizer_test.py b/tensor2tensor/utils/multistep_optimizer_test.py new file mode 100644 index 000000000..5655acfcf --- /dev/null +++ b/tensor2tensor/utils/multistep_optimizer_test.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
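The MultistepAdamOptimizer above behaves as a drop-in tf.train.Optimizer. A minimal usage sketch (not part of this patch; the variable, loss, and learning rate are arbitrary) showing the accumulate-then-apply behaviour with n=4: only every fourth apply_gradients call changes the variables, using the mean of the four accumulated gradients, which is how four small batches simulate one batch four times larger.

import tensorflow.compat.v1 as tf
from tensor2tensor.utils import multistep_optimizer

with tf.Graph().as_default(), tf.Session() as sess:
  w = tf.Variable([1.0, 2.0], name="w")
  loss = tf.reduce_sum(tf.square(w))
  opt = multistep_optimizer.MultistepAdamOptimizer(learning_rate=0.1, n=4)
  train_op = opt.minimize(loss)
  sess.run(tf.global_variables_initializer())
  for step in range(8):
    # Calls 1-3 and 5-7 only accumulate gradients; calls 4 and 8 apply Adam.
    sess.run(train_op)
  print(sess.run(w))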
+ +"""Multi-step Optimizer Test Module for TensorFlow.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from tensor2tensor.utils import multistep_optimizer +import tensorflow.compat.v1 as tf + + +class MultistepAdamOptimizerTest(tf.test.TestCase): + + def testMultistep(self): + dtype = tf.float32 + beta1 = 0.2 + beta2 = 0.99 + alpha = 10.0 + grads0_np_lst = [ + np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype), + np.array([0.2, -0.1], dtype=dtype.as_numpy_dtype), + np.array([0.3, 0.1], dtype=dtype.as_numpy_dtype), + np.array([0.4, -0.1], dtype=dtype.as_numpy_dtype) + ] + grads1_np_lst = [ + np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype), + np.array([0.02, 0.02], dtype=dtype.as_numpy_dtype), + np.array([-0.04, 0.04], dtype=dtype.as_numpy_dtype), + np.array([-0.04, 0.06], dtype=dtype.as_numpy_dtype) + ] + var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) + var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) + # Test accumulating gradients for n=1..4 steps + for n in range(1, 5): + with tf.Graph().as_default(): + with tf.Session(): + singlestep_var0 = tf.Variable(var0_np) + singlestep_var1 = tf.Variable(var1_np) + + multistep_var0 = tf.Variable(var0_np) + multistep_var1 = tf.Variable(var1_np) + + singlestep_opt = tf.train.AdamOptimizer( + beta1=beta1, beta2=beta2, learning_rate=alpha) + multistep_opt = multistep_optimizer.MultistepAdamOptimizer( + n=n, beta1=beta1, beta2=beta2, learning_rate=alpha) + + singlestep_update = singlestep_opt.apply_gradients([ + (tf.constant(sum(grads0_np_lst[:n]) / n), singlestep_var0), + (tf.constant(sum(grads1_np_lst[:n]) / n), singlestep_var1)]) + multistep_updates = [ + multistep_opt.apply_gradients([(tf.constant(g0), multistep_var0), + (tf.constant(g1), multistep_var1)]) + for g0, g1 in zip(grads0_np_lst, grads1_np_lst)][:n] + + self.evaluate(tf.global_variables_initializer()) + (singlestep_beta1_power, + singlestep_beta2_power) = singlestep_opt._get_beta_accumulators() + (multistep_beta1_power, + multistep_beta2_power) = multistep_opt._get_beta_accumulators() + + # Run 3 steps of Adam + for _ in range(1, 4): + self.evaluate(singlestep_update) + for multistep_update in multistep_updates: + self.evaluate(multistep_update) + + self.assertAllCloseAccordingToType( + self.evaluate(singlestep_beta1_power), + self.evaluate(multistep_beta1_power)) + self.assertAllCloseAccordingToType( + self.evaluate(singlestep_beta2_power), + self.evaluate(multistep_beta2_power)) + # Validate updated params + self.assertAllCloseAccordingToType( + self.evaluate(singlestep_var0), + self.evaluate(multistep_var0)) + self.assertAllCloseAccordingToType( + self.evaluate(singlestep_var1), + self.evaluate(multistep_var1)) + + def testResourceVariables(self): + v1 = tf.Variable([1., 2.], use_resource=True) + v2 = tf.Variable([3., 4.], use_resource=True) + with tf.GradientTape() as tape: + tape.watch([v1, v2]) + loss = tf.reduce_sum(tf.gather(params=v1, indices=[0]) + v2) + v1_grad, v2_grad = tape.gradient(loss, [v1, v2]) + multistep_opt = multistep_optimizer.MultistepAdamOptimizer(0.1) + multistep_opt.apply_gradients(((v1_grad, v1), (v2_grad, v2))) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/utils/multistep_with_adamoptimizer.py b/tensor2tensor/utils/multistep_with_adamoptimizer.py new file mode 100644 index 000000000..667f068f9 --- /dev/null +++ b/tensor2tensor/utils/multistep_with_adamoptimizer.py @@ -0,0 +1,256 @@ +# coding=utf-8 +# Copyright 2023 The 
Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright 2019 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Multi-step optimizers simulating large batches. + +Optimizer variants which make it possible to use very large batch sizes with +limited GPU memory. Optimizers in this module accumulate the gradients for n +batches, and call the optimizer's update rule every n batches with the +accumulated gradients. + +See [Saunders et al., 2018](https://arxiv.org/abs/1805.00456) for details. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf +# pylint: disable=g-direct-tensorflow-import +from tensorflow.python.ops import resource_variable_ops +from tensorflow.python.training import training_ops +# pylint: enable=g-direct-tensorflow-import + + +class MultistepAdamOptimizer(tf.train.Optimizer): + """Adam with SGD updates every n steps with accumulated gradients.""" + + def __init__(self, + learning_rate=0.001, + beta1=0.9, + beta2=0.999, + epsilon=1e-8, + use_locking=False, + name="Adam", + n=1): + super(MultistepAdamOptimizer, self).__init__( + use_locking=use_locking, name=name) + self._lr = learning_rate + self._beta1 = beta1 + self._beta2 = beta2 + self._epsilon = epsilon + # Tensor versions of the constructor arguments, created in _prepare(). 
+ self._lr_t = None + self._beta1_t = None + self._beta2_t = None + self._epsilon_t = None + self._n = n # Call Adam optimizer every n batches with accumulated grads + self._n_t = None # n as tensor + + def _get_beta_accumulators(self): + with tf.init_scope(): + if tf.executing_eagerly(): + graph = None + else: + graph = tf.get_default_graph() + return (self._get_non_slot_variable("beta1_power", graph=graph), + self._get_non_slot_variable("beta2_power", graph=graph)) + + def _create_slots(self, var_list): + """Create slot variables for Adam with accumulated gradients.""" + first_var = min(var_list, key=lambda x: x.name) + self._create_non_slot_variable( + initial_value=self._beta1, name="beta1_power", colocate_with=first_var) + self._create_non_slot_variable( + initial_value=self._beta2, name="beta2_power", colocate_with=first_var) + # if iter is initialized as an int32, this optimizer could not run + # with tensorflow_hub with a tensorflow-gpu version + self._create_non_slot_variable( + initial_value=0.0 if self._n == 1 else 1.0, + name="iter", + colocate_with=first_var) + # Create slots for the first and second moments, as well as grad_acc. + for v in var_list: + self._zeros_slot(v, "m", self._name) + self._zeros_slot(v, "v", self._name) + self._zeros_slot(v, "grad_acc", self._name) + + def _get_iter_variable(self): + graph = (None if tf.executing_eagerly() else tf.get_default_graph()) + return self._get_non_slot_variable("iter", graph=graph) + + def _prepare(self): + lr = self._call_if_callable(self._lr) + beta1 = self._call_if_callable(self._beta1) + beta2 = self._call_if_callable(self._beta2) + epsilon = self._call_if_callable(self._epsilon) + self._beta1_t = tf.convert_to_tensor(beta1, name="beta1") + self._beta2_t = tf.convert_to_tensor(beta2, name="beta2") + self._lr_t = tf.convert_to_tensor(lr, name="learning_rate") + self._epsilon_t = tf.convert_to_tensor(epsilon, name="epsilon") + self._n_t = tf.convert_to_tensor(self._n, name="n") + + def _apply_cond(self, apply_fn, grad, var, *args, **kwargs): + """Apply conditionally if counter is zero.""" + grad_acc = self.get_slot(var, "grad_acc") + + def apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs): + total_grad = (grad_acc + grad) / tf.cast(self._n_t, grad.dtype) + adam_op = apply_fn(total_grad, var, *args, **kwargs) + with tf.control_dependencies([adam_op]): + grad_acc_to_zero_op = grad_acc.assign( + tf.zeros_like(grad_acc), use_locking=self._use_locking) + return tf.group(adam_op, grad_acc_to_zero_op) + + def accumulate_gradient(grad_acc, grad): + assign_op = tf.assign_add(grad_acc, grad, use_locking=self._use_locking) + return tf.group(assign_op) # Strip return value + + return tf.cond( + tf.equal(self._get_iter_variable(), 0), + lambda: apply_adam(grad_acc, apply_fn, grad, var, *args, **kwargs), + lambda: accumulate_gradient(grad_acc, grad)) + + def _apply_dense(self, grad, var): + return self._apply_cond(self._apply_dense_in_action, grad, var) + + def _apply_dense_in_action(self, grad, var): + m = self.get_slot(var, "m") + v = self.get_slot(var, "v") + beta1_power, beta2_power = self._get_beta_accumulators() + return training_ops.apply_adam( + var, + m, + v, + tf.cast(beta1_power, var.dtype.base_dtype), + tf.cast(beta2_power, var.dtype.base_dtype), + tf.cast(self._lr_t, var.dtype.base_dtype), + tf.cast(self._beta1_t, var.dtype.base_dtype), + tf.cast(self._beta2_t, var.dtype.base_dtype), + tf.cast(self._epsilon_t, var.dtype.base_dtype), + grad, + use_locking=self._use_locking).op + + def _resource_apply_dense(self, grad, 
var): + return self._apply_cond(self._resource_apply_dense_in_action, grad, var) + + def _resource_apply_dense_in_action(self, grad, var): + m = self.get_slot(var, "m") + v = self.get_slot(var, "v") + beta1_power, beta2_power = self._get_beta_accumulators() + return training_ops.resource_apply_adam( + var.handle, + m.handle, + v.handle, + tf.cast(beta1_power, grad.dtype.base_dtype), + tf.cast(beta2_power, grad.dtype.base_dtype), + tf.cast(self._lr_t, var.dtype.base_dtype), + tf.cast(self._beta1_t, grad.dtype.base_dtype), + tf.cast(self._beta2_t, grad.dtype.base_dtype), + tf.cast(self._epsilon_t, grad.dtype.base_dtype), + grad, + use_locking=self._use_locking) + + def _apply_sparse_shared(self, grad, var, indices, scatter_add): + beta1_power, beta2_power = self._get_beta_accumulators() + beta1_power = tf.cast(beta1_power, var.dtype.base_dtype) + beta2_power = tf.cast(beta2_power, var.dtype.base_dtype) + lr_t = tf.cast(self._lr_t, var.dtype.base_dtype) + beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype) + beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype) + epsilon_t = tf.cast(self._epsilon_t, var.dtype.base_dtype) + lr = (lr_t * tf.sqrt(1 - beta2_power) / (1 - beta1_power)) + # m_t = beta1 * m + (1 - beta1) * g_t + m = self.get_slot(var, "m") + m_scaled_g_values = grad * (1 - beta1_t) + m_t = tf.assign(m, m * beta1_t, use_locking=self._use_locking) + with tf.control_dependencies([m_t]): + m_t = scatter_add(m, indices, m_scaled_g_values) + # v_t = beta2 * v + (1 - beta2) * (g_t * g_t) + v = self.get_slot(var, "v") + v_scaled_g_values = (grad * grad) * (1 - beta2_t) + v_t = tf.assign(v, v * beta2_t, use_locking=self._use_locking) + with tf.control_dependencies([v_t]): + v_t = scatter_add(v, indices, v_scaled_g_values) + v_sqrt = tf.sqrt(v_t) + var_update = tf.assign_sub( + var, lr * m_t / (v_sqrt + epsilon_t), use_locking=self._use_locking) + return tf.group(*[var_update, m_t, v_t]) + + def _apply_sparse(self, grad, var): + # TODO(fstahlberg): Implement a sparse version + tf.logging.warning("MultistepAdamOptimizer does not support sparse updates") + dense_grad = tf.convert_to_tensor(grad) + return self._apply_cond(self._apply_dense_in_action, dense_grad, var) + + def _resource_apply_sparse_duplicate_indices(self, grad, var, indices): + tf.logging.warning("MultistepAdamOptimizer does not support sparse updates") + # Note that conversion to a dense Tensor handles duplicate `indices` + # correctly (summing them). A real sparse implementation will probably want + # to override _resource_apply_sparse instead so it gets them de-duplicated + # automatically. 
+ dense_grad = tf.convert_to_tensor( + tf.IndexedSlices( + values=grad, indices=indices, dense_shape=tf.shape(var))) + return self._apply_cond(self._resource_apply_dense_in_action, dense_grad, + var) + + def _resource_scatter_add(self, x, i, v): + with tf.control_dependencies( + [resource_variable_ops.resource_scatter_add(x.handle, i, v)]): + return x.value() + + def _resource_apply_sparse(self, grad, var, indices): + return self._apply_sparse_shared(grad, var, indices, + self._resource_scatter_add) + + def _finish(self, update_ops, name_scope): + """Updates beta_power variables every n batches and incrs counter.""" + iter_ = self._get_iter_variable() + beta1_power, beta2_power = self._get_beta_accumulators() + with tf.control_dependencies(update_ops): + with tf.colocate_with(iter_): + + def update_beta_op(): + update_beta1 = beta1_power.assign( + beta1_power * self._beta1_t, use_locking=self._use_locking) + update_beta2 = beta2_power.assign( + beta2_power * self._beta2_t, use_locking=self._use_locking) + return tf.group(update_beta1, update_beta2) + + maybe_update_beta = tf.cond( + tf.equal(iter_, 0), update_beta_op, tf.no_op) + with tf.control_dependencies([maybe_update_beta]): + # TODO(cuong): It is suboptimal here because we have to cast twice + # (float to int, and then int to float) + update_iter = iter_.assign( + tf.cast( + tf.mod(tf.cast(iter_ + 1.0, tf.int32), self._n_t), + tf.float32), + use_locking=self._use_locking) + return tf.group( + *update_ops + [update_iter, maybe_update_beta], name=name_scope) diff --git a/tensor2tensor/utils/multistep_with_adamoptimizer_test.py b/tensor2tensor/utils/multistep_with_adamoptimizer_test.py new file mode 100644 index 000000000..5411b2d6c --- /dev/null +++ b/tensor2tensor/utils/multistep_with_adamoptimizer_test.py @@ -0,0 +1,122 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright 2019 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
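For reference, the update assembled in _apply_sparse_shared above (and delegated to training_ops for the dense paths) follows the standard Adam recurrence. A small NumPy sketch of a single step with hypothetical scalar values, where g plays the role of the averaged accumulated gradient when n > 1:

import numpy as np

beta1, beta2, epsilon, lr = 0.9, 0.999, 1e-8, 0.001
m, v, var, g = 0.0, 0.0, 1.0, 0.5        # moments, parameter, gradient
beta1_power, beta2_power = beta1, beta2  # beta accumulators for the first update

m = beta1 * m + (1 - beta1) * g          # m_t
v = beta2 * v + (1 - beta2) * g * g      # v_t
lr_t = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)  # bias-corrected rate
var -= lr_t * m / (np.sqrt(v) + epsilon)  # the assign_sub in the graph version
print(var)                                # parameter after one update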
+ +"""Multi-step Optimizer Test Module for TensorFlow.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from tensor2tensor.utils import multistep_with_adamoptimizer +import tensorflow.compat.v1 as tf + + +class MultistepAdamOptimizerTest(tf.test.TestCase): + + def testMultistep(self): + dtype = tf.float32 + beta1 = 0.2 + beta2 = 0.99 + alpha = 10.0 + grads0_np_lst = [ + np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype), + np.array([0.2, -0.1], dtype=dtype.as_numpy_dtype), + np.array([0.3, 0.1], dtype=dtype.as_numpy_dtype), + np.array([0.4, -0.1], dtype=dtype.as_numpy_dtype) + ] + grads1_np_lst = [ + np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype), + np.array([0.02, 0.02], dtype=dtype.as_numpy_dtype), + np.array([-0.04, 0.04], dtype=dtype.as_numpy_dtype), + np.array([-0.04, 0.06], dtype=dtype.as_numpy_dtype) + ] + var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype) + var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype) + # Test accumulating gradients for n=1..4 steps + for n in range(1, 5): + with tf.Graph().as_default(): + with tf.Session(): + singlestep_var0 = tf.Variable(var0_np) + singlestep_var1 = tf.Variable(var1_np) + + multistep_var0 = tf.Variable(var0_np) + multistep_var1 = tf.Variable(var1_np) + + singlestep_opt = tf.train.AdamOptimizer( + beta1=beta1, beta2=beta2, learning_rate=alpha) + multistep_opt = multistep_with_adamoptimizer.MultistepAdamOptimizer( + n=n, beta1=beta1, beta2=beta2, learning_rate=alpha) + + singlestep_update = singlestep_opt.apply_gradients([ + (tf.constant(sum(grads0_np_lst[:n]) / n), singlestep_var0), + (tf.constant(sum(grads1_np_lst[:n]) / n), singlestep_var1)]) + multistep_updates = [ + multistep_opt.apply_gradients([(tf.constant(g0), multistep_var0), + (tf.constant(g1), multistep_var1)]) + for g0, g1 in zip(grads0_np_lst, grads1_np_lst)][:n] + + self.evaluate(tf.global_variables_initializer()) + (singlestep_beta1_power, + singlestep_beta2_power) = singlestep_opt._get_beta_accumulators() + (multistep_beta1_power, + multistep_beta2_power) = multistep_opt._get_beta_accumulators() + + # Run 3 steps of Adam + for _ in range(1, 4): + self.evaluate(singlestep_update) + for multistep_update in multistep_updates: + self.evaluate(multistep_update) + + self.assertAllCloseAccordingToType( + self.evaluate(singlestep_beta1_power), + self.evaluate(multistep_beta1_power)) + self.assertAllCloseAccordingToType( + self.evaluate(singlestep_beta2_power), + self.evaluate(multistep_beta2_power)) + # Validate updated params + self.assertAllCloseAccordingToType( + self.evaluate(singlestep_var0), + self.evaluate(multistep_var0)) + self.assertAllCloseAccordingToType( + self.evaluate(singlestep_var1), + self.evaluate(multistep_var1)) + + def testResourceVariables(self): + v1 = tf.Variable([1., 2.], use_resource=True) + v2 = tf.Variable([3., 4.], use_resource=True) + with tf.GradientTape() as tape: + tape.watch([v1, v2]) + loss = tf.reduce_sum(tf.gather(params=v1, indices=[0]) + v2) + v1_grad, v2_grad = tape.gradient(loss, [v1, v2]) + multistep_opt = multistep_with_adamoptimizer.MultistepAdamOptimizer(0.1) + multistep_opt.apply_gradients(((v1_grad, v1), (v2_grad, v2))) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/utils/optimize.py b/tensor2tensor/utils/optimize.py new file mode 100644 index 000000000..0725c52f0 --- /dev/null +++ b/tensor2tensor/utils/optimize.py @@ -0,0 +1,401 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Optimization.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import adafactor as adafactor_lib +from tensor2tensor.utils import contrib +from tensor2tensor.utils import misc_utils +from tensor2tensor.utils import mlperf_log +from tensor2tensor.utils import multistep_optimizer +from tensor2tensor.utils import registry +from tensor2tensor.utils import yellowfin + +import tensorflow.compat.v1 as tf + + +from tensorflow.python.framework import dtypes # pylint: disable=g-direct-tensorflow-import + + +def _mixed_precision_is_enabled(hparams): + """Should be the same as in common_attention, avoiding import.""" + activation_dtype = hparams.activation_dtype + weight_dtype = hparams.weight_dtype + return activation_dtype == tf.float16 and weight_dtype == tf.float32 + + +def optimize(loss, + learning_rate, + hparams, + use_tpu=False, + variables=None): + """Minimize loss.""" + loss = weight_decay_and_noise(loss, hparams, learning_rate) + loss = tf.identity(loss, name="total_loss") + if variables is None: + variables = tf.trainable_variables() + # Print trainable variables. + log_variable_sizes(variables, verbose=hparams.summarize_vars) + # Print non-trainable variables. 
+ non_trainable_variables = list( + set(tf.global_variables()) - set(variables)) + log_variable_sizes(non_trainable_variables, tag="Non-trainable variables", + verbose=hparams.summarize_vars) + if hparams.summarize_vars: + summarize_variables(variables) + # Summarize non-trainable variables as well + summarize_variables(non_trainable_variables, tag="Non-trainable variables") + diet_vars = [ + v for v in tf.global_variables() if v.dtype == dtypes.float16_ref + ] + log_variable_sizes( + diet_vars, "Diet Variables", verbose=hparams.summarize_vars) + opt = ConditionalOptimizer(hparams.optimizer, learning_rate, hparams, use_tpu) + if use_tpu: + opt = contrib.tpu().CrossShardOptimizer(opt) + if getattr(hparams, "gpu_automatic_mixed_precision", False): + if use_tpu: + raise RuntimeError("GPU auto mixed precision cannot be used with TPU") + elif _mixed_precision_is_enabled(hparams): + raise RuntimeError( + "GPU auto mixed precision cannot be used with manual mixed precision") + else: + setattr(opt, "_use_locking", "True") + setattr(opt, "_name", "ConditionalOptimizer") + opt = tf.train.experimental.enable_mixed_precision_graph_rewrite(opt) + + opt_summaries = [] + if common_layers.should_generate_summaries(): + tf.summary.scalar("learning_rate", learning_rate) + opt_summaries.append("loss") + if hparams.summarize_grads: + tf.logging.info("Summarizing gradients") + opt_summaries.extend( + ["gradients", "gradient_norm", "global_gradient_norm"]) + + if hparams.clip_grad_norm: + tf.logging.info("Clipping gradients, norm: %0.5f", hparams.clip_grad_norm) + if hparams.grad_noise_scale: + tf.logging.info("Adding noise to gradients, noise scale: %0.5f", + hparams.grad_noise_scale) + + train_op = contrib.layers().optimize_loss( + name="training", + loss=loss, + global_step=tf.train.get_or_create_global_step(), + learning_rate=learning_rate, + clip_gradients=hparams.clip_grad_norm or None, + gradient_noise_scale=hparams.grad_noise_scale or None, + optimizer=opt, + summaries=opt_summaries, + colocate_gradients_with_ops=True, + variables=variables) + return train_op + + +@registry.register_optimizer +def adam(learning_rate, hparams): + """Return adam optimizer for the given params.""" + # We change the default epsilon for Adam. + # Using LazyAdam as it's much faster for large vocabulary embeddings. 
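+  # The registry key is the snake_cased function name, so this optimizer is
+  # selected by setting hparams.optimizer = "adam"; ConditionalOptimizer then
+  # builds it via registry.optimizer("adam")(learning_rate, hparams).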
+ if contrib.is_tf2: + # in TF2 beta1 -> beta_1 :/ + return contrib.opt().LazyAdamOptimizer( + learning_rate, + beta_1=hparams.optimizer_adam_beta1, + beta_2=hparams.optimizer_adam_beta2, + epsilon=hparams.optimizer_adam_epsilon) + else: + return contrib.opt().LazyAdamOptimizer( + learning_rate, + beta1=hparams.optimizer_adam_beta1, + beta2=hparams.optimizer_adam_beta2, + epsilon=hparams.optimizer_adam_epsilon) + + +@registry.register_optimizer +def multistep_adam(learning_rate, hparams): + return multistep_optimizer.MultistepAdamOptimizer( + learning_rate, + beta1=hparams.optimizer_adam_beta1, + beta2=hparams.optimizer_adam_beta2, + epsilon=hparams.optimizer_adam_epsilon, + n=hparams.optimizer_multistep_accumulate_steps) + + +@registry.register_optimizer +def momentum(learning_rate, hparams): + return tf.train.MomentumOptimizer( + learning_rate, + momentum=hparams.optimizer_momentum_momentum, + use_nesterov=hparams.optimizer_momentum_nesterov) + + +@registry.register_optimizer +def yellow_fin(learning_rate, hparams): + return yellowfin.YellowFinOptimizer( + learning_rate=learning_rate, + momentum=hparams.optimizer_momentum_momentum) + + +@registry.register_optimizer +def true_adam(learning_rate, hparams): + return tf.train.AdamOptimizer( + learning_rate, + beta1=hparams.optimizer_adam_beta1, + beta2=hparams.optimizer_adam_beta2, + epsilon=hparams.optimizer_adam_epsilon) + + +@registry.register_optimizer +def adam_w(learning_rate, hparams): + return contrib.opt().AdamWOptimizer( + weight_decay=hparams.weight_decay, + learning_rate=learning_rate, + beta1=hparams.optimizer_adam_beta1, + beta2=hparams.optimizer_adam_beta2, + epsilon=hparams.optimizer_adam_epsilon) + + +@registry.register_optimizer +def adafactor(learning_rate, hparams): + return adafactor_lib.adafactor_optimizer_from_hparams(hparams, learning_rate) + + + + +def _register_base_optimizer(name, opt): + key = misc_utils.camelcase_to_snakecase(name) + if key in registry.Registries.optimizers: + return + registry.register_optimizer(key)( + lambda learning_rate, hparams: opt(learning_rate)) + + +for _name, _opt in contrib.layers().OPTIMIZER_CLS_NAMES.items(): + _register_base_optimizer(_name, _opt) + + +class ConditionalOptimizer(tf.train.Optimizer): + """Conditional optimizer.""" + + def __init__(self, optimizer_name, lr, hparams, use_tpu=False): # pylint: disable=super-init-not-called + tf.logging.info("Using optimizer %s", optimizer_name) + + mlperf_log.transformer_print(key=mlperf_log.OPT_NAME, + value=optimizer_name, + hparams=hparams) + mlperf_log.transformer_print( + key=mlperf_log.OPT_HP_ADAM_BETA1, value=hparams.optimizer_adam_beta1, + hparams=hparams) + mlperf_log.transformer_print( + key=mlperf_log.OPT_HP_ADAM_BETA2, value=hparams.optimizer_adam_beta2, + hparams=hparams) + mlperf_log.transformer_print( + key=mlperf_log.OPT_HP_ADAM_EPSILON, + value=hparams.optimizer_adam_epsilon, + hparams=hparams) + + self._opt = registry.optimizer(optimizer_name)(lr, hparams) + if _mixed_precision_is_enabled(hparams): + if not hparams.mixed_precision_optimizer_loss_scaler: + tf.logging.warning("Using mixed precision without a loss scaler will " + "likely cause numerical errors.") + elif hparams.mixed_precision_optimizer_loss_scaler != "exponential": + raise ValueError("Mixed precision training only supports the " + "exponential loss scaler") + else: + tf.logging.info( + ("Using Exponential Update Loss Scaler with", + "init loss scale of {}".format( + hparams.mixed_precision_optimizer_init_loss_scale))) + manager = 
contrib.mixed_precision().ExponentialUpdateLossScaleManager( + init_loss_scale=hparams.mixed_precision_optimizer_init_loss_scale, + incr_every_n_steps=2000, + decr_every_n_nan_or_inf=2, + incr_ratio=2, + decr_ratio=0.5) + self._opt = contrib.mixed_precision().LossScaleOptimizer( + self._opt, manager) + + self._zero_grads = hparams.optimizer_zero_grads + + def compute_gradients(self, loss, var_list=None, **kwargs): # pylint: disable=arguments-differ + if contrib.is_tf2: + gradients = self._opt.get_gradients(loss, var_list) + gradients = zip(gradients, var_list) + else: + gradients = self._opt.compute_gradients(loss, var_list, **kwargs) + + def cast_grad(g, v): + if v is not None and g is not None: + g = common_layers.cast_like(g, v) + if self._zero_grads and g is None: + g = tf.zeros_like(v) + return (g, v) + gradients = [cast_grad(g, v) for g, v in gradients] + return gradients + + def apply_gradients(self, grads_and_vars, global_step=None, name=None): + if contrib.is_tf2: + with tf.control_dependencies( + [tf.assign_add(tf.train.get_or_create_global_step(), 1)]): + return self._opt.apply_gradients(grads_and_vars, name=name) + else: + return self._opt.apply_gradients( + grads_and_vars, global_step=global_step, name=name) + + +def weight_decay_and_noise(loss, hparams, learning_rate, var_list=None): + """Apply weight decay and weight noise.""" + if var_list is None: + var_list = tf.trainable_variables() + + decay_vars = [v for v in var_list] + noise_vars = [v for v in var_list if "/body/" in v.name] + + weight_decay_loss = weight_decay(hparams.weight_decay, decay_vars) + if hparams.weight_decay and common_layers.should_generate_summaries(): + tf.summary.scalar("losses/weight_decay", weight_decay_loss) + weight_noise_ops = weight_noise(hparams.weight_noise, learning_rate, + noise_vars) + + with tf.control_dependencies(weight_noise_ops): + loss = tf.identity(loss) + + loss += weight_decay_loss + return loss + + +def weight_noise(noise_rate, learning_rate, var_list): + """Apply weight noise to vars in var_list.""" + if not noise_rate: + return [tf.no_op()] + + tf.logging.info("Applying weight noise scaled by learning rate, " + "noise_rate: %0.5f", noise_rate) + + noise_ops = [] + + for v in var_list: + with tf.device(v.device): # pylint: disable=protected-access + scale = noise_rate * learning_rate * 0.001 + if common_layers.should_generate_summaries(): + tf.summary.scalar("weight_noise_scale", scale) + noise = tf.truncated_normal(v.shape) * scale + noise_op = v.assign_add(noise) + noise_ops.append(noise_op) + + return noise_ops + + +def weight_decay(decay_rate, var_list, skip_biases=True): + """Apply weight decay to vars in var_list.""" + if not decay_rate: + return 0. + + tf.logging.info("Applying weight decay, decay_rate: %0.5f", decay_rate) + + weight_decays = [] + for v in var_list: + # Weight decay. + # This is a heuristic way to detect biases that works for main tf.layers. + is_bias = len(v.shape.as_list()) == 1 and v.name.endswith("bias:0") + if not (skip_biases and is_bias): + with tf.device(v.device): + v_loss = tf.nn.l2_loss(v) + weight_decays.append(v_loss) + + return tf.add_n(weight_decays) * decay_rate + + +def log_variable_sizes(var_list=None, tag=None, verbose=False): + """Log the sizes and shapes of variables, and the total size. + + Args: + var_list: a list of variables; defaults to trainable_variables + tag: a string; defaults to "Trainable Variables" + verbose: bool, if True, log every weight; otherwise, log total size only. 
+ """ + if var_list is None: + var_list = tf.trainable_variables() + if tag is None: + tag = "Trainable Variables" + + if not var_list: + return + + name_to_var = {v.name: v for v in var_list} + total_size = 0 + for v_name in sorted(list(name_to_var)): + v = name_to_var[v_name] + v_size = int(np.prod(np.array(v.shape.as_list()))) + if verbose: + tf.logging.info("Weight %s\tshape %s\tsize %d", + v.name[:-2].ljust(80), + str(v.shape).ljust(20), v_size) + total_size += v_size + tf.logging.info("%s Total size: %d", tag, total_size) + + +def summarize_variables(var_list=None, tag=None): + """Summarize the variables. + + Args: + var_list: a list of variables; defaults to trainable_variables. + tag: name scope of the summary; defaults to training_variables/. + """ + if var_list is None: + var_list = tf.trainable_variables() + if tag is None: + tag = "training_variables/" + + name_to_var = {v.name: v for v in var_list} + for v_name in list(name_to_var): + v = name_to_var[v_name] + tf.summary.histogram(tag + v_name, v) + + +def get_variable_initializer(hparams): + """Get variable initializer from hparams.""" + if not hparams.initializer: + return None + + mlperf_log.transformer_print(key=mlperf_log.MODEL_HP_INITIALIZER_GAIN, + value=hparams.initializer_gain, + hparams=hparams) + + if not tf.executing_eagerly(): + tf.logging.info("Using variable initializer: %s", hparams.initializer) + if hparams.initializer == "orthogonal": + return tf.orthogonal_initializer(gain=hparams.initializer_gain) + elif hparams.initializer == "uniform": + max_val = 0.1 * hparams.initializer_gain + return tf.random_uniform_initializer(-max_val, max_val) + elif hparams.initializer == "normal_unit_scaling": + return tf.variance_scaling_initializer( + hparams.initializer_gain, mode="fan_avg", distribution="normal") + elif hparams.initializer == "uniform_unit_scaling": + return tf.variance_scaling_initializer( + hparams.initializer_gain, mode="fan_avg", distribution="uniform") + elif hparams.initializer == "xavier": + return tf.initializers.glorot_uniform() + else: + raise ValueError("Unrecognized initializer: %s" % hparams.initializer) diff --git a/tensor2tensor/utils/optimize_test.py b/tensor2tensor/utils/optimize_test.py new file mode 100644 index 000000000..3b191d6de --- /dev/null +++ b/tensor2tensor/utils/optimize_test.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for tensor2tensor.utils.optimize.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from absl.testing import parameterized +from tensor2tensor.utils import hparams_lib +from tensor2tensor.utils import optimize +import tensorflow.compat.v1 as tf + + +class OptimizeTest(parameterized.TestCase, tf.test.TestCase): + + @parameterized.parameters( + "sgd", + "SGD", + "rms_prop", + "RMSProp", + "adagrad", + "Adagrad", + "adam", + "Adam", + "adam_w", + "AdamW", + ) + def test_names(self, opt_name): + hparams = hparams_lib.create_hparams("basic_1") + optimize.ConditionalOptimizer(opt_name, 0.1, hparams) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/utils/partial_checkpoint_load_hook.py b/tensor2tensor/utils/partial_checkpoint_load_hook.py new file mode 100644 index 000000000..c2795e6cf --- /dev/null +++ b/tensor2tensor/utils/partial_checkpoint_load_hook.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Hook to partially load a checkpoint.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + + +class PartialCheckpointLoad(tf.train.SessionRunHook): + """Partially load train_variables from a checkpoint. + + Hook used to load each variable saved in checkpoint into the graph. It + will ignore any additional variables present in the graph that are not + saved in the checkpoint. (Note: The loaded variables include ADAM/training + variables, if they exist in the checkpoint) + Can perform mapping if the base scopename for graph variables is different + from the checkpoint variables. + """ + + def __init__(self, hook_context, chk_scopename, graph_scopename): + """Initialize the hook with chkp directory and scopenames. + + Args: + hook_context: HookContext object containing hparams. + chk_scopename: Base scopename of variables in the checkpoint being loaded + graph_scopename: Base scopename of variables in current graph + """ + self.checkpoint_path = hook_context.hparams.partial_load_checkpoint + self.chk_scopename = chk_scopename + self.graph_scopename = graph_scopename + + def begin(self): + # TODO(karishmamalkan): Add logging for when variables are loaded + variable_references = {var.name: var for var in tf.all_variables()} + variable_mappings = {} + vars_in_chk = tf.train.list_variables(self.checkpoint_path) + for (var, _) in vars_in_chk: + variable_mappings[var] = variable_references[ + var.replace(self.chk_scopename, self.graph_scopename) + ":0"] + tf.train.init_from_checkpoint(self.checkpoint_path, variable_mappings) diff --git a/tensor2tensor/utils/pruning_utils.py b/tensor2tensor/utils/pruning_utils.py new file mode 100644 index 000000000..b3624743a --- /dev/null +++ b/tensor2tensor/utils/pruning_utils.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities to assist in pruning models.""" + +import numpy as np + +from tensor2tensor.layers import common_layers +from tensor2tensor.utils import registry + +import tensorflow.compat.v1 as tf + + +@registry.register_pruning_strategy +def weight(w, sparsity): + """Weight-level magnitude pruning.""" + w_shape = common_layers.shape_list(w) + k = int(np.prod(w_shape[:-1])) + count = tf.to_int32(k * sparsity) + mask = common_layers.weight_targeting(w, count) + return (1 - mask) * w + + +@registry.register_pruning_strategy +def unit(w, sparsity): + """Unit-level magnitude pruning.""" + w_shape = common_layers.shape_list(w) + count = tf.to_int32(w_shape[-1] * sparsity) + mask = common_layers.unit_targeting(w, count) + return (1 - mask) * w + + +def sparsify(sess, eval_model, pruning_strategy, pruning_params): + """Prune the weights of a model and evaluate.""" + weights = tf.trainable_variables() + + def should_prune(name): + """Whether to prune a weight or not.""" + in_whitelist = not pruning_params.white_list or any( + e in name for e in pruning_params.white_list) + in_blacklist = any(e in name for e in pruning_params.black_list) + + if pruning_params.white_list and not in_whitelist: + return False + elif in_blacklist: + return False + + return True + + weights = [w for w in weights if should_prune(w.name)] + tf.logging.info("Pruning weights: %s" % weights) + unpruned_weights = sess.run(weights) + + reset_op = tf.no_op() + for w, ow in zip(weights, unpruned_weights): + op = tf.assign(w, ow) + reset_op = tf.group(reset_op, op) + + for sparsity in pruning_params.sparsities: + set_weights_op = tf.no_op() + for w in weights: + op = tf.assign(w, pruning_strategy(w, sparsity)) + set_weights_op = tf.group(set_weights_op, op) + sess.run(set_weights_op) + + acc = eval_model() + tf.logging.info("\tPruning to sparsity = %f: acc = %f" % (sparsity, acc)) + sess.run(reset_op) diff --git a/tensor2tensor/utils/quantization.py b/tensor2tensor/utils/quantization.py new file mode 100644 index 000000000..9c2eb748b --- /dev/null +++ b/tensor2tensor/utils/quantization.py @@ -0,0 +1,314 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Utilities related to using bfloat16 activations and/or parameters.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import tensorflow.compat.v1 as tf + +from tensorflow.python.framework import function + + +def bfloat16_activations_var_getter(getter, *args, **kwargs): + """A custom getter function for float32 parameters and bfloat16 activations. + + Args: + getter: custom getter + *args: arguments + **kwargs: keyword arguments + Returns: + variables with the correct dtype. + Raises: + KeyError: if "dtype" is not provided as a kwarg. + """ + requested_dtype = kwargs["dtype"] + if requested_dtype == tf.bfloat16: + kwargs["dtype"] = tf.float32 + var = getter(*args, **kwargs) + # This if statement is needed to guard the cast, because batch norm + # assigns directly to the return value of this custom getter. The cast + # makes the return value not a variable so it cannot be assigned. Batch + # norm variables are always in fp32 so this if statement is never + # triggered for them. + if var.dtype.base_dtype != requested_dtype: + var = tf.cast(var, requested_dtype) + return var + + +def float16_activations_var_getter(getter, *args, **kwargs): + """A custom getter function for float32 parameters and float16 activations. + + This function ensures the following: + 1. All variables requested with type fp16 are stored as type fp32. + 2. All variables requested with type fp32 are returned as type fp16. + See https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/ + #training_tensorflow for more information on this strategy. + + Args: + getter: custom getter + *args: arguments + **kwargs: keyword arguments + + Returns: + variables with the correct dtype. + + Raises: + KeyError: if "dtype" is not provided as a kwarg. + """ + requested_dtype = kwargs["dtype"] + + if requested_dtype == tf.float16: + kwargs["dtype"] = tf.float32 + + if requested_dtype == tf.float32: + requested_dtype = tf.float16 + var = getter(*args, **kwargs) + # This if statement is needed to guard the cast, because batch norm + # assigns directly to the return value of this custom getter. The cast + # makes the return value not a variable so it cannot be assigned. Batch + # norm variables are always in fp32 so this if statement is never + # triggered for them. + if var.dtype.base_dtype != requested_dtype: + var = tf.cast(var, requested_dtype) + return var + + +def simulated_quantize(x, num_bits, noise): + """Simulate quantization to num_bits bits, with externally-stored scale. + + num_bits is the number of bits used to store each value. + noise is a float32 Tensor containing values in [0, 1). + Each value in noise should take different values across + different steps, approximating a uniform distribution over [0, 1). + In the case of replicated TPU training, noise should be identical + across replicas in order to keep the parameters identical across replicas. + + The natural choice for noise would be tf.random_uniform(), + but this is not possible for TPU, since there is currently no way to seed + the different cores to produce identical values across replicas. Instead we + use noise_from_step_num() (see below). + + The quantization scheme is as follows: + + Compute the maximum absolute value by row (call this max_abs). + Store this either in an auxiliary variable or in an extra column. + + Divide the parameters by (max_abs / (2^(num_bits-1)-1)). 
This gives a + float32 value in the range [-2^(num_bits-1)-1, 2^(num_bits-1)-1] + + Unbiased randomized roundoff by adding noise and rounding down. + + This produces a signed integer with num_bits bits which can then be stored. + + Args: + x: a float32 Tensor + num_bits: an integer between 1 and 22 + noise: a float Tensor broadcastable to the shape of x. + + Returns: + a float32 Tensor + """ + shape = x.get_shape().as_list() + if not (len(shape) >= 2 and shape[-1] > 1): + return x + max_abs = tf.reduce_max(tf.abs(x), -1, keepdims=True) + 1e-9 + max_int = 2 ** (num_bits - 1) - 1 + scale = max_abs / max_int + x /= scale + x = tf.floor(x + noise) + # dequantize before storing (since this is a simulation) + x *= scale + return x + + +def noise_from_step_num(): + """Quantization noise equal to (phi * (step_num + 1)) mod 1.0. + + Not using random_uniform here due to a problem on TPU in that random seeds + are not respected, which may cause the parameters on different replicas + to go out-of-sync. + + Returns: + a float32 scalar + """ + step = tf.to_int32(tf.train.get_or_create_global_step()) + 1 + phi = ((5 ** 0.5) - 1) / 2 + # Naive computation tf.mod(phi * step, 1.0) in float32 would be disastrous + # due to loss of precision when the step number gets large. + # Computation in doubles does not work on TPU, so we use this complicated + # alternative computation which does not suffer from these roundoff errors. + ret = 0.0 + for i in range(30): + ret += (((phi * (2 ** i)) % 1.0) # double-precision computation in python + * tf.to_float(tf.mod(step // (2 ** i), 2))) + return tf.mod(ret, 1.0) + + +def _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2): + """Round-off x to cand1 or to cand2 in an unbiased way. + + Cand1 and cand2 are the same shape as x. + For every element of x, the corresponding elements of cand1 and cand2 should + be the two closest bfloat16 values to x. Order does not matter. + cand1 and cand2 must differ from each other. + + Args: + x: A float32 Tensor. + noise: A Tensor broadcastable to the shape of x containing + random uniform values in [0.0, 1.0]. + cand1: A bfloat16 Tensor the same shape as x. + cand2: A bfloat16 Tensor the same shape as x. + + Returns: + A bfloat16 Tensor. + """ + cand1_f = tf.to_float(cand1) + cand2_f = tf.to_float(cand2) + step_size = cand2_f - cand1_f + fpart = (x - cand1_f) / step_size + ret = tf.where(tf.greater(fpart, noise), cand2, cand1) + return ret + + +def _to_bfloat16_unbiased(x, noise): + """Convert a float32 to a bfloat16 using randomized roundoff. + + Args: + x: A float32 Tensor. + noise: a float32 Tensor with values in [0, 1), broadcastable to tf.shape(x) + Returns: + A float32 Tensor. + """ + x_sign = tf.sign(x) + # Make sure x is positive. If it is zero, the two candidates are identical. + x = x * x_sign + 1e-30 + cand1 = tf.to_bfloat16(x) + cand1_f = tf.to_float(cand1) + # This relies on the fact that for a positive bfloat16 b, + # b * 1.005 gives you the next higher bfloat16 and b*0.995 gives you the + # next lower one. Both 1.005 and 0.995 are ballpark estimation. + cand2 = tf.to_bfloat16( + tf.where(tf.greater(x, cand1_f), cand1_f * 1.005, cand1_f * 0.995)) + ret = _randomized_roundoff_to_bfloat16(x, noise, cand1, cand2) + return ret * tf.to_bfloat16(x_sign) + + +class ParameterEncoding(object): + """Helper class for encoding weights as bfloat16. + + For now, the parameters are always stored (encoded) as bfloat16 and decoded + to bfloat32. 
Confusingly, the custom getter then converts the bfloat32 back + to a bfloat16 to use as an activation, assuming that we use bfloat16 for + activations. + + TODO(noam): Add options for activation dtype=float32, and for different + storage dtypes. + """ + + def encode(self, x, noise): + """Encode float32 to bfloat16. + + Args: + x: a float32 Tensor + noise: a float32 Tensor with values in [0, 1), broadcastable to shape(x) + + Returns: + a bfloat16 Tensor + """ + raise NotImplementedError("encode not implemented") + + def decode(self, x): + """Decode bfloat16 to float32.""" + raise NotImplementedError("decode not implemented") + + def _decode_with_identity_gradient(self, x): + # identity backprop through the decoder. + # This means that the optimizer must call encode when updating weights. + @function.Defun(python_grad_func=lambda op, dy: dy, + shape_func=lambda op: [op.inputs[0].get_shape()]) + def my_fn(x): + return self.decode(x) + return my_fn(x) + + def custom_getter(self, activation_dtype=tf.bfloat16): + """A custom getter that uses the encoding for bfloat16 and float32 vars. + + When a bfloat16 or float32 variable is requsted, an encoded float16 + varaible is created, which is then decoded and cast to a bfloat16 + activation. + + Args: + activation_dtype: a dtype to which to convert the decoded value. + + Returns: + a function. + """ + def getter_fn(getter, *args, **kwargs): + requested_dtype = kwargs["dtype"] + if requested_dtype in (tf.bfloat16, tf.float32): + kwargs["dtype"] = tf.bfloat16 + kwargs["initializer"] = _EncodingInitializer( + kwargs["initializer"], self) + ret = self._decode_with_identity_gradient(getter(*args, **kwargs)) + return tf.cast(ret, activation_dtype) + return getter(*args, **kwargs) + return getter_fn + + +class _EncodingInitializer(object): + """Helper class for ParameterEncoding. + + Initializes variables by calling base initializer, then encoding. + """ + + def __init__(self, base_initializer, parameter_encoding): + self._base_initializer = base_initializer + self._parameter_encoding = parameter_encoding + + def __call__(self, shape, dtype, partition_info=None): + if self._base_initializer is None: + # mimic default initialization in tf.get_variable() + if dtype.is_floating: + ret = tf.glorot_uniform_initializer()(shape, dtype) + else: + ret = tf.zeros(shape, dtype) + else: + ret = self._base_initializer(shape, dtype, partition_info=partition_info) + noise = 0.0 # no random noise in the initializer. + return tf.cast(self._parameter_encoding.encode(ret, noise), dtype) + + +class EighthPowerEncoding(ParameterEncoding): + """enc(x) = sign(x) * (abs(x)*128)^8. + + This provides less range and more resolution. + The range of representable positive values is approximately [2^-23, 2^9] + Resolution is 8x better than bfloat16. + """ + + def encode(self, x, noise): + x = tf.to_float(x) + # we can't use tf.pow(..., 8.0) because of a high-error approximation + # on TPU. Instead we square three times. + x = tf.sign(x) * tf.square(tf.square(tf.square(tf.abs(x) * 128.0))) + x = _to_bfloat16_unbiased(x, noise) + return x + + def decode(self, x): + x = tf.to_float(x) + # we can't use tf.pow(..., 0.125) because of a high-error approximation + # on TPU. Instead we sqrt three times. 
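+    # Taking three square roots computes abs(x) ** (1 / 8), undoing the three
+    # squarings in encode(); the division by 128 undoes the * 128 scaling.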
+ return tf.sign(x) * (tf.sqrt(tf.sqrt(tf.sqrt(tf.abs(x)))) / 128.0) diff --git a/tensor2tensor/utils/registry.py b/tensor2tensor/utils/registry.py index 7be75b919..cfe256366 100644 --- a/tensor2tensor/utils/registry.py +++ b/tensor2tensor/utils/registry.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,173 +13,601 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""Registry for models, hyperparameter settings, problem types, and datasets. +"""Object registration. + +Registries are instances of `Registry`. + +See `Registries` for a centralized list of object registries +(models, problems, hyperparameter sets, etc.). + +New functions and classes can be registered using `.register`. The can be +accessed/queried similar to dictionaries, keyed by default by `snake_case` +equivalents. + +``` +@Registries.models.register +class MyModel(T2TModel): + ... + +'my_model' in Registries.models # True +for k in Registries.models: + print(k) # prints 'my_model' +model = Registries.models['my_model'](constructor_arg) +``` + +#### Legacy Support Define a new model by subclassing T2TModel and register it: ``` -@registry.register_model +@register_model class MyModel(T2TModel): ... ``` -Access by snake-cased name: `registry.model("my_model")`. If you're using -`trainer.py`, you can pass on the command-line: `--model=my_model`. +Access by snake-cased name: `model("my_model")`. If you're using +`t2t_trainer.py`, you can pass on the command-line: `--model=my_model`. -See all the models registered: `registry.list_models()`. +See all the models registered: `list_models()`. For hyperparameter sets: - * Register: `registry.register_hparams` - * List: `registry.list_hparams` - * Retrieve by name: `registry.hparams` - * Command-line flag in `trainer.py`: `--hparams_set=name` + * Register: `register_hparams` + * List: `list_hparams` + * Retrieve by name: `hparams` + * Command-line flag in `t2t_trainer.py`: `--hparams_set=name` For hyperparameter ranges: - * Register: `registry.register_ranged_hparams` - * List: `registry.list_ranged_hparams` - * Retrieve by name: `registry.ranged_hparams` - * Command-line flag in `trainer.py`: `--hparams_range=name` + * Register: `register_ranged_hparams` + * List: `list_ranged_hparams` + * Retrieve by name: `ranged_hparams` + * Command-line flag in `t2t_trainer.py`: `--hparams_range=name` """ from __future__ import absolute_import from __future__ import division from __future__ import print_function -import inspect -import re +import collections -# Dependency imports +from tensor2tensor.utils import misc_utils +import tensorflow.compat.v1 as tf -from tensor2tensor.utils import t2t_model +from tensorflow.python.util import tf_inspect as inspect # pylint: disable=g-direct-tensorflow-import -import tensorflow as tf -_MODELS = {} -_HPARAMS = {} -_RANGED_HPARAMS = {} +def default_name(class_or_fn): + """Default name for a class or function. -# Camel case to snake case utils -_first_cap_re = re.compile("(.)([A-Z][a-z0-9]+)") -_all_cap_re = re.compile("([a-z])([A-Z])") + This is the naming function by default for registries expecting classes or + functions. + Args: + class_or_fn: class or function to be named. 
-def _convert_camel_to_snake(name): - s1 = _first_cap_re.sub(r"\1_\2", name) - return _all_cap_re.sub(r"\1_\2", s1).lower() + Returns: + Default name for registration. + """ + return misc_utils.camelcase_to_snakecase(class_or_fn.__name__) -def _reset(): - for ctr in [_MODELS, _HPARAMS, _RANGED_HPARAMS]: - ctr.clear() +default_object_name = lambda obj: default_name(type(obj)) -def _default_name(obj): - return _convert_camel_to_snake(obj.__name__) +class Registry(object): + """Dict-like class for managing function registrations. + ```python + my_registry = Registry("custom_name") -def register_model(name=None): - """Register a model. name defaults to class name snake-cased.""" + @my_registry.register + def my_func(): + pass - def decorator(model_cls, registration_name=None): - """Registers & returns model_cls with registration_name or default name.""" - model_name = registration_name or _default_name(model_cls) - if model_name in _MODELS: - raise ValueError("Model %s already registered." % model_name) - if (not inspect.isclass(model_cls) or - not issubclass(model_cls, t2t_model.T2TModel)): - tf.logging.warning("Model %s is not an instance of T2TModel. " - "Object is expected to abide by its API.", model_name) - _MODELS[model_name] = model_cls - return model_cls + @my_registry.register() + def another_func(): + pass - # Handle if decorator was used without parens - if callable(name): - model_cls = name - return decorator(model_cls, registration_name=_default_name(model_cls)) + @my_registry.register("non_default_name") + def third_func(x, y, z): + pass - return lambda model_cls: decorator(model_cls, name) + def foo(): + pass + my_registry.register()(foo) + my_registry.register("baz")(lambda (x, y): x + y) + my_register.register("bar") -def model(name): - if name not in _MODELS: - raise ValueError("Model %s never registered." % name) - return _MODELS[name] + print(list(my_registry)) + # ["my_func", "another_func", "non_default_name", "foo", "baz"] + # (order may vary) + print(my_registry["non_default_name"] is third_func) # True + print("third_func" in my_registry) # False + print("bar" in my_registry) # False + my_registry["non-existent_key"] # raises KeyError + ``` + Optional validation, on_set callback and value transform also supported. + See `__init__` doc. + """ -def list_models(): - return list(_MODELS) + def __init__(self, + registry_name, + default_key_fn=default_name, + validator=None, + on_set=None, + value_transformer=(lambda k, v: v)): + """Construct a new registry. + + Args: + registry_name: str identifier for the given registry. Used in error msgs. + default_key_fn (optional): function mapping value -> key for registration + when a key is not provided + validator (optional): if given, this is run before setting a given (key, + value) pair. Accepts (key, value) and should raise if there is a + problem. Overwriting existing keys is not allowed and is checked + separately. Values are also checked to be callable separately. + on_set (optional): callback function accepting (key, value) pair which is + run after an item is successfully set. + value_transformer (optional): if run, `__getitem__` will return + value_transformer(key, registered_value). + """ + self._registry = {} + self._name = registry_name + self._default_key_fn = default_key_fn + self._validator = validator + self._on_set = on_set + self._value_transformer = value_transformer + + def default_key(self, value): + """Default key used when key not provided. 
Uses function from __init__.""" + return self._default_key_fn(value) + + @property + def name(self): + return self._name + + def validate(self, key, value): + """Validation function run before setting. Uses function from __init__.""" + if self._validator is not None: + self._validator(key, value) + + def on_set(self, key, value): + """Callback called on successful set. Uses function from __init__.""" + if self._on_set is not None: + self._on_set(key, value) + + def __setitem__(self, key, value): + """Validate, set, and (if successful) call `on_set` for the given item. + + Args: + key: key to store value under. If `None`, `self.default_key(value)` is + used. + value: callable stored under the given key. + + Raises: + KeyError: if key is already in registry. + """ + if key is None: + key = self.default_key(value) + if key in self: + raise KeyError( + "key %s already registered in registry %s" % (key, self._name)) + if not callable(value): + raise ValueError("value must be callable") + self.validate(key, value) + self._registry[key] = value + self.on_set(key, value) + + def register(self, key_or_value=None): + """Decorator to register a function, or registration itself. + + This is primarily intended for use as a decorator, either with or without + a key/parentheses. + ```python + @my_registry.register('key1') + def value_fn(x, y, z): + pass + + @my_registry.register() + def another_fn(x, y): + pass + + @my_registry.register + def third_func(): + pass + ``` + + Note if key_or_value is provided as a non-callable, registration only + occurs once the returned callback is called with a callable as its only + argument. + ```python + callback = my_registry.register('different_key') + 'different_key' in my_registry # False + callback(lambda (x, y): x + y) + 'different_key' in my_registry # True + ``` + + Args: + key_or_value (optional): key to access the registered value with, or the + function itself. If `None` (default), `self.default_key` will be called + on `value` once the returned callback is called with `value` as the only + arg. If `key_or_value` is itself callable, it is assumed to be the value + and the key is given by `self.default_key(key)`. + + Returns: + decorated callback, or callback generated a decorated function. + """ + + def decorator(value, key): + self[key] = value + return value + + # Handle if decorator was used without parens + if callable(key_or_value): + return decorator(value=key_or_value, key=None) + else: + return lambda value: decorator(value, key=key_or_value) + + def __getitem__(self, key): + if key not in self: + raise KeyError("%s never registered with registry %s. 
Available:\n %s" % + (key, self.name, display_list_by_prefix(sorted(self), 4))) + value = self._registry[key] + return self._value_transformer(key, value) + + def __contains__(self, key): + return key in self._registry + + def keys(self): + return self._registry.keys() + + def values(self): + return (self[k] for k in self) # complicated because of transformer + + def items(self): + return ((k, self[k]) for k in self) # complicated because of transformer + + def __iter__(self): + return iter(self._registry) + + def __len__(self): + return len(self._registry) + + def _clear(self): + self._registry.clear() + + def get(self, key, default=None): + return self[key] if key in self else default + + +def _on_model_set(k, v): + v.REGISTERED_NAME = k + + +def _nargs_validator(nargs, message): + """Makes validator for function to ensure it takes nargs args.""" + if message is None: + message = "Registered function must take exactly %d arguments" % nargs + def f(key, value): + del key + spec = inspect.getfullargspec(value) + if (len(spec.args) != nargs or spec.varargs is not None or + spec.varkw is not None): + raise ValueError(message) -def register_hparams(name=None): - """Register an HParams set. name defaults to function name snake-cased.""" + return f - def decorator(hp_fn, registration_name=None): - """Registers & returns hp_fn with registration_name or default name.""" - hp_name = registration_name or _default_name(hp_fn) - if hp_name in _HPARAMS: - raise ValueError("HParams set %s already registered." % hp_name) - _HPARAMS[hp_name] = hp_fn - return hp_fn - # Handle if decorator was used without parens - if callable(name): - hp_fn = name - return decorator(hp_fn, registration_name=_default_name(hp_fn)) +ProblemSpec = collections.namedtuple("ProblemSpec", + ["base_name", "was_reversed", "was_copy"]) - return lambda hp_fn: decorator(hp_fn, name) +def parse_problem_name(name): + """Determines if problem_name specifies a copy and/or reversal. -def hparams(name): - if name not in _HPARAMS: - raise ValueError("HParams set %s never registered." % name) - return _HPARAMS[name] + Args: + name: str, problem name, possibly with suffixes. + Returns: + ProblemSpec: namedtuple with ["base_name", "was_reversed", "was_copy"] -def list_hparams(): - return list(_HPARAMS) + Raises: + ValueError if name contains multiple suffixes of the same type + ('_rev' or '_copy'). One of each is ok. + """ + # Recursively strip tags until we reach a base name. + if name.endswith("_rev"): + base, was_reversed, was_copy = parse_problem_name(name[:-4]) + if was_reversed: + # duplicate rev + raise ValueError( + "Invalid problem name %s: multiple '_rev' instances" % name) + return ProblemSpec(base, True, was_copy) + elif name.endswith("_copy"): + base, was_reversed, was_copy = parse_problem_name(name[:-5]) + if was_copy: + raise ValueError( + "Invalid problem_name %s: multiple '_copy' instances" % name) + return ProblemSpec(base, was_reversed, True) + else: + return ProblemSpec(name, False, False) + + +def get_problem_name(base_name, was_reversed=False, was_copy=False): + """Construct a problem name from base and reversed/copy options. + + Inverse of `parse_problem_name`. + + Args: + base_name: base problem name. Should not end in "_rev" or "_copy" + was_reversed: if the problem is to be reversed + was_copy: if the problem is to be copied + + Returns: + string name consistent with use with `parse_problem_name`. 
+ + Raises: + ValueError if `base_name` ends with "_rev" or "_copy" + """ + if any(base_name.endswith(suffix) for suffix in ("_rev", "_copy")): + raise ValueError("`base_name` cannot end in '_rev' or '_copy'") + name = base_name + if was_copy: + name = "%s_copy" % name + if was_reversed: + name = "%s_rev" % name + return name -def register_ranged_hparams(name=None): - """Register a RangedHParams set. name defaults to fn name snake-cased.""" +def _problem_name_validator(k, v): + del v + if parse_problem_name(k).base_name != k: + raise KeyError( + "Invalid problem name: cannot end in %s or %s" % ("_rev", "_copy")) - def decorator(rhp_fn, registration_name=None): - """Registers & returns hp_fn with registration_name or default name.""" - rhp_name = registration_name or _default_name(rhp_fn) - if rhp_name in _RANGED_HPARAMS: - raise ValueError("RangedHParams set %s already registered." % rhp_name) - # Check that the fn takes a single argument - args, varargs, keywords, _ = inspect.getargspec(rhp_fn) - if len(args) != 1 or varargs is not None or keywords is not None: - raise ValueError("RangedHParams set function must take a single " - "argument, the RangedHParams object.") - _RANGED_HPARAMS[rhp_name] = rhp_fn - return rhp_fn +def _on_problem_set(k, v): + v.name = k - # Handle if decorator was used without parens - if callable(name): - rhp_fn = name - return decorator(rhp_fn, registration_name=_default_name(rhp_fn)) - return lambda rhp_fn: decorator(rhp_fn, name) +def _call_value(k, v): + del k + return v() -def ranged_hparams(name): - if name not in _RANGED_HPARAMS: - raise ValueError("RangedHParams set %s never registered." % name) - return _RANGED_HPARAMS[name] +def _hparams_value_transformer(key, value): + out = value() + if out is None: + raise TypeError("HParams %s is None. 
Make sure the registered function " + "returns the HParams object" % key) + return out -def list_ranged_hparams(): - return list(_RANGED_HPARAMS) +class Registries(object): + """Object holding `Registry` objects.""" + def __init__(self): + raise RuntimeError("Registries is not intended to be instantiated") -def help_string(): - help_str = """Registry contents: + models = Registry("models", on_set=_on_model_set) + + optimizers = Registry( + "optimizers", + validator=_nargs_validator( + 2, "Registered optimizer functions must take exactly two arguments: " + "learning_rate (float) and hparams (HParams).")) + + hparams = Registry("hparams", value_transformer=_hparams_value_transformer) + + ranged_hparams = Registry( + "ranged_hparams", + validator=_nargs_validator( + 1, "Registered ranged_hparams functions must take a single argument, " + "the RangedHParams object.")) - Models: %s + problems = Registry( + "problems", validator=_problem_name_validator, on_set=_on_problem_set) - HParams: %s + attacks = Registry("attacks", value_transformer=_call_value) - RangedHParams: %s + attack_params = Registry("attack_params", value_transformer=_call_value) + + pruning_params = Registry("pruning_params", value_transformer=_call_value) + + pruning_strategies = Registry("pruning_strategies") + + mtf_layers = Registry( + "mtf_layers", + validator=_nargs_validator( + 2, "Registered layer functions must take exaction two arguments: " + "hparams (HParams) and prefix (str).")) + + env_problems = Registry("env_problems", on_set=_on_problem_set) + + +# consistent version of old API +model = Registries.models.__getitem__ +list_models = lambda: sorted(Registries.models) +register_model = Registries.models.register + + +def optimizer(name): + """Get pre-registered optimizer keyed by name. + + `name` should be snake case, though SGD -> sgd, RMSProp -> rms_prop and + UpperCamelCase -> snake_case conversions included for legacy support. + + Args: + name: name of optimizer used in registration. This should be a snake case + identifier, though others supported for legacy reasons. + + Returns: + optimizer """ - return help_str % (list_models(), list_hparams(), list_ranged_hparams()) + warn_msg = ("Please update `registry.optimizer` callsite " + "(likely due to a `HParams.optimizer` value)") + if name == "SGD": + name = "sgd" + tf.logging.warning("'SGD' optimizer now keyed by 'sgd'. %s" % warn_msg) + elif name == "RMSProp": + name = "rms_prop" + tf.logging.warning( + "'RMSProp' optimizer now keyed by 'rms_prop'. %s" % warn_msg) + else: + snake_name = misc_utils.camelcase_to_snakecase(name) + if name != snake_name: + tf.logging.warning( + "optimizer names now keyed by snake_case names. 
%s" % warn_msg) + name = snake_name + return Registries.optimizers[name] + + +list_optimizers = lambda: sorted(Registries.optimizers) +register_optimizer = Registries.optimizers.register + +hparams = Registries.hparams.__getitem__ +register_hparams = Registries.hparams.register + +list_env_problems = lambda: sorted(Registries.env_problems) +register_env_problem = Registries.env_problems.register + + +def list_hparams(prefix=None): + hp_names = sorted(Registries.hparams) + if prefix: + hp_names = [name for name in hp_names if name.startswith(prefix)] + return hp_names + + +ranged_hparams = Registries.ranged_hparams.__getitem__ +list_ranged_hparams = lambda: sorted(Registries.ranged_hparams) +register_ranged_hparams = Registries.ranged_hparams.register + +base_problem = Registries.problems.__getitem__ +list_base_problems = lambda: sorted(Registries.problems) +register_base_problem = Registries.problems.register + +# Keeping for back-compatibility +list_problems = list_base_problems +register_problem = register_base_problem + + +def problem(problem_name, **kwargs): + """Get possibly copied/reversed problem in `base_registry` or `env_registry`. + + Args: + problem_name: string problem name. See `parse_problem_name`. + **kwargs: forwarded to env problem's initialize method. + + Returns: + possibly reversed/copied version of base problem registered in the given + registry. + """ + spec = parse_problem_name(problem_name) + try: + return Registries.problems[spec.base_name]( + was_copy=spec.was_copy, was_reversed=spec.was_reversed) + except KeyError: + # If name is not found in base problems then try creating an env problem + return env_problem(problem_name, **kwargs) + + +def env_problem(env_problem_name, **kwargs): + """Get and initialize the `EnvProblem` with the given name and batch size. + + Args: + env_problem_name: string name of the registered env problem. + **kwargs: forwarded to env problem's initialize method. + + Returns: + an initialized EnvProblem with the given batch size. 
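+    For example, env_problem("my_env_problem", batch_size=32) (with an
+    illustrative registered name) instantiates the registered class and calls
+    initialize(batch_size=32) on the new instance before returning it.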
+ """ + + ep_cls = Registries.env_problems[env_problem_name] + ep = ep_cls() + ep.initialize(**kwargs) + return ep + + +attack = Registries.attacks.__getitem__ +list_attacks = lambda: sorted(Registries.attacks) +register_attack = Registries.attacks.register + +attack_params = Registries.attack_params.__getitem__ +list_attack_params = lambda: sorted(Registries.attack_params) +register_attack_params = Registries.attack_params.register + +pruning_params = Registries.pruning_params.__getitem__ +list_pruning_params = lambda: sorted(Registries.pruning_params) +register_pruning_params = Registries.pruning_params.register + +pruning_strategy = Registries.pruning_strategies.__getitem__ +list_pruning_strategies = lambda: sorted(Registries.pruning_strategies) +register_pruning_strategy = Registries.pruning_strategies.register + + +def display_list_by_prefix(names_list, starting_spaces=0): + """Creates a help string for names_list grouped by prefix.""" + cur_prefix, result_lines = None, [] + space = " " * starting_spaces + for name in sorted(names_list): + split = name.split("_", 1) + prefix = split[0] + if cur_prefix != prefix: + result_lines.append(space + prefix + ":") + cur_prefix = prefix + result_lines.append(space + " * " + name) + return "\n".join(result_lines) + + +def help_string(): + """Generate help string with contents of registry.""" + help_str = """ +Registry contents: +------------------ + + Models: +%s + + HParams: +%s + + RangedHParams: +%s + + Problems: +%s + + Optimizers: +%s + + Attacks: +%s + + Attack HParams: +%s + + Pruning HParams: +%s + + Pruning Strategies: +%s + + Env Problems: +%s +""" + lists = tuple( + display_list_by_prefix(entries, starting_spaces=4) for entries in [ # pylint: disable=g-complex-comprehension + list_models(), + list_hparams(), + list_ranged_hparams(), + list_base_problems(), + list_optimizers(), + list_attacks(), + list_attack_params(), + list_pruning_params(), + list_pruning_strategies(), + list_env_problems(), + ]) + return help_str % lists diff --git a/tensor2tensor/utils/registry_test.py b/tensor2tensor/utils/registry_test.py index 54ccca749..cee46aba2 100644 --- a/tensor2tensor/utils/registry_test.py +++ b/tensor2tensor/utils/registry_test.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -18,20 +19,111 @@ from __future__ import division from __future__ import print_function -# Dependency imports - from tensor2tensor.utils import registry from tensor2tensor.utils import t2t_model -import tensorflow as tf +import tensorflow.compat.v1 as tf + + +# pylint: disable=unused-variable,unused-argument + + +class RegistryClassTest(tf.test.TestCase): + """Test of base registry.Registry class.""" + + def testGetterSetter(self): + r = registry.Registry("test_registry") + r["hello"] = lambda: "world" + r["a"] = lambda: "b" + self.assertEqual(r["hello"](), "world") + self.assertEqual(r["a"](), "b") + + def testDefaultKeyFn(self): + r = registry.Registry("test", default_key_fn=lambda x: x().upper()) + r.register()(lambda: "hello") + self.assertEqual(r["HELLO"](), "hello") + + def testNoKeyProvided(self): + r = registry.Registry("test") + def f(): + return 3 + r.register(f) + self.assertEqual(r["f"](), 3) + + def testMembership(self): + r = registry.Registry("test_registry") + r["a"] = lambda: None + r["b"] = lambda: 4 + self.assertTrue("a" in r) + self.assertTrue("b" in r) + + def testIteration(self): + r = registry.Registry("test_registry") + r["a"] = lambda: None + r["b"] = lambda: 4 + self.assertEqual(sorted(r), ["a", "b"]) + + def testLen(self): + r = registry.Registry("test_registry") + self.assertEqual(len(r), 0) + r["a"] = lambda: None + self.assertEqual(len(r), 1) + r["b"] = lambda: 4 + self.assertEqual(len(r), 2) + + def testTransformer(self): + r = registry.Registry( + "test_registry", value_transformer=lambda x, y: x + y()) + r.register(3)(lambda: 5) + r.register(10)(lambda: 12) + self.assertEqual(r[3], 8) + self.assertEqual(r[10], 22) + self.assertEqual(set(r.values()), set((8, 22))) + self.assertEqual(set(r.items()), set(((3, 8), (10, 22)))) + + def testGet(self): + r = registry.Registry("test_registry", value_transformer=lambda k, v: v()) + r["a"] = lambda: "xyz" + self.assertEqual(r.get("a"), "xyz") + self.assertEqual(r.get("a", 3), "xyz") + self.assertIsNone(r.get("b")) + self.assertEqual(r.get("b", 3), 3) + + +class EnvProblemRegistryTest(tf.test.TestCase): + + def setUp(self): + registry.Registries.env_problems._clear() + + def testEnvProblem(self): + # Register this class and expect to get it back. + + @registry.register_env_problem + class EnvProb(object): + + batch_size = None -# pylint: disable=unused-variable + def initialize(self, batch_size): + self.batch_size = batch_size + + # Get it with given batch_size. + batch_size = 100 + ep = registry.env_problem("env_prob", batch_size=batch_size) + + # name property is set. + self.assertEqual("env_prob", ep.name) + + # initialize was called and therefore batch_size was set. + self.assertEqual(batch_size, ep.batch_size) + + # assert on the type. 
+ self.assertIsInstance(ep, EnvProb) class ModelRegistryTest(tf.test.TestCase): def setUp(self): - registry._reset() + registry.Registries.models._clear() def testT2TModelRegistration(self): @@ -61,7 +153,7 @@ def model_fn(): self.assertTrue(model is model_fn) def testUnknownModel(self): - with self.assertRaisesRegexp(ValueError, "never registered"): + with self.assertRaisesRegexp(KeyError, "never registered"): registry.model("not_registered") def testDuplicateRegistration(self): @@ -70,7 +162,7 @@ def testDuplicateRegistration(self): def m1(): pass - with self.assertRaisesRegexp(ValueError, "already registered"): + with self.assertRaisesRegexp(KeyError, "already registered"): @registry.register_model("m1") def m2(): @@ -88,35 +180,96 @@ def m2(): self.assertSetEqual(set(["m1", "m2"]), set(registry.list_models())) - def testSnakeCase(self): - convert = registry._convert_camel_to_snake - self.assertEqual("typical_camel_case", convert("TypicalCamelCase")) - self.assertEqual("numbers_fuse2gether", convert("NumbersFuse2Gether")) - self.assertEqual("lstm_seq2seq", convert("LSTMSeq2Seq")) - self.assertEqual("starts_lower", convert("startsLower")) - self.assertEqual("starts_lower_caps", convert("startsLowerCAPS")) - self.assertEqual("caps_fuse_together", convert("CapsFUSETogether")) - self.assertEqual("startscap", convert("Startscap")) - self.assertEqual("s_tartscap", convert("STartscap")) +class OptimizerRegistryTest(tf.test.TestCase): + + def setUp(self): + registry.Registries.optimizers._clear() + + def testRegistration(self): + @registry.register_optimizer + def my_optimizer(learning_rate, hparams): + return 3 + + @registry.register_optimizer("my_other_optimizer") + def another_optimizer(learning_rate, hparams): + return 5 + + self.assertEqual(registry.optimizer("my_optimizer"), my_optimizer) + self.assertEqual( + registry.optimizer("my_other_optimizer"), another_optimizer) + + def testMembership(self): + @registry.register_optimizer + def my_optimizer(learning_rate, hparams): + return 3 + + @registry.register_optimizer("my_other_optimizer") + def another_optimizer(learning_rate, hparams): + return 5 + + self.assertTrue("my_optimizer" in registry.Registries.optimizers) + self.assertTrue("my_other_optimizer" in registry.Registries.optimizers) + self.assertFalse("another_optimizer" in registry.Registries.optimizers) + self.assertEqual(len(registry.Registries.optimizers), 2) + + def testArgErrorCheck(self): + with self.assertRaisesRegexp(ValueError, "must take .* arguments"): + registry.Registries.optimizers.register("OneArgs")(lambda x: 4) + with self.assertRaisesRegexp(ValueError, "must take .* arguments"): + registry.Registries.optimizers.register("ThreeArgs")( + lambda x, y, z: 4) + with self.assertRaisesRegexp(ValueError, "must take .* arguments"): + registry.Registries.optimizers.register("NArgs")(lambda *args: 4) + with self.assertRaisesRegexp(ValueError, "must take .* arguments"): + registry.Registries.optimizers.register("Kwargs")(lambda **kargs: 4) + with self.assertRaisesRegexp(ValueError, "must take .* arguments"): + registry.Registries.optimizers.register("TwoAndKwargs")( + lambda a, b, **kargs: 4) + + def testMultipleRegistration(self): + @registry.register_optimizer + def my_optimizer(learning_rate, hparams): + return 3 + + with self.assertRaisesRegexp(KeyError, "already registered"): + + @registry.register_optimizer("my_optimizer") + def another_fn(learning_rate, hparams): + return 5 + + def testUnknownOptimizer(self): + with self.assertRaisesRegexp(KeyError, "never registered"): 
+ registry.optimizer("not_registered_optimizer") + + def testGetterSetterInterface(self): + def f(x, y): + return 3 + + k = "blah" + registry.Registries.optimizers[k] = f + self.assertEqual(registry.optimizer(k), f) + self.assertEqual(registry.Registries.optimizers[k], f) + self.assertEqual(registry.Registries.optimizers[k], registry.optimizer(k)) class HParamRegistryTest(tf.test.TestCase): def setUp(self): - registry._reset() + registry.Registries.hparams._clear() + registry.Registries.ranged_hparams._clear() def testHParamSet(self): @registry.register_hparams def my_hparams_set(): - pass + return 3 @registry.register_ranged_hparams def my_hparams_range(_): pass - self.assertTrue(registry.hparams("my_hparams_set") is my_hparams_set) + self.assertEqual(registry.hparams("my_hparams_set"), my_hparams_set()) self.assertTrue( registry.ranged_hparams("my_hparams_range") is my_hparams_range) @@ -124,28 +277,37 @@ def testNamedRegistration(self): @registry.register_hparams("a") def my_hparams_set(): - pass + return 7 @registry.register_ranged_hparams("a") def my_hparams_range(_): pass - self.assertTrue(registry.hparams("a") is my_hparams_set) + self.assertEqual(registry.hparams("a"), my_hparams_set()) self.assertTrue(registry.ranged_hparams("a") is my_hparams_range) def testUnknownHparams(self): - with self.assertRaisesRegexp(ValueError, "never registered"): + with self.assertRaisesRegexp(KeyError, "never registered"): registry.hparams("not_registered") - with self.assertRaisesRegexp(ValueError, "never registered"): + with self.assertRaisesRegexp(KeyError, "never registered"): registry.ranged_hparams("not_registered") + def testNoneHparams(self): + + @registry.register_hparams + def hp(): + pass + + with self.assertRaisesRegexp(TypeError, "is None"): + registry.hparams("hp") + def testDuplicateRegistration(self): @registry.register_hparams def hp1(): pass - with self.assertRaisesRegexp(ValueError, "already registered"): + with self.assertRaisesRegexp(LookupError, "already registered"): @registry.register_hparams("hp1") def hp2(): @@ -155,7 +317,7 @@ def hp2(): def rhp1(_): pass - with self.assertRaisesRegexp(ValueError, "already registered"): + with self.assertRaisesRegexp(LookupError, "already registered"): @registry.register_ranged_hparams("rhp1") def rhp2(_): @@ -198,5 +360,13 @@ def rhp_bad2(a, b): # pylint: disable=unused-argument pass +class RegistryHelpTest(tf.test.TestCase): + """Test class for common functions.""" + + def testRegistryHelp(self): + help_str = registry.help_string() + self.assertIsNotNone(help_str) + self.assertGreater(len(help_str), 0) + if __name__ == "__main__": tf.test.main() diff --git a/tensor2tensor/utils/restore_hook.py b/tensor2tensor/utils/restore_hook.py new file mode 100644 index 000000000..f8ad7e0fc --- /dev/null +++ b/tensor2tensor/utils/restore_hook.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Restore hooks.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import six + +from tensor2tensor.utils import contrib +import tensorflow.compat.v1 as tf + + +class RestoreHook(tf.train.SessionRunHook): + """Restore variables from a checkpoint path.""" + + def __init__(self, checkpoint_path="", new_model_scope="", old_model_scope="", + include=None, exclude=None): + self._checkpoint_path = checkpoint_path + self._new_model_scope = new_model_scope + self._old_model_scope = old_model_scope + self._include = include + self._exclude = exclude + + def begin(self): + """Load variables from checkpoint. + + New model variables have the following name foramt: + new_model_scope/old_model_scope/xxx/xxx:0 To find the map of + name to variable, need to strip the new_model_scope and then + match the old_model_scope and remove the suffix :0. + + """ + variables_to_restore = contrib.framework().get_variables_to_restore( + include=self._include, exclude=self._exclude) + # remove new_model_scope from variable name prefix + assignment_map = {variable.name[len(self._new_model_scope):]: variable + for variable in variables_to_restore + if variable.name.startswith(self._new_model_scope)} + # remove :0 from variable name suffix + assignment_map = {name.split(":")[0]: variable + for name, variable in six.iteritems(assignment_map) + if name.startswith(self._old_model_scope)} + self._assignment_map = assignment_map + + tf.logging.info("restoring %d variables from checkpoint %s"%( + len(assignment_map), self._checkpoint_path)) + tf.train.init_from_checkpoint(self._checkpoint_path, self._assignment_map) diff --git a/tensor2tensor/utils/rouge.py b/tensor2tensor/utils/rouge.py new file mode 100644 index 000000000..766bac018 --- /dev/null +++ b/tensor2tensor/utils/rouge.py @@ -0,0 +1,236 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# coding=utf-8 +"""ROUGE metric implementation. + +This is a modified and slightly extended version of +https://github.com/miso-belica/sumy/blob/dev/sumy/evaluation/rouge.py. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from __future__ import unicode_literals + +import numpy as np + +import tensorflow.compat.v1 as tf + + +def _len_lcs(x, y): + """Returns the length of the Longest Common Subsequence between two seqs. + + Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence + + Args: + x: sequence of words + y: sequence of words + + Returns + integer: Length of LCS between x and y + """ + table = _lcs(x, y) + n, m = len(x), len(y) + return table[n, m] + + +def _lcs(x, y): + """Computes the length of the LCS between two seqs. + + The implementation below uses a DP programming algorithm and runs + in O(nm) time where n = len(x) and m = len(y). 
+ Source: http://www.algorithmist.com/index.php/Longest_Common_Subsequence + + Args: + x: collection of words + y: collection of words + + Returns: + Table of dictionary of coord and len lcs + """ + n, m = len(x), len(y) + table = {} + for i in range(n + 1): + for j in range(m + 1): + if i == 0 or j == 0: + table[i, j] = 0 + elif x[i - 1] == y[j - 1]: + table[i, j] = table[i - 1, j - 1] + 1 + else: + table[i, j] = max(table[i - 1, j], table[i, j - 1]) + return table + + +def _f_lcs(llcs, m, n): + """Computes the LCS-based F-measure score. + + Source: https://www.microsoft.com/en-us/research/publication/ + rouge-a-package-for-automatic-evaluation-of-summaries/ + + Args: + llcs: Length of LCS + m: number of words in reference summary + n: number of words in candidate summary + + Returns: + Float. LCS-based F-measure score + """ + r_lcs = llcs / m + p_lcs = llcs / n + beta = p_lcs / (r_lcs + 1e-12) + num = (1 + (beta**2)) * r_lcs * p_lcs + denom = r_lcs + ((beta**2) * p_lcs) + f_lcs = num / (denom + 1e-12) + return f_lcs + + +def rouge_l_sentence_level(eval_sentences, ref_sentences): + """Computes ROUGE-L (sentence level) of two collections of sentences. + + Source: https://www.microsoft.com/en-us/research/publication/ + rouge-a-package-for-automatic-evaluation-of-summaries/ + + Calculated according to: + R_lcs = LCS(X,Y)/m + P_lcs = LCS(X,Y)/n + F_lcs = ((1 + beta^2)*R_lcs*P_lcs) / (R_lcs + (beta^2) * P_lcs) + + where: + X = reference summary + Y = Candidate summary + m = length of reference summary + n = length of candidate summary + + Args: + eval_sentences: The sentences that have been picked by the summarizer + ref_sentences: The sentences from the reference set + + Returns: + A float: F_lcs + """ + + f1_scores = [] + for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences): + m = len(ref_sentence) + n = len(eval_sentence) + lcs = _len_lcs(eval_sentence, ref_sentence) + f1_scores.append(_f_lcs(lcs, m, n)) + return np.mean(f1_scores, dtype=np.float32) + + +def rouge_l_fscore(predictions, labels, **unused_kwargs): + """ROUGE scores computation between labels and predictions. + + This is an approximate ROUGE scoring method since we do not glue word pieces + or decode the ids and tokenize the output. + + Args: + predictions: tensor, model predictions + labels: tensor, gold output. + + Returns: + rouge_l_fscore: approx rouge-l f1 score. + """ + outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) + # Convert the outputs and labels to a [batch_size, input_length] tensor. + outputs = tf.squeeze(outputs, axis=[-1, -2]) + labels = tf.squeeze(labels, axis=[-1, -2]) + rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels), + tf.float32) + return rouge_l_f_score, tf.constant(1.0) + + +def _get_ngrams(n, text): + """Calculates n-grams. + + Args: + n: which n-grams to calculate + text: An array of tokens + + Returns: + A set of n-grams + """ + ngram_set = set() + text_length = len(text) + max_index_ngram_start = text_length - n + for i in range(max_index_ngram_start + 1): + ngram_set.add(tuple(text[i:i + n])) + return ngram_set + + +def rouge_n(eval_sentences, ref_sentences, n=2): + """Computes ROUGE-N f1 score of two text collections of sentences. + + Source: https://www.microsoft.com/en-us/research/publication/ + rouge-a-package-for-automatic-evaluation-of-summaries/ + + Args: + eval_sentences: The sentences that have been picked by the summarizer + ref_sentences: The sentences from the reference set + n: Size of ngram. Defaults to 2. 
+ + Returns: + f1 score for ROUGE-N + """ + + f1_scores = [] + for eval_sentence, ref_sentence in zip(eval_sentences, ref_sentences): + eval_ngrams = _get_ngrams(n, eval_sentence) + ref_ngrams = _get_ngrams(n, ref_sentence) + ref_count = len(ref_ngrams) + eval_count = len(eval_ngrams) + + # Gets the overlapping ngrams between evaluated and reference + overlapping_ngrams = eval_ngrams.intersection(ref_ngrams) + overlapping_count = len(overlapping_ngrams) + + # Handle edge case. This isn't mathematically correct, but it's good enough + if eval_count == 0: + precision = 0.0 + else: + precision = overlapping_count / eval_count + + if ref_count == 0: + recall = 0.0 + else: + recall = overlapping_count / ref_count + + f1_scores.append(2.0 * ((precision * recall) / (precision + recall + 1e-8))) + + # return overlapping_count / reference_count + return np.mean(f1_scores, dtype=np.float32) + + +def rouge_2_fscore(predictions, labels, **unused_kwargs): + """ROUGE-2 F1 score computation between labels and predictions. + + This is an approximate ROUGE scoring method since we do not glue word pieces + or decode the ids and tokenize the output. + + Args: + predictions: tensor, model predictions + labels: tensor, gold output. + + Returns: + rouge2_fscore: approx rouge-2 f1 score. + """ + + outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) + # Convert the outputs and labels to a [batch_size, input_length] tensor. + outputs = tf.squeeze(outputs, axis=[-1, -2]) + labels = tf.squeeze(labels, axis=[-1, -2]) + rouge_2_f_score = tf.py_func(rouge_n, (outputs, labels), tf.float32) + return rouge_2_f_score, tf.constant(1.0) diff --git a/tensor2tensor/utils/rouge_test.py b/tensor2tensor/utils/rouge_test.py new file mode 100644 index 000000000..c2e3ca37b --- /dev/null +++ b/tensor2tensor/utils/rouge_test.py @@ -0,0 +1,117 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
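The ROUGE-N computation above reduces to n-gram set overlap between each hypothesis/reference pair. A small worked example with hypothetical token IDs, assuming the module is importable as `tensor2tensor.utils.rouge`:

```python
import numpy as np
from tensor2tensor.utils import rouge

hyps = np.array([[1, 2, 3, 4]])  # one hypothesis, as token IDs
refs = np.array([[1, 2, 3, 5]])  # one reference
# Bigrams: hypothesis {(1,2),(2,3),(3,4)}, reference {(1,2),(2,3),(3,5)};
# 2 of 3 overlap, so precision = recall = 2/3 and F1 is roughly 0.667.
print(rouge.rouge_n(hyps, refs, n=2))  # ~0.6666666
```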
+ +"""Tests for Rouge metric.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np +from tensor2tensor.utils import rouge + +import tensorflow.compat.v1 as tf + + +class TestRouge2Metric(tf.test.TestCase): + """Tests the rouge-2 metric.""" + + def testRouge2Identical(self): + hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0], + [1, 2, 3, 4, 5, 1, 6, 8, 7]]) + references = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0], + [1, 2, 3, 4, 5, 1, 6, 8, 7]]) + self.assertAllClose(rouge.rouge_n(hypotheses, references), 1.0, atol=1e-03) + + def testRouge2Disjoint(self): + hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0], + [1, 2, 3, 4, 5, 1, 6, 8, 7]]) + references = np.array([[8, 9, 10, 11, 12, 13, 14, 15, 16, 17], + [9, 10, 11, 12, 13, 14, 15, 16, 17, 0]]) + self.assertEqual(rouge.rouge_n(hypotheses, references), 0.0) + + def testRouge2PartialOverlap(self): + hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0], + [1, 2, 3, 4, 5, 1, 6, 8, 7]]) + references = np.array([[1, 9, 2, 3, 4, 5, 1, 10, 6, 7], + [1, 9, 2, 3, 4, 5, 1, 10, 6, 7]]) + self.assertAllClose(rouge.rouge_n(hypotheses, references), 0.53, atol=1e-03) + + +class TestRougeLMetric(tf.test.TestCase): + """Tests the rouge-l metric.""" + + def testRougeLIdentical(self): + hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0], + [1, 2, 3, 4, 5, 1, 6, 8, 7]]) + references = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0], + [1, 2, 3, 4, 5, 1, 6, 8, 7]]) + self.assertAllClose( + rouge.rouge_l_sentence_level(hypotheses, references), 1.0, atol=1e-03) + + def testRougeLDisjoint(self): + hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0], + [1, 2, 3, 4, 5, 1, 6, 8, 7]]) + references = np.array([[8, 9, 10, 11, 12, 13, 14, 15, 16, 17], + [9, 10, 11, 12, 13, 14, 15, 16, 17, 0]]) + self.assertEqual(rouge.rouge_l_sentence_level(hypotheses, references), 0.0) + + def testRougeLPartialOverlap(self): + hypotheses = np.array([[1, 2, 3, 4, 5, 1, 6, 7, 0], + [1, 2, 3, 4, 5, 1, 6, 8, 7]]) + references = np.array([[1, 9, 2, 3, 4, 5, 1, 10, 6, 7], + [1, 9, 2, 3, 4, 5, 1, 10, 6, 7]]) + self.assertAllClose( + rouge.rouge_l_sentence_level(hypotheses, references), 0.837, atol=1e-03) + + +class TestRougeMetricsE2E(tf.test.TestCase): + """Tests the rouge metrics end-to-end.""" + + def testRouge2MetricE2E(self): + vocab_size = 4 + batch_size = 12 + seq_length = 12 + predictions = tf.one_hot( + np.random.randint(vocab_size, size=(batch_size, seq_length, 1, 1)), + depth=4, + dtype=tf.float32) + targets = np.random.randint(4, size=(12, 12, 1, 1)) + with self.test_session() as session: + scores, _ = rouge.rouge_2_fscore(predictions, + tf.constant(targets, dtype=tf.int32)) + a = tf.reduce_mean(scores) + session.run(tf.global_variables_initializer()) + session.run(a) + + def testRougeLMetricE2E(self): + vocab_size = 4 + batch_size = 12 + seq_length = 12 + predictions = tf.one_hot( + np.random.randint(vocab_size, size=(batch_size, seq_length, 1, 1)), + depth=4, + dtype=tf.float32) + targets = np.random.randint(4, size=(12, 12, 1, 1)) + with self.test_session() as session: + scores, _ = rouge.rouge_l_fscore( + predictions, + tf.constant(targets, dtype=tf.int32)) + a = tf.reduce_mean(scores) + session.run(tf.global_variables_initializer()) + session.run(a) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/utils/sari_hook.py b/tensor2tensor/utils/sari_hook.py new file mode 100644 index 000000000..b992a1ae4 --- /dev/null +++ b/tensor2tensor/utils/sari_hook.py @@ -0,0 +1,252 @@ +# coding=utf-8 
+# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""SARI score for evaluating paraphrasing and other text generation models. + +The score is introduced in the following paper: + + Optimizing Statistical Machine Translation for Text Simplification + Wei Xu, Courtney Napoles, Ellie Pavlick, Quanze Chen and Chris Callison-Burch + In Transactions of the Association for Computational Linguistics (TACL) 2015 + http://cs.jhu.edu/~napoles/res/tacl2016-optimizing.pdf + +This implementation has two differences with the GitHub [1] implementation: + (1) Define 0/0=1 instead of 0 to give higher scores for predictions that match + a target exactly. + (2) Fix an alleged bug [2] in the deletion score computation. + +[1] https://github.com/cocoxu/simplification/blob/master/SARI.py + (commit 0210f15) +[2] https://github.com/cocoxu/simplification/issues/6 +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +import numpy as np +import tensorflow.compat.v1 as tf + +# The paper that intoduces the SARI score uses only the precision of the deleted +# tokens (i.e. beta=0). To give more emphasis on recall, you may set, e.g., +# beta=1. +BETA_FOR_SARI_DELETION_F_MEASURE = 0 + + +def _get_ngram_counter(ids, n): + """Get a Counter with the ngrams of the given ID list. + + Args: + ids: np.array or a list corresponding to a single sentence + n: n-gram size + + Returns: + collections.Counter with ID tuples as keys and 1s as values. + """ + # Remove zero IDs used to pad the sequence. + ids = [token_id for token_id in ids if token_id != 0] + ngram_list = [tuple(ids[i:i + n]) for i in range(len(ids) + 1 - n)] + ngrams = set(ngram_list) + counts = collections.Counter() + for ngram in ngrams: + counts[ngram] = 1 + return counts + + +def _get_fbeta_score(true_positives, selected, relevant, beta=1): + """Compute Fbeta score. + + Args: + true_positives: Number of true positive ngrams. + selected: Number of selected ngrams. + relevant: Number of relevant ngrams. + beta: 0 gives precision only, 1 gives F1 score, and Inf gives recall only. + + Returns: + Fbeta score. + """ + precision = 1 + if selected > 0: + precision = true_positives / selected + if beta == 0: + return precision + recall = 1 + if relevant > 0: + recall = true_positives / relevant + if precision > 0 and recall > 0: + beta2 = beta * beta + return (1 + beta2) * precision * recall / (beta2 * precision + recall) + else: + return 0 + + +def get_addition_score(source_counts, prediction_counts, target_counts): + """Compute the addition score (Equation 4 in the paper).""" + added_to_prediction_counts = prediction_counts - source_counts + true_positives = sum((added_to_prediction_counts & target_counts).values()) + selected = sum(added_to_prediction_counts.values()) + # Note that in the paper the summation is done over all the ngrams in the + # output rather than the ngrams in the following set difference. 
Since the + # former does not make as much sense we compute the latter, which is also done + # in the GitHub implementation. + relevant = sum((target_counts - source_counts).values()) + return _get_fbeta_score(true_positives, selected, relevant) + + +def get_keep_score(source_counts, prediction_counts, target_counts): + """Compute the keep score (Equation 5 in the paper).""" + source_and_prediction_counts = source_counts & prediction_counts + source_and_target_counts = source_counts & target_counts + true_positives = sum((source_and_prediction_counts & + source_and_target_counts).values()) + selected = sum(source_and_prediction_counts.values()) + relevant = sum(source_and_target_counts.values()) + return _get_fbeta_score(true_positives, selected, relevant) + + +def get_deletion_score(source_counts, prediction_counts, target_counts, beta=0): + """Compute the deletion score (Equation 6 in the paper).""" + source_not_prediction_counts = source_counts - prediction_counts + source_not_target_counts = source_counts - target_counts + true_positives = sum((source_not_prediction_counts & + source_not_target_counts).values()) + selected = sum(source_not_prediction_counts.values()) + relevant = sum(source_not_target_counts.values()) + return _get_fbeta_score(true_positives, selected, relevant, beta=beta) + + +def get_sari_score(source_ids, prediction_ids, list_of_targets, + max_gram_size=4, beta_for_deletion=0): + """Compute the SARI score for a single prediction and one or more targets. + + Args: + source_ids: a list / np.array of SentencePiece IDs + prediction_ids: a list / np.array of SentencePiece IDs + list_of_targets: a list of target ID lists / np.arrays + max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams, + bigrams, and trigrams) + beta_for_deletion: beta for deletion F score. + + Returns: + the SARI score and its three components: add, keep, and deletion scores + """ + addition_scores = [] + keep_scores = [] + deletion_scores = [] + for n in range(1, max_gram_size + 1): + source_counts = _get_ngram_counter(source_ids, n) + prediction_counts = _get_ngram_counter(prediction_ids, n) + # All ngrams in the targets with count 1. + target_counts = collections.Counter() + # All ngrams in the targets with count r/num_targets, where r is the number + # of targets where the ngram occurs. + weighted_target_counts = collections.Counter() + num_nonempty_targets = 0 + for target_ids_i in list_of_targets: + target_counts_i = _get_ngram_counter(target_ids_i, n) + if target_counts_i: + weighted_target_counts += target_counts_i + num_nonempty_targets += 1 + for gram in weighted_target_counts.keys(): + weighted_target_counts[gram] /= num_nonempty_targets + target_counts[gram] = 1 + keep_scores.append(get_keep_score(source_counts, prediction_counts, + weighted_target_counts)) + deletion_scores.append(get_deletion_score(source_counts, prediction_counts, + weighted_target_counts, + beta_for_deletion)) + addition_scores.append(get_addition_score(source_counts, prediction_counts, + target_counts)) + + avg_keep_score = sum(keep_scores) / max_gram_size + avg_addition_score = sum(addition_scores) / max_gram_size + avg_deletion_score = sum(deletion_scores) / max_gram_size + sari = (avg_keep_score + avg_addition_score + avg_deletion_score) / 3.0 + return sari, avg_keep_score, avg_addition_score, avg_deletion_score + + +def get_sari(source_ids, prediction_ids, target_ids, max_gram_size=4): + """Computes the SARI scores from the given source, prediction and targets. 
+ + Args: + source_ids: A 2D tf.Tensor of size (batch_size , sequence_length) + prediction_ids: A 2D tf.Tensor of size (batch_size, sequence_length) + target_ids: A 3D tf.Tensor of size (batch_size, number_of_targets, + sequence_length) + max_gram_size: int. largest n-gram size we care about (e.g. 3 for unigrams, + bigrams, and trigrams) + + Returns: + A 4-tuple of 1D float Tensors of size (batch_size) for the SARI score and + the keep, addition and deletion scores. + """ + + def get_sari_numpy(source_ids, prediction_ids, target_ids): + """Iterate over elements in the batch and call the SARI function.""" + sari_scores = [] + keep_scores = [] + add_scores = [] + deletion_scores = [] + # Iterate over elements in the batch. + for source_ids_i, prediction_ids_i, target_ids_i in zip( + source_ids, prediction_ids, target_ids): + sari, keep, add, deletion = get_sari_score( + source_ids_i, prediction_ids_i, target_ids_i, max_gram_size, + BETA_FOR_SARI_DELETION_F_MEASURE) + sari_scores.append(sari) + keep_scores.append(keep) + add_scores.append(add) + deletion_scores.append(deletion) + return (np.asarray(sari_scores), np.asarray(keep_scores), + np.asarray(add_scores), np.asarray(deletion_scores)) + + sari, keep, add, deletion = tf.py_func( + get_sari_numpy, + [source_ids, prediction_ids, target_ids], + [tf.float64, tf.float64, tf.float64, tf.float64]) + return sari, keep, add, deletion + + +def sari_score(predictions, labels, features, **unused_kwargs): + """Computes the SARI scores from the given source, prediction and targets. + + An approximate SARI scoring method since we do not glue word pieces or + decode the ids and tokenize the output. By default, we use ngram order of 4. + Also, this does not have beam search. + + Args: + predictions: tensor, model predictions. + labels: tensor, gold output. + features: dict, containing inputs. + + Returns: + sari: int, approx sari score + """ + if "inputs" not in features: + raise ValueError("sari_score requires inputs feature") + + # Convert the inputs and outputs to a [batch_size, sequence_length] tensor. + inputs = tf.squeeze(features["inputs"], axis=[-1, -2]) + outputs = tf.to_int32(tf.argmax(predictions, axis=-1)) + outputs = tf.squeeze(outputs, axis=[-1, -2]) + + # Convert the labels to a [batch_size, 1, sequence_length] tensor. + labels = tf.squeeze(labels, axis=[-1, -2]) + labels = tf.expand_dims(labels, axis=1) + + score, _, _, _ = get_sari(inputs, outputs, labels) + return score, tf.constant(1.0) diff --git a/tensor2tensor/utils/sari_hook_test.py b/tensor2tensor/utils/sari_hook_test.py new file mode 100644 index 000000000..caff0a07e --- /dev/null +++ b/tensor2tensor/utils/sari_hook_test.py @@ -0,0 +1,147 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
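The three component scores above (keep, addition, deletion) are computed per n-gram order, averaged over orders 1..max_gram_size, and then averaged together. A quick sanity check, consistent with the tests that follow and using hypothetical token IDs: when the prediction matches both the source and the single reference, every component is 1 and so is SARI.

```python
from tensor2tensor.utils import sari_hook

source = [3, 1, 4, 1, 5]
prediction = list(source)
references = [list(source)]
sari, keep, add, deletion = sari_hook.get_sari_score(
    source, prediction, references, max_gram_size=4, beta_for_deletion=0)
print(sari, keep, add, deletion)  # 1.0 for all four values
```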
+ +"""Tests for tensor2tensor.utils.sari_hook.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections + +import numpy as np +from tensor2tensor.utils import sari_hook +import tensorflow.compat.v1 as tf + + +class SariHookTest(tf.test.TestCase): + + def setUp(self): + """Sets up inputs and references from the paper's test cases.""" + self.input_sentence = ( + "About 95 species are currently accepted .".split()) + self.references = [ + "About 95 species are currently known .".split(), + "About 95 species are now accepted .".split(), + "95 species are now accepted .".split(), + ] + + def testSariSent1(self): + """Test case 1 from SARI-paper. + + The score is slightly different from what is reported in the paper (0.2683) + since the authors' code seems to contain a bug in the keep recall score + computation. + """ + output = "About 95 you now get in ." .split() + score, _, _, _ = sari_hook.get_sari_score(self.input_sentence, output, + self.references) + self.assertAlmostEqual(0.2695360, score) + + def testSariSent2(self): + """Test case 2 from SARI-paper.""" + output = "About 95 species are now agreed .".split() + score, _, _, _ = sari_hook.get_sari_score(self.input_sentence, output, + self.references) + self.assertAlmostEqual(0.6170966, score) + + def testSariSent3(self): + """Test case 3 from SARI-paper.""" + output = "About 95 species are currently agreed .".split() + score, _, _, _ = sari_hook.get_sari_score(self.input_sentence, output, + self.references) + self.assertAlmostEqual(0.5088682, score) + + def testMatchingSentences(self): + """If input=output=reference, the score should be 1.""" + input_sentence = [3, 1, 4, 1, 5, 9, 2, 6, 5] + output = input_sentence + references = [input_sentence] + score, _, _, _ = sari_hook.get_sari_score(input_sentence, output, + references) + self.assertEqual(1, score) + + def testMatchingOutputAndReference(self): + """If output=reference, the score should be 1.""" + input_sentence = [3, 1, 4, 1, 5, 9, 2, 6, 5] + output = [3, 1, 4, 1, 80, 70] + references = [output] + score, _, _, _ = sari_hook.get_sari_score(input_sentence, output, + references) + self.assertEqual(1, score) + + def testMatchingSentencesWithRepetitions(self): + """Token frequencies should not matter if we only consider unigrams.""" + input_sentence = [3, 1, 4] + output = [3, 3, 1, 1, 1, 4] + references = [[3, 3, 3, 1, 1, 4, 4]] + score, _, _, _ = sari_hook.get_sari_score(input_sentence, output, + references, max_gram_size=1) + self.assertEqual(1, score) + + def testKeepScore(self): + """Toy example where Input='1 2', Output='2', References=['1 2', 1'].""" + # Unigram counts. + source_counts = collections.Counter({1: 1, 2: 1}) + prediction_counts = collections.Counter({2: 1}) + target_counts = collections.Counter({1: 1, 2: 0.5}) + score = sari_hook.get_keep_score(source_counts, prediction_counts, + target_counts) + self.assertAlmostEqual(6.0/15, score) + + def testDeletionScore(self): + """Toy example where Input='1 2', Output='1 2', References=['1'].""" + # Unigram counts. + source_counts = collections.Counter({1: 1, 2: 1}) + prediction_counts = collections.Counter({1: 1, 2: 1}) + target_counts = collections.Counter({1: 1}) + # Output doesn't drop any (incorrect) tokens from the input so precision + # should be 1, but since '2' is not dropped, recall should be 0. Thus we + # should have F1=0 and F0=precision=1. 
+ f1_score = sari_hook.get_deletion_score(source_counts, prediction_counts, + target_counts, beta=1) + self.assertEqual(0, f1_score) + f0_score = sari_hook.get_deletion_score(source_counts, prediction_counts, + target_counts, beta=0) + self.assertEqual(1, f0_score) + + def testIdsWithZeros(self): + """Zeros should be ignored.""" + input_sentence = [3, 1, 4, 0, 0, 0] + output = [3, 1, 4] + references = [[3, 1, 4, 0, 0, 0, 0, 0]] + score, _, _, _ = sari_hook.get_sari_score(input_sentence, output, + references) + self.assertEqual(1, score) + + def testSariScoreE2E(self): + """Tests the SARI metrics end-to-end.""" + predictions = np.random.randint(4, size=(12, 12, 1, 1, 12)) + targets = np.random.randint(4, size=(12, 12, 1, 1)) + inputs = np.random.randint(4, size=(12, 12, 1, 1)) + with self.test_session() as session: + scores, _ = sari_hook.sari_score( + predictions=tf.constant(predictions, dtype=tf.int32), + labels=tf.constant(targets, dtype=tf.int32), + features={ + "inputs": tf.constant(inputs, dtype=tf.int32), + }) + a = tf.reduce_mean(scores) + session.run(tf.global_variables_initializer()) + session.run(a) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/utils/scheduled_sampling.py b/tensor2tensor/utils/scheduled_sampling.py new file mode 100644 index 000000000..8f556bd86 --- /dev/null +++ b/tensor2tensor/utils/scheduled_sampling.py @@ -0,0 +1,279 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Scheduled Sampling. + +This module implemented scheduled sampling as described in (Bengio et al, 2015). +The entry points are two functions, + +`sequential_scheduled_sampling_for_t2tmodel()`: + scheduled sampling adapted to instances of T2TModel. + +`sequential_scheduled_sampling()`: + raw implementation of scheduled sampling. May be used independent of T2T. + +**WARNING** This code is VERY slow. Its runtime is at least O(n^2) for +sequences of length n. For models with self-attention, its runtime is O(n^3). + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import copy + +from tensor2tensor.layers import common_layers +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +from tensorflow.python.ops import inplace_ops # pylint: disable=g-direct-tensorflow-import + + +def sequential_scheduled_sampling_for_t2tmodel(t2tmodel, features): + """Schedule Sampling for T2TModels. + + Args: + t2tmodel: T2TModel instance. + features: {str: Tensor}. Input features. + + Returns: + ss_logits: [batch_size, seq_len, 1, 1, vocab_size]. + losses_dict: {str: scalar Tensor}. Losses to minimize. 
+ """ + targets = features["targets"] + targets_size = common_layers.shape_list(targets) + batch_size = targets_size[0] + seq_len = targets_size[1] + targets = tf.reshape(targets, [batch_size, seq_len]) + + adapter = ScheduledSamplingAdapter(t2tmodel, features) + ss_tokens, ss_logits, losses_dict = sequential_scheduled_sampling( + infer_fn=adapter.infer_fn, + mix_fn=adapter.mix_fn, + loss_fn=adapter.loss_fn, + targets=targets) + + _ = ss_tokens # unused. + targets_vocab_size = t2tmodel.problem_hparams.vocab_size["targets"] + ss_logits = tf.reshape(ss_logits, + [batch_size, seq_len, 1, 1, targets_vocab_size]) + + return ss_logits, losses_dict + + +def sequential_scheduled_sampling(infer_fn, mix_fn, loss_fn, targets): + """Scheduled Sampling. + + Args: + infer_fn: Function. Computes logits for all timesteps. + mix_fn: Function. Mixes gold and sample tokens. + loss_fn: Function. Computes loss between gold tokens and logits. + targets: Tensor of shape [batch_size, seq_len]. Gold tokens. + + Returns: + ss_tokens: Tensor of shape [batch_size, seq_len]. Scheduled sampling tokens. + ss_logits: Tensor of shape [batch_size, seq_len, vocab_size]. Logits for + next token when conditioning on ss_tokens. + losses_dict: {str: scalar Tensor}. Losses to optimize. + """ + targets_shape = common_layers.shape_list(targets) + batch_size = targets_shape[0] + seq_len = targets_shape[1] + + if not targets.shape.is_fully_defined(): + # TODO(duckworthd): When running on GPU, I get the following error. Solve + # it to enable use on other devices. + # + # Cannot use 'Identity_186' as input to + # 'transformer/parallel_0_7/transformer/transformer/symbol_modality_16282_512/shared/convert_gradient_to_tensor_HBc3xYw22Mw' + # because 'Identity_186' is in a while loop. + + raise ValueError( + "The following code only works on TPU. As targets.shape isn't fully " + "defined, I am assuming you are using a different device.") + + def cond_fn(i, ss_tokens): + """True if i < seq_len.""" + _ = ss_tokens + return i < seq_len + + def body_fn(i, ss_tokens): + """Constructs conditioning tokens for scheduled sampling.""" + # next_token_logits depends on timesteps 0...i-1. + # + # [batch_size, seq_len] -> [batch_size, seq_len, vocab_size] + ss_tokens_logits = infer_fn(ss_tokens) + + # Same as 'next_token_logits = ss_tokens_logits[:, i, :]'. + vocab_size = common_layers.shape_list(ss_tokens_logits)[2] + next_token_logits = tf.slice( + ss_tokens_logits, begin=[0, i, 0], size=[batch_size, 1, vocab_size]) + next_token_logits = tf.squeeze(next_token_logits, axis=[1]) + + # [batch_size, vocab_size] -> [batch_size] + sampled_next_tokens = _sample_next_tokens(next_token_logits) + + # Same as 'gold_next_tokens = targets[:, i]'. + gold_next_tokens = tf.slice(targets, begin=[0, i], size=[batch_size, 1]) + gold_next_tokens = tf.squeeze(gold_next_tokens, axis=[1]) + + next_tokens = mix_fn(gold_next_tokens, sampled_next_tokens) + ss_tokens = _update_timestep(ss_tokens, timestep=i, values=next_tokens) + + return i+1, tf.stop_gradient(ss_tokens) + + # tf.while_loop() over all timesteps. Generate scheduled sampling tokens. + i = 0 + ss_tokens = tf.zeros([batch_size, seq_len], dtype=tf.int32) + i, ss_tokens = tf.while_loop(cond_fn, body_fn, [i, ss_tokens]) + + ss_logits = infer_fn(ss_tokens) + return ss_tokens, ss_logits, loss_fn(targets, ss_logits) + + +def _mix_tokens(p_sample, gold_targets, sampled_targets): + """Interleave sampled and gold tokens randomly. + + Args: + p_sample: float in [0, 1]. Probability a token will come from + 'sampled_targets'. 
0 means all-gold, 1 means all-sampled. + gold_targets: Tensor. Gold token IDs. + sampled_targets: Tensor. Sampled token IDs. Same shape as 'gold_targets'. + + Returns: + Tensor of same shape as 'gold_targets' containing a mix of tokens from + 'gold_targets' and 'sampled_targets'. + """ + targets_shape = common_layers.shape_list(sampled_targets) + return tf.where( + tf.less(tf.random_uniform(targets_shape), p_sample), + sampled_targets, gold_targets) + + +def _sample_next_tokens(logits): + """Sample tokens for next timestep.""" + batch_size = common_layers.shape_list(logits)[0] + next_tokens = tf.random.categorical(logits, 1) + next_tokens = tf.cast(next_tokens, tf.int32) + next_tokens = tf.reshape(next_tokens, [batch_size]) + return next_tokens + + +def _update_timestep(x, timestep, values): + """Set x[:, timestep] = values. + + This operation is **NOT** differentiable. + + Args: + x: Tensor of shape [batch_size, seq_len, ...] + timestep: int or scalar Tensor. Index to update in x. + values: Tensor of shape [batch_size, ...]. New values for x[:, i]. + + Returns: + Copy of 'x' after setting x[:, timestep] = values. + """ + perm = range(x.shape.ndims) + perm[0], perm[1] = perm[1], perm[0] + x = tf.transpose(x, perm) + x = inplace_ops.alias_inplace_update(x, timestep, values) + x = tf.transpose(x, perm) + return x + + +def inverse_decay_mix_prob(warmup_schedule_name, p_max, num_warmup_steps): + """Interpolate from 0.001 to 'p_max' over 'num_warmup_steps'.""" + warmup_schedule_fn = { + "exp": common_layers.inverse_exp_decay, + "linear": common_layers.inverse_lin_decay, + "sigmoid": common_layers.inverse_sigmoid_decay, + }[warmup_schedule_name] + return p_max * warmup_schedule_fn(num_warmup_steps, min_value=0.001) + + +class ScheduledSamplingAdapter(object): + """Adapts T2TModel for sequential_scheduled_sampling().""" + + def __init__(self, t2tmodel, features): + self._t2tmodel = t2tmodel + self._features = features + + hparams = self._t2tmodel.hparams + assert hparams.mode == tf_estimator.ModeKeys.TRAIN, hparams.mode + + def infer_fn(self, partial_targets): + """Computes logits for all timesteps. + + Args: + partial_targets: [batch_size, seq_len]. Targets to condition on. + + Returns: + next_token_logits: [batch_size, seq_len, vocab_size] + """ + batch_size, seq_len = common_layers.shape_list(partial_targets) + partial_targets = tf.reshape(partial_targets, [batch_size, seq_len, 1, 1]) + features = copy.copy(self._features) + features["targets"] = partial_targets + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + transformed_features = self._t2tmodel.bottom(features) + + with tf.variable_scope("body"): + body_outputs, losses = self._t2tmodel._normalize_body_output( # pylint: disable=protected-access + self._t2tmodel.body(transformed_features)) + assert losses == {"extra": 0.0}, ( + "Auxiliary losses are not propagated in this code. 
%s" + % (losses,)) + + logits = self._t2tmodel.top(body_outputs, features) + + vocab_size = self._t2tmodel.problem_hparams.vocab_size["targets"] + logits = tf.reshape(logits, [batch_size, seq_len, vocab_size]) + return logits + + def mix_fn(self, gold_tokens, sampled_tokens): + """Mixes gold and sampled tokens randomly.""" + hparams = self._t2tmodel.hparams + p_sample = inverse_decay_mix_prob( + hparams.scheduled_sampling_warmup_schedule, + hparams.scheduled_sampling_gold_mixin_prob, + hparams.scheduled_sampling_warmup_steps) + return _mix_tokens( + p_sample=p_sample, + gold_targets=gold_tokens, + sampled_targets=sampled_tokens) + + def loss_fn(self, targets, logits): + """Constructs loss dict. + + Args: + targets: [batch_size, seq_len] + logits: [batch_size, seq_len, vocab_size] + + Returns: + {str: Tensor of shape []}. Losses. + """ + batch_size, seq_len, vocab_size = common_layers.shape_list(logits) + targets = tf.reshape(targets, [batch_size, seq_len, 1, 1]) + logits = tf.reshape(logits, [batch_size, seq_len, 1, 1, vocab_size]) + features = copy.copy(self._features) + features["targets"] = targets + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + losses = { + "training": self._t2tmodel.loss(logits, features), + } + + return losses diff --git a/tensor2tensor/utils/t2t_model.py b/tensor2tensor/utils/t2t_model.py index 80c06e347..995fb5982 100644 --- a/tensor2tensor/utils/t2t_model.py +++ b/tensor2tensor/utils/t2t_model.py @@ -1,4 +1,5 @@ -# Copyright 2017 Google Inc. +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -17,78 +18,757 @@ from __future__ import division from __future__ import print_function +import collections +import contextlib +import copy +import functools +import math +import os import time - -# Dependency imports - import six -from six.moves import xrange # pylint: disable=redefined-builtin +from tensor2tensor.data_generators import multi_problem +from tensor2tensor.data_generators import text_encoder +from tensor2tensor.data_generators.problem import problem_hparams_to_features +from tensor2tensor.layers import common_layers +from tensor2tensor.layers import modalities +from tensor2tensor.layers.common_attention import mixed_precision_is_enabled from tensor2tensor.utils import beam_search +from tensor2tensor.utils import contrib +from tensor2tensor.utils import decoding from tensor2tensor.utils import expert_utils as eu -from tensor2tensor.utils import modality +from tensor2tensor.utils import hparams_lib +from tensor2tensor.utils import learning_rate +from tensor2tensor.utils import metrics +from tensor2tensor.utils import mlperf_log +from tensor2tensor.utils import optimize +from tensor2tensor.utils import quantization +from tensor2tensor.utils import registry +from tensor2tensor.utils import scheduled_sampling + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +from tensorflow.python.layers import base +from tensorflow.python.ops import inplace_ops +from tensorflow.python.ops import variable_scope +from tensorflow.python.util import tf_inspect as inspect + +_no_problem_err_str = ( + "The default implementation of %s requires that the " + "model be used with a Problem. If using a Problem, augment the " + "hparams object with trainer_lib.add_problem_hparams. 
If not, " + "override %s.") +_no_problem_err = ( + lambda method_name: _no_problem_err_str % (method_name, method_name)) + + +def _flatten_dict(original_dict): + """Flatten dict of dicts into a single dict with appropriate prefixes. + + Handles only 2 levels of nesting in the original dict. + + Args: + original_dict: Dict which may contain one or more dicts. + Returns: + flat_dict: Dict without any nesting. Any dicts in the original dict have + their keys as prefixes in the new dict. + Raises: + ValueError if the original dict has more than two levels of nesting. + """ + flat_dict = {} + for key, value in original_dict.items(): + if isinstance(value, dict): + for name, tensor in value.items(): + if isinstance(tensor, dict): + raise ValueError("flatten_dict only handles 2 levels of nesting.") + flat_key = "__" + key + "_" + name + flat_dict[flat_key] = tensor + else: + flat_dict[key] = value -import tensorflow as tf + return flat_dict -def _with_timing(fn, msg): +def _unflatten_dict(flat_dict, prefixes): + """Returns a dict of dicts if any prefixes match keys in the flat dict. - def fn_with_timing(*args, **kwargs): - start_time = time.time() - res = fn(*args, **kwargs) - tf.logging.info("Doing %s took %.3f sec." % (msg, time.time() - start_time)) - return res + The function handles the case where the prefix may not be a dict. - return fn_with_timing + Args: + flat_dict: A dict without any nesting. + prefixes: A list of strings which may have been dicts in the + original structure. - -class T2TModel(object): + """ + original_dict = {} + for key, value in flat_dict.items(): + prefix_found = False + for prefix in prefixes: + full_prefix = "__" + prefix + "_" + if key.startswith(full_prefix): + # Add a dict to the original dict with key=prefix + if prefix not in original_dict: + original_dict[prefix] = {} + original_dict[prefix][key[len(full_prefix):]] = value + prefix_found = True + break + if not prefix_found: + # No key matched a prefix in the for loop. + original_dict[key] = value + + return original_dict + + +class T2TModel(base.Layer): """Abstract base class for models. - Subclassess generally only need to override `build_model`. + `T2TModel` has three typical usages: + + 1. Estimator: The method `make_estimator_model_fn` builds a `model_fn` for + the tf.Estimator workflow of training, evaluation, and prediction. + It performs the method `call`, which performs the core computation, + followed by `estimator_spec_train`, `estimator_spec_eval`, or + `estimator_spec_predict` depending on the tf.Estimator mode. + 2. Layer: The method `call` enables `T2TModel` to be used a callable by + itself. It calls the following methods: + + * `bottom`, which transforms features according to `problem_hparams`' input + and target `Modality`s; + * `body`, which takes features and performs the core model computation to + return output and any auxiliary loss terms; + * `top`, which takes features and the body output, and transforms them + according to `problem_hparams`' input and target `Modality`s to return + the final logits; + * `loss`, which takes the logits, forms any missing training loss, and sums + all loss terms. + 3. Inference: The method `infer` enables `T2TModel` to make sequence + predictions by itself. + + Subclasses generally only need to override `body`. """ + REGISTERED_NAME = None # Updated on registration. 
def __init__(self, hparams, - problem_hparams, - problem_idx=0, + mode=tf_estimator.ModeKeys.TRAIN, + problem_hparams=None, data_parallelism=None, - ps_devices=None): - """Create a T2TModel. + decode_hparams=None, + **kwargs): + """Creates a T2TModel. Args: - hparams: a hyperparameters object. - problem_hparams: a hyperparameters object. - problem_idx: an integer. - data_parallelism: a expert_utils.parallelism - (specifies devices for data parallelism). - ps_devices: a list of devices to be used for experts - - Returns: - a T2TModel + hparams: HParams, model hyperparameters. + mode: tf.estimator.ModeKeys, the execution mode. + problem_hparams: HParams, hyperparameters for the + Problem. If provided here or in hparams.problem_hparams, the model will + automatically determine bottom, top, and loss methods. If not provided, + calling the model will only invoke body. + data_parallelism: a expert_utils.Parallelism object, + specifies devices for data parallelism. + decode_hparams: a hyperparameter object with decoding parameters. + See decoding.decode_hparams. + **kwargs: arguments to pass to base.Layer constructor. """ - if data_parallelism is None: - data_parallelism = eu.Parallelism([""]) - if ps_devices is None: - ps_devices = [""] - self._hparams = hparams - self._data_parallelism = data_parallelism - self._num_datashards = data_parallelism.n - self._ps_devices = ps_devices + # Determine name first: use registered name if possible, class name else. + default_name = registry.default_name(type(self)) + name = self.REGISTERED_NAME or default_name + super(T2TModel, self).__init__( + trainable=mode == tf_estimator.ModeKeys.TRAIN, name=name, **kwargs) + + if not problem_hparams and hasattr(hparams, "problem_hparams"): + problem_hparams = hparams.problem_hparams self._problem_hparams = problem_hparams - self._problem_idx = problem_idx + + # Setup hparams + hparams = hparams_lib.copy_hparams(hparams) + if self._problem_hparams and hparams.shared_embedding_and_softmax_weights: + # If vocabularies differ, unset shared_embedding_and_softmax_weights. + input_vocab_size = self._problem_hparams.vocab_size.get("inputs") + target_vocab_size = self._problem_hparams.vocab_size.get("targets") + if input_vocab_size is not None and hasattr(hparams, "vocab_divisor"): + input_vocab_size += (-input_vocab_size) % hparams.vocab_divisor + if target_vocab_size is not None and hasattr(hparams, "vocab_divisor"): + target_vocab_size += (-target_vocab_size) % hparams.vocab_divisor + if (input_vocab_size is not None and target_vocab_size is not None and + input_vocab_size != target_vocab_size): + log_info("Unsetting shared_embedding_and_softmax_weights.") + hparams.shared_embedding_and_softmax_weights = 0 + + if hparams.hidden_size: + hidden_size = hparams.hidden_size + else: + hidden_size = 1024 + mlperf_log.transformer_print( + key=mlperf_log.MODEL_HP_EMBEDDING_SHARED_WEIGHTS, + value={ + "vocab_size": target_vocab_size, + "hidden_size": hidden_size + }, + hparams=hparams) + + if self._problem_hparams: + for feature_name, modality in six.iteritems( + self._problem_hparams.modality): + # If prepend mode, set weights_fn to appropriately handle it. 
+ if (modality in (modalities.ModalityType.CTC_SYMBOL, + modalities.ModalityType.IDENTITY_SYMBOL, + modalities.ModalityType.SYMBOL, + modalities.ModalityType.SYMBOL_ONE_HOT)): + if (hparams.prepend_mode == "prepend_inputs_full_attention" or + (hparams.prepend_mode == "prepend_inputs_masked_attention" and + mode != tf_estimator.ModeKeys.TRAIN)): + weights_fn = common_layers.weights_prepend_inputs_to_targets + hparams.weights_fn[feature_name] = weights_fn + + self._original_hparams = hparams + self.set_mode(mode) + + self._decode_hparams = hparams_lib.copy_hparams( + decode_hparams or decoding.decode_hparams()) + self._data_parallelism = data_parallelism or eu.Parallelism([""]) + self._num_datashards = self._data_parallelism.n + self._ps_devices = self._data_parallelism.ps_devices + self._eager_var_store = create_eager_var_store() + if not common_layers.is_xla_compiled(): + self.summarize_hparams() + self._variable_scopes = {} + + def _add_variable_scope(self, key, vs): + if key not in self._variable_scopes: + self._variable_scopes[key] = vs + + def summarize_hparams(self): + def create_hparams_summary(hparams, name): + hparams_strs = [tf.convert_to_tensor([k, str(v)]) + for k, v in hparams.values().items()] + tf.summary.text(name, tf.cast(tf.stack(hparams_strs), tf.string)) + + create_hparams_summary(self._hparams, "%s_hparams" % self.name) + if self._problem_hparams: + create_hparams_summary(self._problem_hparams, + "%s_problem_hparams" % self.name) + + # Replace the two methods below in order to add custom SessionRunHooks to + # the training procedure. + @staticmethod + def train_hooks(hook_context): + return [] + + @staticmethod + def eval_hooks(hook_context): + return [] + + @property + def hparams(self): + return self._hparams + + @property + def problem_hparams(self): + return self._problem_hparams + + @property + def is_training(self): + return self._hparams.mode == tf_estimator.ModeKeys.TRAIN + + @property + def is_predicting(self): + return self._hparams.mode == tf_estimator.ModeKeys.PREDICT @property def has_input(self): - return self._input_modality + if self._problem_hparams: + return "inputs" in self._problem_hparams.modality + else: + return True + + @property + def _custom_getter(self): + if self.hparams.weight_dtype == "bfloat16": + if self.hparams.optimizer != "Adafactor": + raise NotImplementedError( + "weight_dtype=bfloat16 only implemented with Adafactor optimizer") + activation_dtype = tf.float32 + if self.hparams.activation_dtype == "bfloat16": + activation_dtype = tf.bfloat16 + return quantization.EighthPowerEncoding().custom_getter( + activation_dtype=activation_dtype) + elif self.hparams.activation_dtype == "bfloat16": + return quantization.bfloat16_activations_var_getter + elif mixed_precision_is_enabled(hparams=self.hparams): + return quantization.float16_activations_var_getter + else: + return None + + @property + def _target_modality_is_real(self): + """Whether the target modality is real-valued.""" + vocab_size = self._problem_hparams.vocab_size["targets"] + if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % self._hparams.vocab_divisor + modality = self._problem_hparams.modality["targets"] + modality_name = self._hparams.name.get( + "targets", + modalities.get_name(modality))(self._hparams, vocab_size) + return modality_name.startswith("real") + + def call(self, inputs, **kwargs): + del kwargs + features = inputs + set_custom_getter_compose(self._custom_getter) + tf.get_variable_scope().set_initializer( + 
optimize.get_variable_initializer(self.hparams)) + with self._eager_var_store.as_default(): + self._fill_problem_hparams_features(features) + summarize_features(features, num_shards=self._num_datashards) + sharded_features = self._shard_features(features) + sharded_logits, losses = self.model_fn_sharded(sharded_features) + if isinstance(sharded_logits, dict): + concat_logits = {} + for k, v in six.iteritems(sharded_logits): + concat_logits[k] = tf.concat(v, 0) + return concat_logits, losses + else: + return tf.concat(sharded_logits, 0), losses + + @staticmethod + def has_symmetric_shards(model_name): + # model_fn is sharded symmetrically unless the model overrides body_sharded + # method to manually control the sharding. + model_cls = registry.model(model_name) + return not model_cls.use_body_sharded() + + @staticmethod + def use_body_sharded(): + return False + + def body_sharded(self, sharded_features): + raise NotImplementedError("Models that wish to manually control sharding, " + "e.g. MoE models, should override body_sharded " + "and set use_body_sharded to True.") + + def model_fn_sharded(self, sharded_features): + """Estimator model_fn sharded along batch dimension. + + Args: + sharded_features: {str: [Tensor]}. Features sharded along batch dimension. + Each list is the same length (== number of shards). + + Returns: + sharded_logits: [Tensor]. Logits for each shard of examples. + losses: {str: 0-D Tensor}. Loss averaged across shards. + """ + dp = self._data_parallelism + + # [{str: Tensor}]. Transpose of 'sharded_features'. + datashard_to_features = self._to_features_per_datashard(sharded_features) + if self.use_body_sharded(): + if self.hparams.scheduled_sampling_prob > 0.0: + raise NotImplementedError( + "Scheduled sampling for non-sharded body only.") + + # MoE models override body_sharded + transformed_features = dp(self.bottom, datashard_to_features) + body_out = self.body_sharded( + self._to_single_features_dict(transformed_features)) + body_out, losses = self._normalize_body_output(body_out) + if "training" in losses: + log_info("Skipping T2TModel top and loss because training loss " + "returned from body") + sharded_logits = body_out + else: + if isinstance(body_out, dict): + sharded_logits = collections.OrderedDict() + sharded_losses = collections.OrderedDict() + for k, v in sorted(six.iteritems(body_out)): + sharded_logits[k] = dp(self.top, v, datashard_to_features) + sharded_losses[k] = dp(self.loss, sharded_logits[k], + datashard_to_features) + training_loss_dict = average_sharded_losses([({ + "training": l + } for l in loss) for loss in sharded_losses.values()]) + losses.update(training_loss_dict) + else: + sharded_logits = dp(self.top, body_out, datashard_to_features) + sharded_losses = dp(self.loss, sharded_logits, datashard_to_features) + if isinstance(sharded_losses, tuple): + nums, dens = sharded_losses + sharded_losses = zip(nums, dens) + training_loss_dict = average_sharded_losses([{ + "training": loss + } for loss in sharded_losses]) + losses.update(training_loss_dict) + else: + sharded_logits, sharded_losses = dp(self.model_fn, datashard_to_features) + sharded_logits, sharded_losses = dp( + self.maybe_scheduled_sampling, + datashard_to_features, sharded_logits, sharded_losses) + if isinstance(sharded_logits[0], dict): + temp_dict = {k: [] for k, _ in six.iteritems(sharded_logits[0])} + for k, _ in six.iteritems(sharded_logits[0]): + for l in sharded_logits: + temp_dict[k].append(l[k]) + sharded_logits = temp_dict + losses = 
average_sharded_losses(sharded_losses) + + return sharded_logits, losses + + def model_fn(self, features): + with tf.variable_scope(tf.get_variable_scope(), use_resource=True) as vs: + self._add_variable_scope("model_fn", vs) + transformed_features = self.bottom(features) + + if self.hparams.activation_dtype == "bfloat16": + for k, v in sorted(six.iteritems(transformed_features)): + if v.dtype == tf.float32: + transformed_features[k] = tf.cast(v, tf.bfloat16) + + with tf.variable_scope("body") as body_vs: + self._add_variable_scope("body", body_vs) + log_info("Building model body") + body_out = self.body(transformed_features) + output, losses = self._normalize_body_output(body_out) + + if "training" in losses: + log_info("Skipping T2TModel top and loss because training loss " + "returned from body") + logits = output + else: + logits = self.top(output, features) + losses["training"] = 0.0 + if (self._hparams.mode != tf_estimator.ModeKeys.PREDICT and + self._hparams.mode != "attack"): + losses["training"] = self.loss(logits, features) + + return logits, losses + + def bottom(self, features): + """Transforms features to feed into body. + + Args: + features: dict of str to Tensor. Typically it is the preprocessed data + batch after Problem's preprocess_example(). + + Returns: + transformed_features: dict of same key-value pairs as features. The value + Tensors are newly transformed. + """ + if not self._problem_hparams: + log_warn("Without a Problem, T2TModel.bottom is a passthrough.") + return features + + transformed_features = collections.OrderedDict() + all_previous_modalities = [] + target_modality = _create_target_modality(self._problem_hparams.modality) + + # Transform features via its corresponding modality. + for feature_name, modality in sorted( + six.iteritems(self._problem_hparams.modality)): + if feature_name not in features: + tf.logging.warning("Missing feature %s - ignoring." % feature_name) + continue + vocab_size = self._problem_hparams.vocab_size[feature_name] + if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % self._hparams.vocab_divisor + modality_name = self._hparams.name.get( + feature_name, + modalities.get_name(modality))(self._hparams, vocab_size) + # Use if-else clauses to preserve behavior of previous changes: namely, + # the variable scope name for the targets feature if there is only one + # target modality; and to reuse variable scopes for only input modalities. + if feature_name in target_modality: + if len(target_modality) > 1: + variable_scope_name = "%s/%s" % (modality_name, feature_name) + else: + variable_scope_name = modality_name + bottom = self._hparams.bottom.get( + feature_name, + modalities.get_targets_bottom(modality)) + # TODO(aidangomez): share variables? 
+ with tf.variable_scope(variable_scope_name) as vs: + self._add_variable_scope(variable_scope_name, vs) + log_info("Transforming feature '%s' with %s.targets_bottom", + feature_name, + modality_name) + transformed_features[feature_name] = bottom(features[feature_name], + self._hparams, + vocab_size) + else: + bottom = self._hparams.bottom.get(feature_name, + modalities.get_bottom(modality)) + do_reuse = modality_name in all_previous_modalities + with tf.variable_scope(modality_name, reuse=do_reuse) as vs: + self._add_variable_scope(modality_name, vs) + log_info("Transforming feature '%s' with %s.bottom", + feature_name, + modality_name) + transformed_features[feature_name] = bottom(features[feature_name], + self._hparams, + vocab_size) + all_previous_modalities.append(modality_name) + + for key in features: + if key not in transformed_features: + # For features without a modality, we pass them along as is + transformed_features[key] = features[key] + else: + # Other features get passed along with the "raw" suffix + transformed_features[key + "_raw"] = features[key] + + return transformed_features + + def body(self, features): + """Computes the targets' pre-logit activations given transformed inputs. + + Most `T2TModel` subclasses will override this method. + + Args: + features: dict of str to Tensor, where each Tensor has shape [batch_size, + ..., hidden_size]. It typically contains keys `inputs` and `targets`. + + Returns: + output: Tensor of pre-logit activations with shape [batch_size, ..., + hidden_size]. + losses: Either single loss as a scalar, a list, a Tensor (to be averaged), + or a dictionary of losses. If losses is a dictionary with the key + "training", losses["training"] is considered the final training + loss and output is considered logits; self.top and self.loss will + be skipped. + """ + raise NotImplementedError("Abstract Method") + + def _top_single(self, body_output, feature_name, features): + if not self._problem_hparams: + log_warn("Without a Problem, T2TModel.top is a passthrough.") + return body_output + + modality = self._problem_hparams.modality[feature_name] + vocab_size = self._problem_hparams.vocab_size[feature_name] + if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % self._hparams.vocab_divisor + name = self._hparams.name.get( + feature_name, + modalities.get_name(modality))(self._hparams, vocab_size) + with tf.variable_scope(name) as tm_vs: + self._add_variable_scope(tm_vs.name, tm_vs) + log_info("Transforming body output with %s.top", name) + top = self._hparams.top.get(feature_name, modalities.get_top(modality)) + top_is_pointwise = getattr(top, "pointwise", False) + last_only = (top_is_pointwise and + self.hparams.mode == tf_estimator.ModeKeys.PREDICT and + not self.hparams.force_full_predict) + if not last_only: + logits = top(body_output, features.get("targets"), + self._hparams, vocab_size) + else: + # Take body outputs for the last position only, and targets too. 
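+        # (During TPU while-loop decoding, features["decode_loop_step"] gives
+        # the position currently being generated; otherwise the final position
+        # is simply sliced out.)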
+ if "decode_loop_step" not in features: + last_position_body_output = tf.expand_dims( + body_output[:, -1, :, :], axis=[1]) + last_position_targets = tf.expand_dims( + features["targets"][:, -1, :, :], axis=[1]) + else: + body_output_shape = body_output.shape.as_list() + last_position_body_output = tf.slice( + body_output, [0, features["decode_loop_step"][0], 0, 0], [ + body_output_shape[0], 1, body_output_shape[2], + body_output_shape[3] + ]) + target_shape = features["targets"].shape.as_list() + last_position_targets = tf.slice( + features["targets"], [0, features["decode_loop_step"][0], 0, 0], + [target_shape[0], 1, target_shape[2], target_shape[3]]) + logits = top(last_position_body_output, last_position_targets, + self._hparams, vocab_size) + return logits + + def top(self, body_output, features): + """Computes logits given body output and features. + + Args: + body_output: dict of str to Tensor, comprising one key-value pair for each + target. Each value denotes the target's pre-logit activations. + Alternatively, it may be a single Tensor denoting the pre-logits for + that target. + features: dict of str to Tensor. Typically it is the preprocessed data + batch after Problem's preprocess_example(). + + Returns: + logits: dict of str to Tensor, denoting each logits for each target; or + a single Tensor denoting the logits for that target. + When targets are generated at training time: + logits == { + "self_generated_targets": + "logits": + } + """ + if isinstance(body_output, dict): + logits = {} + for k, v in six.iteritems(body_output): + # TODO(aidangomez): share variables here? + with tf.variable_scope(k) as top_vs: + self._add_variable_scope("top_%s" % k, top_vs) + logits[k] = self._top_single(v, k, features) + return logits + else: + return self._top_single(body_output, "targets", features) + + def _loss_single(self, logits, feature_name, feature, weights=None): + # The current bfloat16 version still uses float32 for most parts of backward + # propagation to keep model quality, so cast back before computing the loss + # value. + if not self._problem_hparams: + log_warn(_no_problem_err("loss")) + return (tf.constant(0., dtype=tf.float32), + tf.constant(1., dtype=tf.float32)) + + # Calculate loss contribution. + modality = self._problem_hparams.modality[feature_name] + vocab_size = self._problem_hparams.vocab_size[feature_name] + if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % self._hparams.vocab_divisor + loss = self._hparams.loss.get(feature_name, modalities.get_loss(modality)) + targets_weights_fn = self._hparams.weights_fn.get( + "targets", modalities.get_weights_fn(modality)) + if weights is None: + loss_num, loss_den = loss(logits, feature, self._hparams, vocab_size, + weights_fn=targets_weights_fn) + else: + + def weights_fn(labels): + """Per-token weights for loss.""" + # Use target_weights_fn() given by modality as well as explicitly given + # weights. + modality_weights = targets_weights_fn(labels) + + # Broadcast 'weights' along minor dimensions (TF's default is major). + explicit_weights = weights + if len(explicit_weights.shape) < len(modality_weights.shape): + explicit_weights = common_layers.expand_squeeze_to_nd( + weights, modality_weights.shape.ndims) + + return explicit_weights * modality_weights + + # Ensure that target.modality_loss() supports "weights_fn" keyword + # argument. If it doesn't and "weights" is specified, raise an exception. 
+ argument_names = inspect.getargspec(loss).args + if "weights_fn" not in argument_names: + raise ValueError( + "Explicit 'weights' given but default loss for modality doesn't " + "support 'weights_fn' keyword argument: %s.loss(%s)." % + (modality, ", ".join(argument_names))) + + loss_num, loss_den = loss( + logits, feature, self._hparams, vocab_size, weights_fn=weights_fn) + + loss_num *= self._problem_hparams.loss_multiplier + + if hasattr(self.hparams, "problem") and hasattr( + self.hparams.problem, "task_list"): + if weights is not None: + raise NotImplementedError("weights not yet implemented in " + "multitask setting.") + loss_num, loss_den, summaries = multi_problem.aggregate_task_losses( + self.hparams, + self._problem_hparams, + logits, + feature_name, + feature + ) + + for key, val in summaries: + tf.summary.scalar(key, val) + + return loss_num, loss_den + + def loss(self, logits, features): + if isinstance(logits, dict): + losses = {} + for k, v in six.iteritems(logits): + losses[k] = self._loss_single( + v, + k, + features[k], + weights=features.get(k + "_mask")) + + n, d = losses[k] + if common_layers.should_generate_summaries(): + tf.summary.scalar(k + "_loss", n / d) + tf.summary.scalar(k + "_loss_num", n) + tf.summary.scalar(k + "_loss_den", d) + if getattr(self.hparams, "visualize_logits_histogram", False): + hist = tf.summary.histogram + hist(k + "_predict", tf.argmax(tf.squeeze(v), axis=-1)) + hist(k + "_targets", features[k]) + + return tf.add_n([n / d for n, d in losses.values()]) + else: + return self._loss_single( + logits, + "targets", + features["targets"], + weights=features.get("targets_mask")) + + def optimize(self, loss, num_async_replicas=1, use_tpu=False, variables=None): + """Return a training op minimizing loss.""" + lr = learning_rate.learning_rate_schedule(self.hparams) + if num_async_replicas > 1: + log_info("Dividing learning rate by num_async_replicas: %d", + num_async_replicas) + lr /= math.sqrt(float(num_async_replicas)) + train_op = optimize.optimize( + loss, lr, self.hparams, use_tpu=use_tpu, variables=variables) + return train_op + + def set_mode(self, mode): + """Set hparams with the given mode.""" + log_info("Setting T2TModel mode to '%s'", mode) + hparams = hparams_lib.copy_hparams(self._original_hparams) + hparams.add_hparam("mode", mode) + # When not in training mode, set all forms of dropout to zero. + if mode != tf_estimator.ModeKeys.TRAIN: + for key in hparams.values(): + if key.endswith("dropout") or key == "label_smoothing": + log_info("Setting hparams.%s to 0.0", key) + setattr(hparams, key, 0.0) + self._hparams = hparams + + def prepare_features_for_infer(self, features): + """Called before inference to allow adding infer-specific features.""" + pass + + def eval_autoregressive(self, features=None, decode_length=50): + """Autoregressive eval. + + Quadratic time in decode_length. + + Args: + features: an map of string to `Tensor` + decode_length: an integer. How many additional timesteps to decode. + + Returns: + logits: `Tensor` + losses: a dictionary: {loss-name (string): floating point `Scalar`}. + Contains a single key "training". 
+ """ + results = self._slow_greedy_infer(features, decode_length=decode_length) + return results["logits"], results["losses"] + + def _fill_problem_hparams_features(self, features): + if features is not None: + for k, v in sorted( + six.iteritems(problem_hparams_to_features(self._problem_hparams))): + if k not in features: + features[k] = tf.constant(v, name=k) def infer(self, features=None, decode_length=50, beam_size=1, top_beams=1, - last_position_only=False, - alpha=0.0): + alpha=0.0, + use_tpu=False): """A inference method. Quadratic time in decode_length. @@ -98,93 +778,378 @@ def infer(self, decode_length: an integer. How many additional timesteps to decode. beam_size: number of beams. top_beams: an integer. How many of the beams to return. - last_position_only: a boolean, speed-up by computing last position only. alpha: Float that controls the length penalty. larger the alpha, stronger - the preference for slonger translations. + the preference for longer translations. + use_tpu: bool, whether to build the inference graph for TPU. Returns: - samples: an integer `Tensor`. + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if top_beams == 1 or + [batch_size, top_beams, <= decode_length] + "scores": decoding log probs from the beam search, + None if using greedy decoding (beam_size=1) + } + if slow greedy decoding is used then the dict will also contain { + "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size]. + "losses": a dictionary: {loss-name (string): floating point `Scalar` + } """ - if beam_size == 1: - tf.logging.info("Greedy Decoding") - return self._greedy_infer(features, decode_length, last_position_only) - else: - tf.logging.info("Beam Decoding with beam size %d" % beam_size) - return self._beam_decode(features, decode_length, beam_size, top_beams, - last_position_only, alpha) - - def _beam_decode(self, features, decode_length, beam_size, top_beams, - last_position_only, alpha): + set_custom_getter_compose(self._custom_getter) + with self._eager_var_store.as_default(): + # TODO(rsepassi): Make decoding work with real-valued model outputs + # (i.e. if the target modality is RealModality). + self.prepare_features_for_infer(features) + if not self.has_input and beam_size > 1: + log_warn("Beam searching for a model with no inputs.") + if not self.has_input and self.hparams.sampling_method != "random": + log_warn("Non-random sampling for a model with no inputs.") + self._fill_problem_hparams_features(features) + + if self._problem_hparams: + target_modality = self._problem_hparams.modality["targets"] + if (target_modality == modalities.ModalityType.CLASS_LABEL or + self._problem_hparams.get("regression_targets")): + # No use to run beam-search for classification or regression. + beam_size = 1 + if beam_size == 1: + log_info("Greedy Decoding") + results = self._greedy_infer(features, decode_length, use_tpu) + else: + log_info("Beam Decoding with beam size %d" % beam_size) + results = self._beam_decode(features, decode_length, beam_size, + top_beams, alpha, use_tpu) + + return results + + def _beam_decode(self, + features, + decode_length, + beam_size, + top_beams, + alpha, + use_tpu=False): """Beam search decoding. + Models should ideally implement a more efficient version of this function. + Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. beam_size: number of beams. top_beams: an integer. How many of the beams to return. 
- last_position_only: a boolean, speed-up by computing last position only. alpha: Float that controls the length penalty. larger the alpha, stronger - the preference for slonger translations. + the preference for longer translations. + use_tpu: A bool, whether to do beam decode on TPU. Returns: samples: an integer `Tensor`. Top samples from the beam search """ + return self._beam_decode_slow(features, decode_length, beam_size, top_beams, + alpha, use_tpu) + + def _beam_decode_slow(self, features, decode_length, beam_size, top_beams, + alpha, use_tpu=False): + """Slow version of Beam search decoding. + + Quadratic time in decode_length. + + Args: + features: an map of string to `Tensor` + decode_length: an integer. How many additional timesteps to decode. + beam_size: number of beams. + top_beams: an integer. How many of the beams to return. + alpha: Float that controls the length penalty. larger the alpha, stronger + the preference for longer translations. + use_tpu: A bool, whether to do slow beam decode on TPU. + + Returns: + samples: an integer `Tensor`. Top samples from the beam search. + + Raises: + NotImplementedError: If use_tpu is set to true. + """ + batch_size = common_layers.shape_list(features["inputs"])[0] - def symbols_to_logits_fn(ids): + def symbols_to_logits_fn(ids, i=None): """Go from ids to logits.""" ids = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3) ids = tf.pad(ids[:, 1:], [[0, 0], [0, 1], [0, 0], [0, 0]]) + if "partial_targets" in features: + pt = features["partial_targets"] + pt_length = common_layers.shape_list(pt)[1] + pt = tf.tile(pt, [1, beam_size]) + pt = tf.reshape(pt, [batch_size * beam_size, pt_length, 1, 1]) + ids = tf.concat([pt, ids], axis=1) features["targets"] = ids + if i is not None: + features["decode_loop_step"] = i self._coverage = None - sharded_logits, _, _ = self.model_fn( - features, False, last_position_only=last_position_only) + logits, _ = self(features) # pylint: disable=not-callable # now self._coverage is a coverage tensor for the first datashard. # it has shape [batch_size] and contains floats between 0 and # source_length. - logits = sharded_logits[0] # Assuming we have one shard. - if last_position_only: - return tf.squeeze(logits, axis=[1, 2, 3]) - current_output_position = tf.shape(ids)[1] - 1 # -1 due to the pad above. + if self._problem_hparams: + modality = self._problem_hparams.modality["targets"] + top = self._hparams.top.get("targets", modalities.get_top(modality)) + if getattr(top, "pointwise", False): + return tf.squeeze(logits, axis=[1, 2, 3]) + # -1 due to the pad above. + current_output_position = common_layers.shape_list(ids)[1] - 1 logits = logits[:, current_output_position, :, :] return tf.squeeze(logits, axis=[1, 2]) - batch_size = tf.shape(features["inputs"])[0] + def _clone_examples_for_beam(old_feature, n): + """Clone each example n times.""" + old_shape = common_layers.shape_list(old_feature) + assert len(old_shape) >= 1 + + # Expand the inputs in to the beam size. + feature = tf.expand_dims(old_feature, 1) + feature = tf.tile(feature, [1, n] + [1] * (len(old_shape) - 1)) + new_shape = common_layers.shape_list(feature) + feature = tf.reshape(feature, + [new_shape[0] * new_shape[1]] + new_shape[2:]) + return feature + initial_ids = tf.zeros([batch_size], dtype=tf.int32) - inputs_old = features["inputs"] - features["inputs"] = tf.expand_dims(features["inputs"], 1) - if len(features["inputs"].shape) < 5: - features["inputs"] = tf.expand_dims(features["inputs"], 4) - # Expand the inputs in to the beam size. 
- features["inputs"] = tf.tile(features["inputs"], [1, beam_size, 1, 1, 1]) - s = tf.shape(features["inputs"]) - features["inputs"] = tf.reshape(features["inputs"], - [s[0] * s[1], s[2], s[3], s[4]]) - - target_modality = self._hparams.problems[self._problem_idx].target_modality - vocab_size = target_modality.targets_dimensionality - # Setting decode length to input length + decode_length - decode_length = tf.shape(features["inputs"])[1] + tf.constant(decode_length) - ids, scores = beam_search.beam_search(symbols_to_logits_fn, initial_ids, - beam_size, decode_length, vocab_size, - alpha) + # Clone select features multiple times to account for beam size. + old_features = {} + for feature_name in ["inputs", "knowledge"]: + if feature_name not in features: + continue + old_features[feature_name] = features[feature_name] + features[feature_name] = _clone_examples_for_beam( + features[feature_name], beam_size) - # Set inputs back to the unexpanded inputs to not to confuse the Estimator! - features["inputs"] = inputs_old + vocab_size = self._problem_hparams.vocab_size["targets"] + if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % self._hparams.vocab_divisor + + # Setting decode length to input length + decode_length + if "partial_targets" not in features: + inputs = features["inputs"] + decode_length = (common_layers.shape_list(inputs)[1] + + features.get("decode_length", decode_length)) + ids, scores, _ = beam_search.beam_search( + symbols_to_logits_fn, + initial_ids, + beam_size, + decode_length, + vocab_size, + alpha, + stop_early=(top_beams == 1), + use_tpu=use_tpu) + + # Set features back to the unexpanded form to not to confuse the + # Estimator! + features.update(old_features) # Return `top_beams` decodings (also remove initial id from the beam search) - return_scores = False # TODO(lukaszkaiser): make it work multi-problem. + # TODO(lukaszkaiser): make it work multi-problem. if top_beams == 1: - if return_scores: - return {"outputs": ids[:, 0, 1:], "scores": scores} - return ids[:, 0, 1:] + samples = ids[:, 0, 1:] else: - if return_scores: - return {"outputs": ids[:, :top_beams, 1:], "scores": scores} - return ids[:, :top_beams, 1:] + samples = ids[:, :top_beams, 1:] - def _greedy_infer(self, features, decode_length, last_position_only): + return {"outputs": samples, "scores": scores} + + def _greedy_infer(self, features, decode_length, use_tpu=False): + """A greedy inference method. + + Models should ideally implement a more efficient version of this function. + + Args: + features: an map of string to `Tensor` + decode_length: an integer. How many additional timesteps to decode. + use_tpu: A bool, whether to build the inference graph for TPU. + + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": None + "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size]. + "losses": a dictionary: {loss-name (string): floating point `Scalar`} + } + """ + if use_tpu: + return self._slow_greedy_infer_tpu(features, decode_length) + return self._slow_greedy_infer(features, decode_length) + + def _slow_greedy_infer_tpu(self, features, decode_length): + """A slow greedy inference method on TPU. + + Quadratic time in decode_length. + + Args: + features: An map of string to `Tensor`. + decode_length: An integer, how many additional timesteps to decode. 
+ + Returns: + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": None + "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size]. + "losses": a dictionary: {loss-name (string): floating point `Scalar`} + } + """ + if not features: + features = {} + inputs_old = None + if "inputs" in features and len(features["inputs"].shape) < 4: + inputs_old = features["inputs"] + features["inputs"] = tf.expand_dims(features["inputs"], 2) + if not self.has_input: + # Prepare partial targets. + # In either features["inputs"] or features["targets"]. + # We force the outputs to begin with these sequences. + partial_targets = features.get("inputs") + if partial_targets is None: + partial_targets = features["targets"] + features["partial_targets"] = tf.to_int64(partial_targets) + # Save the targets in a var and reassign it after the tf.while loop to avoid + # having targets being in a 'while' frame. This ensures targets when used + # in metric functions stays in the same frame as other vars. + targets_old = features.get("targets", None) + + target_modality = self._problem_hparams.modality["targets"] + + def infer_step(i, recent_output, recent_logits, unused_loss): + """Inference step.""" + if not tf.executing_eagerly(): + recent_output.set_shape([None, None, None, 1]) + padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]]) + features["targets"] = padded + # This is inefficient in that it generates samples at all timesteps, + # not just the last one, except if target_modality is pointwise. + features["decode_loop_step"] = i + samples, logits, losses = self.sample(features) + # Concatenate the already-generated recent_output with last timestep + # of the newly-generated samples.z + top = self._hparams.top.get("targets", + modalities.get_top(target_modality)) + if getattr(top, "pointwise", False): + cur_sample = samples[:, -1, :, :] + else: + cur_sample = samples[:, i, :, :] + samples = tf.transpose(recent_output, perm=[1, 0, 2, 3]) + samples = inplace_ops.alias_inplace_update(samples, i, + tf.to_int64(cur_sample)) + samples = tf.transpose(samples, perm=[1, 0, 2, 3]) + if not tf.executing_eagerly(): + samples.set_shape([None, None, None, 1]) + + # Assuming we have one shard for logits. + recent_logits = tf.transpose(recent_logits, perm=[1, 0, 2, 3, 4]) + recent_logits = inplace_ops.alias_inplace_update( + recent_logits, i, tf.squeeze(logits[:, -1:], axis=1)) + logits = tf.transpose(recent_logits, perm=[1, 0, 2, 3, 4]) + loss = sum([l for l in losses.values() if l is not None]) + return i + 1, samples, logits, loss + + # Create an initial output tensor. This will be passed + # to the infer_step, which adds one timestep at every iteration. + if "partial_targets" in features: + initial_output = tf.to_int64(features["partial_targets"]) + while len(initial_output.get_shape().as_list()) < 4: + initial_output = tf.expand_dims(initial_output, 2) + batch_size = common_layers.shape_list(initial_output)[0] + else: + batch_size = common_layers.shape_list(features["inputs"])[0] + initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64) + # Hack: foldl complains when the output shape is less specified than the + # input shape, so we confuse it about the input shape. 
+ initial_output = tf.slice(initial_output, [0, 0, 0, 0], + common_layers.shape_list(initial_output)) + target_modality = self._problem_hparams.modality["targets"] + if (target_modality == modalities.ModalityType.CLASS_LABEL or + self._problem_hparams.get("regression_targets")): + decode_length = 1 + else: + if "partial_targets" in features: + prefix_length = common_layers.shape_list(features["partial_targets"])[1] + else: + prefix_length = common_layers.shape_list(features["inputs"])[1] + decode_length = prefix_length + decode_length + + # Initial values of result, logits and loss. + result = tf.concat( + [initial_output, + tf.zeros([batch_size, decode_length, 1, 1], tf.int64)], + axis=1) + # tensor padded to [batch_size, decode_length, 1, 1, vocab_size] + vocab_size = self._problem_hparams.vocab_size["targets"] + if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % self._hparams.vocab_divisor + logits = tf.zeros((batch_size, decode_length, 1, 1, vocab_size)) + if not tf.executing_eagerly(): + logits.set_shape([None, None, None, None, None]) + loss = 0.0 + + def while_exit_cond(i, result, logits, loss): # pylint: disable=unused-argument + """Exit the loop either if reach decode_length or EOS.""" + not_overflow = i < decode_length + + if self._problem_hparams.stop_at_eos: + + def fn_not_eos(): + # Check if the last predicted element is a EOS + return tf.reduce_any( + tf.not_equal( + tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID)) + + not_eos = tf.cond( + # We only check for early stopping if there is at least 1 element ( + # otherwise not_eos will crash). + tf.not_equal(i, 0), + fn_not_eos, + lambda: True, + ) + + return tf.cond( + tf.equal(batch_size, 1), + # If batch_size == 1, we check EOS for early stopping. + lambda: tf.logical_and(not_overflow, not_eos), + # Else, just wait for max length + lambda: not_overflow) + return not_overflow + + _, result, logits, loss = tf.while_loop( + while_exit_cond, + infer_step, [tf.constant(0), result, logits, loss], + shape_invariants=[ + tf.TensorShape([]), + tf.TensorShape([batch_size, decode_length, 1, 1]), + tf.TensorShape([batch_size, decode_length, 1, 1, vocab_size]), + tf.TensorShape([]), + ], + back_prop=False, + parallel_iterations=1) + if inputs_old is not None: # Restore to not confuse Estimator. + features["inputs"] = inputs_old + # Reassign targets back to the previous value. + if targets_old is not None: + features["targets"] = targets_old + losses = {"training": loss} + if "partial_targets" in features: + partial_target_length = common_layers.shape_list( + features["partial_targets"])[1] + result = tf.slice(result, [0, partial_target_length, 0, 0], + [-1, -1, -1, -1]) + return { + "outputs": result, + "scores": None, + "logits": logits, + "losses": losses, + } + + def _slow_greedy_infer(self, features, decode_length): """A slow greedy inference method. Quadratic time in decode_length. @@ -192,10 +1157,16 @@ def _greedy_infer(self, features, decode_length, last_position_only): Args: features: an map of string to `Tensor` decode_length: an integer. How many additional timesteps to decode. - last_position_only: a boolean, speed-up by computing last position only. Returns: - samples: an integer `Tensor`. + A dict of decoding results { + "outputs": integer `Tensor` of decoded ids of shape + [batch_size, <= decode_length] if beam_size == 1 or + [batch_size, top_beams, <= decode_length] + "scores": None + "logits": `Tensor` of shape [batch_size, time, 1, 1, vocab_size]. 
+ "losses": a dictionary: {loss-name (string): floating point `Scalar`} + } """ if not features: features = {} @@ -203,227 +1174,1179 @@ def _greedy_infer(self, features, decode_length, last_position_only): if "inputs" in features and len(features["inputs"].shape) < 4: inputs_old = features["inputs"] features["inputs"] = tf.expand_dims(features["inputs"], 2) - - def infer_step(recent_output, _): + if not self.has_input: + # Prepare partial targets. + # In either features["inputs"] or features["targets"]. + # We force the outputs to begin with these sequences. + partial_targets = features.get("inputs") + if partial_targets is None: + partial_targets = features["targets"] + features["partial_targets"] = tf.to_int64(partial_targets) + # Save the targets in a var and reassign it after the tf.while loop to avoid + # having targets being in a 'while' frame. This ensures targets when used + # in metric functions stays in the same frame as other vars. + targets_old = features.get("targets", None) + + target_modality = self._problem_hparams.modality["targets"] + + def infer_step(recent_output, recent_logits, unused_loss): """Inference step.""" - recent_output.set_shape([None, None, None, 1]) + if not tf.executing_eagerly(): + if self._target_modality_is_real: + dim = self._problem_hparams.vocab_size["targets"] + if dim is not None and hasattr(self._hparams, "vocab_divisor"): + dim += (-dim) % self._hparams.vocab_divisor + recent_output.set_shape([None, None, None, dim]) + else: + recent_output.set_shape([None, None, None, 1]) padded = tf.pad(recent_output, [[0, 0], [0, 1], [0, 0], [0, 0]]) features["targets"] = padded # This is inefficient in that it generates samples at all timesteps, - # not just the last one, except if last_position_only is set (dangerous). - samples = self.sample(features, last_position_only=last_position_only) + # not just the last one, except if target_modality is pointwise. + samples, logits, losses = self.sample(features) # Concatenate the already-generated recent_output with last timestep # of the newly-generated samples. - if last_position_only: + top = self._hparams.top.get("targets", + modalities.get_top(target_modality)) + if getattr(top, "pointwise", False): cur_sample = samples[:, -1, :, :] else: - cur_sample = samples[:, tf.shape(recent_output)[1], :, :] - cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1)) - samples = tf.concat([recent_output, cur_sample], axis=1) - samples.set_shape([None, None, None, 1]) - return samples + cur_sample = samples[:, + common_layers.shape_list(recent_output)[1], :, :] + if self._target_modality_is_real: + cur_sample = tf.expand_dims(cur_sample, axis=1) + samples = tf.concat([recent_output, cur_sample], axis=1) + else: + cur_sample = tf.to_int64(tf.expand_dims(cur_sample, axis=1)) + samples = tf.concat([recent_output, cur_sample], axis=1) + if not tf.executing_eagerly(): + samples.set_shape([None, None, None, 1]) + + # Assuming we have one shard for logits. + logits = tf.concat([recent_logits, logits[:, -1:]], 1) + loss = sum([l for l in losses.values() if l is not None]) + return samples, logits, loss # Create an initial output tensor. This will be passed # to the infer_step, which adds one timestep at every iteration. 
if "partial_targets" in features: - initial_output = tf.convert_to_tensor(features["partial_targets"]) + initial_output = tf.to_int64(features["partial_targets"]) + while len(initial_output.get_shape().as_list()) < 4: + initial_output = tf.expand_dims(initial_output, 2) + batch_size = common_layers.shape_list(initial_output)[0] else: - batch_size = tf.shape(features["inputs"])[0] - initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64) + batch_size = common_layers.shape_list(features["inputs"])[0] + if self._target_modality_is_real: + dim = self._problem_hparams.vocab_size["targets"] + if dim is not None and hasattr(self._hparams, "vocab_divisor"): + dim += (-dim) % self._hparams.vocab_divisor + initial_output = tf.zeros((batch_size, 0, 1, dim), dtype=tf.float32) + else: + initial_output = tf.zeros((batch_size, 0, 1, 1), dtype=tf.int64) # Hack: foldl complains when the output shape is less specified than the # input shape, so we confuse it about the input shape. initial_output = tf.slice(initial_output, [0, 0, 0, 0], - tf.shape(initial_output)) - if isinstance(self._hparams.problems[self._problem_idx].target_modality, - modality.ClassLabelModality): + common_layers.shape_list(initial_output)) + target_modality = self._problem_hparams.modality["targets"] + if (target_modality == modalities.ModalityType.CLASS_LABEL or + self._problem_hparams.get("regression_targets")): decode_length = 1 else: - decode_length = tf.shape(features["inputs"])[1] + decode_length - result = tf.foldl( - infer_step, - tf.range(decode_length), - initializer=initial_output, + if "partial_targets" in features: + prefix_length = common_layers.shape_list(features["partial_targets"])[1] + else: + prefix_length = common_layers.shape_list(features["inputs"])[1] + decode_length = prefix_length + decode_length + + # Initial values of result, logits and loss. + result = initial_output + vocab_size = self._problem_hparams.vocab_size["targets"] + if vocab_size is not None and hasattr(self._hparams, "vocab_divisor"): + vocab_size += (-vocab_size) % self._hparams.vocab_divisor + if self._target_modality_is_real: + logits = tf.zeros((batch_size, 0, 1, vocab_size)) + logits_shape_inv = [None, None, None, None] + else: + # tensor of shape [batch_size, time, 1, 1, vocab_size] + logits = tf.zeros((batch_size, 0, 1, 1, vocab_size)) + logits_shape_inv = [None, None, None, None, None] + if not tf.executing_eagerly(): + logits.set_shape(logits_shape_inv) + + loss = 0.0 + + def while_exit_cond(result, logits, loss): # pylint: disable=unused-argument + """Exit the loop either if reach decode_length or EOS.""" + length = common_layers.shape_list(result)[1] + + not_overflow = length < decode_length + + if self._problem_hparams.stop_at_eos: + + def fn_not_eos(): + return tf.not_equal( # Check if the last predicted element is a EOS + tf.squeeze(result[:, -1, :, :]), text_encoder.EOS_ID) + + not_eos = tf.cond( + # We only check for early stopping if there is at least 1 element ( + # otherwise not_eos will crash). + tf.not_equal(length, 0), + fn_not_eos, + lambda: True, + ) + + return tf.cond( + tf.equal(batch_size, 1), + # If batch_size == 1, we check EOS for early stopping. 
+ lambda: tf.logical_and(not_overflow, not_eos), + # Else, just wait for max length + lambda: not_overflow) + return not_overflow + + result, logits, loss = tf.while_loop( + while_exit_cond, + infer_step, [result, logits, loss], + shape_invariants=[ + tf.TensorShape([None, None, None, None]), + tf.TensorShape(logits_shape_inv), + tf.TensorShape([]), + ], back_prop=False, parallel_iterations=1) if inputs_old is not None: # Restore to not confuse Estimator. features["inputs"] = inputs_old - return result - - def sample(self, features, last_position_only=False): + # Reassign targets back to the previous value. + if targets_old is not None: + features["targets"] = targets_old + losses = {"training": loss} + if "partial_targets" in features: + partial_target_length = common_layers.shape_list( + features["partial_targets"])[1] + result = tf.slice(result, [0, partial_target_length, 0, 0], + [-1, -1, -1, -1]) + return { + "outputs": result, + "scores": None, + "logits": logits, + "losses": losses, + } + + def sample(self, features): """Run the model and extract samples. Args: features: an map of string to `Tensor`. - last_position_only: a boolean, speed-up by computing last position only. Returns: samples: an integer `Tensor`. + logits: a list of `Tensor`s, one per datashard. + losses: a dictionary: {loss-name (string): floating point `Scalar`}. """ - sharded_logits, _, _ = self.model_fn( - features, False, last_position_only=last_position_only) - if self._hparams.sampling_method == "argmax": - sharded_samples = self._data_parallelism(tf.argmax, sharded_logits, 4) + logits, losses = self(features) # pylint: disable=not-callable + if self._target_modality_is_real: + return logits, logits, losses # Raw numbers returned from real modality. + if self.hparams.sampling_method == "argmax": + samples = tf.argmax(logits, axis=-1) else: - assert self._hparams.sampling_method == "random" + assert self.hparams.sampling_method == "random" - def _multinomial_squeeze(logits): - reshaped_logits = tf.reshape(logits, [-1, tf.shape(logits)[-1]]) + def multinomial_squeeze(logits, temperature=1.0): + logits_shape = common_layers.shape_list(logits) + logits /= tf.reshape(temperature, [-1] + [1] * (len(logits_shape) - 1)) + reshaped_logits = tf.reshape(logits, [-1, logits_shape[-1]]) choices = tf.multinomial(reshaped_logits, 1) - choices = tf.reshape(choices, - tf.shape(logits)[:logits.get_shape().ndims - 1]) + choices = tf.reshape(choices, logits_shape[:-1]) return choices - sharded_samples = self._data_parallelism(_multinomial_squeeze, - sharded_logits) - return tf.concat(sharded_samples, 0) + temperature = features.get("sampling_temp", self.hparams.sampling_temp) + samples = multinomial_squeeze(logits, temperature) + + return samples, logits, losses def _shard_features(self, features): # pylint: disable=missing-docstring - sharded_features = dict() - for k, v in six.iteritems(features): + sharded_features = {} + for k, v in sorted(six.iteritems(features)): v = tf.convert_to_tensor(v) - if not v.shape.as_list(): + v_shape = common_layers.shape_list(v) + if not v_shape: v = tf.expand_dims(v, axis=-1) - v = tf.tile(v, [self._num_datashards]) - sharded_features[k] = self._data_parallelism(tf.identity, - tf.split( - v, self._num_datashards, - 0)) + v_shape = [1] + if v_shape == [1]: + v = tf.tile(v, tf.to_int32([self._num_datashards])) + sharded_features[k] = self._data_parallelism( + tf.identity, tf.split(v, self._num_datashards, 0)) return sharded_features - def model_fn(self, features, train, skip=False, 
last_position_only=False): - """Computes the entire model and produces sharded logits and training loss. + def _to_features_per_datashard(self, features): + datashard_features = [] + assert len(features[list(features.keys())[0]]) == self._num_datashards + for d in range(self._num_datashards): + f = {k: v[d] for k, v in six.iteritems(features)} + datashard_features.append(f) + return datashard_features + + def _to_single_features_dict(self, datashard_features): + assert len(datashard_features) == self._num_datashards + features = collections.defaultdict(list) + for feats in datashard_features: + for k, v in six.iteritems(feats): + features[k].append(v) + return features + + @staticmethod + def get_train_hooks(model_name, hook_context): + model_cls = registry.model(model_name) + return model_cls.train_hooks(hook_context) + + @staticmethod + def get_eval_hooks(model_name, hook_context): + model_cls = registry.model(model_name) + return model_cls.eval_hooks(hook_context) + + @staticmethod + def make_estimator_model_fn(model_name, + hparams, + decode_hparams=None, + use_tpu=False): + model_cls = registry.model(model_name) + + def wrapping_model_fn(features, labels, mode, params=None, config=None): + return model_cls.estimator_model_fn( + hparams, + features, + labels, + mode, + config=config, + params=params, + decode_hparams=decode_hparams, + use_tpu=use_tpu) + + return wrapping_model_fn + + @classmethod + def estimator_model_fn(cls, + hparams, + features, + labels, + mode, + config=None, + params=None, + decode_hparams=None, + use_tpu=False): + """Model fn for Estimator. Args: - features: A dictionary of feature name to tensor. - train: a boolean `Scalar` (whether we are in training mode). - skip: a boolean, if we're just dummy-calling and actually skip this model - (but we need to create variables to not confuse distributed training). - last_position_only: a boolean, compute logits for only the last position. + hparams: HParams, model hyperparameters + features: dict + labels: Tensor + mode: tf.estimator.ModeKeys + config: RunConfig, possibly with data_parallelism attribute + params: dict, may include batch_size, use_tpu + decode_hparams: HParams, used when mode == PREDICT. + use_tpu: A bool, whether to build the inference graph for TPU. Returns: - sharded_logits: a list of `Tensor`s, one per datashard. - training_loss: a floating point `Scalar`. 
+ TPUEstimatorSpec if use tpu else EstimatorSpec """ - start_time = time.time() - dp = self._data_parallelism + if mode == tf_estimator.ModeKeys.TRAIN: + create_dummy_vars() + hparams = hparams_lib.copy_hparams(hparams) + + # Instantiate model + data_parallelism = None + if not use_tpu and config: + data_parallelism = config.data_parallelism + reuse = tf.get_variable_scope().reuse + model = cls( + hparams, + mode, + data_parallelism=data_parallelism, + decode_hparams=decode_hparams, + _reuse=reuse) + + # PREDICT mode + if mode == tf_estimator.ModeKeys.PREDICT: + if use_tpu: + inputs = features.get("inputs") + if inputs is None: + inputs = features.get("targets") + if inputs is None: + inputs = features["infer_targets"] + shape = inputs.get_shape().as_list() + if shape[0] is None: + shape[0] = decode_hparams.batch_size or hparams.batch_size + if shape[1] is None: + shape[1] = hparams.max_input_seq_length or hparams.max_length + inputs.set_shape(shape) + return model.estimator_spec_predict(features, use_tpu=use_tpu) + + # TRAIN and EVAL modes + if hparams.eval_run_autoregressive and mode == tf_estimator.ModeKeys.EVAL: + logits, losses_dict = model.eval_autoregressive(features) + else: + logits, losses_dict = model(features) # pylint: disable=not-callable + + # Support model-generated labels by overriding features["targets"] with + # logits["self_generated_targets"]. + if isinstance(logits, dict) and "self_generated_targets" in logits: + # Overwrite 'features["targets"]' and 'labels' + # by logits["self_generated_targets"]. + tf.logging.info("Replacing targets with model-provided targets.") + features["targets"] = labels = logits.pop("self_generated_targets") + assert list(logits.keys()) == ["logits"], ( + # See "Returns" in the "top" method docstring for the expected + # "logits" format when targets are generated at training time. + "Expect only key 'logits' when there is 'self_generated_targets'. " + "Found {}".format(logits.keys()) + ) + # Recover the original logits tensor from the logits dict. + logits = logits["logits"] # Can be a tf.Tensor or a dict. 
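As a minimal, self-contained sketch of the convention handled above (plain Python dicts stand in for the actual Tensors, and the helper name is illustrative, not part of the library):

```
def unpack_self_generated_targets(logits, features):
  """Mirror of the block above: swap in model-generated targets if present."""
  if isinstance(logits, dict) and "self_generated_targets" in logits:
    features["targets"] = logits.pop("self_generated_targets")
    assert list(logits.keys()) == ["logits"]
    logits = logits["logits"]
  return logits, features

# A model that generates its own targets at training time returns both keys.
logits, features = unpack_self_generated_targets(
    {"self_generated_targets": [[3, 7, 1]], "logits": [[0.2, 0.8]]},
    {"inputs": [[5, 2]]})
assert features["targets"] == [[3, 7, 1]] and logits == [[0.2, 0.8]]
```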
+ + # Set known shapes + if common_layers.is_xla_compiled(): + if isinstance(logits, dict): + for k, v in sorted(six.iteritems(logits)): + if "scalar/" in k: + continue + + shape = v.get_shape().as_list() + if shape[0] is None: + shape[0] = params["batch_size"] + if shape[1] is None: + shape[1] = hparams.max_length + v.set_shape(shape) + else: + shape = logits.get_shape().as_list() + if shape[0] is None: + shape[0] = params["batch_size"] + if shape[1] is None: + shape[1] = hparams.max_length + logits.set_shape(shape) + + assert "training" in losses_dict + + # Attack mode + if mode == "attack": + return logits + + # Summarize losses + model._summarize_losses(losses_dict) # pylint: disable=protected-access + + # Accumulate losses + loss = sum(losses_dict[key] for key in sorted(losses_dict.keys())) + + # EVAL mode + if mode == tf_estimator.ModeKeys.EVAL: + return model.estimator_spec_eval(features, logits, labels, loss, + losses_dict) + + # TRAIN mode + assert mode == tf_estimator.ModeKeys.TRAIN + num_async_replicas = 1 + if config and not use_tpu: + num_async_replicas = config.t2t_device_info["num_async_replicas"] + return model.estimator_spec_train( + loss, num_async_replicas=num_async_replicas, use_tpu=use_tpu) + + def initialize_from_ckpt(self, ckpt_dir): + return initialize_from_ckpt(ckpt_dir=ckpt_dir, hparams=self._hparams) + + def create_train_host_call(self): + return create_host_call(self.hparams.model_dir) + + def create_eval_host_call(self): + eval_dir = os.path.join( + self.hparams.model_dir, + self.hparams.get("eval_dir_name", "eval")) + return create_host_call(eval_dir) + + def estimator_spec_train(self, loss, num_async_replicas=1, use_tpu=False): + """Constructs `tf.estimator.EstimatorSpec` for TRAIN (training) mode.""" + train_op = self.optimize(loss, num_async_replicas=num_async_replicas, + use_tpu=use_tpu) + + if use_tpu: + if self._hparams.warm_start_from: + def scaffold_fn(): + self.initialize_from_ckpt(self._hparams.warm_start_from) + return tf.train.Scaffold() + else: + scaffold_fn = None - sharded_features = self._shard_features(features) + # Note: important to call this before remove_summaries() + if self.hparams.tpu_enable_host_call: + host_call = self.create_train_host_call() + else: + host_call = None - # Construct the model bottom for inputs. - transformed_features = {} - all_previous_modalities = [] + remove_summaries() - for key, input_modality in six.iteritems( - self._problem_hparams.input_modality): - previous_modalities = [ - self._hparams.problems[i].input_modality[key].name - for i in xrange(self._problem_idx) - ] - all_previous_modalities.extend(previous_modalities) - do_reuse = input_modality.name in all_previous_modalities - with tf.variable_scope(input_modality.name, reuse=do_reuse): - transformed_features[key] = input_modality.inputs_bottom_sharded( - sharded_features[key], dp) - all_previous_modalities.append(input_modality.name) - - # Target space id just gets copied to every shard. 
- if "target_space_id" in features: - transformed_features["target_space_id"] = [features["target_space_id"] - ] * self._num_datashards - - # Targets are transformed by the autoregressive part of the modality - previous_tgt_modalities = [ - self._hparams.problems[i].target_modality.name - for i in xrange(self._problem_idx) - ] - all_previous_modalities.extend(previous_tgt_modalities) - - target_modality = self._problem_hparams.target_modality - target_reuse = target_modality.name in previous_tgt_modalities - with tf.variable_scope(target_modality.name, reuse=target_reuse): - transformed_features["targets"] = target_modality.targets_bottom_sharded( - sharded_features["targets"], dp) - - # Construct the model body. - with tf.variable_scope("body", reuse=self._problem_idx > 0): - if skip: - body_outputs, extra_loss = transformed_features["targets"], 0.0 + return contrib.tpu().TPUEstimatorSpec( + tf_estimator.ModeKeys.TRAIN, + loss=loss, + train_op=train_op, + host_call=host_call, + scaffold_fn=scaffold_fn) + else: + if self._hparams.warm_start_from: + self.initialize_from_ckpt(self._hparams.warm_start_from) + + # When loading weights from a pre-trained model, you want to be able to + # load separate weights into the encoder and decoder. + if self._hparams.warm_start_from_second: + self.initialize_from_ckpt(self._hparams.warm_start_from_second) + + return tf_estimator.EstimatorSpec( + tf_estimator.ModeKeys.TRAIN, loss=loss, train_op=train_op) + + def estimator_spec_eval(self, features, logits, labels, loss, losses_dict): + """Constructs `tf.estimator.EstimatorSpec` for EVAL (evaluation) mode.""" + del losses_dict + hparams = self.hparams + + if not hasattr(hparams, "problem"): + raise NotImplementedError(_no_problem_err("estimator_spec_eval")) + + problem = hparams.problem + + if common_layers.is_xla_compiled(): + # Note: important to call this before remove_summaries() + if self.hparams.tpu_enable_host_call: + host_call = self.create_eval_host_call() + else: + host_call = None + + remove_summaries() + + eval_metrics_fn = create_tpu_eval_metrics_fn(problem, hparams) + + batch_size = [feature.shape.as_list()[0] for _, feature + in features.items() if feature.shape.ndims][0] + + # Add batch dimension to all features since tpu requires the batch + # dimension on all tensors. + for name, feature in features.items(): + if not feature.shape.as_list(): + # All features must have a batch dimension + feature = tf.tile(tf.expand_dims(feature, 0), [batch_size]) + features[name] = feature + + eval_metrics_fn_args = dict( + logits=logits, # possibly a dict + labels=labels, + features=features, # dict + ) + + eval_metrics_fn_flat_args = _flatten_dict(eval_metrics_fn_args) + return contrib.tpu().TPUEstimatorSpec( + tf_estimator.ModeKeys.EVAL, + eval_metrics=(eval_metrics_fn, eval_metrics_fn_flat_args), + host_call=host_call, + loss=loss) + else: + task_list = [problem] + if hasattr(problem, "task_list"): + task_list = problem.task_list + + eval_metrics_fns = metrics.create_evaluation_metrics(task_list, hparams) + eval_metrics = {} + for metric_name, metric_fn in six.iteritems(eval_metrics_fns): + if isinstance(logits, dict): + # the key is located in the center of metric_name: "metrics-%s/%s/%s" + k = metric_name.split("/")[1] + if k in logits: + eval_metrics[metric_name] = metric_fn(logits[k], features, + features[k]) + else: + # We do not make it an error because we sometimes run models that + # predict only parts of the targets defined by the Problem class. 
+ # For example, an autoencoder or pure-video model can run on a gym + # problem even if another model is also predicting other things, + # like actions or rewards. + tf.logging.warning("No key %s in logits for evaluation." % k) + else: + eval_metrics[metric_name] = metric_fn(logits, features, + features["targets"]) + if isinstance(logits, dict): + predictions = logits else: - body_outputs, extra_loss = self.model_fn_body_sharded( - transformed_features, train) + predictions = {"predictions": logits} + + evaluation_hooks = [] + # Create a SummarySaverHook + eval_dir = os.path.join( + self.hparams.model_dir, + self.hparams.get("eval_dir_name", "eval")) + eval_summary_hook = tf.train.SummarySaverHook( + save_steps=1, + output_dir=eval_dir, + summary_op=tf.summary.merge_all()) + evaluation_hooks.append(eval_summary_hook) + + evaluation_hooks += problem.eval_hooks(features, logits, hparams) + + return tf_estimator.EstimatorSpec( + tf_estimator.ModeKeys.EVAL, + predictions=predictions, + eval_metric_ops=eval_metrics, + evaluation_hooks=evaluation_hooks, + loss=loss) + + def estimator_spec_predict(self, features, use_tpu=False): + """Constructs `tf.estimator.EstimatorSpec` for PREDICT (inference) mode.""" + decode_hparams = self._decode_hparams + top_beams = decode_hparams.beam_size if decode_hparams.return_beams else 1 + infer_out = self.infer( + features, + beam_size=decode_hparams.beam_size, + top_beams=top_beams, + alpha=decode_hparams.alpha, + decode_length=decode_hparams.extra_length, + use_tpu=use_tpu) + if isinstance(infer_out, dict): + outputs = infer_out["outputs"] + scores = infer_out["scores"] + else: + outputs = infer_out + scores = None + + # Workaround for "ValueError: prediction values must be from the default + # graph" during TPU model exporting. + # TODO(b/130501786): remove tf.identity once default graph mismatch is fixed + if use_tpu: + for name, feature in features.items(): + features[name] = tf.identity(feature) + + inputs = features.get("inputs") + if inputs is None: + inputs = features.get("targets") + + predictions = { + "outputs": outputs, + "scores": scores, + "inputs": inputs, + "targets": features.get("infer_targets"), + } + + # Pass through remaining features + for name, feature in features.items(): + if name not in list(predictions.keys()) + ["infer_targets"]: + if name == "decode_loop_step": + continue + if not feature.shape.as_list(): + # All features must have a batch dimension + batch_size = common_layers.shape_list(outputs)[0] + feature = tf.tile(tf.expand_dims(feature, 0), [batch_size]) + predictions[name] = feature + + _del_dict_non_tensors(predictions) + + export_out = {"outputs": predictions["outputs"]} + if "scores" in predictions: + export_out["scores"] = predictions["scores"] + + if decode_hparams.get("export_extra_infer_outputs"): + for output in decode_hparams.export_extra_infer_outputs.split(","): + export_out[output] = infer_out[output] + + # Necessary to rejoin examples in the correct order with the Cloud ML Engine + # batch prediction API. 
+ if "batch_prediction_key" in predictions: + export_out["batch_prediction_key"] = predictions["batch_prediction_key"] + + export_outputs = { + tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: + tf_estimator.export.PredictOutput(export_out) + } + if use_tpu: + # Note: important to call this before remove_summaries() + if self.hparams.tpu_enable_host_call: + host_call = self.create_eval_host_call() + else: + host_call = None - with tf.variable_scope(target_modality.name, reuse=target_reuse): - if not last_position_only: - sharded_logits, training_loss = (target_modality.targets_top_sharded( - body_outputs, sharded_features["targets"], self._data_parallelism)) + remove_summaries() - training_loss *= self._problem_hparams.loss_multiplier + return contrib.tpu().TPUEstimatorSpec( + tf_estimator.ModeKeys.PREDICT, + predictions=predictions, + host_call=host_call, + export_outputs=export_outputs) + else: + return tf_estimator.EstimatorSpec( + tf_estimator.ModeKeys.PREDICT, + predictions=predictions, + export_outputs=export_outputs) + + def _normalize_body_output(self, body_out): + if isinstance(body_out, tuple): + output, losses = body_out + if isinstance(losses, (list, tuple)): + losses = {"extra": tf.add_n([tf.reduce_mean(l) for l in losses])} + elif isinstance(losses, dict): + pass else: - # Take body outputs for the last position only, and targets too. - # TODO(lukaszkaiser): warning, this doesn't work for all modalities! - last_position_body_outputs = [ - tf.expand_dims(body_shard[:, -1, :, :], axis=[1]) - for body_shard in body_outputs - ] - last_position_targets = [ - tf.expand_dims(target_shard[:, -1:, :, :], axis=[1]) - for target_shard in sharded_features["targets"] - ] - sharded_logits, training_loss = (target_modality.targets_top_sharded( - last_position_body_outputs, last_position_targets, - self._data_parallelism)) + losses = {"extra": tf.reduce_mean(losses)} + else: + output = body_out + losses = {"extra": 0.0} + + return output, losses - training_loss = None + def _summarize_losses(self, losses_dict): + """Adds `tf.summary`s to all terms in the losses dictionary.""" + if common_layers.should_generate_summaries(): + with tf.name_scope("losses"): + for loss_name, loss_val in sorted(losses_dict.items()): + tf.summary.scalar(loss_name, loss_val) - tf.logging.info("This model_fn took %.3f sec." % (time.time() - start_time)) - return sharded_logits, training_loss, extra_loss + def maybe_scheduled_sampling(self, features, logits, losses): + """Scheduled sampling. - def model_fn_body_sharded(self, sharded_features, train): - """Mixture-of-experts models will override this function. + Performs forward inference again with "targets" feature replaced with values + sampled from the model. - Compute model body on all datashards. + This is the identity unless self.hparams.scheduled_sampling_prob > 0 + (default). + + **WARNING**: If hparams.scheduled_sampling_method == "parallel", this is + not a faithful implementation of scheduled sampling. This implementation + samples tokens for timestep t condtioned on gold tokens 1...t-1. A proper + implementation must condition on a mix of gold and sampled tokens. Doing + so is not efficient for models such like Transformer. Args: - sharded_features: map from string to list of Tensors each with shape - [batch, ?, ?, body_input_size] - train: A boolean `Scalar` (whether we are in training mode). + features: {str: Tensor}. Features sharded along batch dimension. + logits: Tensor. Logits for each shard of data. 
+ losses: 0-D Tensor or (num: 0-D Tensor, denom: 0-D Tensor). Loss Tensor Returns: - sharded_body_output: - a list of Tensors, each with shape [batch, O, P, body_output_size] - extra_loss: a Scalar. + new_logits: Tensor. + new_losses: {str: loss} where loss is one of (i) a 0-D Tensor or + (ii) a (num: 0-D Tensor, denom: 0-D Tensor) pair to be used in a + weighted average. """ - with tf.name_scope("model"): - datashard_to_features = [{ - k: v[d] - for k, v in six.iteritems(sharded_features) - } for d in xrange(self._num_datashards)] - output = self._data_parallelism( - _with_timing(self.model_fn_body, "model_fn_body"), - datashard_to_features, train) - if isinstance(output, tuple): - loss = tf.reduce_mean(output[1]) - output = output[0] + hparams = self.hparams + problem_hparams = self._problem_hparams + + # Only do scheduled sampling if requested. + if hparams.scheduled_sampling_prob == 0.0: + return (logits, losses) + + # Only do scheduled sampling on language tasks. + modality = problem_hparams.modality["targets"] + if modality not in [ + modalities.ModalityType.SYMBOL, + modalities.ModalityType.SYMBOL_WEIGHTS_ALL, + modalities.ModalityType.IMAGE + ]: + assert hparams.scheduled_sampling_prob == 0, ( + "Scheduled sampling only applies to ModalityType.(SYMBOL, " + "SYMBOL_WEIGHTS_ALL, IMAGE). Found {modality}. Set " + "hparams.scheduled_sampling_prob == 0.0.").format(modality=modality) + return (logits, losses) + + # Only do scheduled sampling when training. + is_training = (hparams.mode == tf_estimator.ModeKeys.TRAIN) + if not is_training: + tf.logging.info("Running in %s mode. Not using scheduled sampling.", + hparams.mode) + return (logits, losses) + + # Pad vocabulary if vocab size must be evenly divisible by vocab_divisor. + vocab_size = problem_hparams.vocab_size["targets"] + assert vocab_size is not None + assert hparams.vocab_divisor == 1 + + # TODO(duckworthd): Move to scheduled_sampling.py. + def sample(x): + """Multinomial sampling from a n-dimensional tensor.""" + samples = tf.multinomial(tf.reshape(x, [-1, vocab_size]), 1) + reshaped_samples = tf.reshape(samples, common_layers.shape_list(x)[:-1]) + return tf.to_int32(reshaped_samples) + + # TODO(duckworthd): Move to scheduled_sampling.py. + def mix_gold_sampled(gold_targets, + sampled_targets, + mixin_prob, + i, + prev_new_targets): + """Interleave sampled and gold tokens randomly.""" + # Resample each location iid. + should_use_sampled_targets = tf.less( + tf.random_uniform(common_layers.shape_list(sampled_targets)), + mixin_prob) + mixed_targets = tf.where( + should_use_sampled_targets, + sampled_targets, + gold_targets) + + # Reuse sample tokens for earlier timesteps. + new_targets = tf.where( + is_later_timestep(gold_targets, i), + mixed_targets, + prev_new_targets) + return new_targets + + # TODO(duckworthd): Move to scheduled_sampling.py. + def is_later_timestep(x, pass_idx): + """Constructs mask based on timestep.""" + assert x.shape.ndims == 4, x.shape + x_shape = tf.shape(x) + num_timesteps = x_shape[1] + timesteps = tf.range(num_timesteps) + timesteps = tf.reshape(timesteps, [1, num_timesteps, 1, 1]) + # The following is a bit untrue. For images, "num_timesteps" actually + # represents image height, not time. We ignore that fact here. + timesteps = tf.broadcast_to(timesteps, x_shape) + return tf.greater_equal(timesteps, pass_idx) + + # TODO(duckworthd): Move to scheduled_sampling.py. 
+ def parallel_scheduled_sampling_pass( + i, prev_new_targets, features, logits, mixin_prob): + """Generate scheduled sampling results.""" + sampled_targets = sample(logits) + new_targets = mix_gold_sampled(features["targets"], + sampled_targets, + mixin_prob, + i, + prev_new_targets) + new_targets = tf.stop_gradient(new_targets) # Treat new_targets as given. + new_features = copy.copy(features) + new_features["targets"] = new_targets + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + # Compute bottom() for new_targets. + # + # TODO(duckworthd): Only apply bottom to 'new_targets'. + new_transformed_features = self.bottom(new_features) + + # Compute body. + with tf.variable_scope("body"): + new_body_outputs, new_losses = self._normalize_body_output( + self.body(new_transformed_features)) + assert "training" not in new_losses + + # Compute top. + new_logits = self.top(new_body_outputs, new_features) + + # Compute loss. Use original features (== labels). + if (hparams.mode != tf_estimator.ModeKeys.PREDICT and + hparams.mode != "attack"): + new_losses["training"] = self.loss(new_logits, features) + else: + new_losses["training"] = 0.0 + + return new_targets, new_logits, new_losses + + tf.logging.info("Using scheduled sampling.") + tf.logging.info("Warming scheduled sampling up with schedule: %s", + hparams.scheduled_sampling_warmup_schedule) + assert hparams.scheduled_sampling_prob == 1.0, ( + "hparams.scheduled_sampling_prob must be 0 or 1.") + + if hparams.scheduled_sampling_method == "sequential": + tf.logging.info("Using SEQUENTIAL scheduled sampling.") + assert hparams.scheduled_sampling_num_passes == 1, ( + "hparams.scheduled_sampling_num_passes must equal 1 if " + "doing sequential scheduled sampling.") + return scheduled_sampling.sequential_scheduled_sampling_for_t2tmodel( + self, features) + elif hparams.scheduled_sampling_method == "parallel": + tf.logging.info("Using PARALLEL scheduled sampling.") + # TODO(duckworthd): Move this block to scheduled_sampling.py. + + # Gradually increase over a warmup period. Lower numbers mean more gold + # tokens. + mixin_prob = scheduled_sampling.inverse_decay_mix_prob( + hparams.scheduled_sampling_warmup_schedule, + hparams.scheduled_sampling_gold_mixin_prob, + hparams.scheduled_sampling_warmup_steps) + + # Apply scheduled sampling over N passes. The logits from the (n-1)-th + # pass will be mixed with gold tokens for conditioning in the n-th pass. + assert hparams.scheduled_sampling_num_passes > 0, ( + "hparams.scheduled_sampling_num_passes must be > 0 if " + "hparams.scheduled_sampling_prob > 0.0") + new_logits = logits + new_losses = losses + prev_new_targets = features["targets"] + for i in range(hparams.scheduled_sampling_num_passes): + prev_new_targets, new_logits, new_losses = parallel_scheduled_sampling_pass( + i, prev_new_targets, features, new_logits, mixin_prob) + return new_logits, new_losses + else: + raise ValueError( + "Unknown scheduled_sampling_method = %s" % ( + hparams.scheduled_sampling_method,)) + + +def _with_timing(fn, msg, silent=False): + + def fn_with_timing(*args, **kwargs): + start_time = time.time() + res = fn(*args, **kwargs) + if not silent: + log_info("Doing %s took %.3f sec." 
% (msg, time.time() - start_time)) + return res + + return fn_with_timing + + +def create_dummy_vars(): + """Dummy vars for restore to work when not using TPU codepath.""" + var_names = set([v.name for v in tf.global_variables()]) + if "losses_avg/problem_0/total_loss:0" in var_names: + return + with tf.variable_scope("losses_avg"): + with tf.variable_scope("problem_0"): + for var_name in ["total", "extra", "training"]: + tf.get_variable( + "%s_loss" % var_name, initializer=100.0, trainable=False) + with tf.variable_scope("train_stats"): + tf.get_variable("problem_0_steps", initializer=0, trainable=False) + + +# These metrics are implemented with py_funcs and therefore do no work with TPU +TPU_METRIC_BLACKLIST = set([ + metrics.Metrics.APPROX_BLEU, + metrics.Metrics.ROUGE_2_F, + metrics.Metrics.ROUGE_L_F, + metrics.Metrics.IMAGE_SUMMARY, +]) + + +def create_tpu_eval_metrics_fn(problem, model_hparams): + """Create the metrics_fn that TPUEstimatorSpec expects.""" + + def reduce_dimensions(predictions, labels): + """Reduce dimensions for high-dimensional predictions and labels.""" + if len(predictions.get_shape()) > 5: + predictions_shape = common_layers.shape_list(predictions) + predictions = tf.reshape( + predictions, [predictions_shape[0], predictions_shape[1], -1, + predictions_shape[-1]]) + labels_shape = common_layers.shape_list(labels) + labels = tf.reshape( + labels, [labels_shape[0], labels_shape[1], -1]) + return predictions, labels + + metric_fns = [] + eval_metrics = problem.eval_metric_fns(model_hparams) + + tm = _create_target_modality(problem.get_hparams(model_hparams).modality) + if isinstance(tm, dict): + for k, v in six.iteritems(tm): + weights_fn = modalities.get_weights_fn(v) + + def make_metric_fn(metric_fn): + """returns a metric_fn.""" + def wrapped_metric_fn(logits, labels, features, weights_fn=weights_fn): + kwargs = {} + args, _, keywords, _ = inspect.getargspec(metric_fn) + if ("features" in args) or keywords: + kwargs["features"] = features + + logits, labels = reduce_dimensions(logits, labels) + num, den = metric_fn(logits, labels, weights_fn=weights_fn, **kwargs) + return tf.metrics.mean(num, den) + + return wrapped_metric_fn + + for metric, metric_fn in six.iteritems(eval_metrics): + if metric in TPU_METRIC_BLACKLIST: + log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric) + continue + name = "%s/metrics-%s/%s" % (k, problem.name, metric) + metric_fns.append((name, make_metric_fn(metric_fn))) + else: + weights_fn = modalities.get_weights_fn(tm) + + def make_metric_fn(metric_fn): + """returns a metric fn.""" + def wrapped_metric_fn(logits, labels, features): + kwargs = {} + args, _, keywords, _ = inspect.getargspec(metric_fn) + if ("features" in args) or keywords: + kwargs["features"] = features + + logits, labels = reduce_dimensions(logits, labels) + num, den = metric_fn(logits, labels, weights_fn=weights_fn, **kwargs) + return tf.metrics.mean(num, den) + + return wrapped_metric_fn + + for metric, metric_fn in six.iteritems(eval_metrics): + if metric in TPU_METRIC_BLACKLIST: + log_warn("Skipping eval metric %s in TPU_METRIC_BLACKLIST", metric) + continue + name = "metrics-%s/%s" % (problem.name, metric) + metric_fns.append((name, make_metric_fn(metric_fn))) + + def all_metrics_fn(**kwargs): + """Construct metrics dictionary.""" + + original_kwargs = _unflatten_dict(kwargs, prefixes=["logits", "features"]) + del kwargs + + logits = original_kwargs["logits"] + labels = original_kwargs["labels"] + features = original_kwargs["features"] + del 
original_kwargs + + metrics_dict = {} + + for name, fn in metric_fns: + if isinstance(logits, dict) and isinstance(labels, dict): + for k, v in six.iteritems(logits): + metrics_dict["%s/%s" % (k, name)] = fn(v, labels[k], features) + elif isinstance(logits, dict): + tf.logging.warning("Logits is a dict, but labels is not; only " + "evaluating logits['targets'] against labels.") + metrics_dict["%s/%s" % ("targets", name)] = fn(logits["targets"], + labels, features) else: - loss = 0.0 - return output, loss + metrics_dict[name] = fn(logits, labels, features) + + return metrics_dict + + return all_metrics_fn - def model_fn_body(self, features, train): - """Most models will override this function. - Compute label logits for one shard as a function of the transformed - features. +def remove_summaries(): + """Remove summaries from the default graph.""" + g = tf.get_default_graph() + key = tf.GraphKeys.SUMMARIES + log_debug("Remove summaries %s" % str(g.get_collection(key))) + del g.get_collection_ref(key)[:] + assert not g.get_collection(key) + + +def create_host_call(model_dir): + """Construct a host_call writing scalar summaries. + + Args: + model_dir: String containing path to train + + Returns: + (fn, args) Pair to be called by TPUEstimator as the host_call. + """ + graph = tf.get_default_graph() + summaries = graph.get_collection(tf.GraphKeys.SUMMARIES) + gs_t = tf.reshape(tf.to_int32(tf.train.get_global_step()), [1]) + summary_kwargs = collections.OrderedDict() + for t in summaries: + # TODO(aidangomez): enable ImageSummary support when we have a faster method + # see @shibow's comment in cl/202344570 + if t.op.type not in ["ScalarSummary"]: + tf.logging.warn("Ignoring unsupported tf.Summary type %s" % t.op.type) + continue + + name = t.op.name + tensor = t.op.inputs[1] + if t.op.type == "ScalarSummary": + assert tensor.shape.is_compatible_with([]) + if tensor.dtype == tf.int64: + tensor = tf.to_int32(tensor) + summary_kwargs["ScalarSummary" + name] = tf.reshape(tensor, [1]) + elif t.op.type == "ImageSummary": + # TODO(aidangomez): as we move to support more types, update + # common_layers.tpu_safe_image_summary + if tensor.dtype != tf.float32: + tf.logging.warn( + "Currently T2T on TPU only supports ImageSummary of " + "tf.float32-type Tensors. Skipping Tensor " + "%s with dtype %s..." % (tensor.name, tensor.dtype)) + continue + # tensor = tf.to_float(tensor) + summary_kwargs["ImageSummary" + name] = tensor + # When no supported summaries are found, don't create host_call. Otherwise, + # TPU outfeed queue would enqueue global_step while host_call doesn't dequeue + # it, eventually causing hang. + if not summary_kwargs: + return None + summary_kwargs["global_step"] = gs_t + log_info("summary_kwargs %s" % str(summary_kwargs)) + + def host_call_fn(**kwargs): + """Training host call. Creates summaries for training metrics. Args: - features: A dictionary of key to Tensor. Each Tensor has shape - `[batch_size, ?, ?, hidden_size]`. - train: A boolean `Scalar` (whether we are in training mode). + **kwargs: Dict of {str: Tensor} , with `Tensor` of shape `[batch]`. Must + contain key "global_step" with value of current global_step Tensor. Returns: - a `Tensor` of logits with shape `[batch_size, O, P, body_output_size]`. + List of summary ops to run on the CPU host. 
""" - raise NotImplementedError("Abstract Method") + gs = tf.to_int64(kwargs.pop("global_step")[0]) + with contrib.summary().create_file_writer(model_dir).as_default(): + with contrib.summary().always_record_summaries(): + # We need to use tf.contrib.summary in order to feed the `step`. + for name, value in sorted(six.iteritems(kwargs)): + if name.startswith("ScalarSummary"): + name = name[len("ScalarSummary"):] + contrib.summary().scalar( + name, tf.reduce_mean(tf.to_float(value)), step=gs) + elif name.startswith("ImageSummary"): + name = name[len("ImageSummary"):] + contrib.summary().image(name, value, step=gs) - @property - def hparams(self): - return self._hparams + return contrib.summary().all_summary_ops() + + return (host_call_fn, summary_kwargs) + + +def _del_dict_non_tensors(d): + for k in list(d.keys()): + if not isinstance(d[k], tf.Tensor): + del d[k] + + +class DummyVariableStore(object): + + @contextlib.contextmanager + def as_default(self): + yield + + +def create_eager_var_store(): + if tf.executing_eagerly(): + return variable_scope.EagerVariableStore() + else: + return DummyVariableStore() + + +def average_sharded_losses(sharded_losses): + """Average losses across datashards. + + Args: + sharded_losses: list>. The loss + can be a single Tensor or a 2-tuple (numerator and denominator). + + Returns: + losses: dict + """ + losses = {} + for loss_name in sorted(sharded_losses[0]): + all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses] + if isinstance(all_shards[0], tuple): + sharded_num, sharded_den = zip(*all_shards) + mean_loss = ( + tf.add_n(sharded_num) / tf.maximum( + tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den))) + else: + mean_loss = tf.reduce_mean(all_shards) + + losses[loss_name] = mean_loss + return losses + + +def summarize_features(features, num_shards=1): + """Generate summaries for features.""" + if not common_layers.should_generate_summaries(): + return + + with tf.name_scope("input_stats"): + for (k, v) in sorted(six.iteritems(features)): + if (isinstance(v, tf.Tensor) and (v.get_shape().ndims > 1) and + (v.dtype != tf.string)): + tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // num_shards) + tf.summary.scalar("%s_length" % k, tf.shape(v)[1]) + nonpadding = tf.to_float(tf.not_equal(v, 0)) + nonpadding_tokens = tf.reduce_sum(nonpadding) + tf.summary.scalar("%s_nonpadding_tokens" % k, nonpadding_tokens) + tf.summary.scalar("%s_nonpadding_fraction" % k, + tf.reduce_mean(nonpadding)) + + +_already_logged = set() + + +def _eager_log(level, *args): + if tf.executing_eagerly() and args in _already_logged: + return + _already_logged.add(args) + getattr(tf.logging, level)(*args) + + +def log_debug(*args): + _eager_log("debug", *args) + + +def log_info(*args): + _eager_log("info", *args) + + +def log_warn(*args): + _eager_log("warn", *args) + + +def _compose_custom_getters(getter_a, getter_b): + """Compose two custom getters. + + Example use: + tf.get_variable_scope().set_custom_getter( + compose_custom_getters(tf.get_variable_scope().custom_getter, new_getter)) + + This composes getters in the same way as creating a new variable scope with + the new_getter, but it does not actually create a new variable scope. + + Args: + getter_a: a custom getter - generally from the existing variable scope. 
+ getter_b: a custom getter + + Returns: + a custom getter + """ + if not getter_a: + return getter_b + if not getter_b: + return getter_a + + def getter_fn(getter, *args, **kwargs): + return getter_b(functools.partial(getter_a, getter), *args, **kwargs) + + return getter_fn + + +def set_custom_getter_compose(custom_getter): + """Set a custom getter in the current variable scope. + + Do not overwrite the existing custom getter - rather compose with it. + + Args: + custom_getter: a custom getter. + """ + tf.get_variable_scope().set_custom_getter( + _compose_custom_getters(tf.get_variable_scope().custom_getter, + custom_getter)) + + +def _create_target_modality(modality_dict): + # TODO(trandustin): We require this in order to apply methods utilized + # differently for modalities which are "targets" + # (e.g., modality.target_bottom). In the future, remove need for this + # behavior. + return {k: v for k, v in six.iteritems(modality_dict) if "target" in k + and k != "targets_segmentation" and k != "targets_position"} + + +def initialize_from_ckpt(ckpt_dir, hparams): + """Initialize variables from given directory.""" + model_dir = hparams.get("model_dir", None) + already_has_ckpt = ( + model_dir and tf.train.latest_checkpoint(model_dir) is not None) + if already_has_ckpt: + return + + tf.logging.info("Checkpoint dir: %s", ckpt_dir) + reader = contrib.framework().load_checkpoint(ckpt_dir) + variable_map = {} + for var in contrib.framework().get_trainable_variables(): + var_name = var.name.split(":")[0] + if reader.has_tensor(var_name): + tf.logging.info("Loading variable from checkpoint: %s", var_name) + variable_map[var_name] = var + else: + tf.logging.info("Cannot find variable in checkpoint, skipping: %s", + var_name) + tf.train.init_from_checkpoint(ckpt_dir, variable_map) diff --git a/tensor2tensor/utils/t2t_model_test.py b/tensor2tensor/utils/t2t_model_test.py new file mode 100644 index 000000000..4ce17dd67 --- /dev/null +++ b/tensor2tensor/utils/t2t_model_test.py @@ -0,0 +1,83 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
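To make the custom-getter composition helpers added above in t2t_model.py (`_compose_custom_getters`, `set_custom_getter_compose`) concrete, here is a minimal sketch; it assumes TF1-style `tensorflow.compat.v1` in graph mode, and the getters `log_getter` and `scale_getter` are invented for illustration only and are not part of this patch:

import functools
import tensorflow.compat.v1 as tf

def log_getter(getter, name, *args, **kwargs):
  # Logs the variable name, then delegates to the wrapped getter.
  tf.logging.info("getting variable %s", name)
  return getter(name, *args, **kwargs)

def scale_getter(getter, name, *args, **kwargs):
  # Delegates to the wrapped getter and returns a scaled tensor.
  return 2.0 * getter(name, *args, **kwargs)

def composed(getter, name, *args, **kwargs):
  # Mirrors _compose_custom_getters(getter_a=log_getter, getter_b=scale_getter):
  # scale_getter runs with a getter that already routes through log_getter.
  return scale_getter(functools.partial(log_getter, getter),
                      name, *args, **kwargs)

with tf.variable_scope("demo", custom_getter=composed):
  # The underlying variable "demo/w" is created once; `v` is the scaled result.
  v = tf.get_variable("w", shape=[2], initializer=tf.ones_initializer())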
+ +"""Tests for T2TModel.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.data_generators import problem_hparams +from tensor2tensor.utils import hparam +from tensor2tensor.utils import t2t_model +from tensor2tensor.utils import test_utils + +import tensorflow.compat.v1 as tf +tf.enable_eager_execution() + + +class T2TModelTest(tf.test.TestCase): + + @test_utils.run_in_graph_and_eager_modes() + def testSummarizeLosses(self): + with tf.Graph().as_default(): + model = t2t_model.T2TModel(hparam.HParams()) + losses = {"training": tf.random_normal([]), + "extra": tf.random_normal([])} + outputs = model._summarize_losses(losses) + self.assertIsNone(outputs, None) + self.assertEqual( + len(tf.get_collection(tf.GraphKeys.SUMMARIES, scope="losses")), + len(losses)) + + def testLossSingleWeights(self): + """Ensure _loss_single() respects optional 'weights' argument.""" + with tf.Graph().as_default(): + with self.test_session() as sess: + batch_size = 2 + sequence_size = 16 + vocab_size = 3 + + model_hparams = hparam.HParams( + prepend_mode="none", + loss={}, + weights_fn={}, + label_smoothing=0.0, + shared_embedding_and_softmax_weights=False) + + ph = problem_hparams.TestProblem( + vocab_size, vocab_size).get_hparams(model_hparams) + + model = t2t_model.T2TModel(model_hparams, problem_hparams=ph) + logits = tf.zeros((batch_size, sequence_size, 1, 1, vocab_size)) + feature = tf.ones((batch_size, sequence_size, 1, 1)) + + # all-zero weights == zero loss. + weights = tf.zeros((batch_size, sequence_size)) + loss_num, loss_denom = model._loss_single( + logits, "targets", feature, weights=weights) + self.assertAllClose(tf.zeros_like(loss_num), sess.run(loss_num)) + self.assertAllClose(tf.zeros_like(loss_denom), sess.run(loss_denom)) + + # non-zero weights > zero loss. + weights = tf.ones((batch_size, sequence_size)) + loss_num, loss_denom = model._loss_single( + logits, "targets", feature, weights=weights) + self.assertAllLess(0.0, sess.run(loss_num)) + self.assertAllClose(batch_size * sequence_size, sess.run(loss_denom)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/utils/test_utils.py b/tensor2tensor/utils/test_utils.py new file mode 100644 index 000000000..4a823dea3 --- /dev/null +++ b/tensor2tensor/utils/test_utils.py @@ -0,0 +1,124 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Test utilities.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + + +def run_in_graph_and_eager_modes(func=None, + config=None, + use_gpu=True): + """Execute the decorated test with and without enabling eager execution. + + This function returns a decorator intended to be applied to test methods in + a `tf.test.TestCase` class. 
Doing so will cause the contents of the test + method to be executed twice - once in graph mode, and once with eager + execution enabled. This allows unittests to confirm the equivalence between + eager and graph execution. + + NOTE: This decorator can only be used when executing eagerly in the + outer scope. + + For example, consider the following unittest: + + ```python + tf.enable_eager_execution() + + class SomeTest(tf.test.TestCase): + + @test_utils.run_in_graph_and_eager_modes + def test_foo(self): + x = tf.constant([1, 2]) + y = tf.constant([3, 4]) + z = tf.add(x, y) + self.assertAllEqual([4, 6], self.evaluate(z)) + + if __name__ == "__main__": + tf.test.main() + ``` + + This test validates that `tf.add()` has the same behavior when computed with + eager execution enabled as it does when constructing a TensorFlow graph and + executing the `z` tensor with a session. + + Args: + func: function to be annotated. If `func` is None, this method returns a + decorator the can be applied to a function. If `func` is not None this + returns the decorator applied to `func`. + config: An optional config_pb2.ConfigProto to use to configure the session + when executing graphs. + use_gpu: If True, attempt to run as many operations as possible on GPU. + + Returns: + Returns a decorator that will run the decorated test method twice: + once by constructing and executing a graph in a session and once with + eager execution enabled. + """ + + def decorator(f): + """Decorator for a method.""" + def decorated(self, *args, **kwargs): + """Run the decorated test method.""" + if not tf.executing_eagerly(): + raise ValueError("Must be executing eagerly when using the " + "run_in_graph_and_eager_modes decorator.") + + # Run eager block + f(self, *args, **kwargs) + self.tearDown() + + # Run in graph mode block + with tf.Graph().as_default(): + self.setUp() + with self.test_session(use_gpu=use_gpu, config=config): + f(self, *args, **kwargs) + + return decorated + + if func is not None: + return decorator(func) + + return decorator + + +def run_in_graph_mode_only(func=None, config=None, use_gpu=True): + """Runs a test in graph mode only, when eager is enabled by default.""" + def decorator(f): + """Decorator for a method.""" + def decorated(self, *args, **kwargs): + """Run the decorated test method.""" + self.tearDown() + # Run in graph mode block + with tf.Graph().as_default(): + self.setUp() + with self.test_session(use_gpu=use_gpu, config=config): + f(self, *args, **kwargs) + + return decorated + + if func is not None: + return decorator(func) + + return decorator + + +def test_main(): + tf.enable_eager_execution() + tf.test.main() diff --git a/tensor2tensor/utils/test_utils_test.py b/tensor2tensor/utils/test_utils_test.py new file mode 100644 index 000000000..4c79e7ceb --- /dev/null +++ b/tensor2tensor/utils/test_utils_test.py @@ -0,0 +1,75 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
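As a complement to the docstring example above for `run_in_graph_and_eager_modes`, here is a minimal, hypothetical usage sketch of `run_in_graph_mode_only`; the test class, tensors, and expected value are invented for illustration:

import tensorflow.compat.v1 as tf
from tensor2tensor.utils import test_utils

tf.enable_eager_execution()


class MatmulGraphOnlyTest(tf.test.TestCase):

  @test_utils.run_in_graph_mode_only()
  def test_matmul(self):
    # The body runs once, inside a fresh tf.Graph() and a test session,
    # even though eager execution is enabled at module import time.
    x = tf.constant([[1.0, 2.0]])
    y = tf.constant([[3.0], [4.0]])
    self.assertAllClose([[11.0]], self.evaluate(tf.matmul(x, y)))


if __name__ == "__main__":
  tf.test.main()

This pattern keeps graph-only assertions (for example, checks against summary collections) out of tests decorated with the dual-mode decorator.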
+ +"""Tests for tensor2tensor.utils.test_utils.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +from tensor2tensor.utils import test_utils + +import tensorflow.compat.v1 as tf +tf.enable_eager_execution() + + +class RunInGraphAndEagerTest(tf.test.TestCase): + + def test_run_in_graph_and_eager_modes(self): + l = [] + def inc(self, with_brackets): + del self # self argument is required by run_in_graph_and_eager_modes. + mode = "eager" if tf.executing_eagerly() else "graph" + with_brackets = "with_brackets" if with_brackets else "without_brackets" + l.append((with_brackets, mode)) + + f = test_utils.run_in_graph_and_eager_modes(inc) + f(self, with_brackets=False) + f = test_utils.run_in_graph_and_eager_modes()(inc) + f(self, with_brackets=True) + + self.assertEqual(len(l), 4) + self.assertEqual(set(l), { + ("with_brackets", "graph"), + ("with_brackets", "eager"), + ("without_brackets", "graph"), + ("without_brackets", "eager"), + }) + + def test_run_in_graph_and_eager_modes_setup_in_same_mode(self): + modes = [] + mode_name = lambda: "eager" if tf.executing_eagerly() else "graph" + + class ExampleTest(tf.test.TestCase): + + def runTest(self): + pass + + def setUp(self): + modes.append("setup_" + mode_name()) + + @test_utils.run_in_graph_and_eager_modes + def testBody(self): + modes.append("run_" + mode_name()) + + e = ExampleTest() + e.setUp() + e.testBody() + + self.assertEqual(modes[0:2], ["setup_eager", "run_eager"]) + self.assertEqual(modes[2:], ["setup_graph", "run_graph"]) + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/utils/trainer_lib.py b/tensor2tensor/utils/trainer_lib.py new file mode 100644 index 000000000..0dcbe7a81 --- /dev/null +++ b/tensor2tensor/utils/trainer_lib.py @@ -0,0 +1,869 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Library for training. See t2t_trainer.py.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import collections +import contextlib +import json +import os +import random +import numpy as np + +from tensor2tensor.utils import contrib +from tensor2tensor.utils import decoding +from tensor2tensor.utils import devices +from tensor2tensor.utils import hparams_lib +from tensor2tensor.utils import metrics_hook +from tensor2tensor.utils import mlperf_log +from tensor2tensor.utils import registry +from tensor2tensor.utils import t2t_model + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +from tensorflow.core.protobuf import rewriter_config_pb2 +from tensorflow.python import debug + + +create_hparams = hparams_lib.create_hparams +add_problem_hparams = hparams_lib.add_problem_hparams + + +def next_checkpoint(model_dir, timeout_mins=240): + """Yields successive checkpoints from model_dir. + + Args: + model_dir: The directory in which checkpoints are saved. 
+ timeout_mins: The maximum amount of time in minutes to wait + between checkpoints. Set this to -1 to wait indefinitely. + Yields: + last_ckpt: a new checkpoint path, or None if the timeout was reached. + """ + last_ckpt = None + timeout_secs = None + if timeout_mins != -1: + timeout_secs = timeout_mins * 60 + while True: + last_ckpt = contrib.training().wait_for_new_checkpoint( + model_dir, last_ckpt, seconds_to_sleep=60, timeout=timeout_secs) + + if last_ckpt is None: + tf.logging.info( + "Eval timeout: no new checkpoints within %dm" % timeout_mins) + break + + yield last_ckpt + + +def next_undecoded_checkpoint(model_dir, timeout_mins=240): + """Yields successive checkpoints from model_dir.""" + last_ckpt = None + last_step = 0 + while True: + # Get the latest checkpoint. + last_ckpt = contrib.training().wait_for_new_checkpoint( + model_dir, last_ckpt, seconds_to_sleep=60, timeout=60 * timeout_mins) + # Get all the checkpoint from the model dir. + ckpt_path = tf.train.get_checkpoint_state(model_dir) + all_model_checkpoint_paths = ckpt_path.all_model_checkpoint_paths + ckpt_step = np.inf + next_ckpt = None + # Find the next checkpoint to eval based on last_step. + for ckpt in all_model_checkpoint_paths: + step = int(os.path.basename(ckpt).split("-")[1]) + if step > last_step and step < ckpt_step: + ckpt_step = step + next_ckpt = ckpt + + # If all the checkpoints have been evaluated. + if last_ckpt is None and next_ckpt is None: + tf.logging.info( + "Eval timeout: no new checkpoints within %dm" % timeout_mins) + break + + if next_ckpt is not None: + last_step = ckpt_step + last_ckpt = next_ckpt + + yield last_ckpt + + +def create_session_config(log_device_placement=False, + enable_graph_rewriter=False, + gpu_mem_fraction=0.95, + use_tpu=False, + xla_jit_level=tf.OptimizerOptions.OFF, + inter_op_parallelism_threads=0, + intra_op_parallelism_threads=0): + """The TensorFlow Session config to use.""" + if use_tpu: + graph_options = tf.GraphOptions() + else: + if enable_graph_rewriter: + rewrite_options = rewriter_config_pb2.RewriterConfig() + rewrite_options.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON + graph_options = tf.GraphOptions(rewrite_options=rewrite_options) + else: + graph_options = tf.GraphOptions( + optimizer_options=tf.OptimizerOptions( + opt_level=tf.OptimizerOptions.L1, + do_function_inlining=False, + global_jit_level=xla_jit_level)) + + gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_mem_fraction) + + config = tf.ConfigProto( + allow_soft_placement=True, + graph_options=graph_options, + gpu_options=gpu_options, + log_device_placement=log_device_placement, + inter_op_parallelism_threads=inter_op_parallelism_threads, + intra_op_parallelism_threads=intra_op_parallelism_threads, + isolate_session_state=True) + return config + + +def is_cloud_async_distributed(): + return ("chief" in + json.loads(os.environ.get("TF_CONFIG", "{}")).get("cluster", {})) + + +def create_run_config(model_name, + master="", + model_dir=None, + iterations_per_loop=1000, + num_shards=8, + log_device_placement=False, + save_checkpoints_steps=1000, + save_checkpoints_secs=None, + keep_checkpoint_max=20, + keep_checkpoint_every_n_hours=10000, + num_gpus=1, + gpu_order="", + num_async_replicas=1, + enable_graph_rewriter=False, + gpu_mem_fraction=0.95, + no_data_parallelism=False, + optionally_use_dist_strat=False, + daisy_chain_variables=True, + schedule="continuous_train_and_eval", + worker_job="/job:localhost", + worker_id=0, + ps_replicas=0, + ps_job="/job:ps", + ps_gpu=0, + 
random_seed=None, + sync=False, + tpu_infeed_sleep_secs=None, + use_tpu=False, + use_tpu_estimator=False, + xla_jit_level=tf.OptimizerOptions.OFF, + inter_op_parallelism_threads=0, + log_step_count_steps=100, + intra_op_parallelism_threads=0, + tpu_config_extra_kwargs=None, + cloud_tpu_name="", + cloud_tpu_zone=None): + """Create RunConfig, TPUConfig, and Parallelism object.""" + session_config = create_session_config( + log_device_placement=log_device_placement, + enable_graph_rewriter=enable_graph_rewriter, + gpu_mem_fraction=gpu_mem_fraction, + use_tpu=use_tpu, + xla_jit_level=xla_jit_level, + inter_op_parallelism_threads=inter_op_parallelism_threads, + intra_op_parallelism_threads=intra_op_parallelism_threads) + run_config_args = { + "master": master, + "evaluation_master": master, + "model_dir": model_dir, + "session_config": session_config, + "save_summary_steps": 100, + "save_checkpoints_steps": save_checkpoints_steps, + "save_checkpoints_secs": save_checkpoints_secs, + "keep_checkpoint_max": keep_checkpoint_max, + "keep_checkpoint_every_n_hours": keep_checkpoint_every_n_hours, + "tf_random_seed": random_seed, + "log_step_count_steps": log_step_count_steps, + } + if save_checkpoints_secs: + del run_config_args["save_checkpoints_steps"] + run_config_cls = contrib.learn().RunConfig + + if use_tpu or use_tpu_estimator: + # If using TPUEstimator, use TPU RunConfig, add TPUConfig, and add + # additional args. + tpu_config_kwargs = { + "iterations_per_loop": iterations_per_loop, + "num_shards": num_shards, + "per_host_input_for_training": True, + "initial_infeed_sleep_secs": tpu_infeed_sleep_secs, + } + if tpu_config_extra_kwargs is not None: + tpu_config_kwargs.update(tpu_config_extra_kwargs) + run_config_cls = contrib.tpu().RunConfig + tpu_config = contrib.tpu().TPUConfig(**tpu_config_kwargs) + run_config_args["tpu_config"] = tpu_config + if not master and "KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS" in os.environ: + # If running on TPU but no master is set and the KUBE env var is present + # then we're running on ML Engine. Set the master. + run_config_args["master"] = os.environ[ + "KUBE_GOOGLE_CLOUD_TPU_ENDPOINTS"] + run_config_args["evaluation_master"] = run_config_args["master"] + elif not master and cloud_tpu_name: + # Update run_config to use cluster instead of master/evaluation_master + # as we need the cluster spec to use Cloud Pods + tpu_cluster_resolver = contrib.cluster_resolver().TPUClusterResolver( + tpu=cloud_tpu_name, zone=cloud_tpu_zone) + run_config_args["cluster"] = tpu_cluster_resolver + del run_config_args["master"] + del run_config_args["evaluation_master"] + elif is_cloud_async_distributed(): + run_config_cls = tf_estimator.RunConfig + del run_config_args["master"] + del run_config_args["evaluation_master"] + + # tf.estimator RunConfig construction got totally broken in TF2. 
+ # we now have to specify master in a global environment variable + if contrib.is_tf2: + del run_config_args["evaluation_master"] + del run_config_args["master"] + + config = run_config_cls(**run_config_args) + + # If not using TPU, add device info for data_parallelism + config.use_tpu = use_tpu + if not use_tpu: + config.t2t_device_info = { + "num_async_replicas": num_async_replicas, + } + use_distribution_strategy = ( + optionally_use_dist_strat and + t2t_model.T2TModel.has_symmetric_shards(model_name) and + not no_data_parallelism and ps_replicas == 0 and ps_gpu == 0 and + num_async_replicas == 1) + + if use_distribution_strategy: + tf.logging.info( + "Configuring MirroredStrategy DistributionStrategy to replicate the " + "model." + ) + distribution = contrib.distribute().MirroredStrategy() + config = config.replace(train_distribute=distribution) + config.data_parallelism = None + else: + tf.logging.info("Configuring DataParallelism to replicate the model.") + config.data_parallelism = devices.data_parallelism( + daisy_chain_variables=daisy_chain_variables, + ps_replicas=ps_replicas, + ps_job=ps_job, + ps_gpu=ps_gpu, + schedule=schedule, + sync=sync, + worker_gpu=num_gpus, + worker_replicas=num_async_replicas, + worker_id=worker_id, + gpu_order=gpu_order, + worker_job=worker_job, + no_data_parallelism=no_data_parallelism) + + return config + + +def create_estimator(model_name, + hparams, + run_config, + schedule="train_and_evaluate", + decode_hparams=None, + use_tpu=False, + use_tpu_estimator=False, + use_xla=False, + export_saved_model_api_version=1, + use_guarantee_const_getter=False): + """Create a T2T Estimator.""" + model_fn = t2t_model.T2TModel.make_estimator_model_fn( + model_name, hparams, decode_hparams=decode_hparams, use_tpu=use_tpu) + + + del use_xla + if use_tpu or use_tpu_estimator: + from tensorflow.contrib.tpu.python.tpu import tpu_estimator # pylint: disable=g-import-not-at-top + problem = hparams.problem + batch_size = ( + problem.tpu_batch_size_per_shard(hparams) * + run_config.tpu_config.num_shards) + mlperf_log.transformer_print( + key=mlperf_log.INPUT_BATCH_SIZE, value=batch_size) + if getattr(hparams, "mtf_mode", False): + batch_size = problem.tpu_batch_size_per_shard(hparams) + predict_batch_size = batch_size + if decode_hparams and decode_hparams.batch_size: + predict_batch_size = decode_hparams.batch_size + if decode_hparams and run_config.tpu_config: + decode_hparams.add_hparam("iterations_per_loop", + run_config.tpu_config.iterations_per_loop) + if export_saved_model_api_version == 1: + api_version_enum_name = tpu_estimator.ExportSavedModelApiVersion.V1 + estimator_model_fn = model_fn + elif export_saved_model_api_version == 2: + api_version_enum_name = tpu_estimator.ExportSavedModelApiVersion.V2 + + def maybe_use_guarantee_const_getter_model_fn(features, labels, mode, + params): + """Wrapper model_fn with guarantee_const getter.""" + if not use_guarantee_const_getter: + return model_fn(features, labels, mode, params) + + # It marks all weights as constant, which may improves TPU inference + # performance because it prevents the weights being transferred to the + # TPU. It will increase HBM "program" usage and reduce HBM "arguments" + # usage during TPU model serving. 
+ def guarantee_const_getter(getter, name, *args, **kwargs): + with tf.control_dependencies(None): + return tf.guarantee_const( + getter(name, *args, **kwargs), name=name + "/GuaranteeConst") + + @contextlib.contextmanager + def guarantee_const_scope(): + var_scope = tf.get_variable_scope() + prev_custom_getter = var_scope.custom_getter + prev_caching_device = var_scope.caching_device + var_scope.set_custom_getter(guarantee_const_getter) + var_scope.set_caching_device(lambda op: op.device) + yield + var_scope.set_custom_getter(prev_custom_getter) + var_scope.set_caching_device(prev_caching_device) + + with guarantee_const_scope(): + return model_fn(features, labels, mode, params) + + def tpu_model_fn(features, labels, mode, params): + """Wrapper model_fn with tpu.rewrite / TPUPartitionedCall.""" + if mode == tf_estimator.ModeKeys.PREDICT and params["use_tpu"]: + batch_config = tpu_estimator.BatchConfig( + num_batch_threads=2, + max_batch_size=predict_batch_size, + batch_timeout_micros=60 * 1000, + allowed_batch_sizes=[predict_batch_size]) + return tpu_estimator.model_fn_inference_on_tpu( + maybe_use_guarantee_const_getter_model_fn, + features=features, + labels=labels, + config=None, + params=params, + batch_config=batch_config) + else: + return model_fn(features, labels, mode, params) + + estimator_model_fn = tpu_model_fn + else: + raise ValueError("Flag export_saved_model_api_version must be 1 or 2.") + estimator = contrib.tpu().TPUEstimator( + model_fn=estimator_model_fn, + model_dir=run_config.model_dir, + config=run_config, + use_tpu=use_tpu, + train_batch_size=batch_size, + eval_batch_size=batch_size if "eval" in schedule else None, + predict_batch_size=predict_batch_size, + export_saved_model_api_version=api_version_enum_name) + else: + estimator = tf_estimator.Estimator( + model_fn=model_fn, + model_dir=run_config.model_dir, + config=run_config, + ) + return estimator + + +def create_hooks(use_tfdbg=False, + use_dbgprofile=False, + dbgprofile_kwargs=None, + use_validation_monitor=False, + validation_monitor_kwargs=None, + use_early_stopping=False, + early_stopping_kwargs=None): + """Create train and eval hooks for Experiment.""" + train_hooks = [] + eval_hooks = [] + + if use_tfdbg: + hook = debug.LocalCLIDebugHook() + train_hooks.append(hook) + eval_hooks.append(hook) + + if use_dbgprofile: + # Recorded traces can be visualized with chrome://tracing/ + # The memory/tensor lifetime is also profiled + tf.logging.info("Using ProfilerHook") + defaults = dict(save_steps=10, show_dataflow=True, show_memory=True) + defaults.update(dbgprofile_kwargs) + train_hooks.append(tf.train.ProfilerHook(**defaults)) + + if use_validation_monitor: + tf.logging.info("Using ValidationMonitor") + train_hooks.append( + contrib.learn().monitors.ValidationMonitor( + hooks=eval_hooks, **validation_monitor_kwargs)) + + if use_early_stopping: + tf.logging.info("Using EarlyStoppingHook") + hook = metrics_hook.EarlyStoppingHook(**early_stopping_kwargs) + # Adding to both training and eval so that eval aborts as well + train_hooks.append(hook) + eval_hooks.append(hook) + + return train_hooks, eval_hooks + + +class HookContext(collections.namedtuple( + "HookContext", + ["estimator", "problem", "hparams"])): + pass + + +class T2TExperiment(object): + """Custom Experiment class for running distributed experiments.""" + + def __init__(self, estimator, hparams, train_spec, eval_spec, + use_validation_monitor, decode_hparams=None): + self._train_spec = train_spec + self._eval_spec = eval_spec + self._hparams = hparams 
+ self._decode_hparams = decode_hparams + self._estimator = estimator + self._use_validation_monitor = use_validation_monitor + + @property + def estimator(self): + return self._estimator + + @property + def train_steps(self): + return self._train_spec.max_steps + + @property + def eval_steps(self): + return self._eval_spec.steps + + def continuous_train_and_eval(self, continuous_eval_predicate_fn=None): + del continuous_eval_predicate_fn + tf_estimator.train_and_evaluate(self._estimator, self._train_spec, + self._eval_spec) + return self.evaluate() + + def train_and_evaluate(self): + if self._use_validation_monitor: + tf.logging.warning("EvalSpec not provided. Estimator will not manage " + "model evaluation. Assuming ValidationMonitor present " + "in train_hooks.") + self.train() + + def train(self, max_steps=None): + mlperf_log.transformer_print(key=mlperf_log.TRAIN_LOOP) + mlperf_log.transformer_print(key=mlperf_log.TRAIN_EPOCH, value=0) + self._estimator.train( + self._train_spec.input_fn, + hooks=self._train_spec.hooks, + max_steps=max_steps or self._train_spec.max_steps) + + def train_eval_and_decode(self): + """Does eval and decode after training every eval_freq_in_steps.""" + eval_steps = self._hparams.eval_freq_in_steps + packed_dataset = "_packed" in self._hparams.problem.name + mlperf_log.transformer_print(key=mlperf_log.TRAIN_LOOP) + for i in range(0, self._train_spec.max_steps, eval_steps): + mlperf_log.transformer_print( + key=mlperf_log.TRAIN_EPOCH, value=i // eval_steps) + if packed_dataset and i > 0: + problem = registry.problem(self._hparams.problem.name + "_packed") + p_hparams = problem.get_hparams(self._hparams) + self._hparams.problem = problem + self._hparams.problem_hparams = p_hparams + self._estimator.train( + self._train_spec.input_fn, + steps=eval_steps, + hooks=self._train_spec.hooks) + self._set_eval_dir_name("eval") + self._estimator.evaluate( + self._eval_spec.input_fn, + steps=self._eval_spec.steps, + hooks=self._eval_spec.hooks, + name="eval") + if packed_dataset: + problem = registry.problem( + self._hparams.problem.name.replace("_packed", "")) + p_hparams = problem.get_hparams(self._hparams) + self._hparams.problem = problem + self._hparams.problem_hparams = p_hparams + mlperf_log.transformer_print(key=mlperf_log.EVAL_START) + if self._hparams.mlperf_mode: + self._decode_hparams.mlperf_decode_step = i + eval_steps + self.decode(dataset_split=tf_estimator.ModeKeys.EVAL) + d_hparams = self._decode_hparams + if self._hparams.mlperf_mode and d_hparams.mlperf_success: + mlperf_log.transformer_print( + key=mlperf_log.RUN_STOP, value={"success": "true"}) + break + + d_hparams = self._decode_hparams + if self._hparams.mlperf_mode and not d_hparams.mlperf_success: + mlperf_log.transformer_print( + key=mlperf_log.RUN_STOP, value={"success": "false"}) + + def _set_eval_dir_name(self, eval_dir_name): + attr = "eval_dir_name" + hp = self._hparams + if attr not in hp: + hp.add_hparam(attr, "") + hp.eval_dir_name = eval_dir_name + + def evaluate(self): + name = "eval" + self._set_eval_dir_name("eval") + return self._estimator.evaluate( + self._eval_spec.input_fn, + steps=self._eval_spec.steps, + hooks=self._eval_spec.hooks, + name=name) + + def evaluate_on_train_data(self): + name = "eval_train" + self._set_eval_dir_name(name) + self._estimator.evaluate( + self._train_spec.input_fn, + steps=self._eval_spec.steps, + hooks=self._eval_spec.hooks, + name=name) + + def continuous_eval(self): + """Evaluate until checkpoints stop being produced.""" + for ckpt_path in 
next_checkpoint(self._hparams.model_dir, + self._hparams.eval_timeout_mins): + # Skip zero'th step. + train_step = decoding.get_step_from_ckpt_path(ckpt_path) + if train_step == 0: + tf.logging.info("Skipping evaluation at step 0") + continue + self.evaluate() + + def continuous_eval_on_train_data(self): + """Evaluate on train data until checkpoints stop being produced.""" + for ckpt_path in next_checkpoint(self._hparams.model_dir, + self._hparams.eval_timeout_mins): + # Skip zero'th step. + train_step = decoding.get_step_from_ckpt_path(ckpt_path) + if train_step == 0: + tf.logging.info("Skipping evaluation at step 0") + continue + self.evaluate_on_train_data() + + def test(self): + """Perform 1 train step and 1 eval step.""" + if self._use_validation_monitor: + return self.train_and_evaluate() + + self._estimator.train( + self._train_spec.input_fn, hooks=self._train_spec.hooks, max_steps=1) + + self._estimator.evaluate( + self._eval_spec.input_fn, steps=1, hooks=self._eval_spec.hooks) + + def run_std_server(self): + """Starts a TensorFlow server and joins the serving thread. + + Typically used for parameter servers. + + Raises: + ValueError: if not enough information is available in the estimator's + config to create a server. + """ + config = tf_estimator.RunConfig() + server = tf.train.Server( + config.cluster_spec, + job_name=config.task_type, + task_index=config.task_id, + protocol=config.protocol) + server.join() + + def decode(self, + dataset_split=None, + decode_from_file=False, + checkpoint_path=None): + """Decodes from dataset or file.""" + if decode_from_file: + decoding.decode_from_file(self._estimator, + self._decode_hparams.decode_from_file, + self._hparams, + self._decode_hparams, + self._decode_hparams.decode_to_file) + else: + decoding.decode_from_dataset( + self._estimator, + self._hparams.problem.name, + self._hparams, + self._decode_hparams, + dataset_split=dataset_split, + checkpoint_path=checkpoint_path) + + def continuous_decode(self): + """Decode from dataset on new checkpoint.""" + for _ in next_checkpoint(self._hparams.model_dir, + self._decode_hparams.decode_timeout_mins): + self.decode() + + def continuous_decode_on_train_data(self): + """Decode from dataset on new checkpoint.""" + for _ in next_checkpoint(self._hparams.model_dir, + self._decode_hparams.decode_timeout_mins): + self.decode(dataset_split=tf_estimator.ModeKeys.TRAIN) + + def continuous_decode_on_eval_data(self): + """Decode from dataset on new checkpoint.""" + if self._hparams.mlperf_mode: + ckpt_generator = next_undecoded_checkpoint( + self._hparams.model_dir, self._decode_hparams.decode_timeout_mins) + else: + ckpt_generator = next_checkpoint(self._hparams.model_dir, + self._decode_hparams.decode_timeout_mins) + + for ckpt in ckpt_generator: + current_step = decoding.get_step_from_ckpt_path(ckpt) + tf.logging.info("Decoding step %d" % current_step) + # Skip checkpoint 0. + if current_step == 0: + continue + # Decode the latest checkpoint by default. 
+ checkpoint_path = None + if self._hparams.mlperf_mode: + self._decode_hparams.mlperf_decode_step = current_step + checkpoint_path = ckpt + + mlperf_log.transformer_print(key=mlperf_log.EVAL_START) + self.decode( + dataset_split=tf_estimator.ModeKeys.EVAL, + checkpoint_path=checkpoint_path) + d_hparams = self._decode_hparams + if self._hparams.mlperf_mode and d_hparams.mlperf_success: + mlperf_log.transformer_print( + key=mlperf_log.RUN_STOP, value={"success": "true"}) + break + + d_hparams = self._decode_hparams + if self._hparams.mlperf_mode and not d_hparams.mlperf_success: + mlperf_log.transformer_print( + key=mlperf_log.RUN_STOP, value={"success": "false"}) + + def continuous_decode_from_file(self): + """Decode from file on new checkpoint.""" + for _ in next_checkpoint(self._hparams.model_dir, + self._decode_hparams.decode_timeout_mins): + self.decode(decode_from_file=True) + + +def create_experiment( + run_config, + hparams, + model_name, + problem_name, + data_dir, + train_steps, + eval_steps, + min_eval_frequency=2000, + eval_throttle_seconds=600, + schedule="train_and_evaluate", + export=False, + decode_hparams=None, + use_tfdbg=False, + use_dbgprofile=False, + eval_early_stopping_steps=None, + eval_early_stopping_metric=None, + eval_early_stopping_metric_delta=None, + eval_early_stopping_metric_minimize=True, + eval_timeout_mins=240, + eval_use_test_set=False, + use_tpu=False, + use_tpu_estimator=False, + use_xla=False, + export_saved_model_api_version=1, + use_guarantee_const_getter=False, + additional_train_hooks=None, + additional_eval_hooks=None, + warm_start_from=None, + decode_from_file="", + decode_to_file="", + decode_reference="", + std_server_protocol=None): + """Create Experiment.""" + # HParams + hparams.add_hparam("model_dir", run_config.model_dir) + hparams.add_hparam("data_dir", data_dir) + hparams.add_hparam("train_steps", train_steps) + hparams.add_hparam("eval_steps", eval_steps) + hparams.add_hparam("schedule", schedule) + hparams.add_hparam("warm_start_from", warm_start_from) + hparams.add_hparam("std_server_protocol", std_server_protocol) + hparams.add_hparam("eval_freq_in_steps", min_eval_frequency) + hparams.add_hparam("eval_timeout_mins", eval_timeout_mins) + if decode_hparams is not None: + decode_hparams.add_hparam("decode_from_file", decode_from_file) + if decode_to_file and not decode_hparams.decode_to_file: + decode_hparams.decode_to_file = decode_to_file + if decode_reference and not decode_hparams.decode_reference: + decode_hparams.decode_reference = decode_reference + add_problem_hparams(hparams, problem_name) + + # Estimator + estimator = create_estimator( + model_name, + hparams, + run_config, + schedule=schedule, + decode_hparams=decode_hparams, + use_tpu=use_tpu, + use_tpu_estimator=use_tpu_estimator, + use_xla=use_xla, + export_saved_model_api_version=export_saved_model_api_version, + use_guarantee_const_getter=use_guarantee_const_getter) + + # Input fns from Problem + problem = hparams.problem + train_input_fn = problem.make_estimator_input_fn(tf_estimator.ModeKeys.TRAIN, + hparams) + + dataset_split = "test" if eval_use_test_set else None + dataset_kwargs = {"dataset_split": dataset_split} + eval_input_fn = problem.make_estimator_input_fn(tf_estimator.ModeKeys.EVAL, + hparams, + dataset_kwargs=dataset_kwargs) + + # Export + exporter = None + if export: + def compare_fn(best_eval_result, current_eval_result): + metric = eval_early_stopping_metric or "loss" + return current_eval_result[metric] < best_eval_result[metric] + + def 
serving_input_receiver_fn(hparams, decode_hparams, use_tpu): + return problem.serving_input_fn(hparams, decode_hparams, use_tpu) + + exporter = tf_estimator.BestExporter( + name="best", + serving_input_receiver_fn=serving_input_receiver_fn, + compare_fn=compare_fn, + assets_extra=problem.export_assets) + + # Hooks + validation_monitor_kwargs = dict( + input_fn=eval_input_fn, + eval_steps=eval_steps, + every_n_steps=min_eval_frequency, + early_stopping_rounds=eval_early_stopping_steps, + early_stopping_metric=eval_early_stopping_metric, + early_stopping_metric_minimize=eval_early_stopping_metric_minimize) + dbgprofile_kwargs = {"output_dir": run_config.model_dir} + early_stopping_kwargs = dict( + events_dir=os.path.join(run_config.model_dir, "eval_continuous"), + tag=eval_early_stopping_metric, + num_plateau_steps=eval_early_stopping_steps, + plateau_decrease=eval_early_stopping_metric_minimize, + plateau_delta=eval_early_stopping_metric_delta, + every_n_steps=min_eval_frequency) + + # Eval on TPU Pods is not supported yet + if use_tpu and run_config.tpu_config.num_shards > 8 and "eval" in schedule: + raise ValueError("Eval is not currently supported on a TPU Pod") + + # In-process eval (and possible early stopping) + if schedule == "continuous_train_and_eval" and min_eval_frequency: + tf.logging.warn("ValidationMonitor only works with " + "--schedule=train_and_evaluate") + use_validation_monitor = ( + schedule == "train_and_evaluate" and min_eval_frequency) + # Distributed early stopping + local_schedules = ["train_and_evaluate", "continuous_train_and_eval"] + use_early_stopping = ( + schedule not in local_schedules and eval_early_stopping_steps) + train_hooks, eval_hooks = create_hooks( + use_tfdbg=use_tfdbg, + use_dbgprofile=use_dbgprofile, + dbgprofile_kwargs=dbgprofile_kwargs, + use_validation_monitor=use_validation_monitor, + validation_monitor_kwargs=validation_monitor_kwargs, + use_early_stopping=use_early_stopping, + early_stopping_kwargs=early_stopping_kwargs) + + hook_context = HookContext( + estimator=estimator, problem=problem, hparams=hparams) + + train_hooks += t2t_model.T2TModel.get_train_hooks(model_name, hook_context) + eval_hooks += t2t_model.T2TModel.get_eval_hooks(model_name, hook_context) + if additional_train_hooks: + train_hooks += additional_train_hooks + if additional_eval_hooks: + eval_hooks += additional_eval_hooks + + train_hooks = contrib.learn().monitors.replace_monitors_with_hooks( + train_hooks, estimator) + eval_hooks = contrib.learn().monitors.replace_monitors_with_hooks( + eval_hooks, estimator) + + train_spec = tf_estimator.TrainSpec( + train_input_fn, max_steps=train_steps, hooks=train_hooks) + eval_spec = tf_estimator.EvalSpec( + eval_input_fn, + steps=eval_steps, + hooks=eval_hooks, + start_delay_secs=0 if hparams.schedule == "evaluate" else 120, + throttle_secs=eval_throttle_seconds, + exporters=exporter) + + return T2TExperiment(estimator, hparams, train_spec, eval_spec, + use_validation_monitor, decode_hparams) + + +def create_experiment_fn(*args, **kwargs): + """Wrapper for canonical experiment_fn. 
See create_experiment.""" + + def experiment_fn(run_config, hparams): + return create_experiment(run_config, hparams, *args, **kwargs) + + return experiment_fn + + +def set_random_seed(seed): + tf.set_random_seed(seed) + random.seed(seed) + np.random.seed(seed) + + +def restore_checkpoint(ckpt_dir, saver, sess, must_restore=False): + """Restore from a checkpoint.""" + ckpt = tf.train.get_checkpoint_state(ckpt_dir) + if must_restore and not ckpt: + raise ValueError("No checkpoint found in %s" % ckpt_dir) + if not ckpt: + return 0 + + path = ckpt.model_checkpoint_path + tf.logging.info("Restoring checkpoint %s", path) + saver.restore(sess, path) + step = int(path.split("-")[-1]) + return step diff --git a/tensor2tensor/utils/trainer_lib_test.py b/tensor2tensor/utils/trainer_lib_test.py new file mode 100644 index 000000000..a5bbecb9e --- /dev/null +++ b/tensor2tensor/utils/trainer_lib_test.py @@ -0,0 +1,169 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for trainer_lib.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +from tensor2tensor.data_generators import algorithmic +from tensor2tensor.models import transformer # pylint: disable=unused-import +from tensor2tensor.utils import data_reader +from tensor2tensor.utils import registry +from tensor2tensor.utils import trainer_lib +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + + +class TrainerLibTest(tf.test.TestCase): + + @classmethod + def setUpClass(cls): + algorithmic.TinyAlgo.setup_for_test() + + def testExperiment(self): + exp_fn = trainer_lib.create_experiment_fn( + "transformer", + "tiny_algo", + algorithmic.TinyAlgo.data_dir, + train_steps=1, + eval_steps=1, + min_eval_frequency=1, + use_tpu=False) + run_config = trainer_lib.create_run_config( + model_name="transformer", + model_dir=algorithmic.TinyAlgo.data_dir, + num_gpus=0, + use_tpu=False) + hparams = registry.hparams("transformer_tiny_tpu") + exp = exp_fn(run_config, hparams) + exp.test() + + def testExperimentWithClass(self): + exp_fn = trainer_lib.create_experiment_fn( + "transformer", + algorithmic.TinyAlgo(), + algorithmic.TinyAlgo.data_dir, + train_steps=1, + eval_steps=1, + min_eval_frequency=1, + use_tpu=False) + run_config = trainer_lib.create_run_config( + model_name="transformer", + model_dir=algorithmic.TinyAlgo.data_dir, + num_gpus=0, + use_tpu=False) + hparams = registry.hparams("transformer_tiny_tpu") + exp = exp_fn(run_config, hparams) + exp.test() + + def testModel(self): + # HParams + hparams = trainer_lib.create_hparams( + "transformer_tiny", data_dir=algorithmic.TinyAlgo.data_dir, + problem_name="tiny_algo") + + # Dataset + problem = hparams.problem + dataset = problem.dataset(tf_estimator.ModeKeys.TRAIN, + algorithmic.TinyAlgo.data_dir) + dataset = dataset.repeat(None).padded_batch(10, dataset.output_shapes) + features = 
dataset.make_one_shot_iterator().get_next() + features = data_reader.standardize_shapes(features) + + # Model + model = registry.model("transformer")(hparams, tf_estimator.ModeKeys.TRAIN) + logits, losses = model(features) + + self.assertTrue("training" in losses) + loss = losses["training"] + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + logits_val, loss_val = sess.run([logits, loss]) + logits_shape = list(logits_val.shape) + logits_shape[1] = None + self.assertAllEqual(logits_shape, [10, None, 1, 1, 4]) + self.assertEqual(loss_val.shape, tuple()) + + def testMultipleTargetModalities(self): + # Use existing hparams and override target modality. + hparams = trainer_lib.create_hparams( + "transformer_tiny", data_dir=algorithmic.TinyAlgo.data_dir, + problem_name="tiny_algo") + # Manually turn off sharing. It is not currently supported for multitargets. + hparams.shared_embedding_and_softmax_weights = 0 # pylint: disable=line-too-long + hparams.problem_hparams.modality = { + "targets": hparams.problem_hparams.modality["targets"], + "targets_A": hparams.problem_hparams.modality["targets"], + "targets_B": hparams.problem_hparams.modality["targets"], + } + hparams.problem_hparams.vocab_size = { + "targets": hparams.problem_hparams.vocab_size["targets"], + "targets_A": hparams.problem_hparams.vocab_size["targets"], + "targets_B": hparams.problem_hparams.vocab_size["targets"], + } + hparams.problem._hparams = hparams.problem_hparams + + # Dataset + problem = hparams.problem + dataset = problem.dataset(tf_estimator.ModeKeys.TRAIN, + algorithmic.TinyAlgo.data_dir) + dataset = dataset.repeat(None).padded_batch(10, dataset.output_shapes) + features = dataset.make_one_shot_iterator().get_next() + features = data_reader.standardize_shapes(features) + features["targets_A"] = features["targets_B"] = features["targets"] + + # Model + model = registry.model("transformer")(hparams, tf_estimator.ModeKeys.TRAIN) + + def body(args, mb=model.body): + out = mb(args) + return {"targets": out, "targets_A": out, "targets_B": out} + + model.body = body + + logits, losses = model(features) + + self.assertTrue("training" in losses) + loss = losses["training"] + + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + sess.run([logits, loss]) + + def testCreateHparams(self): + # Get json_path + pkg = os.path.abspath(__file__) + pkg, _ = os.path.split(pkg) + pkg, _ = os.path.split(pkg) + json_path = os.path.join( + pkg, "test_data", "transformer_test_ckpt", "hparams.json") + + # Create hparams + hparams = trainer_lib.create_hparams("transformer_big", "hidden_size=1", + hparams_path=json_path) + self.assertEqual(2, hparams.num_hidden_layers) # from json + self.assertEqual(1, hparams.hidden_size) # from hparams_overrides_str + + # Compare with base hparams + base_hparams = trainer_lib.create_hparams("transformer_big") + self.assertEqual(len(base_hparams.values()), len(hparams.values())) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/utils/trainer_utils.py b/tensor2tensor/utils/trainer_utils.py deleted file mode 100644 index e10bff8ae..000000000 --- a/tensor2tensor/utils/trainer_utils.py +++ /dev/null @@ -1,1301 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Utilities for trainer binary.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -import operator -import os -import sys - -# Dependency imports - -import numpy as np -import six -# pylint: disable=redefined-builtin -from six.moves import input -from six.moves import xrange -from six.moves import zip -# pylint: enable=redefined-builtin - -from tensor2tensor.data_generators import problem_hparams -from tensor2tensor.models import models # pylint: disable=unused-import -from tensor2tensor.utils import data_reader -from tensor2tensor.utils import expert_utils as eu -from tensor2tensor.utils import metrics -from tensor2tensor.utils import registry - -import tensorflow as tf -from tensorflow.contrib.learn.python.learn import learn_runner -from tensorflow.python.ops import init_ops - - -# Number of samples to draw for an image input (in such cases as captioning) -IMAGE_DECODE_LENGTH = 100 - -flags = tf.flags -FLAGS = flags.FLAGS - -flags.DEFINE_bool("registry_help", False, - "If True, logs the contents of the registry and exits.") -flags.DEFINE_string("master", "", "Address of TensorFlow master.") -flags.DEFINE_string("schedule", "local_run", - "Method of tf.contrib.learn.Experiment to run.") -flags.DEFINE_string("output_dir", "", "Base output directory for run.") -flags.DEFINE_string("model", "", "Which model to use.") -flags.DEFINE_string("hparams_set", "", "Which parameters to use.") -flags.DEFINE_string("hparams_range", "", "Parameters range.") -flags.DEFINE_string( - "hparams", "", - """A comma-separated list of `name=value` hyperparameter values. This flag - is used to override hyperparameter settings either when manually selecting - hyperparameters or when using Vizier. If a hyperparameter setting is - specified by this flag then it must be a valid hyperparameter name for the - model.""") -flags.DEFINE_string("problems", "", "Dash separated list of problems to " - "solve.") -flags.DEFINE_string("data_dir", "/tmp/data", "Directory with training data.") -flags.DEFINE_string("worker_job", "/job:worker", "name of worker job") -flags.DEFINE_integer("worker_gpu", 1, "How many GPUs to use.") -flags.DEFINE_integer("worker_replicas", 1, "How many workers to use.") -flags.DEFINE_integer("worker_id", 0, "Which worker task are we.") -flags.DEFINE_integer("ps_gpu", 0, "How many GPUs to use per ps.") -flags.DEFINE_string("gpu_order", "", "Optional order for daisy-chaining gpus." - " e.g. 
\"1 3 2 4\"") -flags.DEFINE_string("ps_job", "/job:ps", "name of ps job") -flags.DEFINE_integer("ps_replicas", 0, "How many ps replicas.") -flags.DEFINE_bool("experimental_optimize_placement", False, - "Optimize ops placement with experimental session options.") -flags.DEFINE_bool("sync", False, "Sync compute on PS.") -flags.DEFINE_bool("infer_use_last_position_only", False, - "In inference, use last position only for speedup.") -flags.DEFINE_integer("train_steps", 250000, - "The number of steps to run training for.") -flags.DEFINE_integer("eval_steps", 10, "Number of steps in evaluation.") -flags.DEFINE_integer("keep_checkpoint_max", 20, - "How many recent checkpoints to keep.") -flags.DEFINE_bool("interactive", False, "Interactive local inference mode.") -flags.DEFINE_bool("endless_dec", False, "Run decoding endlessly. Temporary.") -flags.DEFINE_bool("save_images", False, "Save inference input images.") -flags.DEFINE_string("decode_from_file", None, "Path to decode file") -flags.DEFINE_string("decode_to_file", None, "Path to inference output file") -flags.DEFINE_integer("decode_shards", 1, "How many shards to decode.") -flags.DEFINE_integer("decode_problem_id", 0, "Which problem to decode.") -flags.DEFINE_integer("decode_extra_length", 50, "Added decode length.") -flags.DEFINE_integer("decode_batch_size", 32, "Batch size for decoding. " - "The decodes will be written to .decodes in" - "format result\tinput") -flags.DEFINE_integer("beam_size", 4, "The beam size for beam decoding") -flags.DEFINE_float("alpha", 0.6, "Alpha for length penalty") -flags.DEFINE_bool("return_beams", False, - "Whether to return 1 (False) or all (True) beams. The \n " - "output file will have the format " - "\t..\t") -flags.DEFINE_bool("daisy_chain_variables", True, - "copy variables around in a daisy chain") - - -def make_experiment_fn(data_dir, model_name, train_steps, eval_steps): - """Returns experiment_fn for learn_runner. 
Wraps create_experiment.""" - - def experiment_fn(output_dir): - return create_experiment( - output_dir=output_dir, - data_dir=data_dir, - model_name=model_name, - train_steps=train_steps, - eval_steps=eval_steps) - - return experiment_fn - - -def create_experiment(output_dir, data_dir, model_name, train_steps, - eval_steps): - hparams = create_hparams(FLAGS.hparams_set, data_dir) - estimator, input_fns = create_experiment_components( - hparams=hparams, - output_dir=output_dir, - data_dir=data_dir, - model_name=model_name) - return tf.contrib.learn.Experiment( - estimator=estimator, - train_input_fn=input_fns["train"], - eval_input_fn=input_fns["eval"], - eval_metrics=metrics.create_evaluation_metrics(FLAGS.problems.split("-")), - train_steps=train_steps, - eval_steps=eval_steps, - train_monitors=[]) - - -def create_experiment_components(hparams, output_dir, data_dir, model_name): - """Constructs and returns Estimator and train/eval input functions.""" - hparams.problems = [ - problem_hparams.problem_hparams(problem, hparams) - for problem in FLAGS.problems.split("-") - ] - - num_datashards = data_parallelism().n - - tf.logging.info("Creating experiment, storing model files in %s", output_dir) - - train_problems_data = get_datasets_for_mode(data_dir, - tf.contrib.learn.ModeKeys.TRAIN) - train_input_fn = get_input_fn( - mode=tf.contrib.learn.ModeKeys.TRAIN, - hparams=hparams, - data_file_patterns=train_problems_data, - num_datashards=num_datashards) - - eval_problems_data = get_datasets_for_mode(data_dir, - tf.contrib.learn.ModeKeys.EVAL) - eval_input_fn = get_input_fn( - mode=tf.contrib.learn.ModeKeys.EVAL, - hparams=hparams, - data_file_patterns=eval_problems_data, - num_datashards=num_datashards) - estimator = tf.contrib.learn.Estimator( - model_fn=model_builder(model_name, hparams=hparams), - model_dir=output_dir, - config=tf.contrib.learn.RunConfig( - master=FLAGS.master, - model_dir=output_dir, - session_config=session_config(), - keep_checkpoint_max=20)) - return estimator, {"train": train_input_fn, "eval": eval_input_fn} - - -def log_registry(): - tf.logging.info(registry.help_string()) - if FLAGS.registry_help: - sys.exit(0) - - -def create_hparams(params_id, data_dir): - """Returns hyperparameters, including any flag value overrides. - - If the hparams FLAG is set, then it will use any values specified in - hparams to override any individually-set hyperparameter. This logic - allows tuners to override hyperparameter settings to find optimal values. - - Args: - params_id: which set of parameters to choose (must be in _PARAMS above). - data_dir: the directory containing the training data. - - Returns: - The hyperparameters as a tf.contrib.training.HParams object. - """ - hparams = registry.hparams(params_id)() - hparams.add_hparam("data_dir", data_dir) - # Command line flags override any of the preceding hyperparameter values. - if FLAGS.hparams: - hparams = hparams.parse(FLAGS.hparams) - return hparams - - -def run(data_dir, model, output_dir, train_steps, eval_steps, schedule): - """Runs an Estimator locally or distributed. - - This function chooses one of two paths to execute: - - 1. Running locally if schedule=="local_run". - 3. Distributed training/evaluation otherwise. - - Args: - data_dir: The directory the data can be found in. - model: The name of the model to use. - output_dir: The directory to store outputs in. - train_steps: The number of steps to run training for. - eval_steps: The number of steps to run evaluation for. - schedule: (str) The schedule to run. 
The value here must - be the name of one of Experiment's methods. - """ - if schedule == "local_run": - # Run the local demo. - run_locally( - data_dir=data_dir, - model=model, - output_dir=output_dir, - train_steps=train_steps, - eval_steps=eval_steps) - else: - # Perform distributed training/evaluation. - learn_runner.run( - experiment_fn=make_experiment_fn( - data_dir=data_dir, - model_name=model, - train_steps=train_steps, - eval_steps=eval_steps), - schedule=schedule, - output_dir=FLAGS.output_dir) - - -def validate_flags(): - if not FLAGS.model: - raise ValueError("Must specify a model with --model.") - if not FLAGS.problems: - raise ValueError("Must specify a set of problems with --problems.") - if not (FLAGS.hparams_set or FLAGS.hparams_range): - raise ValueError("Must specify either --hparams_set or --hparams_range.") - if not FLAGS.schedule: - raise ValueError("Must specify --schedule.") - if not FLAGS.output_dir: - FLAGS.output_dir = "/tmp/tensor2tensor" - tf.logging.warning("It is strongly recommended to specify --output_dir. " - "Using default output_dir=%s.", FLAGS.output_dir) - - -def session_config(): - """The TensorFlow Session config to use.""" - graph_options = tf.GraphOptions(optimizer_options=tf.OptimizerOptions( - opt_level=tf.OptimizerOptions.L1, do_function_inlining=False)) - if FLAGS.experimental_optimize_placement: - rewrite_options = tf.RewriterConfig(optimize_tensor_layout=True) - rewrite_options.optimizers.append("pruning") - rewrite_options.optimizers.append("constfold") - rewrite_options.optimizers.append("layout") - graph_options = tf.GraphOptions( - rewrite_options=rewrite_options, infer_shapes=True) - config = tf.ConfigProto( - allow_soft_placement=True, graph_options=graph_options) - - return config - - -def model_builder(model, hparams): - """Returns a function to build the model. - - Args: - model: The name of the model to use. - hparams: The hyperparameters. - - Returns: - A function to build the model's graph. This function is called by - the Estimator object to construct the graph. 
- """ - - def initializer(): - if hparams.initializer == "orthogonal": - return tf.orthogonal_initializer(gain=hparams.initializer_gain) - elif hparams.initializer == "uniform": - max_val = 0.1 * hparams.initializer_gain - return tf.random_uniform_initializer(-max_val, max_val) - elif hparams.initializer == "normal_unit_scaling": - return init_ops.variance_scaling_initializer( - hparams.initializer_gain, mode="fan_avg", distribution="normal") - elif hparams.initializer == "uniform_unit_scaling": - return init_ops.variance_scaling_initializer( - hparams.initializer_gain, mode="fan_avg", distribution="uniform") - else: - raise ValueError("Unrecognized initializer: %s" % hparams.initializer) - - def learning_rate_decay(): - """Inverse-decay learning rate until warmup_steps, then decay.""" - warmup_steps = tf.to_float( - hparams.learning_rate_warmup_steps * FLAGS.worker_replicas) - step = tf.to_float(tf.contrib.framework.get_global_step()) - if hparams.learning_rate_decay_scheme == "noam": - return 5000.0 * hparams.hidden_size**-0.5 * tf.minimum( - (step + 1) * warmup_steps**-1.5, (step + 1)**-0.5) - elif hparams.learning_rate_decay_scheme == "exp100k": - return 0.94**(step // 100000) - - inv_base = tf.exp(tf.log(0.01) / warmup_steps) - inv_decay = inv_base**(warmup_steps - step) - if hparams.learning_rate_decay_scheme == "sqrt": - decay = _sqrt_decay(step - warmup_steps) - elif hparams.learning_rate_decay_scheme == "exp10k": - decay = _exp_decay_after(step - warmup_steps, 0.9995, - FLAGS.train_steps - warmup_steps - 10000) - elif hparams.learning_rate_decay_scheme == "exp50k": - decay = _exp_decay_after(step - warmup_steps, 0.99995, - FLAGS.train_steps - warmup_steps - 50000) - elif hparams.learning_rate_decay_scheme == "exp500k": - decay = _exp_decay_after(step - warmup_steps, 0.9999955, - FLAGS.train_steps - warmup_steps - 500000) - elif hparams.learning_rate_decay_scheme == "none": - decay = tf.constant(1.0) - else: - raise ValueError("Unrecognized learning rate decay scheme: %s" % - hparams.learning_rate_decay_scheme) - return tf.cond( - step < warmup_steps, - lambda: inv_decay, - lambda: decay, - name="learning_rate_decay_warump_cond") - - def model_fn(features, targets, mode): - """Creates the prediction, loss, and train ops. - - Args: - features: A dictionary of tensors keyed by the feature name. - targets: A tensor representing the labels (targets). - mode: The execution mode, as defined in tf.contrib.learn.ModeKeys. - - Returns: - A tuple consisting of the prediction, loss, and train_op. - """ - if mode == tf.contrib.learn.ModeKeys.INFER and FLAGS.interactive: - features = _interactive_input_tensor_to_features_dict(features, hparams) - if mode == tf.contrib.learn.ModeKeys.INFER and FLAGS.decode_from_file: - features = _decode_input_tensor_to_features_dict(features, hparams) - # A dictionary containing: - # - problem_choice: A Tensor containing an integer indicating which problem - # was selected for this run. - # - predictions: A Tensor containing the model's output predictions. - run_info = dict() - run_info["problem_choice"] = features["problem_choice"] - - if targets is not None: - features["targets"] = targets - - dp = data_parallelism() - - # Add input statistics for incoming features. 
- with tf.name_scope("input_stats"): - for (k, v) in six.iteritems(features): - if isinstance(v, tf.Tensor) and v.get_shape().ndims > 1: - tf.summary.scalar("%s_batch" % k, tf.shape(v)[0] // dp.n) - tf.summary.scalar("%s_length" % k, tf.shape(v)[1]) - nonpadding = tf.to_float(tf.not_equal(v, 0)) - tf.summary.scalar("%s_nonpadding_tokens" % k, - tf.reduce_sum(nonpadding)) - tf.summary.scalar("%s_nonpadding_fraction" % k, - tf.reduce_mean(nonpadding)) - - tf.get_variable_scope().set_initializer(initializer()) - train = mode == tf.contrib.learn.ModeKeys.TRAIN - - # Get multi-problem logits and loss based on features["problem_choice"]. - def nth_model(n): - """Build the model for the n-th problem, plus some added variables.""" - model_class = registry.model(model)( - hparams, hparams.problems[n], n, dp, _ps_devices(all_workers=True)) - if mode == tf.contrib.learn.ModeKeys.INFER: - return model_class.infer( - features, - beam_size=FLAGS.beam_size, - top_beams=FLAGS.beam_size if FLAGS.return_beams else 1, - last_position_only=FLAGS.infer_use_last_position_only, - alpha=FLAGS.alpha, - decode_length=FLAGS.decode_extra_length) - # In distributed mode, we build graph for problem=0 and problem=worker_id. - skipping_is_on = hparams.problem_choice == "distributed" and train - problem_worker_id = FLAGS.worker_id % len(hparams.problems) - skip_this_one = n != 0 and n % FLAGS.worker_replicas != problem_worker_id - # On worker 0 also build graph for problems <= 1. - # TODO(lukaszkaiser): why is this hack needed for variables init? Repair. - skip_this_one = skip_this_one and (FLAGS.worker_id != 0 or n > 1) - sharded_logits, training_loss, extra_loss = model_class.model_fn( - features, train, skip=(skipping_is_on and skip_this_one)) - with tf.variable_scope("losses_avg", reuse=True): - loss_moving_avg = tf.get_variable("problem_%d/training_loss" % n) - o1 = loss_moving_avg.assign(loss_moving_avg * 0.9 + training_loss * 0.1) - loss_moving_avg = tf.get_variable("problem_%d/extra_loss" % n) - o2 = loss_moving_avg.assign(loss_moving_avg * 0.9 + extra_loss * 0.1) - loss_moving_avg = tf.get_variable("problem_%d/total_loss" % n) - total_loss = training_loss + extra_loss - o3 = loss_moving_avg.assign(loss_moving_avg * 0.9 + total_loss * 0.1) - with tf.variable_scope("train_stats"): # Count steps for this problem. - problem_steps = tf.get_variable( - "problem_%d_steps" % n, initializer=0, trainable=False) - o4 = problem_steps.assign_add(1) - with tf.control_dependencies([o1, o2, o3, o4]): # Make sure the ops run. - total_loss = tf.identity(total_loss) - return [total_loss] + sharded_logits # Need to flatten for cond later. - - result_list = _cond_on_index(nth_model, features["problem_choice"], 0, - len(hparams.problems) - 1) - - if mode == tf.contrib.learn.ModeKeys.INFER: - # Beam search in sequence model returns both decodes withe key "outputs" - # and scores with they key "scores". If return list is a dict, we expect - # that it will have keys "outputs", a tensor of int32 and scores, a - # tensor of floats. 
This is useful if we want to return scores from - # estimator.predict - if not isinstance(result_list, dict): - ret = {"outputs": result_list}, None, None - else: - ret = { - "outputs": result_list["outputs"], - "scores": result_list["scores"] - }, None, None - if "inputs" in features: - ret[0]["inputs"] = features["inputs"] - if "infer_targets" in features: - ret[0]["targets"] = features["infer_targets"] - return ret - - sharded_logits, total_loss = result_list[1:], result_list[0] - if mode == tf.contrib.learn.ModeKeys.EVAL: - logits = tf.concat(sharded_logits, 0) - # For evaluation, return the logits layer as our predictions. - run_info["predictions"] = logits - train_op = None - return run_info, total_loss, None - - assert mode == tf.contrib.learn.ModeKeys.TRAIN - - # Some training statistics. - with tf.name_scope("training_stats"): - learning_rate = hparams.learning_rate * learning_rate_decay() - learning_rate /= math.sqrt(float(FLAGS.worker_replicas)) - tf.summary.scalar("learning_rate", learning_rate) - global_step = tf.to_float(tf.contrib.framework.get_global_step()) - for n in xrange(len(hparams.problems)): - with tf.variable_scope("losses_avg", reuse=True): - total_loss_var = tf.get_variable("problem_%d/total_loss" % n) - training_loss_var = tf.get_variable("problem_%d/training_loss" % n) - extra_loss_var = tf.get_variable("problem_%d/extra_loss" % n) - tf.summary.scalar("loss_avg_%d/total_loss" % n, total_loss_var) - tf.summary.scalar("loss_avg_%d/training_loss" % n, training_loss_var) - tf.summary.scalar("loss_avg_%d/extra_loss" % n, extra_loss_var) - with tf.variable_scope("train_stats", reuse=True): - nth_steps = tf.get_variable("problem_%d_steps" % n, dtype=tf.int32) - tf.summary.scalar("problem_%d_frequency" % n, - tf.to_float(nth_steps) / (global_step + 1.0)) - - # Log trainable weights and add decay. - total_size, total_embedding, weight_decay_loss = 0, 0, 0.0 - all_weights = {v.name: v for v in tf.trainable_variables()} - for v_name in sorted(list(all_weights)): - v = all_weights[v_name] - v_size = int(np.prod(np.array(v.shape.as_list()))) - tf.logging.info("Weight %s\tshape %s\tsize %d", - v.name[:-2].ljust(80), str(v.shape).ljust(20), v_size) - if "embedding" in v_name: - total_embedding += v_size - total_size += v_size - if hparams.weight_decay > 0.0 and len(v.shape.as_list()) > 1: - # Add weight regularization if set and the weight is not a bias (dim>1). - with tf.device(v._ref().device): # pylint: disable=protected-access - v_loss = tf.nn.l2_loss(v) / v_size - weight_decay_loss += v_loss - is_body = len(v_name) > 5 and v_name[:5] == "body/" - if hparams.weight_noise > 0.0 and is_body: - # Add weight noise if set in hparams. - with tf.device(v._ref().device): # pylint: disable=protected-access - scale = learning_rate * 0.001 - noise = tf.truncated_normal(v.shape) * hparams.weight_noise * scale - noise_op = v.assign_add(noise) - with tf.control_dependencies([noise_op]): - total_loss = tf.identity(total_loss) - tf.logging.info("Total trainable variables size: %d", total_size) - tf.logging.info("Total embedding variables size: %d", total_embedding) - tf.logging.info("Total non-embedding variables size: %d", - total_size - total_embedding) - total_loss += weight_decay_loss * hparams.weight_decay - - # Define the train_op for the TRAIN mode. 
- opt = _ConditionalOptimizer(hparams.optimizer, learning_rate, hparams) - tf.logging.info("Computing gradients for global model_fn.") - train_op = tf.contrib.layers.optimize_loss( - name="training", - loss=total_loss, - global_step=tf.contrib.framework.get_global_step(), - learning_rate=learning_rate, - clip_gradients=hparams.clip_grad_norm or None, - optimizer=opt, - colocate_gradients_with_ops=True) - - tf.logging.info("Global model_fn finished.") - return run_info, total_loss, train_op - - return model_fn - - -def run_locally(data_dir, model, output_dir, train_steps, eval_steps): - """Runs an Estimator locally. - - This function demonstrates model training, evaluation, inference locally. - - Args: - data_dir: The directory the data can be found in. - model: The name of the model to use. - output_dir: The directory to store outputs in. - train_steps: The number of steps to run training for. - eval_steps: The number of steps to run evaluation for. - """ - train_problems_data = get_datasets_for_mode(data_dir, - tf.contrib.learn.ModeKeys.TRAIN) - - # For a local run, we can train, evaluate, predict. - hparams = create_hparams(FLAGS.hparams_set, FLAGS.data_dir) - hparams.problems = [ - problem_hparams.problem_hparams(problem, hparams) - for problem in FLAGS.problems.split("-") - ] - - estimator = tf.contrib.learn.Estimator( - model_fn=model_builder(model, hparams=hparams), - model_dir=output_dir, - config=tf.contrib.learn.RunConfig( - session_config=session_config(), - keep_checkpoint_max=FLAGS.keep_checkpoint_max)) - - num_datashards = data_parallelism().n - - if train_steps > 0: - # Train. - tf.logging.info("Performing local training.") - estimator.fit( - input_fn=get_input_fn( - mode=tf.contrib.learn.ModeKeys.TRAIN, - hparams=hparams, - data_file_patterns=train_problems_data, - num_datashards=num_datashards), - steps=train_steps, - monitors=[]) - - if eval_steps > 0: - # Evaluate. - tf.logging.info("Performing local evaluation.") - eval_problems_data = get_datasets_for_mode(data_dir, - tf.contrib.learn.ModeKeys.EVAL) - eval_input_fn = get_input_fn( - mode=tf.contrib.learn.ModeKeys.EVAL, - hparams=hparams, - data_file_patterns=eval_problems_data, - num_datashards=num_datashards) - unused_metrics = estimator.evaluate( - input_fn=eval_input_fn, - steps=eval_steps, - metrics=metrics.create_evaluation_metrics(FLAGS.problems.split("-"))) - - # Predict. 
- if FLAGS.interactive: - infer_input_fn = _interactive_input_fn(hparams) - for problem_idx, example in infer_input_fn: - targets_vocab = hparams.problems[problem_idx].vocabulary["targets"] - result_iter = estimator.predict(input_fn=lambda e=example: e) - for result in result_iter: - if FLAGS.return_beams: - beams = np.split(result["outputs"], FLAGS.beam_size, axis=0) - scores = None - if "scores" in result: - scores = np.split(result["scores"], FLAGS.beam_size, axis=0) - for k, beam in enumerate(beams): - tf.logging.info("BEAM %d:" % k) - if scores is not None: - tf.logging.info("%s\tScore:%f" % - (targets_vocab.decode(beam.flatten()), scores[k])) - else: - tf.logging.info(targets_vocab.decode(beam.flatten())) - else: - tf.logging.info(targets_vocab.decode(result["outputs"].flatten())) - # Predict from file - elif FLAGS.decode_from_file is not None: - problem_id = FLAGS.decode_problem_id - inputs_vocab = hparams.problems[problem_id].vocabulary["inputs"] - targets_vocab = hparams.problems[problem_id].vocabulary["targets"] - tf.logging.info("Performing Decoding from a file.") - sorted_inputs, sorted_keys = _get_sorted_inputs() - num_decode_batches = (len(sorted_inputs) - 1) // FLAGS.decode_batch_size + 1 - input_fn = _decode_batch_input_fn(problem_id, num_decode_batches, - sorted_inputs, inputs_vocab) - - # strips everything after the first id, which is assumed to be 1 - def _save_until_eos(hyp): # pylint: disable=missing-docstring - ret = [] - index = 0 - # until you reach id - while index < len(hyp) and hyp[index] != 1: - ret.append(hyp[index]) - index += 1 - return np.array(ret) - - decodes = [] - for _ in range(num_decode_batches): - result_iter = estimator.predict(input_fn=input_fn.next, as_iterable=True) - for result in result_iter: - - def log_fn(inputs, outputs): - decoded_inputs = inputs_vocab.decode( - _save_until_eos(inputs.flatten())) - tf.logging.info("Inference results INPUT: %s" % decoded_inputs) - - decoded_outputs = targets_vocab.decode( - _save_until_eos(outputs.flatten())) - tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs) - return decoded_outputs - - if FLAGS.return_beams: - beam_decodes = [] - output_beams = np.split(result["outputs"], FLAGS.beam_size, axis=0) - for k, beam in enumerate(output_beams): - tf.logging.info("BEAM %d:" % k) - beam_decodes.append(log_fn(result["inputs"], beam)) - decodes.append(str.join("\t", beam_decodes)) - - else: - decodes.append(log_fn(result["inputs"], result["outputs"])) - - # Reversing the decoded inputs and outputs because they were reversed in - # _decode_batch_input_fn - sorted_inputs.reverse() - decodes.reverse() - # Dumping inputs and outputs to file FLAGS.decode_from_file.decodes in - # format result\tinput in the same order as original inputs - if FLAGS.decode_shards > 1: - base_filename = FLAGS.decode_from_file + ("%.2d" % FLAGS.worker_id) - else: - base_filename = FLAGS.decode_from_file - decode_filename = ( - base_filename + "." + FLAGS.model + "." 
+ FLAGS.hparams_set + ".beam" + - str(FLAGS.beam_size) + ".alpha" + str(FLAGS.alpha) + ".decodes") - tf.logging.info("Writing decodes into %s" % decode_filename) - outfile = tf.gfile.Open(decode_filename, "w") - for index in range(len(sorted_inputs)): - outfile.write("%s\t%s\n" % (decodes[sorted_keys[index]], - sorted_inputs[sorted_keys[index]])) - else: - for i, problem in enumerate(FLAGS.problems.split("-")): - inputs_vocab = hparams.problems[i].vocabulary.get("inputs", None) - targets_vocab = hparams.problems[i].vocabulary["targets"] - tf.logging.info("Performing local inference.") - infer_problems_data = get_datasets_for_mode( - data_dir, tf.contrib.learn.ModeKeys.INFER) - infer_input_fn = get_input_fn( - mode=tf.contrib.learn.ModeKeys.INFER, - hparams=hparams, - data_file_patterns=infer_problems_data, - num_datashards=num_datashards, - fixed_problem=i) - result_iter = estimator.predict( - input_fn=infer_input_fn, as_iterable=FLAGS.endless_dec) - - def log_fn(inputs, targets, outputs, problem, j): - """Log inference results.""" - if "image" in problem and FLAGS.save_images: - save_path = os.path.join(FLAGS.output_dir, - "%s_prediction_%d.jpg" % (problem, j)) - show_and_save_image(inputs / 255., save_path) - elif inputs_vocab: - decoded_inputs = inputs_vocab.decode(inputs.flatten()) - tf.logging.info("Inference results INPUT: %s" % decoded_inputs) - - decoded_outputs = targets_vocab.decode(outputs.flatten()) - decoded_targets = targets_vocab.decode(targets.flatten()) - tf.logging.info("Inference results OUTPUT: %s" % decoded_outputs) - if FLAGS.decode_to_file: - output_filepath = FLAGS.decode_to_file + ".outputs." + problem - output_file = tf.gfile.Open(output_filepath, "a") - output_file.write(decoded_outputs + "\n") - target_filepath = FLAGS.decode_to_file + ".targets." + problem - target_file = tf.gfile.Open(target_filepath, "a") - target_file.write(decoded_targets + "\n") - - # The function predict() returns an iterable over the network's - # predictions from the test input. if FLAGS.endless_dec is set, it will - # decode over the dev set endlessly, looping over it. We use the returned - # iterator to log inputs and decodes. 
- if FLAGS.endless_dec: - tf.logging.info("Warning: Decoding endlessly") - for j, result in enumerate(result_iter): - inputs, targets, outputs = (result["inputs"], result["targets"], - result["outputs"]) - if FLAGS.return_beams: - output_beams = np.split(outputs, FLAGS.beam_size, axis=0) - for k, beam in enumerate(output_beams): - tf.logging.info("BEAM %d:" % k) - log_fn(inputs, targets, beam, problem, j) - else: - log_fn(inputs, targets, outputs, problem, j) - else: - for j, (inputs, targets, outputs) in enumerate( - zip(result_iter["inputs"], result_iter["targets"], result_iter[ - "outputs"])): - if FLAGS.return_beams: - output_beams = np.split(outputs, FLAGS.beam_size, axis=0) - for k, beam in enumerate(output_beams): - tf.logging.info("BEAM %d:" % k) - log_fn(inputs, targets, beam, problem, j) - else: - log_fn(inputs, targets, outputs, problem, j) - - -def _decode_batch_input_fn(problem_id, num_decode_batches, sorted_inputs, - vocabulary): - tf.logging.info(" batch %d" % num_decode_batches) - # First reverse all the input sentences so that if you're going to get OOMs, - # you'll see it in the first batch - sorted_inputs.reverse() - for b in range(num_decode_batches): - tf.logging.info("Deocding batch %d" % b) - batch_length = 0 - batch_inputs = [] - for inputs in sorted_inputs[b * FLAGS.decode_batch_size:( - b + 1) * FLAGS.decode_batch_size]: - input_ids = vocabulary.encode(inputs) - input_ids.append(1) # Assuming EOS=1. - batch_inputs.append(input_ids) - if len(input_ids) > batch_length: - batch_length = len(input_ids) - final_batch_inputs = [] - for input_ids in batch_inputs: - assert len(input_ids) <= batch_length - x = input_ids + [0] * (batch_length - len(input_ids)) - final_batch_inputs.append(x) - yield { - "inputs": np.array(final_batch_inputs), - "problem_choice": np.array(problem_id) - } - - -def get_datasets_for_mode(data_dir, mode): - return data_reader.get_datasets(FLAGS.problems, data_dir, mode) - - -def _cond_on_index(fn, index_tensor, cur_idx, max_idx): - """Call fn(index_tensor) using tf.cond in [cur_id, max_idx].""" - if cur_idx == max_idx: - return fn(cur_idx) - return tf.cond( - tf.equal(index_tensor, cur_idx), lambda: fn(cur_idx), - lambda: _cond_on_index(fn, index_tensor, cur_idx + 1, max_idx)) - - -def _interactive_input_fn(hparams): - """Generator that reads from the terminal and yields "interactive inputs". - - Due to temporary limitations in tf.learn, if we don't want to reload the - whole graph, then we are stuck encoding all of the input as one fixed-size - numpy array. - - We yield int64 arrays with shape [const_array_size]. The format is: - [num_samples, decode_length, len(input ids), , ] - - Args: - hparams: model hparams - Yields: - numpy arrays - - Raises: - Exception: when `input_type` is invalid. - """ - num_samples = 3 - decode_length = 100 - input_type = "text" - problem_id = 0 - p_hparams = hparams.problems[problem_id] - has_input = "inputs" in p_hparams.input_modality - vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"] - # This should be longer than the longest input. 
- const_array_size = 10000 - while True: - prompt = ("INTERACTIVE MODE num_samples=%d decode_length=%d \n" - " it= ('text' or 'image')\n" - " pr= (set the problem number)\n" - " in= (set the input problem number)\n" - " ou= (set the output problem number)\n" - " ns= (changes number of samples)\n" - " dl= (changes decode legnth)\n" - " <%s> (decode)\n" - " q (quit)\n" - ">" % (num_samples, decode_length, "source_string" - if has_input else "target_prefix")) - input_string = input(prompt) - if input_string == "q": - return - elif input_string[:3] == "pr=": - problem_id = int(input_string[3:]) - p_hparams = hparams.problems[problem_id] - has_input = "inputs" in p_hparams.input_modality - vocabulary = p_hparams.vocabulary["inputs" if has_input else "targets"] - elif input_string[:3] == "in=": - problem = int(input_string[3:]) - p_hparams.input_modality = hparams.problems[problem].input_modality - p_hparams.input_space_id = hparams.problems[problem].input_space_id - elif input_string[:3] == "ou=": - problem = int(input_string[3:]) - p_hparams.target_modality = hparams.problems[problem].target_modality - p_hparams.target_space_id = hparams.problems[problem].target_space_id - elif input_string[:3] == "ns=": - num_samples = int(input_string[3:]) - elif input_string[:3] == "dl=": - decode_length = int(input_string[3:]) - elif input_string[:3] == "it=": - input_type = input_string[3:] - else: - if input_type == "text": - input_ids = vocabulary.encode(input_string) - if has_input: - input_ids.append(1) # assume 1 means end-of-source - x = [num_samples, decode_length, len(input_ids)] + input_ids - assert len(x) < const_array_size - x += [0] * (const_array_size - len(x)) - yield problem_id, { - "inputs": np.array(x), - "problem_choice": np.array(problem_id) - } - elif input_type == "image": - input_path = input_string - img = read_image(input_path) - yield problem_id, { - "inputs": img, - "problem_choice": np.array(problem_id) - } - else: - raise Exception("Unsupported input type.") - - -def read_image(path): - try: - import matplotlib.image as im # pylint: disable=g-import-not-at-top - except ImportError as e: - tf.logging.warning( - "Reading an image requires matplotlib to be installed: %s", e) - raise NotImplementedError("Image reading not implemented.") - return im.imread(path) - - -def show_and_save_image(img, save_path): - try: - import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top - except ImportError as e: - tf.logging.warning("Showing and saving an image requires matplotlib to be " - "installed: %s", e) - raise NotImplementedError("Image display and save not implemented.") - plt.imshow(img) - plt.savefig(save_path) - - -def _get_sorted_inputs(): - """Returning inputs sorted according to length. - - Returns: - a sorted list of inputs - - """ - tf.logging.info("Getting sorted inputs") - # read file and sort inputs according them according to input length. 
- if FLAGS.decode_shards > 1: - decode_filename = FLAGS.decode_from_file + ("%.2d" % FLAGS.worker_id) - else: - decode_filename = FLAGS.decode_from_file - inputs = [line.strip() for line in tf.gfile.Open(decode_filename)] - input_lens = [(i, len(line.strip().split())) for i, line in enumerate(inputs)] - sorted_input_lens = sorted(input_lens, key=operator.itemgetter(1)) - # We'll need the keys to rearrange the inputs back into their original order - sorted_keys = {} - sorted_inputs = [] - for i, (index, _) in enumerate(sorted_input_lens): - sorted_inputs.append(inputs[index]) - sorted_keys[index] = i - return sorted_inputs, sorted_keys - - -def _interactive_input_tensor_to_features_dict(feature_map, hparams): - """Convert the interactive input format (see above) to a dictionary. - - Args: - feature_map: a dictionary with keys `problem_choice` and `input` containing - Tensors. - hparams: model hyperparameters - - Returns: - a features dictionary, as expected by the decoder. - """ - inputs = tf.constant(feature_map["inputs"]) - input_is_image = False if len(inputs.shape) < 3 else True - - def input_fn(problem_choice, x=inputs): # pylint: disable=missing-docstring - p_hparams = hparams.problems[problem_choice] - if not input_is_image: - # Remove the batch dimension. - num_samples = x[0] - length = x[2] - x = tf.slice(x, [3], tf.to_int32([length])) - x = tf.reshape(x, [1, -1, 1, 1]) - # Transform into a batch of size num_samples to get that many random - # decodes. - x = tf.tile(x, tf.to_int32([num_samples, 1, 1, 1])) - else: - x = tf.image.resize_images(x, [299, 299]) - x = tf.reshape(x, [1, 299, 299, -1]) - x = tf.to_int32(x) - return (tf.constant(p_hparams.input_space_id), - tf.constant(p_hparams.target_space_id), x) - - input_space_id, target_space_id, x = _cond_on_index( - input_fn, feature_map["problem_choice"], 0, len(hparams.problems) - 1) - - features = {} - features["problem_choice"] = tf.constant(feature_map["problem_choice"]) - features["input_space_id"] = input_space_id - features["target_space_id"] = target_space_id - features["decode_length"] = (IMAGE_DECODE_LENGTH - if input_is_image else inputs[1]) - features["inputs"] = x - return features - - -def _decode_input_tensor_to_features_dict(feature_map, hparams): - """Convert the interactive input format (see above) to a dictionary. - - Args: - feature_map: a dictionary with keys `problem_choice` and `input` containing - Tensors. - hparams: model hyperparameters - - Returns: - a features dictionary, as expected by the decoder. - """ - inputs = tf.constant(feature_map["inputs"]) - input_is_image = False - - def input_fn(problem_choice, x=inputs): # pylint: disable=missing-docstring - p_hparams = hparams.problems[problem_choice] - # Add a third empty dimension dimension - x = tf.expand_dims(x, axis=[2]) - x = tf.to_int32(x) - return (tf.constant(p_hparams.input_space_id), - tf.constant(p_hparams.target_space_id), x) - - input_space_id, target_space_id, x = _cond_on_index( - input_fn, feature_map["problem_choice"], 0, len(hparams.problems) - 1) - - features = {} - features["problem_choice"] = feature_map["problem_choice"] - features["input_space_id"] = input_space_id - features["target_space_id"] = target_space_id - features["decode_length"] = (IMAGE_DECODE_LENGTH - if input_is_image else tf.shape(x)[1] + 50) - features["inputs"] = x - return features - - -def get_input_fn(mode, - hparams, - data_file_patterns=None, - num_datashards=None, - fixed_problem=None): - """Provides input to the graph, either from disk or via a placeholder. 
- - This function produces an input function that will feed data into - the network. There are two modes of operation: - - 1. If data_file_pattern and all subsequent arguments are None, then - it creates a placeholder for a serialized tf.Example proto. - 2. If data_file_pattern is defined, it will read the data from the - files at the given location. Use this mode for training, - evaluation, and testing prediction. - - Args: - mode: The execution mode, as defined in tf.contrib.learn.ModeKeys. - hparams: HParams object. - data_file_patterns: The list of file patterns to use to read in data. Set to - `None` if you want to create a placeholder for the input data. The - `problems` flag is a list of problem names joined by the `-` character. - The flag's string is then split along the `-` and each problem gets its - own example queue. - num_datashards: An integer. - fixed_problem: An integer indicating the problem to fetch data for, or None - if the input is to be randomly selected. - - Returns: - A function that returns a dictionary of features and the target labels. - """ - - def input_fn(): - """Supplies input to our model. - - This function supplies input to our model, where this input is a - function of the mode. For example, we supply different data if - we're performing training versus evaluation. - - Returns: - A tuple consisting of 1) a dictionary of tensors whose keys are - the feature names, and 2) a tensor of target labels if the mode - is not INFER (and None, otherwise). - - Raises: - ValueError: if one of the parameters has an unsupported value. - """ - problem_count, batches = len(data_file_patterns), [] - with tf.name_scope("input_queues"): - for n in xrange(problem_count): - if fixed_problem is not None and n != fixed_problem: - continue - with tf.name_scope("problem_%d" % n): - with tf.device("/cpu:0"): # Input queues are on CPU. - capacity = hparams.problems[n].max_expected_batch_size_per_shard - capacity *= num_datashards - examples = data_reader.input_pipeline(data_file_patterns[n], - capacity, mode) - drop_long_sequences = mode == tf.contrib.learn.ModeKeys.TRAIN - batch_size_multiplier = hparams.problems[n].batch_size_multiplier - feature_map = data_reader.batch_examples( - examples, - data_reader.hparams_to_batching_scheme( - hparams, - shard_multiplier=num_datashards, - drop_long_sequences=drop_long_sequences, - length_multiplier=batch_size_multiplier)) - - # Reverse inputs and targets features if the problem was reversed. - if hparams.problems[n].was_reversed: - inputs = feature_map["inputs"] - targets = feature_map["targets"] - feature_map["inputs"] = targets - feature_map["targets"] = inputs - - # Use the inputs as the targets if the problem is a copy problem. - if hparams.problems[n].was_copy: - feature_map["targets"] = feature_map["inputs"] - - # Ensure inputs and targets are proper rank. - while len(feature_map["inputs"].get_shape()) != 4: - feature_map["inputs"] = tf.expand_dims(feature_map["inputs"], axis=-1) - while len(feature_map["targets"].get_shape()) != 4: - feature_map["targets"] = tf.expand_dims( - feature_map["targets"], axis=-1) - - batches.append( - (feature_map["inputs"], feature_map["targets"], tf.constant(n), - tf.constant(hparams.problems[n].input_space_id), - tf.constant(hparams.problems[n].target_space_id))) - - # We choose which problem to process. - loss_moving_avgs = [] # Need loss moving averages for that. 
- for n in xrange(problem_count): - with tf.variable_scope("losses_avg"): - loss_moving_avgs.append( - tf.get_variable( - "problem_%d/total_loss" % n, initializer=100.0, - trainable=False)) - tf.get_variable( - "problem_%d/training_loss" % n, initializer=100.0, trainable=False) - tf.get_variable( - "problem_%d/extra_loss" % n, initializer=100.0, trainable=False) - if fixed_problem is None: - if (hparams.problem_choice == "uniform" or - mode != tf.contrib.learn.ModeKeys.TRAIN): - problem_choice = tf.random_uniform( - [], maxval=problem_count, dtype=tf.int32) - elif hparams.problem_choice == "adaptive": - loss_moving_avgs = tf.stack(loss_moving_avgs) - problem_choice = tf.multinomial( - tf.reshape(loss_moving_avgs, [1, -1]), 1) - problem_choice = tf.to_int32(tf.squeeze(problem_choice)) - elif hparams.problem_choice == "distributed": - assert FLAGS.worker_replicas >= problem_count - assert FLAGS.worker_replicas % problem_count == 0 - problem_choice = tf.to_int32(FLAGS.worker_id % problem_count) - else: - raise ValueError("Value of hparams.problem_choice is %s and must be " - "one of [uniform, adaptive, distributed]", - hparams.problem_choice) - - # Inputs and targets conditional on problem_choice. - rand_inputs, rand_target, choice, inp_id, tgt_id = _cond_on_index( - lambda n: batches[n], problem_choice, 0, problem_count - 1) - else: - problem_choice = tf.constant(fixed_problem) - # Take the only constructed batch, which is the fixed_problem. - rand_inputs, rand_target, choice, inp_id, tgt_id = batches[0] - - # Set shapes so the ranks are clear. - rand_inputs.set_shape([None, None, None, None]) - rand_target.set_shape([None, None, None, None]) - choice.set_shape([]) - inp_id.set_shape([]) - tgt_id.set_shape([]) - # Forced shape obfuscation is necessary for inference. - if mode == tf.contrib.learn.ModeKeys.INFER: - rand_inputs._shape = tf.TensorShape([None, None, None, None]) # pylint: disable=protected-access - rand_target._shape = tf.TensorShape([None, None, None, None]) # pylint: disable=protected-access - - # Final feature map. - rand_feature_map = { - "inputs": rand_inputs, - "problem_choice": choice, - "input_space_id": inp_id, - "target_space_id": tgt_id - } - if mode == tf.contrib.learn.ModeKeys.INFER: - rand_feature_map["infer_targets"] = rand_target - rand_target = None - return rand_feature_map, rand_target - - return input_fn - - -class _ConditionalOptimizer(tf.train.Optimizer): - """Conditional optimizer.""" - - def __init__(self, optimizer_name, lr, hparams, skip_condition_tensor=False): - self._skip_condition = skip_condition_tensor - if optimizer_name == "Adam": - # We change the default epsilon for Adam and re-scale lr. - # Using LazyAdam as it's much faster for large vocabulary embeddings. 
- self._opt = tf.contrib.opt.LazyAdamOptimizer( - lr / 500.0, - beta1=hparams.optimizer_adam_beta1, - beta2=hparams.optimizer_adam_beta2, - epsilon=hparams.optimizer_adam_epsilon) - elif optimizer_name == "Momentum": - self._opt = tf.train.MomentumOptimizer( - lr, momentum=hparams.optimizer_momentum_momentum) - else: - self._opt = tf.contrib.layers.OPTIMIZER_CLS_NAMES[optimizer_name](lr) - - def compute_gradients(self, loss, var_list, colocate_gradients_with_ops): - return self._opt.compute_gradients( - loss, var_list, colocate_gradients_with_ops=colocate_gradients_with_ops) - - def apply_gradients(self, gradients, global_step=None, name=None): - - def opt_gradients(): - return self._opt.apply_gradients( - gradients, global_step=global_step, name=name) - - if self._skip_condition is False: - return opt_gradients() - return tf.cond( - self._skip_condition, - tf.no_op, - opt_gradients, - name="conditional_optimizer_gradients_skip_cond") - - -def _sqrt_decay(step): - """Decay like 1 / sqrt(step), multiplied by 500 to normalize.""" - return 500.0 / tf.sqrt(tf.maximum(step, 1.0)) - - -def _exp_decay_after(step, rate, from_which_step): - """Decay exponentially by rate (per step) starting at from_which_step.""" - return tf.cond( - step < from_which_step, - lambda: tf.constant(1.0), - lambda: rate**(step - from_which_step), - name="exponential_decay_step_cond") - - -def _ps_replicas(all_workers=False): - if all_workers: - return list(range(FLAGS.ps_replicas)) - # Worker K will be using replicas {0,...n-1} + K*n if we have n replicas. - num_replicas = FLAGS.ps_replicas // FLAGS.worker_replicas - return [d + FLAGS.worker_id * num_replicas for d in xrange(num_replicas)] - - -def _gpu_order(num_gpus): - if FLAGS.gpu_order: - ret = [int(s) for s in FLAGS.gpu_order.split(" ")] - if len(ret) == num_gpus: - return ret - return list(range(num_gpus)) - - -def _ps_gpus(all_workers=False): - ps_gpus = [] - for d in _ps_replicas(all_workers=all_workers): - ps_gpus.extend([(d, gpu) for gpu in _gpu_order(FLAGS.ps_gpu)]) - return ps_gpus - - -def _ps_devices(all_workers=False): - """List of ps devices (where to put the experts). - - Args: - all_workers: whether the list is for all async workers or just this one. - - Returns: - a list of device names - """ - if FLAGS.ps_replicas > 0: - if FLAGS.ps_gpu > 0: - return [ - FLAGS.ps_job + "/task:%d/GPU:%d" % (d, gpu) - for (d, gpu) in _ps_gpus(all_workers=all_workers) - ] - else: - return [ - FLAGS.ps_job + "/task:%d" % d - for d in _ps_replicas(all_workers=all_workers) - ] - else: - if FLAGS.worker_gpu > 0: - return ["gpu:%d" % d for d in _gpu_order(FLAGS.worker_gpu)] - else: - return [""] - - -def data_parallelism(all_workers=False): - """Over which devices do we split each training batch. - - In old-fashioned async mode, we split the batch over all GPUs on the - current worker. - - In sync mode, we split the batch over all the parameter server GPUs. - - This function returns an expert_utils.Parallelism object, which can be used - to build the model. It is configured in a way that any variables created - by `tf.get_variable` will be assigned to the parameter servers and shared - between datashards. - - Args: - all_workers: whether the devices are all async workers or just this one. - - Returns: - a expert_utils.Parallelism. 
- """ - - def _replica_device_setter(worker_device): - if FLAGS.ps_replicas == 0: - return worker_device - return tf.train.replica_device_setter( - worker_device=worker_device, - ps_tasks=FLAGS.ps_replicas, - ps_device=FLAGS.ps_job + "/GPU:0" if FLAGS.ps_gpu > 0 else FLAGS.ps_job) - - if FLAGS.schedule == "local_run": - assert not FLAGS.sync - datashard_devices = ["gpu:%d" % d for d in _gpu_order(FLAGS.worker_gpu)] - caching_devices = None - elif FLAGS.sync: - assert FLAGS.ps_replicas > 0 - datashard_devices = [ - _replica_device_setter(d) for d in _ps_devices(all_workers=all_workers) - ] - if FLAGS.ps_gpu > 0 and FLAGS.ps_replicas > 1: - caching_devices = [ - FLAGS.ps_job + "/task:%d/cpu:0" % d - for (d, _) in _ps_gpus(all_workers=all_workers) - ] - else: - caching_devices = None - else: - # old fashioned async - compute on worker - if FLAGS.worker_gpu > 1: - datashard_devices = [ - _replica_device_setter(FLAGS.worker_job + "/GPU:%d" % d) - for d in _gpu_order(FLAGS.worker_gpu) - ] - caching_devices = [FLAGS.worker_job + "/GPU:0"] * FLAGS.worker_gpu - else: - datashard_devices = [_replica_device_setter(FLAGS.worker_job)] - caching_devices = None - tf.logging.info("datashard_devices: %s", datashard_devices) - tf.logging.info("caching_devices: %s", caching_devices) - return eu.Parallelism( - datashard_devices, - reuse=True, - caching_devices=caching_devices, - daisy_chain_variables=FLAGS.daisy_chain_variables) diff --git a/tensor2tensor/utils/trainer_utils_test.py b/tensor2tensor/utils/trainer_utils_test.py deleted file mode 100644 index fd1c6885c..000000000 --- a/tensor2tensor/utils/trainer_utils_test.py +++ /dev/null @@ -1,70 +0,0 @@ -# Copyright 2017 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -"""Tests for trainer_utils.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# Dependency imports - -from tensor2tensor.data_generators import algorithmic -from tensor2tensor.data_generators import generator_utils -from tensor2tensor.utils import registry -from tensor2tensor.utils import trainer_utils as utils # pylint: disable=unused-import - -import tensorflow as tf - -FLAGS = tf.flags.FLAGS - - -class TrainerUtilsTest(tf.test.TestCase): - - @classmethod - def setUpClass(cls): - # Generate a small test dataset - FLAGS.problems = "algorithmic_addition_binary40" - TrainerUtilsTest.data_dir = tf.test.get_temp_dir() - gen = algorithmic.identity_generator(2, 10, 300) - generator_utils.generate_files(gen, FLAGS.problems + "-train", - TrainerUtilsTest.data_dir, 1, 100) - generator_utils.generate_files(gen, FLAGS.problems + "-dev", - TrainerUtilsTest.data_dir, 1, 100) - - def testModelsImported(self): - models = registry.list_models() - self.assertTrue("baseline_lstm_seq2seq" in models) - - def testHParamsImported(self): - hparams = registry.list_hparams() - self.assertTrue("transformer_base" in hparams) - - def testSingleStep(self): - model_name = "transformer" - FLAGS.hparams_set = "transformer_base" - # Shrink the test model down - FLAGS.hparams = ("batch_size=10,hidden_size=10,num_heads=2,max_length=16," - "num_hidden_layers=1") - exp = utils.create_experiment( - output_dir=tf.test.get_temp_dir(), - data_dir=TrainerUtilsTest.data_dir, - model_name=model_name, - train_steps=1, - eval_steps=1) - exp.test() - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensor2tensor/utils/update_ops_hook.py b/tensor2tensor/utils/update_ops_hook.py new file mode 100644 index 000000000..849216fe0 --- /dev/null +++ b/tensor2tensor/utils/update_ops_hook.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Hook to run tf.GraphKeys.UPDATE_OPS.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow.compat.v1 as tf + + +class UpdateOpsHook(tf.train.SessionRunHook): + """Hook to run assign_ops.""" + + def before_run(self, run_context): + del run_context + update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) + return tf.train.SessionRunArgs(update_ops) diff --git a/tensor2tensor/utils/usr_dir.py b/tensor2tensor/utils/usr_dir.py new file mode 100644 index 000000000..b7a54ebcd --- /dev/null +++ b/tensor2tensor/utils/usr_dir.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utility to load code from an external user-supplied directory.""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import importlib +import os +import sys +import tensorflow.compat.v1 as tf + + +INTERNAL_USR_DIR_PACKAGE = "t2t_usr_dir_internal" + + +def import_usr_dir(usr_dir): + """Import module at usr_dir, if provided.""" + if not usr_dir: + return + if usr_dir == INTERNAL_USR_DIR_PACKAGE: + # The package has been installed with pip under this name for Cloud ML + # Engine so just import it. + importlib.import_module(INTERNAL_USR_DIR_PACKAGE) + return + + dir_path = os.path.abspath(os.path.expanduser(usr_dir).rstrip("/")) + containing_dir, module_name = os.path.split(dir_path) + tf.logging.info("Importing user module %s from path %s", module_name, + containing_dir) + sys.path.insert(0, containing_dir) + importlib.import_module(module_name) + sys.path.pop(0) diff --git a/tensor2tensor/utils/video/prediction2gif.py b/tensor2tensor/utils/video/prediction2gif.py new file mode 100644 index 000000000..f28674b4c --- /dev/null +++ b/tensor2tensor/utils/video/prediction2gif.py @@ -0,0 +1,200 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Generates gifs out of a video checkpoint. 
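As a usage note for `import_usr_dir` above: pointed at a directory, it temporarily prepends the directory's parent to `sys.path`, imports the directory as a package so any registry registrations in its `__init__.py` take effect, and then pops the path entry again. The sketch below builds a throwaway user directory; the package name and its contents are hypothetical, not part of the diff.

```python
import os
import tempfile

from tensor2tensor.utils import usr_dir

# Build a throwaway user directory (normally this would be your own code).
tmp = tempfile.mkdtemp()
pkg = os.path.join(tmp, "my_t2t_code")
os.makedirs(pkg)
with open(os.path.join(pkg, "__init__.py"), "w") as f:
    f.write("print('user code imported')\n")  # registry registrations would go here

# Equivalent to passing --t2t_usr_dir=<pkg> to the t2t binaries.
usr_dir.import_usr_dir(pkg)
```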
+ + Usage: + prediction2gif \ + --problem="gym_pong_deterministic-v4_random" \ + --model="next_frame_sv2p" \ + --hparams_set="next_frame_sv2p" \ + --output_dir=$CHECKPOINT_DIRECTORY \ + --data_dir=$DATA_DIRECTORY \ + --output_gif=$USER/out.gif \ + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +import matplotlib as mpl +import numpy as np +from queue import Queue + +from tensor2tensor.bin import t2t_trainer # pylint: disable=unused-import +from tensor2tensor.layers import common_video +from tensor2tensor.utils import registry +from tensor2tensor.utils import trainer_lib +from tensor2tensor.utils import usr_dir + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +mpl.use("Agg") +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_integer("num_steps", 100, "Number of prediction steps.") +flags.DEFINE_integer("fps", 10, "Generated gif FPS.") +flags.DEFINE_string("output_gif", None, "Output path to save the gif.") + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + trainer_lib.set_random_seed(FLAGS.random_seed) + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + + # Create hparams + hparams = trainer_lib.create_hparams( + FLAGS.hparams_set, + FLAGS.hparams, + data_dir=os.path.expanduser(FLAGS.data_dir), + problem_name=FLAGS.problem) + hparams.force_full_predict = True + hparams.scheduled_sampling_k = -1 + + # Params + num_agents = 1 # TODO(mbz): fix the code for more agents + num_steps = FLAGS.num_steps + if hasattr(hparams.problem, "num_actions"): + num_actions = hparams.problem.num_actions + else: + num_actions = None + frame_shape = hparams.problem.frame_shape + resized_frame = hparams.preprocess_resize_frames is not None + if resized_frame: + frame_shape = hparams.preprocess_resize_frames + frame_shape += [hparams.problem.num_channels] + + dataset = registry.problem(FLAGS.problem).dataset( + tf_estimator.ModeKeys.TRAIN, + shuffle_files=True, + data_dir=os.path.expanduser(FLAGS.data_dir), + hparams=hparams) + + dataset = dataset.batch(num_agents, drop_remainder=True) + data = dataset.make_one_shot_iterator().get_next() + # Setup input placeholders + input_size = [num_agents, hparams.video_num_input_frames] + if num_actions is None: + placeholders = { + "inputs": tf.placeholder(tf.float32, input_size + frame_shape) + } + else: + placeholders = { + "inputs": tf.placeholder(tf.float32, input_size + frame_shape), + "input_action": tf.placeholder(tf.int64, input_size + [1]), + "input_reward": tf.placeholder(tf.int64, input_size + [1]), + "reset_internal_states": tf.placeholder(tf.float32, []), + } + # Create model. 
+ model_cls = registry.model(FLAGS.model) + model = model_cls(hparams, tf_estimator.ModeKeys.PREDICT) + prediction_ops = model.infer(placeholders) + + states_q = Queue(maxsize=hparams.video_num_input_frames) + actions_q = Queue(maxsize=hparams.video_num_input_frames) + rewards_q = Queue(maxsize=hparams.video_num_input_frames) + if num_actions is not None: + all_qs = [states_q, actions_q, rewards_q] + else: + all_qs = [states_q] + + writer = common_video.WholeVideoWriter( + fps=FLAGS.fps, output_path=FLAGS.output_gif) + + saver = tf.train.Saver(tf.trainable_variables()) + with tf.train.SingularMonitoredSession() as sess: + # Load latest checkpoint + ckpt = tf.train.get_checkpoint_state(FLAGS.output_dir).model_checkpoint_path + saver.restore(sess.raw_session(), ckpt) + + # get init frames from the dataset + data_np = sess.run(data) + + frames = np.split(data_np["inputs"], hparams.video_num_input_frames, 1) + for frame in frames: + frame = np.squeeze(frame, 1) + states_q.put(frame) + writer.write(frame[0].astype(np.uint8)) + + if num_actions is not None: + actions = np.split(data_np["input_action"], + hparams.video_num_input_frames, 1) + for action in actions: + actions_q.put(np.squeeze(action, 1)) + + rewards = np.split(data_np["input_reward"], + hparams.video_num_input_frames, 1) + for reward in rewards: + rewards_q.put(np.squeeze(reward, 1)) + + for step in range(num_steps): + print(">>>>>>> ", step) + + if num_actions is not None: + random_actions = np.random.randint(num_actions-1) + random_actions = np.expand_dims(random_actions, 0) + random_actions = np.tile(random_actions, (num_agents, 1)) + + # Shape inputs and targets + inputs, input_action, input_reward = ( + np.stack(list(q.queue), axis=1) for q in all_qs) + else: + assert len(all_qs) == 1 + q = all_qs[0] + elems = list(q.queue) + # Need to adjust shapes sometimes. + for i, e in enumerate(elems): + if len(e.shape) < 4: + elems[i] = np.expand_dims(e, axis=0) + inputs = np.stack(elems, axis=1) + + # Predict next frames + if num_actions is None: + feed = {placeholders["inputs"]: inputs} + else: + feed = { + placeholders["inputs"]: inputs, + placeholders["input_action"]: input_action, + placeholders["input_reward"]: input_reward, + placeholders["reset_internal_states"]: float(step == 0), + } + predictions = sess.run(prediction_ops, feed_dict=feed) + + if num_actions is None: + predicted_states = predictions[:, 0] + else: + predicted_states = predictions["targets"][:, 0] + predicted_reward = predictions["target_reward"][:, 0] + + # Update queues + if num_actions is None: + new_data = (predicted_states) + else: + new_data = (predicted_states, random_actions, predicted_reward) + for q, d in zip(all_qs, new_data): + q.get() + q.put(d.copy()) + + writer.write(np.round(predicted_states[0]).astype(np.uint8)) + + writer.finish_to_disk() + +if __name__ == "__main__": + tf.app.run() diff --git a/tensor2tensor/utils/video/reward_confusion.py b/tensor2tensor/utils/video/reward_confusion.py new file mode 100644 index 000000000..2b01f08c8 --- /dev/null +++ b/tensor2tensor/utils/video/reward_confusion.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
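To make the rollout loop above easier to follow, here is a TF-free sketch of its core idea: a fixed-length queue holds the most recent frames as the model's context, and each step the oldest frame is dropped and the newly predicted frame is appended. `fake_predict` below is a stand-in for `model.infer`, not part of the diff.

```python
import numpy as np
from queue import Queue

num_input_frames, frame_shape = 4, (64, 64, 3)

def fake_predict(frames):
    # Stand-in for the model: "predict" a slightly dimmed copy of the last frame.
    return frames[-1] * 0.9

context = Queue(maxsize=num_input_frames)
for _ in range(num_input_frames):          # seed the window, e.g. with dataset frames
    context.put(np.zeros(frame_shape, dtype=np.float32))

for step in range(10):
    frames = list(context.queue)           # oldest ... newest: the model's input window
    next_frame = fake_predict(frames)
    context.get()                          # drop the oldest frame
    context.put(next_frame)                # slide the window forward by one prediction
```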
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""Computes the reward prediction confusion matrix given checkpoints and data. + + Usage: + reward_confusion \ + --problem="gym_pong_deterministic-v4_random" \ + --model="next_frame_sv2p" \ + --hparams_set="next_frame_sv2p" \ + --output_dir=$CHECKPOINT_DIRECTORY \ + --data_dir=$DATA_DIRECTORY \ + +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from tensor2tensor.bin.t2t_decoder import create_hparams +from tensor2tensor.data_generators import problem # pylint: disable=unused-import +from tensor2tensor.utils import registry +from tensor2tensor.utils import trainer_lib +from tensor2tensor.utils import usr_dir + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +flags = tf.flags +FLAGS = flags.FLAGS + + +def print_confusion_matrix(title, cm): + print("=" * 30) + print(title) + print("=" * 30) + print(cm) + print("=" * 30) + print() + + +def main(_): + tf.logging.set_verbosity(tf.logging.INFO) + trainer_lib.set_random_seed(FLAGS.random_seed) + usr_dir.import_usr_dir(FLAGS.t2t_usr_dir) + + # Create hparams + hparams = create_hparams() + hparams.force_full_predict = True + batch_size = hparams.batch_size + + # Iterating over dev/test partition of the data. + # Change the data partition if necessary. + dataset = registry.problem(FLAGS.problem).dataset( + tf_estimator.ModeKeys.PREDICT, + shuffle_files=False, + hparams=hparams) + + dataset = dataset.batch(batch_size, drop_remainder=True) + data = dataset.make_one_shot_iterator().get_next() + input_data = dict((k, data[k]) for k in data.keys() if k.startswith("input")) + + # Creat model + model_cls = registry.model(FLAGS.model) + model = model_cls(hparams, tf_estimator.ModeKeys.PREDICT) + prediction_ops = model.infer(input_data) + + # Confusion Matrix + nr = hparams.problem.num_rewards + cm_per_frame = np.zeros((nr, nr), dtype=np.uint64) + cm_next_frame = np.zeros((nr, nr), dtype=np.uint64) + + saver = tf.train.Saver() + with tf.train.SingularMonitoredSession() as sess: + # Load latest checkpoint + ckpt = tf.train.get_checkpoint_state(FLAGS.output_dir).model_checkpoint_path + saver.restore(sess.raw_session(), ckpt) + + counter = 0 + while not sess.should_stop(): + counter += 1 + if counter % 1 == 0: + print(counter) + + # Predict next frames + rew_pd, rew_gt = sess.run( + [prediction_ops["target_reward"], data["target_reward"]]) + + for i in range(batch_size): + cm_next_frame[rew_gt[i, 0, 0], rew_pd[i, 0, 0]] += 1 + for gt, pd in zip(rew_gt[i], rew_pd[i]): + cm_per_frame[gt, pd] += 1 + + print_confusion_matrix("Per-frame Confusion Matrix", cm_per_frame) + print_confusion_matrix("Next-frame Confusion Matrix", cm_next_frame) + +if __name__ == "__main__": + tf.app.run() diff --git a/tensor2tensor/utils/video2gif.py b/tensor2tensor/utils/video2gif.py new file mode 100644 index 000000000..afbf694d2 --- /dev/null +++ b/tensor2tensor/utils/video2gif.py @@ -0,0 +1,94 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. 
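The confusion-matrix bookkeeping in `reward_confusion.py` above reduces to a simple indexed increment; a small numpy sketch with made-up reward values (not data from the diff):

```python
import numpy as np

num_rewards = 3
cm = np.zeros((num_rewards, num_rewards), dtype=np.uint64)

ground_truth = np.array([0, 1, 2, 1, 0])   # illustrative true rewards
predicted = np.array([0, 2, 2, 1, 0])      # illustrative predicted rewards
for gt, pd in zip(ground_truth, predicted):
    cm[gt, pd] += 1                         # row = ground truth, column = prediction

# cm[i, j] counts frames whose true reward was i and predicted reward was j;
# the diagonal holds the correctly predicted rewards.
```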
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +r"""View the problem. + +This binary saves the videos in the problem(dataset) into gifs. + +The imagemagick package should be installed for conversion to gifs. + +Example usage to view dataset: + + video2gif \ + --data_dir ~/data \ + --problem=gym_water_world_random5k \ + --hparams_set=next_frame_stochastic \ + --output_dir /usr/local/google/home/mbz/t2t_train/ww/ \ + --data_dir /usr/local/google/home/mbz/temp/ \ + --num_samples 10 +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys +import numpy as np +from tensor2tensor.bin import t2t_trainer # pylint: disable=unused-import +from tensor2tensor.data_generators import problem # pylint: disable=unused-import +from tensor2tensor.utils import decoding +from tensor2tensor.utils import trainer_lib + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +flags = tf.flags +FLAGS = flags.FLAGS + +flags.DEFINE_integer("num_samples", -1, "Number of saved samples.") + + +def create_gif(name): + cmd = "convert -delay 15 {0}* {0}.gif".format(name) + os.system(cmd) + + +def main(_): + problem_name = FLAGS.problem + if "video" not in problem_name and "gym" not in problem_name: + print("This tool only works for video problems.") + return + + mode = tf_estimator.ModeKeys.TRAIN + hparams = trainer_lib.create_hparams( + FLAGS.hparams_set, + FLAGS.hparams, + data_dir=os.path.expanduser(FLAGS.data_dir), + problem_name=problem_name) + + dataset = hparams.problem.input_fn(mode, hparams) + features = dataset.make_one_shot_iterator().get_next() + + tf.gfile.MakeDirs(FLAGS.output_dir) + base_template = os.path.join(FLAGS.output_dir, FLAGS.problem) + count = 0 + with tf.train.MonitoredTrainingSession() as sess: + while not sess.should_stop(): + # TODO(mbz): figure out what the second output is. + data, _ = sess.run(features) + video_batch = np.concatenate((data["inputs"], data["targets"]), axis=1) + + for video in video_batch: + print("Saving {}/{}".format(count, FLAGS.num_samples)) + name = "%s_%05d" % (base_template, count) + decoding.save_video(video, name + "_{:05d}.png") + create_gif(name) + count += 1 + + if count == FLAGS.num_samples: + sys.exit(0) + +if __name__ == "__main__": + tf.app.run() diff --git a/tensor2tensor/utils/video_metrics.py b/tensor2tensor/utils/video_metrics.py new file mode 100644 index 000000000..b22474c97 --- /dev/null +++ b/tensor2tensor/utils/video_metrics.py @@ -0,0 +1,294 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Computes the metrics for video prediction and generation.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +import six + + +import tensorflow.compat.v1 as tf + + +def load_image_map_function(filename, frame_shape): + image = tf.read_file(filename) + image = tf.image.decode_png(image) + image = tf.image.resize_images(image, frame_shape[0:2]) + image.set_shape(frame_shape) + return image + + +def load_videos(template, video_length, frame_shape): + """Loads videos from files. + + Args: + template: template string for listing the image files. + video_length: length of the video. + frame_shape: shape of each frame. + + Returns: + dataset: the tf dataset frame by frame. + dataset_len: number of the items which is the number of image files. + + Raises: + ValueError: if no files found. + """ + filenames = tf.gfile.Glob(template) + if not filenames: + raise ValueError("no files found.") + filenames = sorted(filenames) + dataset_len = len(filenames) + filenames = tf.constant(filenames) + dataset = tf.data.Dataset.from_tensor_slices(filenames) + dataset = dataset.apply(tf.data.experimental.map_and_batch( + lambda filename: load_image_map_function(filename, frame_shape), + video_length, drop_remainder=True)) + return dataset, dataset_len + + +def file_pattern(output_dir, problem_name, prefix): + return os.path.join(output_dir, "{}_{}*.png".format(problem_name, prefix)) + + +def get_target_and_output_filepatterns(output_dir, problem_name): + return (file_pattern(output_dir, problem_name, "outputs"), + file_pattern(output_dir, problem_name, "targets")) + + +def get_zipped_dataset_from_png_files( + output_files, target_files, video_length, frame_shape): + outputs, len_ = load_videos(output_files, video_length, frame_shape) + targets, len_ = load_videos(target_files, video_length, frame_shape) + zipped_dataset = tf.data.Dataset.zip((outputs, targets)) + num_videos = len_ // video_length + iterator = zipped_dataset.make_one_shot_iterator() + return iterator, None, num_videos + + +def save_results(results, output_dir, problem_name): + for name, array in six.iteritems(results): + output_filename = "{}_{}.npy".format(problem_name, name) + output_filename = os.path.join(output_dir, output_filename) + with tf.gfile.Open(output_filename, "wb") as fname: + np.save(fname, array) + + +def psnr_and_ssim(output, target): + """Compute the PSNR and SSIM. 
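For reference, `tf.image.psnr` as used by `psnr_and_ssim` here computes 10 * log10(max_val^2 / MSE). The numpy sketch below reproduces that formula on illustrative frames, assuming 8-bit pixel values (max_val=255) as in the code.

```python
import numpy as np

# Two illustrative frames (height, width, channels) with values in [0, 255].
target = np.random.randint(0, 256, size=(64, 64, 3)).astype(np.float64)
output = np.clip(target + np.random.randint(-5, 6, size=target.shape), 0, 255)

mse = np.mean((output - target) ** 2)
psnr = 10.0 * np.log10(255.0 ** 2 / mse)   # same formula tf.image.psnr(max_val=255) uses
```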
+ + Args: + output: 4-D Tensor, shape=(num_frames, height, width, num_channels) + target: 4-D Tensor, shape=(num_frames, height, width, num_channels) + Returns: + psnr: 1-D Tensor, shape=(num_frames,) + ssim: 1-D Tensor, shape=(num_frames,) + """ + output = tf.cast(output, dtype=tf.int32) + target = tf.cast(target, dtype=tf.int32) + psnr = tf.image.psnr(output, target, max_val=255) + ssim = tf.image.ssim(output, target, max_val=255) + return psnr, ssim + + +def stack_data_given_key(predictions, key): + x = [p[key] for p in predictions] + x = np.stack(x, axis=0) + return x + + +def get_zipped_dataset_from_predictions(predictions): + """Creates dataset from in-memory predictions.""" + targets = stack_data_given_key(predictions, "targets") + outputs = stack_data_given_key(predictions, "outputs") + num_videos, num_steps = targets.shape[:2] + + # Truncate output time-steps to match target time-steps + outputs = outputs[:, :num_steps] + + targets_placeholder = tf.placeholder(targets.dtype, targets.shape) + outputs_placeholder = tf.placeholder(outputs.dtype, outputs.shape) + dataset = tf.data.Dataset.from_tensor_slices( + (targets_placeholder, outputs_placeholder)) + iterator = dataset.make_initializable_iterator() + feed_dict = {targets_placeholder: targets, + outputs_placeholder: outputs} + return iterator, feed_dict, num_videos + + +def compute_one_decoding_video_metrics(iterator, feed_dict, num_videos): + """Computes the average of all the metric for one decoding. + + Args: + iterator: dataset iterator. + feed_dict: feed dict to initialize iterator. + num_videos: number of videos. + + Returns: + all_psnr: 2-D Numpy array, shape=(num_samples, num_frames) + all_ssim: 2-D Numpy array, shape=(num_samples, num_frames) + """ + output, target = iterator.get_next() + metrics = psnr_and_ssim(output, target) + + with tf.Session() as sess: + sess.run(tf.local_variables_initializer()) + initalizer = iterator._initializer # pylint: disable=protected-access + if initalizer is not None: + sess.run(initalizer, feed_dict=feed_dict) + + all_psnr, all_ssim = [], [] + for i in range(num_videos): + print("Computing video: %d" % i) + psnr_np, ssim_np = sess.run(metrics) + all_psnr.append(psnr_np) + all_ssim.append(ssim_np) + all_psnr = np.array(all_psnr) + all_ssim = np.array(all_ssim) + return all_psnr, all_ssim + + +def reduce_to_best_decode(metrics, reduce_func): + """Extracts the best-decode from the metrics according to reduce_func. + + Args: + metrics: 3-D numpy array, shape=(num_decodes, num_samples, num_frames) + reduce_func: callable, np.argmax or np.argmin. + Returns: + best_metrics: 2-D numpy array, shape=(num_samples, num_frames). + best_decode_ind: 1-D numpy array, shape=(num_samples,) + """ + num_videos = metrics.shape[1] + # Take mean of the metric across the frames to approximate the video + # closest to the ground truth. + mean_across_frames = np.mean(metrics, axis=-1) + + # For every sample, use the decode that has a maximum mean-metric. + best_decode_ind = reduce_func(mean_across_frames, axis=0) + best_metrics = metrics[best_decode_ind, np.arange(num_videos), :] + return best_metrics, best_decode_ind + + +def compute_all_metrics_statistics(all_results): + """Computes statistics of metrics across multiple decodings. + + Args: + all_results: dict of 3-D numpy arrays. + Each array has shape=(num_decodes, num_samples, num_frames). + Returns: + statistics: dict of 1-D numpy arrays, shape=(num_frames). 
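A tiny numpy walk-through of `reduce_to_best_decode` above, using hand-made metric values in the same (num_decodes, num_samples, num_frames) layout:

```python
import numpy as np

metrics = np.array([                      # (num_decodes=2, num_samples=2, num_frames=3)
    [[30., 32., 34.], [22., 19., 13.]],
    [[22., 19., 13.], [30., 32., 34.]],
])
mean_across_frames = np.mean(metrics, axis=-1)           # (num_decodes, num_samples)
best_decode_ind = np.argmax(mean_across_frames, axis=0)  # -> array([0, 1])
best_metrics = metrics[best_decode_ind, np.arange(metrics.shape[1]), :]
# best_metrics == [[30., 32., 34.], [30., 32., 34.]]: per sample, the frames of
# whichever decode had the highest frame-averaged metric.
```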
+ First the statistic (max/mean/std) is computed across the + decodes, then the mean is taken across num_samples. + decode_inds: dict of 1-D numpy arrays, shape=(num_samples,) + Each element represents the index of the decode corresponding + to the best statistic. + """ + statistics = {} + decode_inds = {} + all_metrics = all_results.keys() + + for key in all_metrics: + values = all_results[key] + statistics[key + "_MEAN"] = np.mean(values, axis=0) + statistics[key + "_STD"] = np.std(values, axis=0) + min_stats, min_decode_ind = reduce_to_best_decode(values, np.argmin) + statistics[key + "_MIN"] = min_stats + decode_inds[key + "_MIN_DECODE"] = min_decode_ind + max_stats, max_decode_ind = reduce_to_best_decode(values, np.argmax) + statistics[key + "_MAX"] = max_stats + decode_inds[key + "_MAX_DECODE"] = max_decode_ind + + # Computes mean of each statistic across the dataset. + for key in statistics: + statistics[key] = np.mean(statistics[key], axis=0) + return statistics, decode_inds + + +def compute_video_metrics_from_predictions(predictions, decode_hparams): + """Computes metrics from predictions. + + Args: + predictions: list of list of dicts. + outer length: num_decodes, inner_length: num_samples + decode_hparams: Decode hparams. instance of HParams. + Returns: + statistics: dict of Tensors, key being the metric with each Tensor + having the shape (num_samples, num_frames). + """ + all_results = {} + + + ssim_all_decodes, psnr_all_decodes = [], [] + for single_decode in predictions: + args = get_zipped_dataset_from_predictions(single_decode) + psnr_single, ssim_single = compute_one_decoding_video_metrics(*args) + psnr_all_decodes.append(psnr_single) + ssim_all_decodes.append(ssim_single) + psnr_all_decodes = np.array(psnr_all_decodes) + ssim_all_decodes = np.array(ssim_all_decodes) + all_results.update({"PSNR": psnr_all_decodes, "SSIM": ssim_all_decodes}) + return compute_all_metrics_statistics(all_results) + + +def compute_video_metrics_from_png_files( + output_dirs, problem_name, video_length, frame_shape): + """Computes the average of all the metric for one decoding. + + This function assumes that all the predicted and target frames + have been saved on the disk and sorting them by name will result + to consecutive frames saved in order. + + Args: + output_dirs: directory with all the saved frames. + problem_name: prefix of the saved frames usually name of the problem. + video_length: length of the videos. + frame_shape: shape of each frame in HxWxC format. + + Returns: + Dictionary which contains the average of each metric per frame. 
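A hedged shape check for `compute_all_metrics_statistics` above, using a random stand-in for a PSNR array; the values are meaningless, only the shapes and key names matter.

```python
import numpy as np
from tensor2tensor.utils import video_metrics

fake_psnr = np.random.rand(2, 3, 4)   # (num_decodes, num_samples, num_frames)
stats, decode_inds = video_metrics.compute_all_metrics_statistics({"PSNR": fake_psnr})

# stats: PSNR_MEAN / PSNR_STD / PSNR_MIN / PSNR_MAX, each of shape (num_frames,)
# decode_inds: PSNR_MIN_DECODE / PSNR_MAX_DECODE, each of shape (num_samples,)
assert stats["PSNR_MEAN"].shape == (4,)
assert decode_inds["PSNR_MAX_DECODE"].shape == (3,)
```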
+ """ + ssim_all_decodes, psnr_all_decodes = [], [] + for output_dir in output_dirs: + output_files, target_files = get_target_and_output_filepatterns( + output_dir, problem_name) + args = get_zipped_dataset_from_png_files( + output_files, target_files, video_length, frame_shape) + psnr_single, ssim_single = compute_one_decoding_video_metrics(*args) + psnr_all_decodes.append(psnr_single) + ssim_all_decodes.append(ssim_single) + + psnr_all_decodes = np.array(psnr_all_decodes) + ssim_all_decodes = np.array(ssim_all_decodes) + all_results = {"PSNR": psnr_all_decodes, "SSIM": ssim_all_decodes} + return compute_all_metrics_statistics(all_results) + + +def compute_and_save_video_metrics( + output_dirs, problem_name, video_length, frame_shape): + """Compute and saves the video metrics.""" + statistics, all_results = compute_video_metrics_from_png_files( + output_dirs, problem_name, video_length, frame_shape) + for results, output_dir in zip(all_results, output_dirs): + save_results(results, output_dir, problem_name) + + parent_dir = os.path.join(output_dirs[0], os.pardir) + final_dir = os.path.join(parent_dir, "decode") + tf.gfile.MakeDirs(parent_dir) + + save_results(statistics, final_dir, problem_name) diff --git a/tensor2tensor/utils/video_metrics_test.py b/tensor2tensor/utils/video_metrics_test.py new file mode 100644 index 000000000..a7619d486 --- /dev/null +++ b/tensor2tensor/utils/video_metrics_test.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""video metrics test.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +from tensor2tensor.utils import video_metrics +import tensorflow.compat.v1 as tf + + +class VideoMetricsTest(tf.test.TestCase): + + def test_reduce_to_best_decode(self): + # num_decodes=2, num_samples=3, num_frames=4 + decode1 = [ + [30.0, 32.0, 33.0, 34.0], + [22.0, 19.0, 12.0, 13.0], + [30.0, 10.0, 30.0, 10.0]] + decode2 = [ + [22.0, 19.0, 12.0, 13.0], + [30.0, 32.0, 33.0, 34.0], + [25.0, 25.0, 25.0, 25.0]] + all_decodes = [decode1, decode2] + all_decodes = np.array(all_decodes) + best_decode, best_decode_ind = video_metrics.reduce_to_best_decode( + all_decodes, np.argmax) + worst_decode, worst_decode_ind = video_metrics.reduce_to_best_decode( + all_decodes, np.argmin) + exp_best_decode = [ + [30.0, 32.0, 33.0, 34.0], + [30.0, 32.0, 33.0, 34.0], + [25.0, 25.0, 25.0, 25.0]] + exp_worst_decode = [ + [22.0, 19.0, 12.0, 13.0], + [22.0, 19.0, 12.0, 13.0], + [30.0, 10.0, 30.0, 10.0]] + self.assertTrue(np.allclose(best_decode, exp_best_decode)) + self.assertTrue(np.allclose(worst_decode, exp_worst_decode)) + self.assertTrue(np.allclose(best_decode_ind, [0, 1, 1])) + self.assertTrue(np.allclose(worst_decode_ind, [1, 0, 0])) + + +if __name__ == '__main__': + tf.test.main() diff --git a/tensor2tensor/utils/yellowfin.py b/tensor2tensor/utils/yellowfin.py new file mode 100644 index 000000000..d89641ec1 --- /dev/null +++ b/tensor2tensor/utils/yellowfin.py @@ -0,0 +1,642 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""YellowFin for TensorFlow. Thanks Jian Zhang: zjian [@] stanford [.] edu.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import tensorflow.compat.v1 as tf + + +# Values for gate_gradients. +GATE_NONE = tf.train.Optimizer.GATE_NONE +GATE_OP = tf.train.Optimizer.GATE_OP +GATE_GRAPH = tf.train.Optimizer.GATE_GRAPH + + +class YellowFinOptimizer(object): + """Optimizer that implements the YellowFin algorithm. + + See [Zhang et. al., 2017](https://arxiv.org/abs/1706.03471) for details. + """ + + def __init__(self, + learning_rate=1.0, + momentum=0.0, + clip_thresh=None, + beta=0.999, + curvature_window_width=20, + zero_debias=True, + delta_mu=0.0, + sparsity_debias=True, + use_locking=False, + name="YellowFin", + use_nesterov=False): + """Construct a new YellowFin optimizer. + + Implemented as a wrapper around tf.train.MomentumOptimizer + + Args: + learning_rate: A Tensor or a floating point value. The learning rate. + Set to 1.0 in the paper. + momentum: A Tensor or a floating point value. The momentum. + Set to 0.0 in the paper. + clip_thresh: A Tensor or a floating point value. The clipping threshold + for `tf.clip_by_global_norm`. If None, no clipping will be carried out. + beta: A float value or a constant float tensor. The smoothing parameter + for estimations. 
+ curvature_window_width: A int value or a constant int tensor. + The curvature window width. + zero_debias: A boolean, zero debias moving-averages. + delta_mu: For extensions. Not necessary in the basic use. + sparsity_debias: A boolean. Gradient norm and curvature are + biased to larger values when calculated with sparse gradient. + This is useful when the model is very sparse, e.g. LSTM with + word embedding. For non-sparse CNN, turning it off could + slightly accelerate the speed. + use_locking: If True, use locks for update operations. + name: Optional name prefix for the operations created when + applying gradients. Defaults to "YellowFin". + use_nesterov: If True, the underlying MomentumOptimizer uses Nesterov + Momentum. Set to False in the default YellowFin algorithm. + + Note: + clip_thresh is the threshold value on ||lr * gradient||, + delta_mu can be place holder/variable/tensor scalar. + They are used for additional momentum in situations such as + asynchronous-parallel training. + The default is 0.0(or None) for basic usage of the optimizer. + + Other features: + If you want to manually control the learning rates, self.lr_factor is + an interface to the outside, it is an multiplier for the internal + learning rate in YellowFin. It is helpful when you want to do additional + hand tuning or some decaying scheme to the tuned learning rate in + YellowFin. + Example on using lr_factor can be found here: + https://github.com/JianGoForIt/YellowFin/blob/master/char-rnn-tensorflow/train_YF.py#L140 + """ + # Set lr and mu + self._lr = learning_rate + self._mu = momentum + + # Set lr and mu tensor. + self._lr_var = tf.get_variable("YF_lr", + dtype=tf.float32, + trainable=False, + initializer=learning_rate) + self._mu_var = tf.get_variable("YF_mu", + dtype=tf.float32, + trainable=False, + initializer=tf.constant(momentum)) + + # Tuning factor for learning rates step or decaying scheme. + self.lr_factor = tf.get_variable("YF_lr_factor", + dtype=tf.float32, + trainable=False, + initializer=tf.constant(1.0)) + + # Gradient Clipping Threshold. + if clip_thresh is not None: + self._clip_thresh_var = tf.get_variable( + "YF_clip_thresh", + dtype=tf.float32, + trainable=False, + initializer=tf.constant(clip_thresh)) + else: + self._clip_thresh_var = None + + # Set initial lr and mu for momentum. + self._lr_m = self._lr_var * self.lr_factor + self._mu_m = self._mu_var + delta_mu + + # Init momentum optimizer. + self._momentum_optimizer = tf.train.MomentumOptimizer( + self._lr_m, self._mu_m, use_locking, name, use_nesterov) + + # Moving average for statistics. + self._beta = beta + self._moving_averager = None + + # Step counting. + self._step = tf.get_variable("YF_step", + dtype=tf.int32, + trainable=False, + initializer=tf.constant(0)) + + # YF_step + 1 op. + self._increment_step_op = None + + # For conditional tuning. + self._do_tune = tf.greater(self._step, tf.constant(0)) + + # Moving-averages. + self._zero_debias = zero_debias + self._sparsity_debias = sparsity_debias + + # For curvature range. + self.curvature_window_width = curvature_window_width + self._curv_win = None + + # Gradients and Variables. + self._grad = None + self._vars = None + + # Get per var g**2, norm**2 and mean(norm**2). + self._grad_squared = None + self._grad_norm_squared = None + self._grad_norm_squared_avg = None + + # Mean(grad) and Mean(grad**2) to compute Variance. + self._grad_avg = None + self._grad_avg_squared = None + + # Max and Min curvature variations. 
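A minimal, hedged usage sketch for the optimizer whose constructor is documented above (TF1 graph mode, toy quadratic loss; the variable, loss, and step count are illustrative and not from the diff):

```python
import numpy as np
import tensorflow.compat.v1 as tf
from tensor2tensor.utils.yellowfin import YellowFinOptimizer

tf.disable_eager_execution()
w = tf.get_variable("w", initializer=np.zeros([10], dtype=np.float32))
loss = tf.reduce_sum(tf.square(w - 1.0))   # toy quadratic objective

opt = YellowFinOptimizer(learning_rate=1.0, momentum=0.0)  # paper defaults
train_op = opt.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(5):
        sess.run(train_op)
    print("loss:", sess.run(loss))          # lr and momentum are tuned automatically
```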
+ self._h_max_t = None + self._h_min_t = None + self._h_min = None + self._h_max = None + + # Gradient Expected Variance. + self._grad_var = None + + # Gradient Norm and Mean(Gradient Norm). + self._grad_norm = None + self._grad_norm_avg = None + + # Distance to optimum and Mean(Distance to optimum). + self._d_t = None + self._dist_to_opt_avg = None + + # Maintains moving averages of variables + # by employing an exponential decay(Beta), + # and (zero_devias) moving-averages. + self._moving_averager = None + + # Handling Sparse Matrix + self._sparsity = None + self._sparsity_avg = None + + def _curvature_range(self): + """Curvature range. + + Returns: + h_max_t, h_min_t ops + """ + self._curv_win = tf.get_variable("curv_win", + dtype=tf.float32, + trainable=False, + shape=[self.curvature_window_width,], + initializer=tf.zeros_initializer) + # We use log smoothing for curvature range + self._curv_win = tf.scatter_update(self._curv_win, + self._step % self.curvature_window_width, + tf.log(self._grad_norm_squared)) + # Note here the iterations start from iteration 0 + valid_window = tf.slice(self._curv_win, + tf.constant([0,]), + tf.expand_dims( + tf.minimum( + tf.constant(self.curvature_window_width), + self._step + 1), dim=0)) + self._h_min_t = tf.reduce_min(valid_window) + self._h_max_t = tf.reduce_max(valid_window) + + curv_range_ops = [] + with tf.control_dependencies([self._h_min_t, self._h_max_t]): + avg_op = self._moving_averager.apply([self._h_min_t, self._h_max_t]) + with tf.control_dependencies([avg_op]): + self._h_min = tf.exp( + tf.identity(self._moving_averager.average(self._h_min_t))) + self._h_max = tf.exp( + tf.identity(self._moving_averager.average(self._h_max_t))) + if self._sparsity_debias: + self._h_min *= self._sparsity_avg + self._h_max *= self._sparsity_avg + curv_range_ops.append(avg_op) + return curv_range_ops # h_max_t, h_min_t + + def _grad_variance(self): + """Estimate of gradient Variance. + + Returns: + C_t ops. + """ + grad_var_ops = [] + tensor_to_avg = [] + for t, g in zip(self._vars, self._grad): + if isinstance(g, tf.IndexedSlices): + tensor_to_avg.append( + tf.reshape(tf.unsorted_segment_sum(g.values, + g.indices, + g.dense_shape[0]), + shape=t.get_shape())) + else: + tensor_to_avg.append(g) + avg_op = self._moving_averager.apply(tensor_to_avg) + grad_var_ops.append(avg_op) + with tf.control_dependencies([avg_op]): + self._grad_avg = [self._moving_averager.average(val) + for val in tensor_to_avg] + self._grad_avg_squared = [tf.square(val) for val in self._grad_avg] + + # Compute Variance + self._grad_var = tf.maximum( + tf.constant(1e-6, dtype=self._grad_norm_squared_avg.dtype), + self._grad_norm_squared_avg + - tf.add_n([tf.reduce_sum(val) for val in self._grad_avg_squared])) + if self._sparsity_debias: + self._grad_var *= self._sparsity_avg + return grad_var_ops # C_t + + def _dist_to_opt(self): + """Distance to optimum. 
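The variance estimate built in `_grad_variance` above is, in expectation, C = E[||g||^2] - ||E[g]||^2. A numpy sketch with exact averages over a toy batch of gradients (no moving averages, illustrative numbers only):

```python
import numpy as np

grads = np.random.randn(100, 5)                        # 100 minibatch gradients of dim 5
mean_sq_norm = np.mean(np.sum(grads ** 2, axis=1))     # E[||g||^2]
sq_norm_of_mean = np.sum(np.mean(grads, axis=0) ** 2)  # ||E[g]||^2
grad_var = mean_sq_norm - sq_norm_of_mean              # total gradient variance, >= 0
```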
+ + Returns: + D_t ops + """ + dist_to_opt_ops = [] + # Running average of the norm of gradient + self._grad_norm = tf.sqrt(self._grad_norm_squared) + avg_op = self._moving_averager.apply([self._grad_norm,]) + dist_to_opt_ops.append(avg_op) + with tf.control_dependencies([avg_op]): + self._grad_norm_avg = self._moving_averager.average(self._grad_norm) + # Single iteration distance estimation, note here + # self._grad_norm_avg is per variable + self._d_t = self._grad_norm_avg / self._grad_norm_squared_avg + # Running average of distance + avg_op = self._moving_averager.apply([self._d_t]) + dist_to_opt_ops.append(avg_op) + with tf.control_dependencies([avg_op]): + self._dist_to_opt_avg = tf.identity( + self._moving_averager.average(self._d_t)) + if self._sparsity_debias: + self._dist_to_opt_avg /= tf.sqrt(self._sparsity_avg) + return dist_to_opt_ops # D_t + + def _grad_sparsity(self): + """Gradient sparsity.""" + # If the sparse minibatch gradient has 10 percent of its entries + # non-zero, its sparsity is 0.1. + # The norm of dense gradient averaged from full dataset + # are roughly estimated norm of minibatch + # sparse gradient norm * sqrt(sparsity) + # An extension maybe only correct the sparse blob. + non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad]) + all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad]) + self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype) + self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype) + avg_op = self._moving_averager.apply([self._sparsity,]) + with tf.control_dependencies([avg_op]): + self._sparsity_avg = self._moving_averager.average(self._sparsity) + return avg_op + + def _prepare_variables(self): + """Prepare Variables for YellowFin. + + Returns: + Grad**2, Norm, Norm**2, Mean(Norm**2) ops + """ + self._moving_averager = tf.train.ExponentialMovingAverage( + decay=self._beta, zero_debias=self._zero_debias) + # assert self._grad is not None and len(self._grad) > 0 + # List for the returned Operations + prepare_variables_op = [] + + # Get per var g**2 and norm**2 + self._grad_squared = [] + self._grad_norm_squared = [] + + # Gradient squared + for v, g in zip(self._vars, self._grad): + if g is None: continue + with tf.colocate_with(v): + self._grad_squared.append(tf.square(g)) + + # Norm squared. + self._grad_norm_squared = [tf.reduce_sum(g_sq) + for g_sq in self._grad_squared] + + if self._sparsity_debias: + avg_op_sparsity = self._grad_sparsity() + prepare_variables_op.append(avg_op_sparsity) + + # The following running average on squared norm of gradient + # is shared by grad_var and dist_to_opt + avg_op = self._moving_averager.apply(self._grad_norm_squared) + + with tf.control_dependencies([avg_op]): + self._grad_norm_squared_avg = [self._moving_averager.average(val) + for val in self._grad_norm_squared] + self._grad_norm_squared = tf.add_n(self._grad_norm_squared) + self._grad_norm_squared_avg = tf.add_n(self._grad_norm_squared_avg) + + prepare_variables_op.append(avg_op) + return tf.group(*prepare_variables_op) + + def _get_cubic_root(self): + """Get the cubic root.""" + # We have the equation x^2 D^2 + (1-x)^4 * C / h_min^2 + # where x = sqrt(mu). + # We substitute x, which is sqrt(mu), with x = y + 1. + # It gives y^3 + py = q + # where p = (D^2 h_min^2)/(2*C) and q = -p. + # We use the Vieta's substitution to compute the root. + # There is only one real solution y (which is in [0, 1] ). 
+ # http://mathworld.wolfram.com/VietasSubstitution.html + assert_array = [ + tf.Assert( + tf.logical_not(tf.is_nan(self._dist_to_opt_avg)), + [self._dist_to_opt_avg,]), + tf.Assert( + tf.logical_not(tf.is_nan(self._h_min)), + [self._h_min,]), + tf.Assert( + tf.logical_not(tf.is_nan(self._grad_var)), + [self._grad_var,]), + tf.Assert( + tf.logical_not(tf.is_inf(self._dist_to_opt_avg)), + [self._dist_to_opt_avg,]), + tf.Assert( + tf.logical_not(tf.is_inf(self._h_min)), + [self._h_min,]), + tf.Assert( + tf.logical_not(tf.is_inf(self._grad_var)), + [self._grad_var,]) + ] + with tf.control_dependencies(assert_array): + p = self._dist_to_opt_avg**2 * self._h_min**2 / 2 / self._grad_var + w3 = (-tf.sqrt(p**2 + 4.0 / 27.0 * p**3) - p) / 2.0 + w = tf.sign(w3) * tf.pow(tf.abs(w3), 1.0/3.0) + y = w - p / 3.0 / w + x = y + 1 + return x + + def _get_lr_tensor(self): + """Get lr minimizing the surrogate. + + Returns: + The lr_t. + """ + lr = tf.squared_difference(1.0, tf.sqrt(self._mu)) / self._h_min + return lr + + def _get_mu_tensor(self): + """Get the min mu which minimize the surrogate. + + Returns: + The mu_t. + """ + root = self._get_cubic_root() + dr = self._h_max / self._h_min + mu = tf.maximum( + root**2, ((tf.sqrt(dr) - 1) / (tf.sqrt(dr) + 1))**2) + return mu + + def _yellowfin(self): + """YellowFin auto-tuning optimizer based on momentum SGD. + + Returns: + YF ops + (Curvature range, + Grad_variance, + Dist_to_opt, + Single-Step, + Auto-Tuning) + """ + # List for the returned Operations. + yellowfin_ops = [] + + # Curvature range ops. + curv_range_ops = self._curvature_range() + yellowfin_ops += curv_range_ops + # Estimate of gradient Variance ops. + grad_var_ops = self._grad_variance() + yellowfin_ops += grad_var_ops + # Distance to optimum ops. + dist_to_opt_ops = self._dist_to_opt() + yellowfin_ops += dist_to_opt_ops + + # Single-Step: minimizes the surrogate for the expected + # squared distance from the optimum of a local quadratic + # approximation after a single step while keeping all directions in the + # robust region. + self._mu = tf.identity(tf.cond(self._do_tune, + self._get_mu_tensor, + lambda: self._mu_var)) + with tf.control_dependencies([self._mu]): + self._lr = tf.identity(tf.cond(self._do_tune, + self._get_lr_tensor, + lambda: self._lr_var)) + + # Tune learning rate and momentum. + with tf.control_dependencies([self._mu, self._lr]): + self._mu = self._beta * self._mu_var + (1 - self._beta) * self._mu + self._lr = self._beta * self._lr_var + (1 - self._beta) * self._lr + yellowfin_ops.append(tf.assign(self._mu_var, self._mu)) + yellowfin_ops.append(tf.assign(self._lr_var, self._lr)) + + yellowfin_ops = tf.group(*yellowfin_ops) + return yellowfin_ops + + def get_name(self): + """Get optimizer name.""" + return self._momentum_optimizer.get_name() + + def apply_gradients(self, grads_and_vars, global_step=None, name=None): + """Applying gradients and tune hyperparams with YellowFin. + + Args: + grads_and_vars: List of (gradient, variable) pairs as returned by + compute_gradients(). + global_step: Optional Variable to increment by one after the + variables have been updated. + name: Optional name for the returned operation. Default to the + name passed to the Optimizer constructor. + + Returns: + (A group of operations) + Variable Update with Momentum ops, + YellowFin ops(Curvature, Variance, Distance) ops, + SingleStep and lr_mu tuning ops, + Step increment ops. 
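A numpy cross-check (with made-up values, not from the diff) that the closed-form Vieta/Cardano root computed in `_get_cubic_root` above agrees with a brute-force solve of the original cubic -x^3 + 3x^2 - (3 + p)x + 1 = 0, where p = D^2 * h_min^2 / (2C) and x = sqrt(mu):

```python
import numpy as np

D, C, h_min = 1.0, 0.5, 2.0                      # illustrative D, C, h_min
p = D ** 2 * h_min ** 2 / 2.0 / C

# Closed form via Vieta's substitution for y^3 + p*y + p = 0, then x = y + 1.
w3 = (-np.sqrt(p ** 2 + 4.0 / 27.0 * p ** 3) - p) / 2.0
w = np.sign(w3) * np.abs(w3) ** (1.0 / 3.0)
x_closed = (w - p / 3.0 / w) + 1.0

# Brute force: the unique real root of -x^3 + 3x^2 - (3 + p)x + 1.
roots = np.roots([-1.0, 3.0, -(3.0 + p), 1.0])
x_poly = roots[np.abs(roots.imag) < 1e-9].real[0]

assert 0.0 < x_poly < 1.0
assert abs(x_closed - x_poly) < 1e-6
```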
+ """ + self._grad, self._vars = zip(*[(g, t) + for g, t in grads_and_vars if g is not None]) + + # Var update with Momentum. + with tf.variable_scope("apply_updates"): + # Gradient Clipping? + if self._clip_thresh_var is not None: + self._grad, _ = tf.clip_by_global_norm( + self._grad, self._clip_thresh_var) + + apply_grad_op = self._momentum_optimizer.apply_gradients( + zip(self._grad, self._vars), + global_step=global_step, + name=name) + else: + apply_grad_op = self._momentum_optimizer.apply_gradients( + zip(self._grad, self._vars), + global_step=global_step, + name=name) + + # Begin lr and mu tuning. + with tf.variable_scope("prepare_yellowFin_variables"): + # the dependencies ideally only need to be after clip is done, + # i.e. depends on self._grads. However, the control_dependencies + # does not support indexed slice for sparse gradients. + # The alternative dependencies here might be slightly slower due + # to less parallelization. + with tf.control_dependencies([apply_grad_op,]): + prepare_variables_op = self._prepare_variables() + + with tf.variable_scope("yellowfin"): + with tf.control_dependencies([prepare_variables_op]): + yellowfin_op = self._yellowfin() + + # Update YellowFin step variable. + with tf.control_dependencies([yellowfin_op]): + self._increment_step_op = tf.assign_add(self._step, 1).op + + return tf.group(apply_grad_op, + prepare_variables_op, + yellowfin_op, + self._increment_step_op) + + def compute_gradients(self, + loss, + var_list, + global_step=None, + gate_gradients=GATE_OP, + aggregation_method=None, + colocate_gradients_with_ops=False, + name=None, + grad_loss=None): + """Compute gradients through momentum optimizer. + + Args: + loss: A Tensor containing the value to minimize. + var_list: Optional list or tuple of tf.Variable to update + to minimize loss. Defaults to the list of variables collected + in the graph under the key GraphKey.TRAINABLE_VARIABLES. + global_step: Optional Variable to increment by one after the + variables have been updated. + gate_gradients: How to gate the computation of gradients. + Can be GATE_NONE, GATE_OP, or GATE_GRAPH. + aggregation_method: Specifies the method used to combine + gradient terms. Valid values are defined in the class AggregationMethod. + colocate_gradients_with_ops: If True, try collocating gradients with + the corresponding op. + name: Optional name for the returned operation. Default to the name + passed to the Optimizer constructor. + grad_loss: Optional. A Tensor holding the gradient computed for loss. + + Returns: + A list of (gradient, variable) pairs. Variable is always present, + but gradient can be None. + """ + del global_step, name # Unused for now. + return self._momentum_optimizer.compute_gradients( + loss, + var_list=var_list, + gate_gradients=gate_gradients, + aggregation_method=aggregation_method, + colocate_gradients_with_ops=colocate_gradients_with_ops, + grad_loss=grad_loss) + + def minimize(self, + loss, + global_step=None, + var_list=None, + gate_gradients=GATE_OP, + aggregation_method=None, + colocate_gradients_with_ops=False, + name=None, + grad_loss=None): + """Adapted from TensorFlow Optimizer base class member function. + + Add operations to minimize `loss` by updating `var_list`. + This method simply combines calls `compute_gradients()` and + `apply_gradients()`. If you want to process the gradient before applying + them call `tf.gradients()` and `self.apply_gradients()` explicitly instead + of using this function. + + Args: + loss: A Tensor containing the value to minimize. 
+ global_step: Optional Variable to increment by one after the variables + have been updated. + var_list: Optional list or tuple of Variable objects to update to + minimize loss. Defaults to the list of variables collected in + the graph under the key GraphKeys.TRAINABLE_VARIABLES. + gate_gradients: How to gate the computation of gradients. + Can be GATE_NONE, GATE_OP, or GATE_GRAPH. + aggregation_method: Specifies the method used to combine gradient terms. + Valid values are defined in the class AggregationMethod. + colocate_gradients_with_ops: If True, try collocating gradients with + the corresponding op. + name: Optional name for the returned operation. + grad_loss: Optional. A Tensor holding the gradient computed for loss. + + Returns: + An Operation that updates the variables in var_list. + If global_step was not None, that operation also increments global_step. + + Raises: + ValueError: if no gradients are provided for any variable. + """ + grads_and_vars = self._momentum_optimizer.compute_gradients( + loss, + var_list=var_list, + gate_gradients=gate_gradients, + aggregation_method=aggregation_method, + colocate_gradients_with_ops=colocate_gradients_with_ops, + grad_loss=grad_loss) + + vars_with_grad = [v for g, v in grads_and_vars if g is not None] + if not vars_with_grad: + raise ValueError( + "No gradients provided for any variable, check your graph for ops" + " that do not support gradients, between variables %s and loss %s." % + ([str(v) for _, v in grads_and_vars], loss)) + for g, v in grads_and_vars: + print("g ", g) + print("v ", v) + + return self.apply_gradients(grads_and_vars, + global_step=global_step, + name=name) + + def get_slot(self, var, name): + """Return a slot named `name` created for `var`. + + Args: + var: A variable passed to `minimize()` or `apply_gradients()`. + name: A string. + + Returns: + The `Variable` for the slot if it was created, `None` otherwise. + """ + return self._momentum_optimizer.get_slot(var, name) + + def get_slot_names(self): + """Return a list of the names of the slots using MomentumOptimizer. + + Returns: + A list of strings. + """ + return self._momentum_optimizer.get_slot_names() diff --git a/tensor2tensor/utils/yellowfin_test.py b/tensor2tensor/utils/yellowfin_test.py new file mode 100644 index 000000000..fb0fc57f9 --- /dev/null +++ b/tensor2tensor/utils/yellowfin_test.py @@ -0,0 +1,212 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""YellowFin Test Module for TensorFlow.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np + +from tensor2tensor.utils.yellowfin import YellowFinOptimizer + +import tensorflow.compat.v1 as tf + + +n_dim = 1000000 +n_iter = 0 + + +class YellowFinTest(tf.test.TestCase): + + def tune_everything(self, x0squared, c, t, gmin, gmax): + del t + # First tune based on dynamic range + if c == 0: + dr = gmax / gmin + mustar = ((np.sqrt(dr) - 1) / (np.sqrt(dr) + 1))**2 + alpha_star = (1 + np.sqrt(mustar))**2/gmax + + return alpha_star, mustar + + dist_to_opt = x0squared + grad_var = c + max_curv = gmax + min_curv = gmin + const_fact = dist_to_opt * min_curv**2 / 2 / grad_var + coef = [-1, 3, -(3 + const_fact), 1] + roots = np.roots(coef) + roots = roots[np.real(roots) > 0] + roots = roots[np.real(roots) < 1] + root = roots[np.argmin(np.imag(roots))] + + assert root > 0 and root < 1 and np.absolute(root.imag) < 1e-6 + + dr = max_curv / min_curv + assert max_curv >= min_curv + mu = max(((np.sqrt(dr) - 1) / (np.sqrt(dr) + 1))**2, root**2) + + lr_min = (1 - np.sqrt(mu))**2 / min_curv + + alpha_star = lr_min + mustar = mu + + return alpha_star, mustar + + def testMeasurement(self): + opt = YellowFinOptimizer(zero_debias=False) + w = tf.Variable(np.ones([n_dim,]), + dtype=tf.float32, + name="w", + trainable=True) + b = tf.Variable(np.ones([1,], dtype=np.float32), + dtype=tf.float32, + name="b", + trainable=True) + x = tf.constant(np.ones([n_dim,], dtype=np.float32), + dtype=tf.float32) + _ = tf.multiply(w, x) + b # loss + tvars = tf.trainable_variables() + + w_grad_val = tf.placeholder(tf.float32, shape=(n_dim,)) + b_grad_val = tf.placeholder(tf.float32, shape=(1,)) + apply_op = opt.apply_gradients(zip([w_grad_val, b_grad_val], tvars)) + + init_op = tf.global_variables_initializer() + with tf.Session() as sess: + sess.run(init_op) + target_h_max = 0.0 + target_h_min = 0.0 + g_norm_squared_avg = 0.0 + g_norm_avg = 0.0 + g_avg = 0.0 + target_dist = 0.0 + for i in range(n_iter): + feed_dict = {w_grad_val: (i + 1) * np.ones([n_dim,], dtype=np.float32), + b_grad_val: (i + 1) * np.ones([1,], dtype=np.float32)} + res = sess.run([opt._curv_win, + opt._h_max, + opt._h_min, + opt._grad_var, + opt._dist_to_opt_avg, + apply_op], feed_dict=feed_dict) + + g_norm_squared_avg = ( + 0.999 * g_norm_squared_avg + + 0.001 * np.sum(((i + 1) * np.ones([n_dim + 1,]))**2)) + g_norm_avg = (0.999 * g_norm_avg + + 0.001 * np.linalg.norm((i + 1)*np.ones([n_dim + 1,]))) + g_avg = 0.999 * g_avg + 0.001 * (i + 1) + + target_h_max = 0.999 * target_h_max + 0.001 * (i + 1)**2*(n_dim + 1) + target_h_min = (0.999 * target_h_min + + 0.001 * max(1, i + 2 - 20)**2 * (n_dim + 1)) + target_var = g_norm_squared_avg - g_avg**2 * (n_dim + 1) + target_dist = (0.999 * target_dist + + 0.001 * g_norm_avg / g_norm_squared_avg) + + assert np.abs(target_h_max - res[1]) < np.abs(target_h_max) * 1e-3 + assert np.abs(target_h_min - res[2]) < np.abs(target_h_min) * 1e-3 + assert np.abs(target_var - res[3]) < np.abs(res[3]) * 1e-3 + assert np.abs(target_dist - res[4]) < np.abs(res[4]) * 1e-3 + + def testLrMu(self): + opt = YellowFinOptimizer(learning_rate=0.5, momentum=0.5, zero_debias=False) + w = tf.Variable(np.ones([n_dim,]), + dtype=tf.float32, + name="w", + trainable=True) + b = tf.Variable(np.ones([1,], + dtype=np.float32), + dtype=tf.float32, + name="b", + trainable=True) + x = tf.constant(np.ones([n_dim,], dtype=np.float32), dtype=tf.float32) + _ = tf.multiply(w, x) 
+ b # loss + tvars = tf.trainable_variables() + + w_grad_val = tf.Variable(np.zeros([n_dim,]), + dtype=tf.float32, + trainable=False) + b_grad_val = tf.Variable(np.zeros([1,]), + dtype=tf.float32, + trainable=False) + apply_op = opt.apply_gradients(zip([w_grad_val, b_grad_val], tvars)) + + init_op = tf.global_variables_initializer() + with tf.Session() as sess: + sess.run(init_op) + target_h_max = 0.0 + target_h_min = 0.0 + g_norm_squared_avg = 0.0 + g_norm_avg = 0.0 + g_avg = 0.0 + target_dist = 0.0 + target_lr = 0.5 + target_mu = 0.5 + for i in range(n_iter): + + sess.run(tf.assign(w_grad_val, (i + 1) * np.ones([n_dim,], + dtype=np.float32))) + sess.run(tf.assign(b_grad_val, (i + 1) * np.ones([1,], + dtype=np.float32))) + + res = sess.run([opt._curv_win, + opt._h_max, + opt._h_min, + opt._grad_var, + opt._dist_to_opt_avg, + opt._lr_var, + opt._mu_var, + apply_op]) + + res[5] = opt._lr_var.eval() + res[6] = opt._mu_var.eval() + + g_norm_squared_avg = ( + 0.999 * g_norm_squared_avg + + 0.001 * np.sum(((i + 1) * np.ones([n_dim + 1,]))**2)) + g_norm_avg = (0.999 * g_norm_avg + + 0.001 * np.linalg.norm((i + 1)*np.ones([n_dim + 1,]))) + g_avg = 0.999 * g_avg + 0.001 * (i + 1) + + target_h_max = 0.999 * target_h_max + 0.001 * (i + 1)**2 * (n_dim + 1) + target_h_min = (0.999 * target_h_min + + 0.001 * max(1, i + 2 - 20)**2 * (n_dim + 1)) + target_var = g_norm_squared_avg - g_avg**2 * (n_dim + 1) + target_dist = (0.999 * target_dist + + 0.001 * g_norm_avg / g_norm_squared_avg) + + if i > 0: + lr, mu = self.tune_everything(target_dist**2, + target_var, + 1, + target_h_min, + target_h_max) + target_lr = 0.999 * target_lr + 0.001 * lr + target_mu = 0.999 * target_mu + 0.001 * mu + + assert np.abs(target_h_max - res[1]) < np.abs(target_h_max) * 1e-3 + assert np.abs(target_h_min - res[2]) < np.abs(target_h_min) * 1e-3 + assert np.abs(target_var - res[3]) < np.abs(res[3]) * 1e-3 + assert np.abs(target_dist - res[4]) < np.abs(res[4]) * 1e-3 + assert (target_lr == 0.0 or + (np.abs(target_lr - res[5]) < np.abs(res[5]) * 1e-3)) + assert (target_mu == 0.0 or + (np.abs(target_mu - res[6]) < np.abs(res[6]) * 5e-3)) + + +if __name__ == "__main__": + tf.test.main() diff --git a/tensor2tensor/visualization/TransformerVisualization.ipynb b/tensor2tensor/visualization/TransformerVisualization.ipynb new file mode 100644 index 000000000..dea6b3c6b --- /dev/null +++ b/tensor2tensor/visualization/TransformerVisualization.ipynb @@ -0,0 +1,265 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "cellView": "form", + "colab": { + "autoexec": { + "startup": false, + "wait_interval": 0 + } + }, + "colab_type": "code", + "id": "6uNrFWq5BRba" + }, + "outputs": [], + "source": [ + "#@title\n", + "# Copyright 2018 Google LLC.\n", + "\n", + "# Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Create Your Own Visualizations!\n", + "Instructions:\n", + "1. 
Install tensor2tensor and train up a Transformer model following the instruction in the repository https://github.com/tensorflow/tensor2tensor.\n", + "2. Update cell 3 to point to your checkpoint, it is currently set up to read from the default checkpoint location that would be created from following the instructions above.\n", + "3. If you used custom hyper parameters then update cell 4.\n", + "4. Run the notebook!" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "import tensorflow as tf\n", + "\n", + "from tensor2tensor import problems\n", + "from tensor2tensor.bin import t2t_decoder # To register the hparams set\n", + "from tensor2tensor.utils import registry\n", + "from tensor2tensor.utils import trainer_lib\n", + "from tensor2tensor.visualization import attention\n", + "from tensor2tensor.visualization import visualization" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "application/javascript": [ + "require.config({\n", + " paths: {\n", + " d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.8/d3.min'\n", + " }\n", + "});" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "%%javascript\n", + "require.config({\n", + " paths: {\n", + " d3: '//cdnjs.cloudflare.com/ajax/libs/d3/3.4.8/d3.min'\n", + " }\n", + "});" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## HParams" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "# PUT THE MODEL YOU WANT TO LOAD HERE!\n", + "CHECKPOINT = os.path.expanduser('~/t2t_train/translate_ende_wmt32k/transformer-transformer_base_single_gpu')" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# HParams\n", + "problem_name = 'translate_ende_wmt32k'\n", + "data_dir = os.path.expanduser('~/t2t_data/')\n", + "model_name = \"transformer\"\n", + "hparams_set = \"transformer_base_single_gpu\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualization" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Setting T2TModel mode to 'eval'\n", + "INFO:tensorflow:Setting hparams.layer_prepostprocess_dropout to 0.0\n", + "INFO:tensorflow:Setting hparams.symbol_dropout to 0.0\n", + "INFO:tensorflow:Setting hparams.attention_dropout to 0.0\n", + "INFO:tensorflow:Setting hparams.dropout to 0.0\n", + "INFO:tensorflow:Setting hparams.relu_dropout to 0.0\n", + "INFO:tensorflow:Using variable initializer: uniform_unit_scaling\n", + "INFO:tensorflow:Transforming feature 'inputs' with symbol_modality_33708_512.bottom\n", + "INFO:tensorflow:Transforming 'targets' with symbol_modality_33708_512.targets_bottom\n", + "INFO:tensorflow:Building model body\n", + "WARNING:tensorflow:From /tmp/t2t/tensor2tensor/layers/common_layers.py:512: calling reduce_mean (from tensorflow.python.ops.math_ops) with keep_dims is deprecated and will be removed in a future version.\n", + "Instructions for updating:\n", + "keep_dims is deprecated, use keepdims instead\n", + "INFO:tensorflow:Transforming body output with symbol_modality_33708_512.top\n", + "WARNING:tensorflow:From /tmp/t2t/tensor2tensor/layers/common_layers.py:1707: softmax_cross_entropy_with_logits (from tensorflow.python.ops.nn_ops) 
is deprecated and will be removed in a future version.\n",
+ "Instructions for updating:\n",
+ "\n",
+ "Future major versions of TensorFlow will allow gradients to flow\n",
+ "into the labels input on backprop by default.\n",
+ "\n",
+ "See tf.nn.softmax_cross_entropy_with_logits_v2.\n",
+ "\n",
+ "INFO:tensorflow:Greedy Decoding\n"
+ ]
+ }
+ ],
+ "source": [
+ "visualizer = visualization.AttentionVisualizer(hparams_set, model_name, data_dir, problem_name, beam_size=1)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "INFO:tensorflow:Create CheckpointSaverHook.\n",
+ "INFO:tensorflow:Restoring parameters from /usr/local/google/home/llion/t2t_train/translate_ende_wmt32k/transformer-transformer_base_single_gpu/model.ckpt-1\n"
+ ]
+ }
+ ],
+ "source": [
+ "tf.Variable(0, dtype=tf.int64, trainable=False, name='global_step')\n",
+ "\n",
+ "sess = tf.train.MonitoredTrainingSession(\n",
+ "    checkpoint_dir=CHECKPOINT,\n",
+ "    save_summaries_secs=0,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "INFO:tensorflow:Saving checkpoints for 1 into /usr/local/google/home/llion/t2t_train/translate_ende_wmt32k/transformer-transformer_base_single_gpu/model.ckpt.\n"
+ ]
+ }
+ ],
+ "source": [
+ "input_sentence = \"I have two dogs.\"\n",
+ "output_string, inp_text, out_text, att_mats = visualizer.get_vis_data_from_string(sess, input_sentence)\n",
+ "print(output_string)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Interpreting the Visualizations\n",
+ "- The layers drop-down allows you to view the different Transformer layers (0-indexed).\n",
+ "  - Tip: The first layer, last layer and second-to-last layer are usually the most interpretable.\n",
+ "- The attention drop-down lets you select which type of attention to view:\n",
+ "  - All: Shows all types of attention together. NOTE: Heads shown in the same color in the decoder self-attention and the encoder-decoder attention are unrelated, since those attentions do not share parameters.\n",
+ "  - Input - Input: Shows only the encoder self-attention.\n",
+ "  - Input - Output: Shows the decoder's attention on the encoder. NOTE: Every decoder layer attends to the final layer of the encoder, so the visualization shows the attention on the final encoder layer regardless of which layer is selected in the drop-down.\n",
+ "  - Output - Output: Shows only the decoder self-attention. NOTE: The visualization can be slightly misleading in the first layer, since the text shown is the target of the decoder; the input to the decoder at layer 0 is this text with a GO symbol prepended.\n",
+ "- The colored squares represent the different attention heads.\n",
+ "  - You can hide or show a given head by clicking on its color.\n",
+ "  - Double-clicking a color hides all other colors; double-clicking it again when it is the only head showing brings all the heads back.\n",
+ "- You can hover over a word to see the individual attention weights for just that position.\n",
+ "  - Hovering over the words on the left shows what that position attended to.\n",
+ "  - Hovering over the words on the right shows what positions attended to it."
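If you would like to poke at the visualization itself before training a model, the `attention.show()` helper added later in this diff (`tensor2tensor/visualization/attention.py`) can be driven with random matrices. The sketch below is illustrative only: the output tokens are invented, the sizes are made up, and it assumes a running Jupyter kernel so the Javascript output can render.

```python
# Toy smoke test for the notebook's final cell; purely illustrative.
# Shapes follow attention.py's docstrings: one array per layer of
# (batch_size, num_heads, query_length, memory_length).
import numpy as np
from tensor2tensor.visualization import attention

inp_text = ['I_', 'have_', 'two_', 'dogs_', '._', '<EOS>']
out_text = ['Ich_', 'habe_', 'zwei_', 'Hunde_', '._', '<EOS>']  # made-up tokens

num_layers, batch_size, num_heads = 2, 1, 4
enc_atts = [np.random.rand(batch_size, num_heads, len(inp_text), len(inp_text))
            for _ in range(num_layers)]
dec_atts = [np.random.rand(batch_size, num_heads, len(out_text), len(out_text))
            for _ in range(num_layers)]
encdec_atts = [np.random.rand(batch_size, num_heads, len(out_text), len(inp_text))
               for _ in range(num_layers)]

# Renders the interactive D3 view in the current notebook output cell.
attention.show(inp_text, out_text, enc_atts, dec_atts, encdec_atts)
```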
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "attention.show(inp_text, out_text, *att_mats)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/tensor2tensor/visualization/__init__.py b/tensor2tensor/visualization/__init__.py new file mode 100644 index 000000000..06080ebe9 --- /dev/null +++ b/tensor2tensor/visualization/__init__.py @@ -0,0 +1,16 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + diff --git a/tensor2tensor/visualization/attention.js b/tensor2tensor/visualization/attention.js new file mode 100644 index 000000000..ae2deb6bd --- /dev/null +++ b/tensor2tensor/visualization/attention.js @@ -0,0 +1,363 @@ +/** + * @fileoverview Transformer Visualization D3 javascript code. + */ + +requirejs(['jquery', 'd3'], +function($, d3) { + +var attention = window.attention; + +const TEXT_SIZE = 15; +const BOXWIDTH = TEXT_SIZE * 8; +const BOXHEIGHT = TEXT_SIZE * 1.5; +const WIDTH = 2000; +const HEIGHT = attention.all.bot_text.length * BOXHEIGHT * 2 + 100; +const MATRIX_WIDTH = 150; +const head_colours = d3.scale.category10(); +const CHECKBOX_SIZE = 20; + +function lighten(colour) { + var c = d3.hsl(colour); + var increment = (1 - c.l) * 0.6; + c.l += increment; + c.s -= increment; + return c; +} + +function transpose(mat) { + return mat[0].map(function(col, i) { + return mat.map(function(row) { + return row[i]; + }); + }); +} + +function zip(a, b) { + return a.map(function (e, i) { + return [e, b[i]]; + }); +} + + +function renderVis(id, top_text, bot_text, attention_heads, config) { + $(id).empty(); + var svg = d3.select(id) + .append('svg') + .attr("width", WIDTH) + .attr("height", HEIGHT); + + var att_data = []; + for (var i=0; i < attention_heads.length; i++) { + var att_trans = transpose(attention_heads[i]); + att_data.push(zip(attention_heads[i], att_trans)); + } + + renderText(svg, top_text, true, att_data, 0); + renderText(svg, bot_text, false, att_data, MATRIX_WIDTH + BOXWIDTH); + + renderAttentionHighlights(svg, att_data); + + svg.append("g").classed("attention_heads", true); + + renderAttention(svg, attention_heads); + + draw_checkboxes(config, 0, svg, attention_heads); +} + + +function renderText(svg, text, is_top, att_data, left_pos) { + var id = is_top ? 
"top" : "bottom"; + var textContainer = svg.append("svg:g") + .attr("id", id); + + textContainer.append("g").classed("attention_boxes", true) + .selectAll("g") + .data(att_data) + .enter() + .append("g") + .selectAll("rect") + .data(function(d) {return d;}) + .enter() + .append("rect") + .attr("x", function(d, i, j) { + return left_pos + box_offset(j); + }) + .attr("y", function(d, i) { + return (+1) * BOXHEIGHT; + }) + .attr("width", BOXWIDTH/active_heads()) + .attr("height", function() { return BOXHEIGHT; }) + .attr("fill", function(d, i, j) { + return head_colours(j); + }) + .style("opacity", 0.0); + + + var tokenContainer = textContainer.append("g").selectAll("g") + .data(text) + .enter() + .append("g"); + + tokenContainer.append("rect") + .classed("background", true) + .style("opacity", 0.0) + .attr("fill", "lightgray") + .attr("x", left_pos) + .attr("y", function(d, i) { + return (i+1) * BOXHEIGHT; + }) + .attr("width", BOXWIDTH) + .attr("height", BOXHEIGHT); + + var theText = tokenContainer.append("text") + .text(function(d) { return d; }) + .attr("font-size", TEXT_SIZE + "px") + .style("cursor", "default") + .style("-webkit-user-select", "none") + .attr("x", left_pos) + .attr("y", function(d, i) { + return (i+1) * BOXHEIGHT; + }); + + if (is_top) { + theText.style("text-anchor", "end") + .attr("dx", BOXWIDTH - TEXT_SIZE) + .attr("dy", TEXT_SIZE); + } else { + theText.style("text-anchor", "start") + .attr("dx", + TEXT_SIZE) + .attr("dy", TEXT_SIZE); + } + + tokenContainer.on("mouseover", function(d, index) { + textContainer.selectAll(".background") + .style("opacity", function(d, i) { + return i == index ? 1.0 : 0.0; + }); + + svg.selectAll(".attention_heads").style("display", "none"); + + svg.selectAll(".line_heads") // To get the nesting to work. + .selectAll(".att_lines") + .attr("stroke-opacity", function(d) { + return 1.0; + }) + .attr("y1", function(d, i) { + if (is_top) { + return (index+1) * BOXHEIGHT + (BOXHEIGHT/2); + } else { + return (i+1) * BOXHEIGHT + (BOXHEIGHT/2); + } + }) + .attr("x1", BOXWIDTH) + .attr("y2", function(d, i) { + if (is_top) { + return (i+1) * BOXHEIGHT + (BOXHEIGHT/2); + } else { + return (index+1) * BOXHEIGHT + (BOXHEIGHT/2); + } + }) + .attr("x2", BOXWIDTH + MATRIX_WIDTH) + .attr("stroke-width", 2) + .attr("stroke", function(d, i, j) { + return head_colours(j); + }) + .attr("stroke-opacity", function(d, i, j) { + if (is_top) {d = d[0];} else {d = d[1];} + if (config.head_vis[j]) { + if (d) { + return d[index]; + } else { + return 0.0; + } + } else { + return 0.0; + } + }); + + + function updateAttentionBoxes() { + var id = is_top ? "bottom" : "top"; + var the_left_pos = is_top ? 
MATRIX_WIDTH + BOXWIDTH : 0; + svg.select("#" + id) + .selectAll(".attention_boxes") + .selectAll("g") + .selectAll("rect") + .attr("x", function(d, i, j) { return the_left_pos + box_offset(j); }) + .attr("y", function(d, i) { return (i+1) * BOXHEIGHT; }) + .attr("width", BOXWIDTH/active_heads()) + .attr("height", function() { return BOXHEIGHT; }) + .style("opacity", function(d, i, j) { + if (is_top) {d = d[0];} else {d = d[1];} + if (config.head_vis[j]) + if (d) { + return d[index]; + } else { + return 0.0; + } + else + return 0.0; + + }); + } + + updateAttentionBoxes(); + }); + + textContainer.on("mouseleave", function() { + d3.select(this).selectAll(".background") + .style("opacity", 0.0); + + svg.selectAll(".att_lines").attr("stroke-opacity", 0.0); + svg.selectAll(".attention_heads").style("display", "inline"); + svg.selectAll(".attention_boxes") + .selectAll("g") + .selectAll("rect") + .style("opacity", 0.0); + }); +} + +function renderAttentionHighlights(svg, attention) { + var line_container = svg.append("g"); + line_container.selectAll("g") + .data(attention) + .enter() + .append("g") + .classed("line_heads", true) + .selectAll("line") + .data(function(d){return d;}) + .enter() + .append("line").classed("att_lines", true); +} + +function renderAttention(svg, attention_heads) { + var line_container = svg.selectAll(".attention_heads"); + line_container.html(null); + for(var h=0; h").val(i).text(i)); +} + +$("#layer").on('change', function(e) { + config.layer = +e.currentTarget.value; + render(); +}); + +$("#att_type").on('change', function(e) { + config.att_type = e.currentTarget.value; + render(); +}); + +$("button").on('click', visualize); + +visualize(); + +}); diff --git a/tensor2tensor/visualization/attention.py b/tensor2tensor/visualization/attention.py new file mode 100644 index 000000000..7ab271653 --- /dev/null +++ b/tensor2tensor/visualization/attention.py @@ -0,0 +1,163 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module for postprocessing and displaying transformer attentions. + +This module is designed to be called from an ipython notebook. +""" + +import json +import os + +import IPython.display as display + +import numpy as np + +vis_html = """ + + Layer: + Attention: + +
+""" + + +__location__ = os.path.realpath( + os.path.join(os.getcwd(), os.path.dirname(__file__))) +vis_js = open(os.path.join(__location__, 'attention.js')).read() + + +def show(inp_text, out_text, enc_atts, dec_atts, encdec_atts): + enc_att, dec_att, encdec_att = (resize(enc_atts), + resize(dec_atts), resize(encdec_atts)) + attention = _get_attention( + inp_text, out_text, enc_att, dec_att, encdec_att) + att_json = json.dumps(attention) + _show_attention(att_json) + + +def _show_attention(att_json): + display.display(display.HTML(vis_html)) + display.display(display.Javascript('window.attention = %s' % att_json)) + display.display(display.Javascript(vis_js)) + + +def resize(att_mat, max_length=None): + """Normalize attention matrices and reshape as necessary.""" + for i, att in enumerate(att_mat): + # Add extra batch dim for viz code to work. + if att.ndim == 3: + att = np.expand_dims(att, axis=0) + if max_length is not None: + # Sum across different attention values for each token. + att = att[:, :, :max_length, :max_length] + row_sums = np.sum(att, axis=2) + # Normalize + att /= row_sums[:, :, np.newaxis] + att_mat[i] = att + return att_mat + + +def _get_attention(inp_text, out_text, enc_atts, dec_atts, encdec_atts): + """Compute representation of the attention ready for the d3 visualization. + + Args: + inp_text: list of strings, words to be displayed on the left of the vis + out_text: list of strings, words to be displayed on the right of the vis + enc_atts: numpy array, encoder self-attentions + [num_layers, batch_size, num_heads, enc_length, enc_length] + dec_atts: numpy array, decoder self-attentions + [num_layers, batch_size, num_heads, dec_length, dec_length] + encdec_atts: numpy array, encoder-decoder attentions + [num_layers, batch_size, num_heads, dec_length, enc_length] + + Returns: + Dictionary of attention representations with the structure: + { + 'all': Representations for showing all attentions at the same time. 
+ 'inp_inp': Representations for showing encoder self-attentions + 'inp_out': Representations for showing encoder-decoder attentions + 'out_out': Representations for showing decoder self-attentions + } + and each sub-dictionary has structure: + { + 'att': list of inter attentions matrices, one for each attention head + 'top_text': list of strings, words to be displayed on the left of the vis + 'bot_text': list of strings, words to be displayed on the right of the vis + } + """ + def get_full_attention(layer): + """Get the full input+output - input+output attentions.""" + enc_att = enc_atts[layer][0] + dec_att = dec_atts[layer][0] + encdec_att = encdec_atts[layer][0] + enc_att = np.transpose(enc_att, [0, 2, 1]) + dec_att = np.transpose(dec_att, [0, 2, 1]) + encdec_att = np.transpose(encdec_att, [0, 2, 1]) + # [heads, query_length, memory_length] + enc_length = enc_att.shape[1] + dec_length = dec_att.shape[1] + num_heads = enc_att.shape[0] + first = np.concatenate([enc_att, encdec_att], axis=2) + second = np.concatenate( + [np.zeros((num_heads, dec_length, enc_length)), dec_att], axis=2) + full_att = np.concatenate([first, second], axis=1) + return [ha.T.tolist() for ha in full_att] + + def get_inp_inp_attention(layer): + att = np.transpose(enc_atts[layer][0], (0, 2, 1)) + return [ha.T.tolist() for ha in att] + + def get_out_inp_attention(layer): + att = np.transpose(encdec_atts[layer][0], (0, 2, 1)) + return [ha.T.tolist() for ha in att] + + def get_out_out_attention(layer): + att = np.transpose(dec_atts[layer][0], (0, 2, 1)) + return [ha.T.tolist() for ha in att] + + def get_attentions(get_attention_fn): + num_layers = len(enc_atts) + return [get_attention_fn(i) for i in range(num_layers)] + + attentions = { + 'all': { + 'att': get_attentions(get_full_attention), + 'top_text': inp_text + out_text, + 'bot_text': inp_text + out_text, + }, + 'inp_inp': { + 'att': get_attentions(get_inp_inp_attention), + 'top_text': inp_text, + 'bot_text': inp_text, + }, + 'inp_out': { + 'att': get_attentions(get_out_inp_attention), + 'top_text': inp_text, + 'bot_text': out_text, + }, + 'out_out': { + 'att': get_attentions(get_out_out_attention), + 'top_text': out_text, + 'bot_text': out_text, + }, + } + + return attentions diff --git a/tensor2tensor/visualization/visualization.py b/tensor2tensor/visualization/visualization.py new file mode 100644 index 000000000..b700093fc --- /dev/null +++ b/tensor2tensor/visualization/visualization.py @@ -0,0 +1,211 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
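The "All" view assembled by `get_full_attention` above lays the three attention types out as one block matrix over the concatenated input+output token list; one quadrant is zero-filled because the model has no separate attention from input positions to output positions. A minimal NumPy sketch of that layout, with made-up sizes and the per-head transposes omitted for brevity:

```python
# Illustrative shapes only; the real assembly is get_full_attention in
# attention.py, which also reorders each head to [query, memory] first.
import numpy as np

num_heads, inp_len, out_len = 4, 5, 7  # made-up sizes

enc_att = np.random.rand(num_heads, inp_len, inp_len)     # Input - Input
encdec_att = np.random.rand(num_heads, inp_len, out_len)  # Input rows, Output columns
dec_att = np.random.rand(num_heads, out_len, out_len)     # Output - Output

top = np.concatenate([enc_att, encdec_att], axis=2)              # (heads, inp, inp+out)
bottom = np.concatenate(
    [np.zeros((num_heads, out_len, inp_len)), dec_att], axis=2)  # zero quadrant + dec
full_att = np.concatenate([top, bottom], axis=1)                 # (heads, inp+out, inp+out)

assert full_att.shape == (num_heads, inp_len + out_len, inp_len + out_len)
```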
+ +"""Shared code for visualizing transformer attentions.""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import numpy as np + +# To register the hparams set +from tensor2tensor import models # pylint: disable=unused-import +from tensor2tensor import problems +from tensor2tensor.utils import registry +from tensor2tensor.utils import trainer_lib + +import tensorflow.compat.v1 as tf +from tensorflow.compat.v1 import estimator as tf_estimator + +EOS_ID = 1 + + +class AttentionVisualizer(object): + """Helper object for creating Attention visualizations.""" + + def __init__( + self, hparams_set, model_name, data_dir, problem_name, beam_size=1): + inputs, targets, samples, att_mats = build_model( + hparams_set, model_name, data_dir, problem_name, beam_size=beam_size) + + # Fetch the problem + ende_problem = problems.problem(problem_name) + encoders = ende_problem.feature_encoders(data_dir) + + self.inputs = inputs + self.targets = targets + self.att_mats = att_mats + self.samples = samples + self.encoders = encoders + + def encode(self, input_str): + """Input str to features dict, ready for inference.""" + inputs = self.encoders["inputs"].encode(input_str) + [EOS_ID] + batch_inputs = np.reshape(inputs, [1, -1, 1, 1]) # Make it 3D. + return batch_inputs + + def decode(self, integers): + """List of ints to str.""" + integers = list(np.squeeze(integers)) + return self.encoders["targets"].decode(integers) + + def encode_list(self, integers): + """List of ints to list of str.""" + integers = list(np.squeeze(integers)) + return self.encoders["inputs"].decode_list(integers) + + def decode_list(self, integers): + """List of ints to list of str.""" + integers = list(np.squeeze(integers)) + return self.encoders["targets"].decode_list(integers) + + def get_vis_data_from_string(self, sess, input_string): + """Constructs the data needed for visualizing attentions. + + Args: + sess: A tf.Session object. + input_string: The input sentence to be translated and visualized. + + Returns: + Tuple of ( + output_string: The translated sentence. + input_list: Tokenized input sentence. + output_list: Tokenized translation. + att_mats: Tuple of attention matrices; ( + enc_atts: Encoder self attention weights. + A list of `num_layers` numpy arrays of size + (batch_size, num_heads, inp_len, inp_len) + dec_atts: Decoder self attention weights. + A list of `num_layers` numpy arrays of size + (batch_size, num_heads, out_len, out_len) + encdec_atts: Encoder-Decoder attention weights. + A list of `num_layers` numpy arrays of size + (batch_size, num_heads, out_len, inp_len) + ) + """ + encoded_inputs = self.encode(input_string) + + # Run inference graph to get the translation. + out = sess.run(self.samples, { + self.inputs: encoded_inputs, + }) + + # Run the decoded translation through the training graph to get the + # attention tensors. + att_mats = sess.run(self.att_mats, { + self.inputs: encoded_inputs, + self.targets: np.reshape(out, [1, -1, 1, 1]), + }) + + output_string = self.decode(out) + input_list = self.encode_list(encoded_inputs) + output_list = self.decode_list(out) + + return output_string, input_list, output_list, att_mats + + +def build_model(hparams_set, model_name, data_dir, problem_name, beam_size=1): + """Build the graph required to fetch the attention weights. + + Args: + hparams_set: HParams set to build the model with. + model_name: Name of model. + data_dir: Path to directory containing training data. + problem_name: Name of problem. 
+ beam_size: (Optional) Number of beams to use when decoding a translation. + If set to 1 (default) then greedy decoding is used. + + Returns: + Tuple of ( + inputs: Input placeholder to feed in ids to be translated. + targets: Targets placeholder to feed to translation when fetching + attention weights. + samples: Tensor representing the ids of the translation. + att_mats: Tensors representing the attention weights. + ) + """ + hparams = trainer_lib.create_hparams( + hparams_set, data_dir=data_dir, problem_name=problem_name) + translate_model = registry.model(model_name)( + hparams, tf_estimator.ModeKeys.EVAL) + + inputs = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="inputs") + targets = tf.placeholder(tf.int32, shape=(1, None, 1, 1), name="targets") + translate_model({ + "inputs": inputs, + "targets": targets, + }) + + # Must be called after building the training graph, so that the dict will + # have been filled with the attention tensors. BUT before creating the + # inference graph otherwise the dict will be filled with tensors from + # inside a tf.while_loop from decoding and are marked unfetchable. + att_mats = get_att_mats(translate_model) + + with tf.variable_scope(tf.get_variable_scope(), reuse=True): + samples = translate_model.infer({ + "inputs": inputs, + }, beam_size=beam_size)["outputs"] + + return inputs, targets, samples, att_mats + + +def get_att_mats(translate_model): + """Get's the tensors representing the attentions from a build model. + + The attentions are stored in a dict on the Transformer object while building + the graph. + + Args: + translate_model: Transformer object to fetch the attention weights from. + + Returns: + Tuple of attention matrices; ( + enc_atts: Encoder self attention weights. + A list of `num_layers` numpy arrays of size + (batch_size, num_heads, inp_len, inp_len) + dec_atts: Decoder self attetnion weights. + A list of `num_layers` numpy arrays of size + (batch_size, num_heads, out_len, out_len) + encdec_atts: Encoder-Decoder attention weights. + A list of `num_layers` numpy arrays of size + (batch_size, num_heads, out_len, inp_len) + ) + """ + enc_atts = [] + dec_atts = [] + encdec_atts = [] + + prefix = "transformer/body/" + postfix_self_attention = "/multihead_attention/dot_product_attention" + if translate_model.hparams.self_attention_type == "dot_product_relative": + postfix_self_attention = ("/multihead_attention/" + "dot_product_attention_relative") + postfix_encdec = "/multihead_attention/dot_product_attention" + + for i in range(translate_model.hparams.num_hidden_layers): + enc_att = translate_model.attention_weights[ + "%sencoder/layer_%i/self_attention%s" + % (prefix, i, postfix_self_attention)] + dec_att = translate_model.attention_weights[ + "%sdecoder/layer_%i/self_attention%s" + % (prefix, i, postfix_self_attention)] + encdec_att = translate_model.attention_weights[ + "%sdecoder/layer_%i/encdec_attention%s" % (prefix, i, postfix_encdec)] + enc_atts.append(enc_att) + dec_atts.append(dec_att) + encdec_atts.append(encdec_att) + + return enc_atts, dec_atts, encdec_atts diff --git a/tensor2tensor/visualization/visualization_test.py b/tensor2tensor/visualization/visualization_test.py new file mode 100644 index 000000000..ba78a8f9c --- /dev/null +++ b/tensor2tensor/visualization/visualization_test.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# Copyright 2023 The Tensor2Tensor Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for visualization library. + + IF ANY OF THESE TESTS BREAK PLEASE UPDATE THE CODE IN THE VIZ NOTEBOOK +****************************************************************************** + +Any fixes you have to make to this test or visualization.py to fix this test +might have to be reflected in the visualization notebook, for example if the +name of the hparams_set changes. + +If you need help testing the changes please contact llion@. +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os + +from tensor2tensor.utils import trainer_lib +from tensor2tensor.visualization import visualization +import tensorflow.compat.v1 as tf + + +def get_data_dir(): + pkg, _ = os.path.split(__file__) + pkg, _ = os.path.split(pkg) + return os.path.join(pkg, 'test_data') + + +problem_name = 'translate_ende_wmt32k' +model_name = 'transformer' +hparams_set = 'transformer_tiny' + + +class VisualizationTest(tf.test.TestCase): + + def setUp(self): + super(VisualizationTest, self).setUp() + self.data_dir = get_data_dir() + + def test_build_model_greedy(self): + inputs, targets, outputs, _ = visualization.build_model( + hparams_set, model_name, self.data_dir, problem_name, beam_size=1) + + self.assertAllEqual((1, None, 1, 1), inputs.shape.as_list()) + self.assertAllEqual((1, None, 1, 1), targets.shape.as_list()) + self.assertAllEqual((None, None), outputs.shape.as_list()) + + def test_build_model_beam(self): + inputs, targets, outputs, _ = visualization.build_model( + hparams_set, model_name, self.data_dir, problem_name, beam_size=8) + + self.assertAllEqual((1, None, 1, 1), inputs.shape.as_list()) + self.assertAllEqual((1, None, 1, 1), targets.shape.as_list()) + self.assertAllEqual((None, None), outputs.shape.as_list()) + + def test_get_vis_data_from_string(self): + visualizer = visualization.AttentionVisualizer( + hparams_set, model_name, self.data_dir, problem_name, beam_size=8) + + input_sentence = 'I have two dogs.' + with self.test_session() as sess: + sess.run(tf.global_variables_initializer()) + _, inp_text, out_text, att_mats = ( + visualizer.get_vis_data_from_string(sess, input_sentence)) + + self.assertAllEqual( + [u'I_', u'have_', u'two_', u'dogs_', u'._', u''], inp_text) + + hparams = trainer_lib.create_hparams( + hparams_set, data_dir=self.data_dir, problem_name=problem_name) + + enc_atts, dec_atts, encdec_atts = att_mats + + self.assertAllEqual(hparams.num_hidden_layers, len(enc_atts)) + + enc_atts = enc_atts[0] + dec_atts = dec_atts[0] + encdec_atts = encdec_atts[0] + + batch_size = 1 + num_heads = hparams.num_heads + inp_len = len(inp_text) + out_len = len(out_text) + + self.assertAllEqual( + (batch_size, num_heads, inp_len, inp_len), enc_atts.shape) + self.assertAllEqual( + (batch_size, num_heads, out_len, out_len), dec_atts.shape) + self.assertAllEqual( + (batch_size, num_heads, out_len, inp_len), encdec_atts.shape) + +if __name__ == '__main__': + tf.test.main()
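For readers who prefer a plain script over the notebook, the flow exercised by `visualization_test.py` above looks roughly like the sketch below. It assumes a checkpoint already exists from training `transformer` with `transformer_base_single_gpu` on `translate_ende_wmt32k`, as described in the notebook; the paths and the input sentence are placeholders.

```python
# Rough script-level equivalent of the notebook cells; paths are placeholders.
import os

import tensorflow.compat.v1 as tf

from tensor2tensor.visualization import visualization

CHECKPOINT = os.path.expanduser(
    '~/t2t_train/translate_ende_wmt32k/transformer-transformer_base_single_gpu')
DATA_DIR = os.path.expanduser('~/t2t_data/')

visualizer = visualization.AttentionVisualizer(
    'transformer_base_single_gpu', 'transformer', DATA_DIR,
    'translate_ende_wmt32k', beam_size=1)

# MonitoredTrainingSession expects a global_step variable to restore.
tf.Variable(0, dtype=tf.int64, trainable=False, name='global_step')

with tf.train.MonitoredTrainingSession(
    checkpoint_dir=CHECKPOINT, save_summaries_secs=0) as sess:
  output_string, inp_text, out_text, att_mats = (
      visualizer.get_vis_data_from_string(sess, 'I have two dogs.'))
  print(output_string)

# In a notebook cell, attention.show(inp_text, out_text, *att_mats) would then
# render the interactive D3 visualization for these attention matrices.
```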